diff --git a/.changes/1006.json b/.changes/1006.json new file mode 100644 index 000000000..3ae8fbca8 --- /dev/null +++ b/.changes/1006.json @@ -0,0 +1,5 @@ +{ + "type": "added", + "description": "add CentOS7-compatible target for aarch64", + "issues": [528] +} diff --git a/.changes/1018.json b/.changes/1018.json new file mode 100644 index 000000000..0b99a1fa3 --- /dev/null +++ b/.changes/1018.json @@ -0,0 +1,4 @@ +{ + "description": "deny installation of armhf debian packages for the arm-unknown-linux-gnueabihf target.", + "type": "fixed" +} diff --git a/.changes/1023.json b/.changes/1023.json new file mode 100644 index 000000000..c4aeb55f4 --- /dev/null +++ b/.changes/1023.json @@ -0,0 +1,4 @@ +{ + "description": "support different Android NDK, API, and Android versions using Docker build args.", + "type": "added" +} diff --git a/.changes/1024.json b/.changes/1024.json new file mode 100644 index 000000000..599064c0e --- /dev/null +++ b/.changes/1024.json @@ -0,0 +1,5 @@ +{ + "description": "`rust-std` is no longer downloaded when using `build-std = true`", + "type": "fixed", + "breaking": false +} \ No newline at end of file diff --git a/.changes/1028-1132.json b/.changes/1028-1132.json new file mode 100644 index 000000000..61d1aaac9 --- /dev/null +++ b/.changes/1028-1132.json @@ -0,0 +1,33 @@ +[ + { + "description": "link to libgcc for armv5te-unknown-linux-musleabi.", + "type": "fixed" + }, + { + "description": "add C++ support for FreeBSD targets.", + "type": "added" + }, + { + "description": "test dynamic library support for Android targets in CI.", + "type": "internal" + }, + { + "description": "test partial C++ support for mips64el-unknown-linux-muslabi64 in CI.", + "type": "internal" + }, + { + "description": "convert mips64el-unknown-linux-muslabi64 to a hard-float toolchain to match the rust target.", + "type": "changed", + "breaking": true + }, + { + "description": "convert mips64el-unknown-linux-muslabi64 to use the mips64r2 architecture, identical to the rust target.", + "type": "changed", + "breaking": true + }, + { + "description": "convert mips-unknown-linux-musl and mipsel-unknown-linux-musl to use the mips32r2 architecture, identical to the rust targets.", + "type": "changed", + "breaking": true + } +] diff --git a/.changes/1032.json b/.changes/1032.json new file mode 100644 index 000000000..c046d37ec --- /dev/null +++ b/.changes/1032.json @@ -0,0 +1,4 @@ +{ + "description": "allow disabling buildkit for container engines lacking buildkit support.", + "type": "added" +} diff --git a/.changes/1033.json b/.changes/1033.json new file mode 100644 index 000000000..435d43df6 --- /dev/null +++ b/.changes/1033.json @@ -0,0 +1,5 @@ +{ + "description": "fix --cache-from using podman.", + "type": "fixed", + "issues": [1031] +} diff --git a/.changes/1038-1220-1482.json b/.changes/1038-1220-1482.json new file mode 100644 index 000000000..0a59298a8 --- /dev/null +++ b/.changes/1038-1220-1482.json @@ -0,0 +1,6 @@ +[ + { + "description": "bump MSRV to 1.77.2", + "type": "internal" + } +] diff --git a/.changes/1039.json b/.changes/1039.json new file mode 100644 index 000000000..9053daaba --- /dev/null +++ b/.changes/1039.json @@ -0,0 +1,4 @@ +{ + "description": "support overlay and fuse-overlayfs storage drivers", + "type": "added" +} diff --git a/.changes/1049-1142.json b/.changes/1049-1142.json new file mode 100644 index 000000000..b2e4d7a9d --- /dev/null +++ b/.changes/1049-1142.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "stop parsing arguments to `cross run` after 
`--`", + "issues": [1048, 1141] +} diff --git a/.changes/1054.json b/.changes/1054.json new file mode 100644 index 000000000..18394ca0f --- /dev/null +++ b/.changes/1054.json @@ -0,0 +1,4 @@ +{ + "type": "internal", + "description": "change the unique container ID to be unique based off the toolchain and system time." +} diff --git a/.changes/1057.json b/.changes/1057.json new file mode 100644 index 000000000..285dcb971 --- /dev/null +++ b/.changes/1057.json @@ -0,0 +1,5 @@ +{ + "description": "fix mount paths outside of the workspace mount directory on Windows and those provided as a WSL path.", + "type": "fixed", + "issues": [1145, 1156] +} diff --git a/.changes/1063-1125-1134.json b/.changes/1063-1125-1134.json new file mode 100644 index 000000000..9deadf9d2 --- /dev/null +++ b/.changes/1063-1125-1134.json @@ -0,0 +1,18 @@ +[ + { + "type": "changed", + "description": "changed musl targets to use static-pie linkage by default, consistent with Alpine.", + "breaking": true + }, + { + "type": "fixed", + "description": "fixed C++ support for musl targets.", + "issues": [902], + "breaking": true + }, + { + "type": "fixed", + "description": "use a linker script for musl libstdc++ to ensure the archive links to libc, libm, and libgcc as needed.", + "issues": [1124] + } +] diff --git a/.changes/1073.json b/.changes/1073.json new file mode 100644 index 000000000..8cd67b98b --- /dev/null +++ b/.changes/1073.json @@ -0,0 +1,4 @@ +{ + "type": "added", + "description": "passthrough cross environment variables by default" +} diff --git a/.changes/1078.json b/.changes/1078.json new file mode 100644 index 000000000..bfecb4ff4 --- /dev/null +++ b/.changes/1078.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "Fix custom image names for images with a trailing '-' character.\ncustom images in packages with the `^[^0-9-].*([^A-Za-z_]*-)|(-[^A-Za-z_]*)$` package name pattern are now supported.", + "issues": [1077] +} diff --git a/.changes/1084.json b/.changes/1084.json new file mode 100644 index 000000000..39d80eadb --- /dev/null +++ b/.changes/1084.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "fixed remote docker data volume paths on windows", + "issues": [1081] +} diff --git a/.changes/1085.json b/.changes/1085.json new file mode 100644 index 000000000..373c30613 --- /dev/null +++ b/.changes/1085.json @@ -0,0 +1,4 @@ +{ + "type": "added", + "description": "support custom toolchains without rustup." 
+} diff --git a/.changes/1105.json b/.changes/1105.json new file mode 100644 index 000000000..36ab770bb --- /dev/null +++ b/.changes/1105.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "explicitly prefer `-ar` to `-gcc-ar`.", + "issues": [1100] +} diff --git a/.changes/1112.json b/.changes/1112.json new file mode 100644 index 000000000..ea4fa3525 --- /dev/null +++ b/.changes/1112.json @@ -0,0 +1,11 @@ +[ + { + "description": "fixed CMake support for Android and newlib targets.", + "type": "fixed", + "issues": [1110] + }, + { + "description": "added C++ support for newlib targets.", + "type": "added" + } +] diff --git a/.changes/1118.json b/.changes/1118.json new file mode 100644 index 000000000..46b59163a --- /dev/null +++ b/.changes/1118.json @@ -0,0 +1,5 @@ +{ + "type": "added", + "description": "added ARMv8-M newlib targets.", + "issues": [1116] +} diff --git a/.changes/1123.json b/.changes/1123.json new file mode 100644 index 000000000..6c8e5d309 --- /dev/null +++ b/.changes/1123.json @@ -0,0 +1,6 @@ +{ + "description": "support external C/C++ dependencies using C11/C++11 threads for MinGW targets by switching linkers from `*-gcc` to `*gcc-posix`.", + "type": "added", + "issues": [1122], + "breaking": true +} diff --git a/.changes/1138.json b/.changes/1138.json new file mode 100644 index 000000000..14e43c91c --- /dev/null +++ b/.changes/1138.json @@ -0,0 +1,5 @@ +{ + "description": "explicitly specify ar for all toolchains", + "type": "changed", + "issues": [1137] +} diff --git a/.changes/1159.json b/.changes/1159.json new file mode 100644 index 000000000..123fb4910 --- /dev/null +++ b/.changes/1159.json @@ -0,0 +1,4 @@ +{ + "type": "changed", + "description": "use `[workspace.metadata.cross]` as an alternative to `Cross.toml`" +} diff --git a/.changes/1160.json b/.changes/1160.json new file mode 100644 index 000000000..147b9d19d --- /dev/null +++ b/.changes/1160.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "don't warn when toolchain version is explicitly provided.", + "issues": [1148] +} diff --git a/.changes/1164.json b/.changes/1164.json new file mode 100644 index 000000000..df4b74215 --- /dev/null +++ b/.changes/1164.json @@ -0,0 +1,5 @@ +{ + "type": "internal", + "description": "add fallback mirrors for FreeBSD packages.", + "issues": [1162] +} diff --git a/.changes/1166.json b/.changes/1166.json new file mode 100644 index 000000000..44eeb432e --- /dev/null +++ b/.changes/1166.json @@ -0,0 +1,4 @@ +{ + "description": "freebsd: include memstat in build image to fix build with libc 0.2.138 and up.", + "type": "fixed" +} diff --git a/.changes/1183.json b/.changes/1183.json new file mode 100644 index 000000000..1613ea53a --- /dev/null +++ b/.changes/1183.json @@ -0,0 +1,5 @@ +{ + "description": "resolve issue when using `pre-build` and `image.toolchain` in `Cargo.toml`", + "type": "fixed", + "issues": [1182] +} diff --git a/.changes/1199.json b/.changes/1199.json new file mode 100644 index 000000000..b40c00590 --- /dev/null +++ b/.changes/1199.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "use current target_dir path when copying build artifacts back", + "issues": [1103] +} diff --git a/.changes/1207.json b/.changes/1207.json new file mode 100644 index 000000000..feaaec72c --- /dev/null +++ b/.changes/1207.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "properly copy directories when using `CROSS_REMOTE`", + "issues": [1206] +} diff --git a/.changes/1220.json b/.changes/1220.json new file mode 100644 index 000000000..78b4ea1ce 
--- /dev/null +++ b/.changes/1220.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "don't pass `--target-dir` in remote context when it's not needed", + "issues": [1218] +} diff --git a/.changes/1246.json b/.changes/1246.json new file mode 100644 index 000000000..3e6b50bd4 --- /dev/null +++ b/.changes/1246.json @@ -0,0 +1,4 @@ +{ + "description": "fix support for bare metal targets other than thumb", + "type": "fixed" +} diff --git a/.changes/1248.json b/.changes/1248.json new file mode 100644 index 000000000..fdc3f13de --- /dev/null +++ b/.changes/1248.json @@ -0,0 +1,4 @@ +{ + "description": "customize `--cache-from` and `--cache-to` options for `build-docker-image`", + "type": "internal" +} diff --git a/.changes/1260.json b/.changes/1260.json new file mode 100644 index 000000000..3e29d43d8 --- /dev/null +++ b/.changes/1260.json @@ -0,0 +1,6 @@ +{ + "description": "fix podman bind mounts on macOS by removing SELinux labels.", + "issues": [756], + "type": "fixed", + "breaking": false +} diff --git a/.changes/1265.json b/.changes/1265.json new file mode 100644 index 000000000..b8f138d3a --- /dev/null +++ b/.changes/1265.json @@ -0,0 +1,4 @@ +{ + "description": "replace `ctrlc` signal handler with `signal-hook`", + "type": "changed" +} diff --git a/.changes/1271.json b/.changes/1271.json new file mode 100644 index 000000000..034bb1fa3 --- /dev/null +++ b/.changes/1271.json @@ -0,0 +1,5 @@ +{ + "description": "add aarch64-unknown-freebsd image.", + "type": "added", + "breaking": false +} diff --git a/.changes/1280.json b/.changes/1280.json new file mode 100644 index 000000000..e06cce5d5 --- /dev/null +++ b/.changes/1280.json @@ -0,0 +1,6 @@ +{ + "description": "Allow running arbitrary commands in containers using `cross-util run ...`", + "issues": [1266], + "type": "added", + "breaking": false +} \ No newline at end of file diff --git a/.changes/1317.json b/.changes/1317.json new file mode 100644 index 000000000..2103844f3 --- /dev/null +++ b/.changes/1317.json @@ -0,0 +1,6 @@ +{ + "description": "Re-add PKG_CONFIG_PATH for the arm-unknown-linux-gnueabihf target.", + "issues": [1316], + "type": "fixed", + "breaking": false +} diff --git a/.changes/1325.json b/.changes/1325.json new file mode 100644 index 000000000..5c00fa8ad --- /dev/null +++ b/.changes/1325.json @@ -0,0 +1,5 @@ +{ + "description": "create CACHEDIR.TAG during custom docker build", + "type": "fixed", + "issues": [1324] +} diff --git a/.changes/1330-1349.json b/.changes/1330-1349.json new file mode 100644 index 000000000..d966462c4 --- /dev/null +++ b/.changes/1330-1349.json @@ -0,0 +1,4 @@ +{ + "description": "update cargo-zigbuild from 0.17.2 to 0.17.3", + "type": "changed" +} diff --git a/.changes/1333.json b/.changes/1333.json new file mode 100644 index 000000000..d9353285e --- /dev/null +++ b/.changes/1333.json @@ -0,0 +1,4 @@ +{ + "description": "set arm-unknown-linux-gnueabihf to use glibc 2.31", + "type": "changed" +} diff --git a/.changes/1340.json b/.changes/1340.json new file mode 100644 index 000000000..3c1fc6dfa --- /dev/null +++ b/.changes/1340.json @@ -0,0 +1,4 @@ +{ + "description": "don't error when a non-provided target is used with only a dockerfile specified", + "type": "fixed" +} diff --git a/.changes/1342.json b/.changes/1342.json new file mode 100644 index 000000000..93b0faf08 --- /dev/null +++ b/.changes/1342.json @@ -0,0 +1,10 @@ +[ + { + "description": "fix `--list` showing cross commands for the host", + "type": "fixed" + }, + { + "description": "add `rustdoc` as a supported cargo 
subcommand", + "type": "added" + } +] diff --git a/.changes/1346.json b/.changes/1346.json new file mode 100644 index 000000000..5b9519f67 --- /dev/null +++ b/.changes/1346.json @@ -0,0 +1,4 @@ +{ + "description": "bump musl to 1.2.3, like done in rust 1.71", + "type": "changed" +} diff --git a/.changes/1348.json b/.changes/1348.json new file mode 100644 index 000000000..c4860057a --- /dev/null +++ b/.changes/1348.json @@ -0,0 +1,4 @@ +{ + "description": "add `libexecinfo.so` in netbsd", + "type": "added" +} diff --git a/.changes/1373.json b/.changes/1373.json new file mode 100644 index 000000000..3f060dabd --- /dev/null +++ b/.changes/1373.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "fix wineboot silently failing on qemu", + "issues": [1372] +} diff --git a/.changes/1374-1490.json b/.changes/1374-1490.json new file mode 100644 index 000000000..bfab11033 --- /dev/null +++ b/.changes/1374-1490.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "update wine to 9.0.0.0", + "issues": [1372] +} diff --git a/.changes/1385.json b/.changes/1385.json new file mode 100644 index 000000000..deb6f0f16 --- /dev/null +++ b/.changes/1385.json @@ -0,0 +1,6 @@ +{ + "description": "Set PKG_CONFIG_PATH in the FreeBSD Dockerfile.", + "issues": [1384], + "type": "fixed" +} + diff --git a/.changes/1391.json b/.changes/1391.json new file mode 100644 index 000000000..415c52939 --- /dev/null +++ b/.changes/1391.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "Add `-idirafter/usr/include` to bindgen's clang invocation on GNU/Linux", + "issues": [1389] +} diff --git a/.changes/1399.json b/.changes/1399.json new file mode 100644 index 000000000..e14349a34 --- /dev/null +++ b/.changes/1399.json @@ -0,0 +1,5 @@ +{ + "description": "fix creating `initrd` when using debian `ports` mirror and compressed kernel modules", + "issues": [1399], + "type": "fixed" +} diff --git a/.changes/1403-1411.json b/.changes/1403-1411.json new file mode 100644 index 000000000..f14532e8a --- /dev/null +++ b/.changes/1403-1411.json @@ -0,0 +1,9 @@ +[{ + "type": "fixed", + "description": "switch to freebsd 13.2 following 12.4 EoL", + "issues": [1390] +}, + { + "type": "added", + "description": "Added helper script `/freebsd-install-package.sh` and friends to install freebsd packages, see [wiki](https://github.com/cross-rs/cross/wiki/FAQ#custom-images)" + }] diff --git a/.changes/1420.json b/.changes/1420.json new file mode 100644 index 000000000..befcd0e58 --- /dev/null +++ b/.changes/1420.json @@ -0,0 +1,4 @@ +{ + "description": "set the required environment variables in the runner", + "type": "fixed" +} diff --git a/.changes/1458.json b/.changes/1458.json new file mode 100644 index 000000000..46b948118 --- /dev/null +++ b/.changes/1458.json @@ -0,0 +1,6 @@ +{ + "description": "add gfortran for target *-gnu*, *-musl*, *-freebsd*, *-solaris*, *-dragonfly*, *-illumos*, *-netbsd*", + "issues": [1457], + "type": "added" + +} diff --git a/.changes/1465.json b/.changes/1465.json new file mode 100644 index 000000000..0b5c551f7 --- /dev/null +++ b/.changes/1465.json @@ -0,0 +1,5 @@ +{ + "type": "added", + "description": "add loongarch64 support", + "issues": [1404] +} diff --git a/.changes/1466.json b/.changes/1466.json new file mode 100644 index 000000000..a93ff497e --- /dev/null +++ b/.changes/1466.json @@ -0,0 +1,5 @@ +{ + "type": "added", + "description": "Upgrade qemu and integrate qemu-user runners for loongarch64-linux-gnu", + "issues": [1467] +} diff --git a/.changes/1468.json b/.changes/1468.json new 
file mode 100644 index 000000000..27f54aeef --- /dev/null +++ b/.changes/1468.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "use defconfig for ct-ng, minimizing the config", + "issues": [1335] +} diff --git a/.changes/1481.json b/.changes/1481.json new file mode 100644 index 000000000..96649d0d7 --- /dev/null +++ b/.changes/1481.json @@ -0,0 +1,4 @@ +{ + "description": "allow pass-through environment variables to contain numbers", + "type": "fixed" +} \ No newline at end of file diff --git a/.changes/1483.json b/.changes/1483.json new file mode 100644 index 000000000..8e55ce3d5 --- /dev/null +++ b/.changes/1483.json @@ -0,0 +1,4 @@ +{ + "description": "Fix paths when using `CROSS_CONTAINER_IN_CONTAINER`", + "type": "fixed" +} \ No newline at end of file diff --git a/.changes/1485.json b/.changes/1485.json new file mode 100644 index 000000000..3b0038013 --- /dev/null +++ b/.changes/1485.json @@ -0,0 +1,5 @@ +{ + "description": "Use `/proc/self/mountinfo` as a fallback for `docker inspect` if using `HOSTNAME` fails", + "issues": [1321], + "type": "changed" +} diff --git a/.changes/1488.json b/.changes/1488.json new file mode 100644 index 000000000..4853f5178 --- /dev/null +++ b/.changes/1488.json @@ -0,0 +1,6 @@ +{ + "description": "Allow `build-std` to take an array of crate names", + "issues": [896], + "type": "changed", + "breaking": true +} diff --git a/.changes/1489.json b/.changes/1489.json new file mode 100644 index 000000000..a34bae916 --- /dev/null +++ b/.changes/1489.json @@ -0,0 +1,4 @@ +{ + "description": "Simplify config internals", + "type": "internal" +} diff --git a/.changes/1491.json b/.changes/1491.json new file mode 100644 index 000000000..b0cd7b98e --- /dev/null +++ b/.changes/1491.json @@ -0,0 +1,5 @@ +{ + "description": "Allow specifying only a tag or subtarget for images in config", + "issues": [1169], + "type": "changed" +} diff --git a/.changes/1494.json b/.changes/1494.json new file mode 100644 index 000000000..6f24c706f --- /dev/null +++ b/.changes/1494.json @@ -0,0 +1,5 @@ +{ + "description": "Fix `zig.image` precedence", + "type": "fixed", + "breaking": true +} diff --git a/.changes/1495.json b/.changes/1495.json new file mode 100644 index 000000000..f5c10bb5c --- /dev/null +++ b/.changes/1495.json @@ -0,0 +1,6 @@ +{ + "description": "Fix `*-solaris` targets", + "issues": [1424], + "type": "fixed", + "breaking": true +} diff --git a/.changes/1525.json b/.changes/1525.json new file mode 100644 index 000000000..c912f3852 --- /dev/null +++ b/.changes/1525.json @@ -0,0 +1,5 @@ +{ + "description": "Fix riscv64gc-unknown-linux-gnu target", + "issues": [1423], + "type": "fixed" +} diff --git a/.changes/554.json b/.changes/554.json new file mode 100644 index 000000000..09b940da4 --- /dev/null +++ b/.changes/554.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "add the `--force-non-host` flag for newer rustup versions", + "issues": [536] +} diff --git a/.changes/589.json b/.changes/589.json new file mode 100644 index 000000000..fb6605f20 --- /dev/null +++ b/.changes/589.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "Ensure podman builds use non-interactive package installs.", + "issues": [589] +} diff --git a/.changes/591-1095.json b/.changes/591-1095.json new file mode 100644 index 000000000..567ea5254 --- /dev/null +++ b/.changes/591-1095.json @@ -0,0 +1,24 @@ +[ + { + "description": "update Ubuntu images to 20.04 LTS.", + "type": "changed", + "breaking": true, + "issues": [417, 517, 556, 616] + }, + { + "description": "remove 
Linux image from `mips-unknown-linux-gnu`.", + "type": "removed", + "breaking": true + }, + { + "type": "changed", + "description": "upgraded most linux images to use a 5.x kernel instead of a 4.x kernel.", + "breaking": true + }, + { + "type": "changed", + "description": "updated powerpc64, riscv64, and sparc64 `*-linux-gnu` images to use a 6.x kernel instead of a 4.x kernel.", + "breaking": true, + "issues": [1094] + } +] diff --git a/.changes/661.json b/.changes/661.json new file mode 100644 index 000000000..967fb34d9 --- /dev/null +++ b/.changes/661.json @@ -0,0 +1,5 @@ +{ + "description": "When cross has given a warning, it will now quit instead of continuing with `cargo` when run in CI or with `CROSS_NO_WARNINGS=1`", + "breaking": true, + "type": "changed" +} diff --git a/.changes/817.json b/.changes/817.json new file mode 100644 index 000000000..aa182706b --- /dev/null +++ b/.changes/817.json @@ -0,0 +1,16 @@ +[ + { + "description": "Images can now specify a certain toolchain via `target.{target}.image.toolchain`", + "breaking": true, + "type": "added" + }, + { + "description": "made `cross +channel` parsing more compliant to parsing a toolchain", + "type": "fixed" + }, + { + "description": "`pre-build` and `dockerfile` now uses buildx/buildkit", + "breaking": true, + "type": "changed" + } +] diff --git a/.changes/880-1111.json b/.changes/880-1111.json new file mode 100644 index 000000000..27d1911bf --- /dev/null +++ b/.changes/880-1111.json @@ -0,0 +1,5 @@ +{ + "description": "added a zig-based image (v0.10.0), allowing multiple targets to be built from the same image, using cargo-zigbuild (v0.14.1).", + "type": "added", + "issues": [860, 1109] +} diff --git a/.changes/918.json b/.changes/918.json new file mode 100644 index 000000000..4316604e1 --- /dev/null +++ b/.changes/918.json @@ -0,0 +1,5 @@ +{ + "description": "use JSON-based files to autogenerate CHANGELOG.md", + "issues": [662], + "type": "internal" +} diff --git a/.changes/931.json b/.changes/931.json new file mode 100644 index 000000000..50d3d5065 --- /dev/null +++ b/.changes/931.json @@ -0,0 +1,4 @@ +{ + "description": "deny installation of debian packages that conflict with our cross-compiler toolchains.", + "type": "fixed" +} diff --git a/.changes/934.json b/.changes/934.json new file mode 100644 index 000000000..caf25ade1 --- /dev/null +++ b/.changes/934.json @@ -0,0 +1,5 @@ +{ + "type": "added", + "description": "add support for pre-build hooks with remote docker", + "issues": [928] +} diff --git a/.changes/937.json b/.changes/937.json new file mode 100644 index 000000000..205a278f5 --- /dev/null +++ b/.changes/937.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "support Windows-style and absolute manifest paths", + "issues": [935] +} diff --git a/.changes/939.json b/.changes/939.json new file mode 100644 index 000000000..79aa0e2df --- /dev/null +++ b/.changes/939.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "Remove `/project` mounting, instead always mount in the same path as on the host", + "issues": [938] +} diff --git a/.changes/942.json b/.changes/942.json new file mode 100644 index 000000000..0144174da --- /dev/null +++ b/.changes/942.json @@ -0,0 +1,15 @@ +[ + { + "description": "use non-canonical paths for mount locations.", + "type": "changed", + "issues": [920] + }, + { + "description": "fixed DeviceNS drive parsing in creating WSL-style paths on windows.", + "type": "fixed" + }, + { + "description": "fixed the environment variable name for mounted volumes.", + "type": "fixed" 
+ } +] diff --git a/.changes/945.json b/.changes/945.json new file mode 100644 index 000000000..b295c170a --- /dev/null +++ b/.changes/945.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "fix pre-build hook image naming with podman", + "issues": [944] +} diff --git a/.changes/947.json b/.changes/947.json new file mode 100644 index 000000000..ec5243eb1 --- /dev/null +++ b/.changes/947.json @@ -0,0 +1,12 @@ +[ + { + "type": "internal", + "description": "resolve symlinks to cargo and xargo home before mounting", + "issues": [373] + }, + { + "type": "fixed", + "description": "mount cargo, xargo, and the sysroot at the same path as on the host to avoid unnecessary recompilation.", + "issues": [551] + } +] diff --git a/.changes/952.json b/.changes/952.json new file mode 100644 index 000000000..3cea00897 --- /dev/null +++ b/.changes/952.json @@ -0,0 +1,4 @@ +{ + "description": "run non-x86 binaries natively if on a compatible host.", + "type": "added" +} diff --git a/.changes/955.json b/.changes/955.json new file mode 100644 index 000000000..d57dbb200 --- /dev/null +++ b/.changes/955.json @@ -0,0 +1,5 @@ +{ + "description": "Update QEMU to version 7.0.0 for Ubuntu targets", + "type": "changed", + "breaking": false +} diff --git a/.changes/956.json b/.changes/956.json new file mode 100644 index 000000000..a7cdfff6e --- /dev/null +++ b/.changes/956.json @@ -0,0 +1,5 @@ +{ + "description": "use Wine from Ubuntu 20.04 WineHQ repo", + "type": "changed", + "breaking": false +} diff --git a/.changes/957-1393.json b/.changes/957-1393.json new file mode 100644 index 000000000..4c485be14 --- /dev/null +++ b/.changes/957-1393.json @@ -0,0 +1,5 @@ +{ + "description": "use latest Debian kernel images", + "type": "changed", + "breaking": false +} diff --git a/.changes/962.json b/.changes/962.json new file mode 100644 index 000000000..cbff84ca1 --- /dev/null +++ b/.changes/962.json @@ -0,0 +1,5 @@ +{ + "description": "fix SELinux labels to allow use in multiple containers and/or the host filesystem.", + "type": "fixed", + "issues": [961] +} diff --git a/.changes/964.json b/.changes/964.json new file mode 100644 index 000000000..721c48530 --- /dev/null +++ b/.changes/964.json @@ -0,0 +1,4 @@ +{ + "type": "fixed", + "description": "don't keep stdin open when running containers." +} diff --git a/.changes/968.json b/.changes/968.json new file mode 100644 index 000000000..1c1ebd1cf --- /dev/null +++ b/.changes/968.json @@ -0,0 +1,4 @@ +{ + "description": "only print rustup --verbose if `-vv` or `CROSS_VERBOSE=1` is used", + "type": "fixed" +} diff --git a/.changes/974.json b/.changes/974.json new file mode 100644 index 000000000..03f37761c --- /dev/null +++ b/.changes/974.json @@ -0,0 +1,11 @@ +[ + { + "description": "change `mips64-unknown-linux-muslabi64` target to a hard-float target.", + "type": "fixed", + "issues": [906] + }, + { + "description": "build static libgcc and link to static libgcc for `mips64-unknown-linux-muslabi64` target.", + "type": "fixed" + } +] diff --git a/.changes/982.json b/.changes/982.json new file mode 100644 index 000000000..f6979d803 --- /dev/null +++ b/.changes/982.json @@ -0,0 +1,4 @@ +{ + "type": "internal", + "description": "use generic dockerfiles for when the toolchain and image platform match." 
+} diff --git a/.changes/984.json b/.changes/984.json new file mode 100644 index 000000000..e17541fc2 --- /dev/null +++ b/.changes/984.json @@ -0,0 +1,4 @@ +{ + "description": "added linux images for `riscv64gc-unknown-linux-gnu` to allow full system emulation.", + "type": "added" +} diff --git a/.changes/987.json b/.changes/987.json new file mode 100644 index 000000000..b547ca57f --- /dev/null +++ b/.changes/987.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "link to libgcc for `armv5te-unknown-linux-musleabi` to fix missing `__sync_X_and_fetch` builtins.", + "issues": [367] +} diff --git a/.changes/989.json b/.changes/989.json new file mode 100644 index 000000000..4dfc03588 --- /dev/null +++ b/.changes/989.json @@ -0,0 +1,5 @@ +{ + "type": "changed", + "description": "add default nix_store value to solve nix-related issues", + "issues": [260] +} diff --git a/.changes/994.json b/.changes/994.json new file mode 100644 index 000000000..8a98911ec --- /dev/null +++ b/.changes/994.json @@ -0,0 +1,5 @@ +{ + "type": "fixed", + "description": "fixed wrong path used for the target when pre-building in container-in-container mode", + "issues": [993] +} diff --git a/.changes/README.md b/.changes/README.md new file mode 100644 index 000000000..9f619d02d --- /dev/null +++ b/.changes/README.md @@ -0,0 +1,46 @@ +# Changes + +This directory stores changes used to automatically generate the changelog, avoiding merge conflicts. Files should be JSON, in the following format: + +```json +{ + "description": "single-line description to add to the CHANGELOG.", + "issues": [894], + "type": "added", + "breaking": false +} +``` + +Valid types are: +- added (Added) +- changed (Changed) +- fixed (Fixed) +- removed (Removed) +- internal (Internal) + +`breaking` is optional and defaults to false. If `breaking` is present for any active change, a `BREAKING:` notice will be added at the start of the entry. `issues` is also optional; it is an array of issues fixed by the PR, defaults to an empty array, and is currently unused. + +Files should be named `${pr}.json`. The PR number is optional; if there is no associated PR, use an issue number instead, in the `_${issue}.json` format. Multiple PRs per entry are also supported, using the `${pr1}-${pr2}-(...).json` format. + +If multiple changes are made in a single PR, you can also pass an array of entries: + +```json +[ + { + "description": "this is one added entry.", + "issues": [630], + "type": "added" + }, + { + "description": "this is another added entry.", + "issues": [642], + "type": "added" + }, + { + "description": "this is a fixed entry that has no attached issue.", + "type": "fixed" + } +] +``` + +See [template](/.changes/template) for sample object and array-based changes. 
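As a minimal sketch of the naming scheme (all numbers here are hypothetical), a single fix that landed through PRs 1234 and 1235 and closes issue 1200 would live in `.changes/1234-1235.json`:

```json
{
  "description": "hypothetical fix that spans two PRs.",
  "issues": [1200],
  "type": "fixed"
}
```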
diff --git a/.changes/template/940.json b/.changes/template/940.json new file mode 100644 index 000000000..d47669e8e --- /dev/null +++ b/.changes/template/940.json @@ -0,0 +1,22 @@ +[ + { + "description": "this is one added entry.", + "issues": [630], + "type": "added" + }, + { + "description": "this is another added entry.", + "issues": [642], + "type": "added" + }, + { + "description": "this is a fixed entry that has no attached issue.", + "type": "fixed" + }, + { + "description": "this is a breaking change.", + "issues": [679], + "breaking": true, + "type": "changed" + } +] diff --git a/.changes/template/978.json b/.changes/template/978.json new file mode 100644 index 000000000..55c574ba2 --- /dev/null +++ b/.changes/template/978.json @@ -0,0 +1,5 @@ +{ + "description": "sample description for a PR adding one CHANGELOG entry.", + "issues": [437], + "type": "fixed" +} diff --git a/.changes/template/979-981.json b/.changes/template/979-981.json new file mode 100644 index 000000000..de7b0a90f --- /dev/null +++ b/.changes/template/979-981.json @@ -0,0 +1,5 @@ +{ + "description": "this has 2 PRs associated.", + "issues": [441], + "type": "added" +} diff --git a/.changes/template/CHANGELOG.md b/.changes/template/CHANGELOG.md new file mode 100644 index 000000000..d31efc63c --- /dev/null +++ b/.changes/template/CHANGELOG.md @@ -0,0 +1,375 @@ +# Change Log + +This is a template changelog. This represents an older state of this repository, used to test parsing/formatting. + + + +## [Unreleased] - ReleaseDate + +### Added + +- #905 - added `qemu-runner` for musl images, allowing use of native or emulated runners. +- #905 - added qemu emulation to `i586-unknown-linux-gnu`, `i686-unknown-linux-musl`, and `i586-unknown-linux-musl`, so they can run on an `x86` CPU, rather than an `x86_64` CPU. +- #900 - add the option to skip copying build artifacts back to host when using remote cross via `CROSS_REMOTE_SKIP_BUILD_ARTIFACTS`. +- #891 - support custom user namespace overrides by setting the `CROSS_CONTAINER_USER_NAMESPACE` environment variable. +- #890 - support rootless docker via the `CROSS_ROOTLESS_CONTAINER_ENGINE` environment variable. +- #878 - added an image `ghcr.io/cross-rs/cross` containing cross. + +### Changed + +- #869 - ensure cargo configuration environment variable flags are passed to the docker container. +- #859 - added color diagnostic output and error messages. + +### Fixed + +- #905 - fixed running dynamically-linked libraries for all musl targets except `x86_64-unknown-linux-musl`. +- #904 - ensure `cargo metadata` works by using the same channel. +- #904 - fixed the path for workspace volumes and passthrough volumes with docker-in-docker. +- #898 - fix the path to the mount root with docker-in-docker if mounting volumes. +- #897 - ensure `target.$(...)` config options override `build` ones when parsing strings and vecs. +- #895 - convert filenames in docker tags to ASCII lowercase and ignore invalid characters +- #885 - handle symlinks when using remote docker. +- #868 - ignore the `CARGO` environment variable. +- #867 - fixed parsing of `build.env.passthrough` config values. + +## [v0.2.2] - 2022-06-24 + +### Added + +- #803 - added `CROSS_CUSTOM_TOOLCHAIN` to disable automatic installation of components for use with tools like `cargo-bisect-rustc` +- #795 - added images for additional toolchains maintained by cross-rs. +- #792 - added `CROSS_CONTAINER_IN_CONTAINER` environment variable to replace `CROSS_DOCKER_IN_DOCKER`. 
+- #785 - added support for remote container engines through data volumes through setting the `CROSS_REMOTE` environment variable. also adds in utility commands to create and remove persistent data volumes. +- #782 - added `build-std` config option, which builds the rust standard library from source if enabled. +- #678 - Add optional `target.{target}.dockerfile[.file]`, `target.{target}.dockerfile.context` and `target.{target}.dockerfile.build-args` to invoke docker/podman build before using an image. +- #678 - Add `target.{target}.pre-build` config for running commands before building the image. +- #772 - added `CROSS_CONTAINER_OPTS` environment variable to replace `DOCKER_OPTS`. +- #767, #788 - added the `cross-util` and `xtask` commands. +- #842 - Add `Cargo.toml` as configuration source +- #745 - added `thumbv7neon-*` targets. +- #741 - added `armv7-unknown-linux-gnueabi` and `armv7-unknown-linux-musleabi` targets. +- #721 - add support for running doctests on nightly if `CROSS_UNSTABLE_ENABLE_DOCTESTS=true`. +- #719 - add `--list` to known subcommands. +- #681 - Warn on unknown fields and confusable targets +- #624 - Add `build.default-target` +- #647 - Add `mips64-unknown-linux-muslabi64` and `mips64el-unknown-linux-muslabi64` support +- #543 - Added environment variables to control the UID and GID in the container +- #524 - docker: Add Nix Store volume support +- Added support for mounting volumes. +- #684 - Enable cargo workspaces to work from any path in the workspace, and make path dependencies mount seamlessly. Also added support for private SSH dependencies. + +### Changed + +- #838 - re-enabled the solaris targets. +- #807 - update Qemu to 6.1.0 on images using Ubuntu 18.04+ with python3.6+. +- #775 - forward Cargo exit code to host +- #762 - re-enabled `x86_64-unknown-dragonfly` target. +- #747 - reduced android image sizes. +- #746 - limit image permissions for android images. +- #377 - update WINE versions to 7.0. +- #734 - patch `arm-unknown-linux-gnueabihf` to build for ARMv6, and add architecture for crosstool-ng-based images. +- #709 - Update Emscripten targets to `emcc` version 3.1.10 +- #707, #708 - Set `BINDGEN_EXTRA_CLANG_ARGS` environment variable to pass sysroot to `rust-bindgen` +- #696 - bump freebsd to 12.3 +- #629 - Update Android NDK version and API version +- #497 - don't set RUSTFLAGS in aarch64-musl image +- #492 - Add cmake to FreeBSD images +- #748 - allow definitions in the environment variable passthrough + +### Fixed + +- #836 - write a `CACHEDIR.TAG` when creating the target directory, similar to `cargo`. +- #804 - allow usage of env `CARGO_BUILD_TARGET` as an alias for `CROSS_BUILD_TARGET` +- #792 - fixed container-in-container support when using podman. +- #781 - ensure `target.$(...)` config options override `build` ones. +- #771 - fix parsing of `DOCKER_OPTS`. +- #727 - add `PKG_CONFIG_PATH` to all `*-linux-gnu` images. +- #722 - boolean environment variables are evaluated as truthy or falsey. +- #720 - add android runner to preload `libc++_shared.so`. +- #725 - support `CROSS_DEBUG` and `CROSS_RUNNER` on android images. +- #714 - use host target directory when falling back to host cargo. +- #713 - convert relative target directories to absolute paths. 
+- #501 (reverted, see #764) - x86_64-linux: lower glibc version requirement to 2.17 (compatible with centos 7) +- #500 - use runner setting specified in Cross.toml +- #498 - bump linux-image version to fix CI +- Re-enabled `powerpc64-unknown-linux-gnu` image +- Re-enabled `sparc64-unknown-linux-gnu` image +- #582 - Added `libprocstat.so` to FreeBSD images +- #665 - when not using [env.volumes](https://github.com/cross-rs/cross#mounting-volumes-into-the-build-environment), mount project in /project +- #494 - Parse Cargo's --manifest-path option to determine mounted docker root + +### Removed + +- #718 - remove deb subcommand. + +### Internal + +- #856 - remove use of external wslpath and create internal helper that properly handles UNC paths. +- #828 - assume paths are Unicode and provide better error messages for path encoding errors. +- #787 - add installer for git hooks. +- #786, #791 - Migrate build script to rust: `cargo build-docker-image $TARGET` +- #730 - make FreeBSD builds more resilient. +- #670 - Use serde for deserialization of Cross.toml +- Change rust edition to 2021 and bump MSRV for the cross binary to 1.58.1 +- #654 - Use color-eyre for error reporting +- #658 - Upgrade dependencies +- #652 - Allow trying individual targets via bors. +- #650 - Improve Docker caching. +- #609 - Switch to Github Actions and GHCR. +- #588 - fix ci: bump openssl version in freebsd again +- #552 - Added CHANGELOG.md automation +- #534 - fix image builds with update of dependencies +- #502 - fix ci: bump openssl version in freebsd +- #489 - Add support for more hosts and simplify/unify host support checks +- #477 - Fix Docker/Podman links in README +- #476 - Use Rustlang mirror for Sabotage linux tarballs +- Bump nix dependency to `0.22.1` +- Bump musl version to 1.1.24. + +## [v0.2.1] - 2020-06-30 + +- Disabled `powerpc64-unknown-linux-gnu` image. +- Disabled `sparc64-unknown-linux-gnu` image. +- Disabled `x86_64-unknown-dragonfly` image. +- Removed CI testing for `i686-apple-darwin`. + +## [v0.2.0] - 2020-02-22 + +- Removed OpenSSL from all images. +- Added support for Podman. +- Bumped all images to at least Ubuntu 16.04. + +## [v0.1.16] - 2019-09-17 + +- Bump OpenSSL version to 1.0.2t. +- Re-enabled `asmjs-unknown-emscripten` target. +- Default to `native` runner instead of `qemu-user` for certain targets. + +## [v0.1.15] - 2019-09-04 + +- Images are now hosted at https://hub.docker.com/r/rustembedded/cross. +- Bump OpenSSL version to 1.0.2p. +- Bump musl version to 1.1.20. +- Bump Ubuntu to 18.04 for all musl targets. +- Bump gcc version to 6.3.0 for all musl targets. +- OpenSSL support for the `arm-unknown-linux-musleabi` target. +- OpenSSL support for the `armv7-unknown-linux-musleabihf` target. +- Build and test support for `aarch64-unknown-linux-musl`, `arm-unknown-linux-musleabihf`, + `armv5te-unknown-linux-musleabi`, `i586-unknown-linux-musl`, `mips-unknown-linux-musl`, + and `mipsel-unknown-linux-musl` targets. + +## [v0.1.14] - 2017-11-22 + +### Added + +- Support for the `i586-unknown-linux-gnu` target. + +### Changed + +- Downgraded the Solaris toolchains from 2.11 to 2.10 to make the binaries produced by Cross more + compatible (this version matches what rust-lang/rust is using). + +## [v0.1.13] - 2017-11-08 + +### Added + +- Support for the custom [`deb`] subcommand. + +[`deb`]: https://github.com/mmstick/cargo-deb + +- Partial `test` / `run` support for android targets. 
Using the android API via `cross run` / `cross test` is _not_ supported because Cross is using QEMU instead of the official Android emulator. + +- Partial support for the `sparcv9-sun-solaris` and `x86_64-sun-solaris` targets. `cross test` and + `cross run` don't work for these new targets. + +- OpenSSL support for the `i686-unknown-linux-musl` target. + +### Changed + +- Bump OpenSSL version to 1.0.2m. + +## [v0.1.12] - 2017-09-22 + +### Added + +- Support for `cross check`. This subcommand won't use any Docker container. + +### Changed + +- `binfmt_misc` is not required on the host for toolchain v1.19.0 and newer. + With these toolchains `binfmt_misc` interpreters don't need to be installed + on the host, saving a _privileged_ docker run which some systems don't allow. + +## [v0.1.11] - 2017-06-10 + +### Added + +- Build and test support for `i686-pc-windows-gnu`, `x86_64-pc-windows-gnu`, + `asmjs-unknown-emscripten` and `wasm-unknown-emscripten`. + +- Build support for `aarch64-linux-android`, `arm-linux-androideabi`, + `armv7-linux-androideabi`, `x86_64-linux-android` and `i686-linux-android` + +- A `build.env.passthrough` / `build.target.*.passthrough` option to Cross.toml + to support passing environment variables from the host to the Docker image. + +### Changed + +- Bumped OpenSSL version to 1.0.2k +- Bumped QEMU version to 2.9.0 + +## [v0.1.10] - 2017-04-02 + +### Added + +- Cross compilation support for `x86_64-pc-windows-gnu` + +- Cross compilation support for Android targets + +### Changed + +- Bumped OpenSSL version to 1.0.2k + +## [v0.1.9] - 2017-02-08 + +### Added + +- Support for ARM MUSL targets. + +### Changed + +- The automatic lockfile update that happens every time `cross` is invoked + should no longer hit the network when there's no git dependency to add/update. + +- The QEMU_STRACE variable is passed to the underlying Docker container. Paired + with `cross run`, this lets you get a trace of system calls from the execution + of "foreign" (non x86_64) binaries. + +## [v0.1.8] - 2017-01-21 + +### Added + +- Support for custom targets. Cross will now also try to use a docker image for + them. As with the built-in targets, one can override the image using + `[target.{}.image]` in Cross.toml. + +### Changed + +- Moved to a newer Xargo: v0.3.5 + +## [v0.1.7] - 2017-01-19 + +### Changed + +- Moved to a newer Xargo: v0.3.4 + +### Fixed + +- QEMU interpreters were being registered when not required, e.g. for the + `x86_64-unknown-linux-gnu` target. + +## [v0.1.6] - 2017-01-14 + +### Fixed + +- Stable releases were picking the wrong image (wrong tag: 0.1.5 instead of + v0.1.5) + +## [v0.1.5] - 2017-01-14 [YANKED] + +### Added + +- `cross run` support for the thumb targets. + +- A `build.xargo` / `target.$TARGET.xargo` option to Cross.toml to use Xargo + instead of Cargo. + +- A `target.$TARGET.image` option to override the Docker image used for + `$TARGET`. + +- A `sparc64-unknown-linux-gnu` environment. + +- A `x86_64-unknown-dragonfly` environment. + +### Changed + +- Building older versions (<0.7.0) of the `openssl` crate is now supported. + +- Before Docker is invoked, `cross` will _always_ (re)generate the lockfile to + avoid errors later on due to read/write permissions. This removes the need to + call `cargo generate-lockfile` before `cross` in _all_ cases. 
+ +## [v0.1.4] - 2017-01-07 + +### Added + +- Support for the `arm-unknown-linux-gnueabi` target + +- `cross build` support for: + - `i686-unknown-freebsd` + - `x86_64-unknown-freebsd` + - `x86_64-unknown-netbsd` + +### Changed + +- It's no longer necessary to call `cargo generate-lockfile` before using + `cross` as `cross` will now take care of creating a lockfile when necessary. + +- The C environments for the `thumb` targets now include newlib (`libc.a`, + `libm.a`, etc.) + +### Fixed + +- A segfault when `cross` was trying to figure out the name of the user that + called it. + +## [v0.1.3] - 2017-01-01 + +### Changed + +- Fix the `i686-unknown-linux-musl` target + +## [v0.1.2] - 2016-12-31 + +### Added + +- Support for `i686-unknown-linux-musl` +- Support for `cross build`ing crates for the `thumbv*-none-eabi*` targets. + +## [v0.1.1] - 2016-12-28 + +### Added + +- Support for `x86_64-unknown-linux-musl` +- Print shell commands when the verbose flag is used. +- Support crossing from x86_64 osx to i686 osx + +## v0.1.0 - 2016-12-26 + +- Initial release. Supports 12 targets. + + + + +[Unreleased]: https://github.com/cross-rs/cross/compare/v0.2.2...HEAD + +[v0.2.2]: https://github.com/cross-rs/cross/compare/v0.2.1...v0.2.2 +[v0.2.1]: https://github.com/cross-rs/cross/compare/v0.2.0...v0.2.1 +[v0.2.0]: https://github.com/cross-rs/cross/compare/v0.1.16...v0.2.0 +[v0.1.16]: https://github.com/cross-rs/cross/compare/v0.1.15...v0.1.16 +[v0.1.15]: https://github.com/cross-rs/cross/compare/v0.1.14...v0.1.15 +[v0.1.14]: https://github.com/cross-rs/cross/compare/v0.1.13...v0.1.14 +[v0.1.13]: https://github.com/cross-rs/cross/compare/v0.1.12...v0.1.13 +[v0.1.12]: https://github.com/cross-rs/cross/compare/v0.1.11...v0.1.12 +[v0.1.11]: https://github.com/cross-rs/cross/compare/v0.1.10...v0.1.11 +[v0.1.10]: https://github.com/cross-rs/cross/compare/v0.1.9...v0.1.10 +[v0.1.9]: https://github.com/cross-rs/cross/compare/v0.1.8...v0.1.9 +[v0.1.8]: https://github.com/cross-rs/cross/compare/v0.1.7...v0.1.8 +[v0.1.7]: https://github.com/cross-rs/cross/compare/v0.1.6...v0.1.7 +[v0.1.6]: https://github.com/cross-rs/cross/compare/v0.1.5...v0.1.6 +[v0.1.5]: https://github.com/cross-rs/cross/compare/v0.1.4...v0.1.5 +[v0.1.4]: https://github.com/cross-rs/cross/compare/v0.1.3...v0.1.4 +[v0.1.3]: https://github.com/cross-rs/cross/compare/v0.1.2...v0.1.3 +[v0.1.2]: https://github.com/cross-rs/cross/compare/v0.1.1...v0.1.2 +[v0.1.1]: https://github.com/cross-rs/cross/compare/v0.1.0...v0.1.1 + diff --git a/.changes/template/issue440.json b/.changes/template/issue440.json new file mode 100644 index 000000000..33772138a --- /dev/null +++ b/.changes/template/issue440.json @@ -0,0 +1,5 @@ +{ + "description": "no associated PR.", + "issues": [440], + "type": "fixed" +} diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 000000000..689711a65 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,25 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true + +[*.{rs,py,sh,md,toml,yml,js}] +charset = utf-8 + +[*.{rs,py,sh,yml,js}] +trim_trailing_whitespace = true + +[*.{rs,py,sh}] +indent_style = space +indent_size = 4 + +[*.yml] +indent_style = space +indent_size = 2 + +[Dockerfile.*] +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 4 diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 000000000..295b630b4 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,3 @@ +* text=auto + +Dockerfile.* linguist-language=Dockerfile eol=lf diff --git 
a/.github/ISSUE_TEMPLATE/b_issue_report.yml b/.github/ISSUE_TEMPLATE/b_issue_report.yml index 398616b78..c73e3ad60 100644 --- a/.github/ISSUE_TEMPLATE/b_issue_report.yml +++ b/.github/ISSUE_TEMPLATE/b_issue_report.yml @@ -33,6 +33,7 @@ body: - aarch64-linux-android - aarch64-unknown-linux-gnu - aarch64-unknown-linux-musl + - aarch64-unknown-freebsd - arm-linux-androideabi - arm-unknown-linux-gnueabi - arm-unknown-linux-gnueabihf @@ -51,6 +52,7 @@ body: - i686-unknown-freebsd - i686-unknown-linux-gnu - i686-unknown-linux-musl + - loongarch64-unknown-linux-gnu - mips64el-unknown-linux-gnuabi64 - mips64el-unknown-linux-muslabi64 - mips64-unknown-linux-gnuabi64 @@ -73,7 +75,7 @@ body: - wasm32-unknown-emscripten - x86_64-linux-android - x86_64-pc-windows-gnu - - x86_64-sun-solaris + - x86_64-pc-solaris - x86_64-unknown-freebsd - x86_64-unknown-linux-gnu - x86_64-unknown-linux-musl @@ -90,7 +92,7 @@ body: - label: Windows - label: Linux / BSD - label: other OS (specify in description) - + - type: checkboxes id: host-arch attributes: diff --git a/.github/ISSUE_TEMPLATE/z_feature_request.yml b/.github/ISSUE_TEMPLATE/z_feature_request.yml index 454bc1072..b0d7acb69 100644 --- a/.github/ISSUE_TEMPLATE/z_feature_request.yml +++ b/.github/ISSUE_TEMPLATE/z_feature_request.yml @@ -10,7 +10,7 @@ body: options: - label: I've looked through the [issues and pull requests](https://github.com/cross-rs/cross/issues?q=) for similar requests required: true - - label: This feature could be solved with a [custom docker image](https://github.com/cross-rs/cross#custom-docker-images) (optional) + - label: This feature could be solved with a [custom image](https://github.com/cross-rs/cross/blob/main/docs/custom_images.md#custom-images) (optional) - type: textarea id: description validations: @@ -20,7 +20,7 @@ body: description: | Write a description of what your feature would do. If you have an idea how to solve this, feel free to leave a comment on the feature request after creating it. - + If you have tried to solve this but couldn't, explain what you tried and how or why it didn't work. We want to help! placeholder: | Examples: diff --git a/.github/actions/cargo-install-upload-artifacts/action.yml b/.github/actions/cargo-install-upload-artifacts/action.yml index 07bd34f53..bcf81ea36 100644 --- a/.github/actions/cargo-install-upload-artifacts/action.yml +++ b/.github/actions/cargo-install-upload-artifacts/action.yml @@ -14,7 +14,7 @@ runs: metadata="$(cargo metadata --format-version 1 --no-deps)" package_name="cross" - echo "::set-output name=package-name::${package_name}" + echo "package-name=${package_name}" >> $GITHUB_OUTPUT out_dir="$(mktemp -d)" artifacts_dir="$(mktemp -d)" @@ -24,22 +24,21 @@ runs: artifacts_dir="$(cygpath -w "${artifacts_dir}")" fi - echo "::set-output name=out-dir::${out_dir}" - echo "::set-output name=artifacts-dir::${artifacts_dir}" + echo "out-dir=${out_dir}" >> $GITHUB_OUTPUT + echo "artifacts-dir=${artifacts_dir}" >> $GITHUB_OUTPUT + shell: bash + - run: rm -rf .git shell: bash - - name: Build with all features - uses: actions-rs/cargo@v1 - with: - command: install - args: > + run: + cargo install --locked --path . 
--target ${{ inputs.target }} --all-features --root ${{ steps.metadata.outputs.out-dir }} --bins - use-cross: true + shell: ${{ contains(runner.os, 'windows') && 'pwsh' || 'bash' }} env: RUSTFLAGS: "" # Make sure to unset RUSTFLAGS @@ -67,8 +66,8 @@ runs: artifact_path="$(cygpath -w "${artifact_path}")" fi - echo "::set-output name=name::${artifact_name}" - echo "::set-output name=path::${artifact_path}" + echo "name=${artifact_name}" >> $GITHUB_OUTPUT + echo "path=${artifact_path}" >> $GITHUB_OUTPUT env: package_name: ${{ steps.metadata.outputs.package-name }} out_dir: ${{ steps.metadata.outputs.out-dir }} diff --git a/.github/actions/cargo-llvm-cov/action.yml b/.github/actions/cargo-llvm-cov/action.yml index 49eb4dda4..edf689cb8 100644 --- a/.github/actions/cargo-llvm-cov/action.yml +++ b/.github/actions/cargo-llvm-cov/action.yml @@ -12,9 +12,9 @@ runs: using: composite steps: - name: Install cargo-llvm-cov - uses: taiki-e/install-action@v1 + uses: taiki-e/install-action@v2 with: - tool: cargo-llvm-cov + tool: cargo-llvm-cov@0.5.3 - run: rustup component add llvm-tools-preview shell: bash - name: LLVM instrument coverage @@ -31,19 +31,18 @@ runs: echo LLVM_PROFILE_FILE="${pwd}/target/cross-%m.profraw" >> $GITHUB_ENV echo CARGO_INCREMENTAL="0" >> $GITHUB_ENV echo RUST_TEST_THREADS="1" >> $GITHUB_ENV - echo "::set-output name=artifact-name::_coverage-${name}" + echo "artifact-name=_coverage-${name}" >> $GITHUB_OUTPUT post: | # XXX(emilgardis): Upload early? pwd=$(pwd) if which cygpath; then pwd="$(cygpath -w "$(pwd)")" fi - # No pwd needed here, we're in the root export LLVM_PROFILE_FILE="${pwd}/target/cross-%m.profraw" export CARGO_LLVM_COV_TARGET_DIR="${pwd}/target" mkdir coverage echo $(ls target) - cargo llvm-cov --no-run --remap-path-prefix --lcov --output-path "coverage/lcov.${name}.info" -vv || ( echo "::error title=Coverage merge failed::" && exit 0 ) + cargo llvm-cov report --remap-path-prefix --lcov --output-path "coverage/lcov.${name}.info" -vv || ( echo "::error title=Coverage merge failed::" && exit 0 ) rm target/*.profraw npm install @actions/artifact npm install glob diff --git a/.github/actions/cargo-publish/action.yml b/.github/actions/cargo-publish/action.yml index bcba68033..c32cb78c6 100644 --- a/.github/actions/cargo-publish/action.yml +++ b/.github/actions/cargo-publish/action.yml @@ -13,7 +13,7 @@ runs: steps: - name: Read changelog id: changelog-reader - uses: mindsers/changelog-reader-action@v2.0.0 + uses: mindsers/changelog-reader-action@v2 with: # validation_depth: 10 version: ${{ (github.ref_type == 'tag' && !contains(github.ref_name, '-') && github.ref_name) || 'Unreleased' }} @@ -27,11 +27,13 @@ runs: ${{ runner.temp }}/artifacts - name: Log into crates.io - if: github.event_name != 'pull_request' - uses: actions-rs/cargo@v1 - with: - command: login - args: -- ${{ inputs.cargo-registry-token }} + if: > + github.event_name == 'push' && ( + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) || + startsWith(github.ref, 'refs/tags/v') + ) + run: cargo login ${{ inputs.cargo-registry-token }} + shell: bash - run: gh release delete --yes Unreleased || exit 0 if: > @@ -57,7 +59,5 @@ runs: ${{ steps.download-artifacts.outputs.download-path }}/cross-*/* - name: Publish crate - uses: actions-rs/cargo@v1 - with: - command: publish - args: ${{ !startsWith(github.ref, 'refs/tags/v') && '--dry-run' || '' }} + run: cargo publish ${{ !startsWith(github.ref, 'refs/tags/v') && '--dry-run' || '' }} + shell: bash diff --git 
a/.github/actions/post/main.js b/.github/actions/post/main.js index 94bcec651..8c8517229 100644 --- a/.github/actions/post/main.js +++ b/.github/actions/post/main.js @@ -21,6 +21,6 @@ if (process.env[`STATE_POST`] != undefined) { run(process.env.INPUT_POST); } else { // Otherwise, this is the main step - console.log(`::save-state name=POST::true`); + require("fs").appendFileSync(process.env.GITHUB_STATE, "POST=true\n"); // persist state for the post step via the GITHUB_STATE file run(process.env.INPUT_MAIN); } diff --git a/.github/actions/setup-rust/action.yml b/.github/actions/setup-rust/action.yml index 0dd4b003e..4fe64da53 100644 --- a/.github/actions/setup-rust/action.yml +++ b/.github/actions/setup-rust/action.yml @@ -16,12 +16,14 @@ runs: using: composite steps: - name: Install Rust toolchain - uses: actions-rs/toolchain@v1 + uses: dtolnay/rust-toolchain@master with: toolchain: ${{ inputs.toolchain }} target: ${{ inputs.target }} - default: true components: ${{ inputs.components }} - profile: minimal - - uses: Swatinem/rust-cache@v1.3.0 + - name: Install rust matcher + run: echo "::add-matcher::.github/actions/setup-rust/rust.json" + shell: bash + + - uses: Swatinem/rust-cache@v2.2.0 diff --git a/.github/actions/setup-rust/rust.json b/.github/actions/setup-rust/rust.json new file mode 100644 index 000000000..dbc4c7920 --- /dev/null +++ b/.github/actions/setup-rust/rust.json @@ -0,0 +1,31 @@ +{ + "problemMatcher": [ + { + "owner": "rust", + "pattern": [ + { + "regexp": "^(warning|warn|error)(\\[(.*)\\])?: (.*)$", + "severity": 1, + "message": 4, + "code": 3 + }, + { + "regexp": "^([\\s->=]*(.*):(\\d*):(\\d*)|.*)$", + "file": 2, + "line": 3, + "column": 4 + } + ] + }, + { + "owner": "cross-rs", + "pattern": [ + { + "regexp": "^\\[cross\\] (warning|error): (.*)$", + "severity": 1, + "message": 2 + } + ] + } + ] +} diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000..1dd7b0867 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,50 @@ +version: 2 +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "weekly" + groups: + github_actions: + patterns: + - "*" + - package-ecosystem: "github-actions" + directory: "/actions/cargo-install-upload-artifacts" + schedule: + interval: "weekly" + groups: + github_actions: + patterns: + - "*" + - package-ecosystem: "github-actions" + directory: "/actions/cargo-llvm-cov" + schedule: + interval: "weekly" + groups: + github_actions: + patterns: + - "*" + - package-ecosystem: "github-actions" + directory: "/actions/cargo-publish" + schedule: + interval: "weekly" + groups: + github_actions: + patterns: + - "*" + - package-ecosystem: "github-actions" + directory: "/actions/post" + schedule: + interval: "weekly" + groups: + github_actions: + patterns: + - "*" + - package-ecosystem: "github-actions" + directory: "/actions/setup-rust" + schedule: + interval: "weekly" + groups: + github_actions: + patterns: + - "*" diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 24882f538..7780c7d94 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -9,12 +9,31 @@ jobs: name: Changelog check runs-on: ubuntu-latest steps: - - name: Checkout sources - uses: actions/checkout@v2 + - uses: actions/checkout@v3 + - uses: ./.github/actions/setup-rust - - name: Changelog updated - uses: Zomzog/changelog-checker@v1.2.0 + - name: Get Changed Files + id: files + uses: tj-actions/changed-files@v41 with: - fileName: CHANGELOG.md - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + separator: ';' + files: | + .changes/*.json + 
+ - name: Validate Changelog + id: changelog + run: | + set -x + set -e + IFS=';' read -a added_modified <<< '${{ steps.files.outputs.all_changed_files }}' + IFS=';' read -a removed <<< '${{ steps.files.outputs.deleted_files }}' + added_count=${#added_modified[@]} + removed_count=${#removed[@]} + if ${{ !contains(github.event.pull_request.labels.*.name, 'no changelog' ) }}; then + if [[ "$added_count" -eq "0" ]] && [[ "$removed_count" -eq "0" ]]; then + echo "Must add or remove changes or add the 'no changelog' label" + exit 1 + else + cargo xtask changelog validate + fi + fi diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fe25ba95b..f754a676b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -1,5 +1,16 @@ on: + workflow_call: + inputs: + matrix-args: + required: false + type: string + description: Arguments to pass to `cargo xtask ci-job target-matrix` + checkout-ref: + required: false + type: string + description: Used to checkout a specific ref, instead of the default ref with `actions/checkout` action pull_request: + merge_group: push: branches: [main, staging, trying] tags: @@ -16,30 +27,33 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - name: Run ShellCheck - uses: azohra/shell-linter@v0.3.0 + uses: azohra/shell-linter@v0.6.0 cargo-deny: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: EmbarkStudios/cargo-deny-action@v1 fmt: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust with: components: rustfmt - name: Run rustfmt - uses: actions-rs/cargo@v1 - with: - command: fmt - args: -- --check + run: cargo fmt -- --check clippy: runs-on: ${{ matrix.os }} @@ -51,18 +65,15 @@ jobs: - windows-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust with: components: clippy - toolchain: 1.58.1 # MSRV, Minimally Supported Rust Version. Make sure to update README.md - + toolchain: 1.77.2 # MSRV, Minimally Supported Rust Version. 
Make sure to update README.md and clippy.toml - name: Run clippy - uses: actions-rs/cargo@v1 - with: - command: clippy - args: --locked --all-targets --all-features --workspace -- -D warnings - + run: cargo clippy --locked --all-targets --all-features --workspace -- -D warnings test: runs-on: ${{ matrix.os }} strategy: @@ -73,6 +84,8 @@ jobs: - windows-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - uses: ./.github/actions/cargo-llvm-cov @@ -80,10 +93,7 @@ jobs: name: test-${{matrix.os}} - name: Run unit tests - uses: actions-rs/cargo@v1 - with: - command: test - args: --locked --all-targets --workspace --all-features + run: cargo test --locked --all-targets --workspace --all-features timeout-minutes: 10 check: runs-on: ubuntu-latest @@ -91,130 +101,36 @@ jobs: is-latest: ${{ steps.check.outputs.is-latest }} steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - run: cargo xtask ci-job check id: check - shell: bash generate-matrix: runs-on: ubuntu-latest outputs: matrix: ${{ steps.generate-matrix.outputs.matrix }} + tests: ${{ steps.generate-matrix.outputs.tests }} steps: + - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/setup-rust + - name: Generate matrix id: generate-matrix - run: | - if [[ -z "${commit_message}" ]]; then - bors_args=() - else - bors_args="$(echo "${commit_message}" | sed -E 's/^Try #[0-9]+:\s+//')" - declare -a "bors_args=(${bors_args})" - fi - - targets=() - index=0 - while [[ "${bors_args[$index]-}" == --target ]]; do - index=$(expr $index + 1) - targets+=("${bors_args[$index]}") - index=$(expr $index + 1) - done - - selects=() - for target in "${targets[@]}"; do - base=$(echo "${target}" | cut -d '.' -f 1) - sub=$(echo "${target}" | cut -d '.' -sf 2) - selects+=("select(.target | test(\"${base}\")) and if \"${sub}\" != \"\" then .sub == \"${sub}\" else .sub == null end") - done - query=$(printf " or %s" "${selects[@]}") - query="${query:4}" - - if [[ "${#targets[@]}" != "0" ]]; then - echo "Only running targets matching '${targets[@]}'." - matrix=$( - yq --output-format json <<< "${matrix}" | jq '[.[] | select('"${query}"')]' - ) - else - echo 'Running all targets.' - matrix="$(yq --output-format json <<< "${matrix}")" - fi - - jq -C '[.[] | . += {"pretty": ([.target] + [select(.sub != null).sub]) | join(":")}]' <<< "${matrix}" - - echo "::set-output name=matrix::$(jq -c '[.[] | . 
+= {"pretty": ([.target] + [select(.sub != null).sub]) | join(":")}]' <<< "${matrix}")" + run: cargo xtask ci-job target-matrix ${{ github.event_name == 'merge_group' && format('--merge-group {0}', github.ref) || '' }} ${{ inputs.matrix-args || '' }} env: - commit_message: > - ${{ - (( - startsWith(github.event.head_commit.message, 'Try #') && - github.event.head_commit.author.username == 'bors[bot]' - ) && github.event.head_commit.message) || '' - }} - matrix: | - - { target: x86_64-apple-darwin, os: macos-10.15, deploy: true } - - { target: x86_64-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: native qemu-user qemu-system, deploy: true } - - { target: x86_64-unknown-linux-musl, os: ubuntu-latest, std: 1, run: 1, deploy: true } - - { target: x86_64-pc-windows-msvc, os: windows-2019, deploy: true } - - - { target: x86_64-unknown-linux-gnu, sub: centos, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: native qemu-user qemu-system } - - { target: aarch64-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: arm-unknown-linux-gnueabi, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: arm-unknown-linux-gnueabihf, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: armv7-unknown-linux-gnueabi, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user } - - { target: armv7-unknown-linux-gnueabihf, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: thumbv7neon-unknown-linux-gnueabihf, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: i586-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: i686-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: native qemu-user qemu-system } - - { target: mips-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: mipsel-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: mips64-unknown-linux-gnuabi64, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: mips64el-unknown-linux-gnuabi64, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: mips64-unknown-linux-muslabi64, os: ubuntu-latest, std: 1, run: 1 } - - { target: mips64el-unknown-linux-muslabi64, os: ubuntu-latest, std: 1, run: 1 } - - { target: powerpc-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: powerpc64-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: powerpc64le-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user qemu-system } - - { target: riscv64gc-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, std: 1, run: 1 } - - { target: s390x-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-system } - - { target: sparc64-unknown-linux-gnu, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-system } - - { target: aarch64-unknown-linux-musl, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: arm-unknown-linux-musleabihf, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: arm-unknown-linux-musleabi, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, 
run: 1 } - - { target: armv5te-unknown-linux-gnueabi, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: armv5te-unknown-linux-musleabi, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: armv7-unknown-linux-musleabi, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: armv7-unknown-linux-musleabihf, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: i586-unknown-linux-musl, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user } - - { target: i686-unknown-linux-musl, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1, runners: qemu-user } - - { target: mips-unknown-linux-musl, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: mipsel-unknown-linux-musl, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, run: 1 } - - { target: aarch64-linux-android, os: ubuntu-latest, cpp: 1, std: 1, run: 1, runners: qemu-user } - - { target: arm-linux-androideabi, os: ubuntu-latest, cpp: 1, std: 1, run: 1, runners: qemu-user } - - { target: armv7-linux-androideabi, os: ubuntu-latest, cpp: 1, std: 1, run: 1, runners: qemu-user } - - { target: thumbv7neon-linux-androideabi, os: ubuntu-latest, cpp: 1, std: 1, run: 1, runners: qemu-user } - - { target: i686-linux-android, os: ubuntu-latest, cpp: 1, std: 1, run: 1, runners: qemu-user } - - { target: x86_64-linux-android, os: ubuntu-latest, cpp: 1, std: 1, run: 1, runners: qemu-user } - - { target: x86_64-pc-windows-gnu, os: ubuntu-latest, cpp: 1, std: 1, run: 1 } - - { target: i686-pc-windows-gnu, os: ubuntu-latest, cpp: 1, std: 1, run: 1 } - # Disabled for now, see https://github.com/rust-lang/rust/issues/98216 - #-{ target: asmjs-unknown-emscripten, os: ubuntu-latest, cpp: 1, std: 1, run: 1 } - - { target: wasm32-unknown-emscripten, os: ubuntu-latest, cpp: 1, std: 1, run: 1 } - - { target: x86_64-unknown-dragonfly, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1, build-std: 1 } - - { target: i686-unknown-freebsd, os: ubuntu-latest, dylib: 1, std: 1 } - - { target: x86_64-unknown-freebsd, os: ubuntu-latest, dylib: 1, std: 1 } - - { target: x86_64-unknown-netbsd, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1 } - - { target: sparcv9-sun-solaris, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1 } - - { target: x86_64-sun-solaris, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1 } - - { target: x86_64-unknown-illumos, os: ubuntu-latest, cpp: 1, dylib: 1, std: 1 } - - { target: thumbv6m-none-eabi, os: ubuntu-latest, std: 1 } - - { target: thumbv7em-none-eabi, os: ubuntu-latest, std: 1 } - - { target: thumbv7em-none-eabihf, os: ubuntu-latest, std: 1 } - - { target: thumbv7m-none-eabi, os: ubuntu-latest, std: 1 } - - { target: cross, os: ubuntu-latest } + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} build: name: target (${{ matrix.pretty }},${{ matrix.os }}) runs-on: ${{ matrix.os }} needs: [shellcheck, test, generate-matrix, check] - if: github.event_name == 'push' + if: (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'merge_group' || github.event_name == 'issue_comment' || github.event_name == 'schedule') && needs.generate-matrix.outputs.matrix != '{}' && needs.generate-matrix.outputs.matrix != '[]' && needs.generate-matrix.outputs.matrix != '' + concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ matrix.pretty }} + cancel-in-progress: false strategy: fail-fast: false matrix: @@ -225,12 +141,14 @@ jobs: coverage-artifact: ${{ steps.cov.outputs.artifact-name }} steps: - uses: actions/checkout@v3 + with: + ref: ${{ 
inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - name: Set up Docker Buildx if: runner.os == 'Linux' - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v2 - name: Build xtask run: cargo build -p xtask @@ -247,7 +165,7 @@ jobs: - name: LLVM instrument coverage id: cov uses: ./.github/actions/cargo-llvm-cov - if: steps.prepare-meta.outputs.has-image + if: steps.prepare-meta.outputs.has-image && steps.prepare-meta.outputs.test-variant != 'zig' with: name: cross-${{matrix.pretty}} @@ -267,8 +185,8 @@ jobs: - name: Build Docker image id: build-docker-image if: steps.prepare-meta.outputs.has-image - timeout-minutes: 60 - run: cargo xtask build-docker-image -v "${TARGET}${SUB:+.$SUB}" + timeout-minutes: 120 + run: cargo xtask build-docker-image -v "${TARGET}${SUB:+.$SUB}" ${{ matrix.verbose && '-v' || '' }} env: TARGET: ${{ matrix.target }} SUB: ${{ matrix.sub }} @@ -300,7 +218,11 @@ jobs: if: matrix.deploy with: target: ${{ matrix.target }} - image: ${{ steps.build-docker-image.outputs.image }} + + - name: Test Zig Image + if: steps.prepare-meta.outputs.has-image && steps.prepare-meta.outputs.test-variant == 'zig' + run: ./ci/test-zig-image.sh + shell: bash - name: Test Cross Image if: steps.prepare-meta.outputs.has-image && steps.prepare-meta.outputs.test-variant == 'cross' @@ -312,13 +234,14 @@ jobs: - name: Login to GitHub Container Registry if: steps.prepare-meta.outputs.has-image - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: registry: ghcr.io username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - name: Push image to GitHub Container Registry if: > + (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule') && steps.prepare-meta.outputs.has-image && ( github.ref == format('refs/heads/{0}', github.event.repository.default_branch) || startsWith(github.ref, 'refs/tags/v') @@ -333,15 +256,16 @@ jobs: # we should always have an artifact from a previous build. 
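A note on the matrix generation replaced above: the old inline jq derived each job's `pretty` label as the target plus an optional `:sub` suffix via `([.target] + [select(.sub != null).sub]) | join(":")`, a mapping that `cargo xtask ci-job target-matrix` now owns. A minimal Rust sketch of that label rule (the struct and field names are illustrative, not the actual xtask types):

```rust
/// Illustrative subset of one target-matrix row.
struct MatrixEntry {
    target: String,
    sub: Option<String>,
}

impl MatrixEntry {
    /// The `pretty` job label: the target, plus `:sub` when a
    /// sub-variant such as `centos` exists -- the same rule as the
    /// removed jq filter.
    fn pretty(&self) -> String {
        match &self.sub {
            Some(sub) => format!("{}:{}", self.target, sub),
            None => self.target.clone(),
        }
    }
}

fn main() {
    let entry = MatrixEntry {
        target: "x86_64-unknown-linux-gnu".into(),
        sub: Some("centos".into()),
    };
    assert_eq!(entry.pretty(), "x86_64-unknown-linux-gnu:centos");
}
```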
remote: - needs: [shellcheck, test, check] + needs: [test, check, generate-matrix] + if: fromJson(needs.generate-matrix.outputs.tests).remote runs-on: ubuntu-latest - if: github.actor == 'bors[bot]' steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - name: LLVM instrument coverage - id: remote-cov uses: ./.github/actions/cargo-llvm-cov with: name: integration-remote @@ -353,15 +277,16 @@ jobs: shell: bash bisect: - needs: [shellcheck, test, check] + needs: [test, check, generate-matrix] + if: fromJson(needs.generate-matrix.outputs.tests).bisect runs-on: ubuntu-latest - if: github.actor == 'bors[bot]' steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - name: LLVM instrument coverage - id: bisect-cov uses: ./.github/actions/cargo-llvm-cov with: name: integration-bisect @@ -372,16 +297,44 @@ jobs: run: ./ci/test-bisect.sh shell: bash + foreign: + needs: [test, check, generate-matrix] + if: fromJson(needs.generate-matrix.outputs.tests).foreign + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} + - uses: ./.github/actions/setup-rust + + - name: LLVM instrument coverage + uses: ./.github/actions/cargo-llvm-cov + with: + name: integration-bisect + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + with: + platforms: arm64 + - name: Set up docker buildx + uses: docker/setup-buildx-action@v2 + id: buildx + with: + install: true + - name: Run Foreign toolchain test + run: ./ci/test-foreign-toolchain.sh + shell: bash + docker-in-docker: - needs: [shellcheck, test, check] + needs: [test, check, generate-matrix] + if: fromJson(needs.generate-matrix.outputs.tests).docker-in-docker runs-on: ubuntu-latest - if: github.actor == 'bors[bot]' steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - name: LLVM instrument coverage - id: docker-in-docker-cov uses: ./.github/actions/cargo-llvm-cov with: name: integration-docker-in-docker @@ -393,11 +346,52 @@ jobs: run: ./ci/test-docker-in-docker.sh shell: bash + podman: + name: podman + runs-on: ubuntu-latest + needs: [shellcheck, test, check, generate-matrix] + if: fromJson(needs.generate-matrix.outputs.tests).podman + strategy: + fail-fast: false + outputs: + coverage-artifact: ${{ steps.cov.outputs.artifact-name }} + steps: + - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} + + - uses: ./.github/actions/setup-rust + + - name: Install Podman + env: + DEBIAN_FRONTEND: noninteractive + run: | + sudo apt-get update + sudo apt-get install podman --no-install-recommends --assume-yes + + - name: LLVM instrument coverage + id: cov + uses: ./.github/actions/cargo-llvm-cov + with: + name: cross-podman-aarch64-unknown-linux-gnu + + - name: Install cross + run: cargo install --path . 
--force --debug + + - name: Run Podman Test + run: ./ci/test-podman.sh + env: + CROSS_CONTAINER_ENGINE: podman + TARGET: aarch64-unknown-linux-gnu + shell: bash + publish: needs: [build, check, fmt, clippy, cargo-deny] runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - uses: ./.github/actions/cargo-publish with: @@ -405,7 +399,7 @@ jobs: github-token: ${{ secrets.GITHUB_TOKEN }} conclusion: - needs: [shellcheck, fmt, clippy, test, generate-matrix, build, publish, check, remote, bisect, docker-in-docker] + needs: [shellcheck, fmt, clippy, test, generate-matrix, build, publish, check, remote, bisect, docker-in-docker, foreign, podman] if: always() runs-on: ubuntu-latest steps: @@ -413,7 +407,7 @@ jobs: run: | jq -C <<< "${needs}" - # Check if all needs were successfull or skipped. + # Check if all needs were successful or skipped. "$(jq -r 'all(.result as $result | (["success", "skipped"] | contains([$result])))' <<< "${needs}")" env: needs: ${{ toJson(needs) }} @@ -426,12 +420,14 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 + with: + ref: ${{ inputs.checkout-ref }} - uses: ./.github/actions/setup-rust - uses: actions/download-artifact@v3 with: path: ${{ runner.temp }}/artifacts - name: Grab PR number - run: echo "::set-output name=pr::"$(echo $commit_message | sed -ne 's/.*\#\(.*\):/\1/p') + run: echo "pr=$(echo ${commit_message} | sed -ne 's/.*#\(.*\):/\1/p')" >> $GITHUB_OUTPUT id: pr-number if: ${{ !github.event.pull_request.number }} env: @@ -453,5 +449,5 @@ jobs: ./codecov -F $name ${pr:+-P ${pr}} -f $file --sha ${sha_rev} -n $name; done env: - pr: ${{ steps.pr-number.outputs.pr || '' }} + pr: ${{ steps.pr-number.outputs.pr }} artifacts: ${{ runner.temp }}/artifacts diff --git a/.github/workflows/try.yml b/.github/workflows/try.yml new file mode 100644 index 000000000..bcda9a625 --- /dev/null +++ b/.github/workflows/try.yml @@ -0,0 +1,66 @@ +name: Try +on: + issue_comment: + types: [created] +jobs: + acknowledge: + runs-on: ubuntu-latest + if: github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && (contains(toJson(github.event.comment.body), '\n/ci try') || startsWith(github.event.comment.body, '/ci try')) + steps: + - uses: actions/checkout@v3 + - name: Acknowledge command + id: acknowledge + run: | + gh pr comment ${{ github.event.issue.number }} --body " + Starting try run. 
[Link to action](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}?pr=${{ github.event.issue.number }})" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + continue-on-error: true + try: + if: github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && (contains(toJson(github.event.comment.body), '\n/ci try') || startsWith(github.event.comment.body, '/ci try')) + uses: ./.github/workflows/ci.yml + with: + matrix-args: try --comment "${{ github.event.comment.body }}" --pr ${{ github.event.issue.number }} + checkout-ref: refs/pull/${{ github.event.issue.number }}/head + comment: + needs: [try, acknowledge] + if: always() && needs.try.result != 'skipped' + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Minimize existing comments + run: | + COMMENTS=$(gh pr view ${{ github.event.issue.number }} --json comments --jq '.comments[] | select((.body | contains("") or contains("")) and (.author.login == "github-actions") and (.isMinimized | not)) | .id') + while read -r comment_id; do + gh api graphql -f query='mutation { minimizeComment(input: { classifier: OUTDATED, subjectId: "'"$comment_id"'" }) { minimizedComment { isMinimized } } }' + done <<< "$COMMENTS" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + continue-on-error: true + # comment on the PR with the result and links to the logs using gh cli + # Something like `### Try build: [{result}]({link_to_logs})` + # the url to the logs are on jobs[name="try"].url gathered with `gh run view ${{ github.run_id }} --json jobs` + - name: Comment on PR + run: | + PR_ID=${{ github.event.issue.number }} + gh run view ${{ github.run_id }} --json jobs |\ + jq -r --arg pr_id "$PR_ID" --arg comment "${{ github.event.comment.html_url }}" ' + def box: .conclusion | if . == "success" then "✔️ " elif . == "skipped" then "🛇 " else "❌ " end; + def job_to_md: . | "- [\(.name)](\(.url)?pr=\($pr_id)\(.conclusion | if . == "success" then "#step:10:1)" else "#)" end) - \(box)"; + def wrap_if_needed: + (.[0].conclusion | if . == "success" then "#### Successful Jobs\n\n" else "#### Failed Jobs\n\n" end) + + if length > 10 then + "
<details><summary>\nList</summary>\n\n\(map(job_to_md) | join("\n"))\n\n</details>
\n" + else + map(job_to_md) | join("\n") + "\n" + end; + "\n## [Try](\(.jobs[] | select(.name == "try / generate-matrix") | .url + "#step:4:18")) run for [comment](\($comment))\n\n" + + "\(.jobs[] | select(.name == "try / conclusion") | job_to_md)\n\n" + + ([.jobs[] | select(.name | startswith("try / target")) | select(.name | contains("matrix.pretty") | not ) | . as $job | + {conclusion: $job.conclusion, name: ($job.name | capture("\\((?[^,]+),.*") | .name), url: $job.url} ] | + group_by(if .conclusion == "success" then "success" else "failure" end) | + map(wrap_if_needed) | + join("\n"))' |\ + gh pr comment "$PR_ID" --body "$(< /dev/stdin)" + env: + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/weekly.yml b/.github/workflows/weekly.yml index 0e34792a2..dde87c401 100644 --- a/.github/workflows/weekly.yml +++ b/.github/workflows/weekly.yml @@ -2,6 +2,10 @@ on: schedule: - cron: '0 0 * * 5' workflow_dispatch: + inputs: + targets: + required: false + description: 'check these space or comma separated targets, supports wildcard *' name: Check @@ -11,20 +15,10 @@ env: jobs: weekly: - name: Check All Targets - No Cache - timeout-minutes: 1440 - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - - uses: ./.github/actions/setup-rust - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 - - name: Build xtask - run: cargo build -p xtask - - name: Build Docker image - id: build-docker-image - run: cargo xtask build-docker-image -v --no-cache --no-output --from-ci --no-fastfail --tag weekly - timeout-minutes: 1440 + uses: ./.github/workflows/ci.yml + with: + matrix-args: --weekly + checkout-ref: ${{ github.ref }} wiki: name: Ensure wiki is valid runs-on: ubuntu-latest diff --git a/.gitignore b/.gitignore index e333188d5..9b330119f 100644 --- a/.gitignore +++ b/.gitignore @@ -2,4 +2,40 @@ **/.idea/ **/.vscode/*.* **/*.log -/cargo-timing*.html \ No newline at end of file +/cargo-timing*.html +CHANGELOG.md.draft + +# python stuff +__pycache__/ +.pytest_cache/ +*.py[cod] +*$py.class +*.egg-info/ +*.egg +.tox + +#--------------------------------------------------# +# The following was generated with gitignore.nvim: # +#--------------------------------------------------# +# Gitignore for the following technologies: Vim + +# Swap +[._]*.s[a-v][a-z] +!*.svg # comment out if you don't need vector files +[._]*.sw[a-p] +[._]s[a-rt-v][a-z] +[._]ss[a-gi-z] +[._]sw[a-p] + +# Session +Session.vim +Sessionx.vim + +# Temporary +.netrwhist +*~ +# Auto-generated tag files +tags +# Persistent undo +[._]*.un~ + diff --git a/CHANGELOG.md b/CHANGELOG.md index 55b47a683..27d3b9d7a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,16 +1,24 @@ # Change Log -All notable changes to this project will be documented in this file. +All notable changes to this project will be documented in this file. This is an automatically-generated document: entries are added via changesets present in the `.changes` directory. This project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] - ReleaseDate -## [v0.2.4] - 2022-07-10 +## [v0.2.5] - 2023-02-04 ## Fixed +- #962 - fix SELinux labels to allow use in multiple containers and/or the host filesystem. +- #1166 - freebsd: include memstat in build image to fix build with libc 0.2.138 and up. 
+- #1183 - resolve issue when using `pre-build` in `Cargo.toml` + +## [v0.2.4] - 2022-07-10 + +### Fixed + - #930 - fix any parsing of 1-character subcommands - #929 - Fixed issue where `--verbose` would not output data when it should @@ -135,7 +143,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). - #502 - fix ci: bump openssl version in freebsd - #489 - Add support for more hosts and simplify/unify host support checks - #477 - Fix Docker/Podman links in README -- #476 - Use Rustlang mirror for Sabotage linux tarbals +- #476 - Use Rustlang mirror for Sabotage linux tarballs - Bump nix dependency to `0.22.1` - Bump musl version to 1.1.24. @@ -160,7 +168,7 @@ This project adheres to [Semantic Versioning](http://semver.org/). ## [v0.1.15] - 2019-09-04 -- Images are now hosted at https://hub.docker.com/r/rustembedded/cross. +- Images are now hosted at . - Bump OpenSSL version to 1.0.2p. - Bump musl version to 1.1.20. - Bump Ubuntu to 18.04 to all musl targets. @@ -364,7 +372,9 @@ This project adheres to [Semantic Versioning](http://semver.org/). -[Unreleased]: https://github.com/cross-rs/cross/compare/v0.2.4...HEAD +[Unreleased]: https://github.com/cross-rs/cross/compare/v0.2.5...HEAD + +[v0.2.5]: https://github.com/cross-rs/cross/compare/v0.2.4...v0.2.5 [v0.2.4]: https://github.com/cross-rs/cross/compare/v0.2.3...v0.2.4 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 000000000..ab1bdbd00 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,12 @@ +Thank you for looking to contribute to cross. Have a new feature you'd like to add? Know how to fix an open bug? Want to add an image for a new target? We host documentation for how to contribute on our [wiki](https://github.com/cross-rs/cross/wiki/Contributing). + +Please read our [code of conduct](https://github.com/cross-rs/cross/blob/main/CODE_OF_CONDUCT.md) so our community stays positive and welcoming. If you have any additional questions, please feel free to ask in either our [discussions](https://github.com/cross-rs/cross/discussions) or our [Matrix room](https://matrix.to/#/#cross-rs:matrix.org). 
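One note on the post-action change at the top of this section: as written, `console.log(`POST=true >> $GITHUB_STATE`)` only prints that literal string to the job log. The documented replacement for the deprecated `::save-state` workflow command is to append a `name=value` line to the file named by `$GITHUB_STATE` (in Node, `fs.appendFileSync(process.env.GITHUB_STATE, 'POST=true\n')`). A minimal sketch of the same protocol, in Rust for consistency with the other examples here:

```rust
use std::{env, fs::OpenOptions, io::Write};

/// Append one `name=value` pair to the $GITHUB_STATE file, the
/// replacement for the deprecated `::save-state` workflow command.
fn save_state(name: &str, value: &str) -> std::io::Result<()> {
    let path = env::var("GITHUB_STATE").expect("GITHUB_STATE is set by the Actions runner");
    let mut file = OpenOptions::new().create(true).append(true).open(path)?;
    writeln!(file, "{name}={value}")
}

fn main() -> std::io::Result<()> {
    save_state("POST", "true")
}
```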
+ + diff --git a/Cargo.lock b/Cargo.lock index 0ca6781d1..521da2755 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,9 +4,9 @@ version = 3 [[package]] name = "addr2line" -version = "0.17.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9ecd88a8c8378ca913a680cd98f0f13ac67383d35993f86c90a70e3f137816b" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" dependencies = [ "gimli", ] @@ -19,35 +19,98 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "aho-corasick" -version = "0.7.18" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] +[[package]] +name = "android-tzdata" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anstream" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" + +[[package]] +name = "anstyle-parse" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +dependencies = [ + "anstyle", + "windows-sys", +] + [[package]] name = "atty" version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" dependencies = [ - "hermit-abi", + "hermit-abi 0.1.19", "libc", "winapi", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backtrace" -version = "0.3.66" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cab84319d616cfb654d03394f38ab7e6f0919e181b1b57e1fd15e7fb4077d9a7" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -64,11 +127,33 @@ version = "1.3.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" +[[package]] +name = "bitflags" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" + +[[package]] +name = "bstr" +version = "1.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" +dependencies = [ + "memchr", + "serde", +] + +[[package]] +name = "bumpalo" +version = "3.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" + [[package]] name = "cc" -version = "1.0.73" +version = "1.0.96" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" +checksum = "065a29261d53ba54260972629f9ca6bffa69bac13cd1fed61420f7fa68b9f8bd" [[package]] name = "cfg-if" @@ -76,31 +161,49 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "chrono" +version = "0.4.38" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" +dependencies = [ + "android-tzdata", + "iana-time-zone", + "js-sys", + "num-traits", + "wasm-bindgen", + "windows-targets", +] + [[package]] name = "clap" -version = "3.2.8" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190814073e85d238f31ff738fcb0bf6910cedeb73376c87cd69291028966fd83" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ - "atty", - "bitflags", + "clap_builder", "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" +dependencies = [ + "anstream", + "anstyle", "clap_lex", - "indexmap", - "once_cell", "strsim", - "termcolor", - "textwrap", ] [[package]] name = "clap_derive" -version = "3.2.7" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759bf187376e1afa7b85b959e6a664a3e7a95203415dba952ad19139e798f902" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ "heck", - "proc-macro-error", "proc-macro2", "quote", "syn", @@ -108,18 +211,15 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.4" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2850f2f5a82cbf437dd5af4d49848fbdfc27c157c3d010345776f952765261c5" -dependencies = [ - "os_str_bytes", -] +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "color-eyre" -version = "0.6.1" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ebf286c900a6d5867aeff75cfee3192857bb7f24b547d4f0df2ed6baa812c90" +checksum = "55146f5e46f237f7423d74111267d4597b59b0dad0ffaf7303bce9945d843ad5" dependencies = [ "backtrace", "color-spantrace", @@ -132,9 +232,9 @@ dependencies = [ [[package]] name = "color-spantrace" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" +checksum = "cd6be1b2a7e382e2b98b43b2adcca6bb0e465af0bdd38123873ae61eb17a72c2" dependencies = [ "once_cell", "owo-colors", @@ -142,36 +242,51 @@ dependencies = [ "tracing-error", ] +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + [[package]] name = "const-sha1" version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb58b6451e8c2a812ad979ed1d83378caa5e927eef2622017a45f251457c2c9d" +[[package]] +name = "core-foundation-sys" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" + [[package]] name = "cross" -version = "0.2.4" +version = "0.2.5" dependencies = [ - "atty", "clap", "color-eyre", "const-sha1", - "ctrlc", "directories", "dunce", "eyre", "home", + "ignore", + "is-terminal", + "is_ci", "libc", "nix", "once_cell", "owo-colors", "regex", "rustc_version", + "semver", "serde", "serde_ignored", "serde_json", "shell-escape", "shell-words", + "signal-hook", "tempfile", "thiserror", "toml", @@ -181,15 +296,30 @@ dependencies = [ ] [[package]] -name = "ctrlc" -version = "3.2.2" +name = "crossbeam-deque" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37feaa84e6861e00a1f5e5aa8da3ee56d605c9992d33e082786754828e20865" +checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "nix", - "winapi", + "crossbeam-epoch", + "crossbeam-utils", ] +[[package]] +name = "crossbeam-epoch" +version = "0.9.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" + [[package]] name = "directories" version = "4.0.1" @@ -212,21 +342,37 @@ dependencies = [ [[package]] name = "dunce" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "453440c271cf5577fd2a40e4942540cb7d0d2f85e27c8d07dd0023c925a67541" +checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" [[package]] name = "either" -version = "1.7.0" +version = "1.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "errno" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f107b87b6afc2a64fd13cac55fe06d6c8859f12d4b14cbcdd2c67d0976781be" +checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +dependencies = [ + "libc", + "windows-sys", +] [[package]] name = "eyre" -version = "0.6.8" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +checksum = 
"7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" dependencies = [ "indenter", "once_cell", @@ -234,18 +380,15 @@ dependencies = [ [[package]] name = "fastrand" -version = "1.7.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fcf0cee53519c866c09b5de1f6c56ff9d647101f81c1964fa632e148896cdf" -dependencies = [ - "instant", -] +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "getrandom" -version = "0.2.7" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -254,21 +397,34 @@ dependencies = [ [[package]] name = "gimli" -version = "0.26.1" +version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78cc372d058dcf6d5ecd98510e7fbc9e5aec4d21de70f65fea8fecebcd881bd4" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "globset" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57da3b9b5b85bd66f31093f8c408b90a74431672542466497dcbdfdc02034be1" +dependencies = [ + "aho-corasick", + "bstr", + "log", + "regex-automata", + "regex-syntax", +] [[package]] name = "hashbrown" -version = "0.12.2" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "607c8a29735385251a339424dd462993c0fed8fa09d378f259377df08c126022" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" [[package]] name = "heck" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" @@ -279,13 +435,58 @@ dependencies = [ "libc", ] +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + [[package]] name = "home" -version = "0.5.3" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2456aef2e6b6a9784192ae780c0f15bc57df0e918585282325e8c8ac27737654" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "winapi", + "windows-sys", +] + +[[package]] +name = "iana-time-zone" +version = "0.1.60" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "ignore" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b46810df39e66e925525d6e38ce1e7f6e1d208f72dc39757880fcb66e2c58af1" +dependencies = [ + "crossbeam-deque", + "globset", + "log", + "memchr", + "regex-automata", + "same-file", + "walkdir", + "winapi-util", ] 
[[package]] @@ -296,34 +497,45 @@ checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" [[package]] name = "indexmap" -version = "1.9.1" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ - "autocfg", + "equivalent", "hashbrown", ] [[package]] -name = "instant" -version = "0.1.12" +name = "is-terminal" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" dependencies = [ - "cfg-if", + "hermit-abi 0.3.9", + "libc", + "windows-sys", ] [[package]] name = "is_ci" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "616cde7c720bb2bb5824a224687d8f77bfd38922027f01d825cd7453be5099fb" +checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "js-sys" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +dependencies = [ + "wasm-bindgen", +] [[package]] name = "lazy_static" @@ -333,171 +545,160 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.126" +version = "0.2.154" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + +[[package]] +name = "linux-raw-sys" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] -name = "linked-hash-map" -version = "0.5.6" +name = "log" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "memchr" -version = "2.5.0" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "miniz_oxide" -version = "0.5.3" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" dependencies = [ "adler", ] [[package]] name = "nix" -version = "0.24.1" +version = "0.26.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f17df307904acd05aa8e32e97bb20f2a0df1728bbc2d771ae8f9a90463441e9" +checksum = "598beaf3cc6fdd9a5dfb1630c2800c7acd31df7aaf0f565796fba2b53ca1af1b" dependencies = [ - "bitflags", + "bitflags 1.3.2", "cfg-if", "libc", ] [[package]] -name = "object" -version = "0.29.0" +name = "num-traits" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21158b2c33aa6d4561f1c0a6ea283ca92bc54802a93b263e910746d679a7eb53" +checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ - "memchr", + "autocfg", ] [[package]] -name = "once_cell" -version = "1.13.0" +name = "object" +version = "0.32.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] [[package]] -name = "os_str_bytes" -version = "6.1.0" +name = "once_cell" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "owo-colors" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "decf7381921fea4dcb2549c5667eda59b3ec297ab7e2b5fc33eac69d2e7da87b" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" dependencies = [ "supports-color", ] [[package]] name = "pin-project-lite" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" - -[[package]] -name = "proc-macro-error" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" -dependencies = [ - "proc-macro-error-attr", - "proc-macro2", - "quote", - "syn", - "version_check", -] - -[[package]] -name = "proc-macro-error-attr" -version = "1.0.4" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" -dependencies = [ - "proc-macro2", - "quote", - "version_check", -] +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "proc-macro2" -version = "1.0.40" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd96a1e8ed2596c337f8eae5f24924ec83f5ad5ab21ea8e455d3566c69fbcaf7" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] [[package]] name = "quote" -version = "1.0.20" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] -[[package]] -name = "redox_syscall" -version = "0.2.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" -dependencies = [ - "bitflags", -] - [[package]] name = "redox_users" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", - "redox_syscall", + "libredox", "thiserror", ] [[package]] name = "regex" -version = "1.6.0" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", + "regex-automata", "regex-syntax", ] [[package]] -name = "regex-syntax" -version = "0.6.27" +name = "regex-automata" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] [[package]] -name = "remove_dir_all" -version = "0.5.3" +name = "regex-syntax" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "rustc-demangle" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ef03e0a2b150c7a90d01faf6254c9c48a41e95fb2a8c2ac1c6f0d2b9aefc342" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" [[package]] name = "rustc_version" @@ -508,11 +709,24 @@ dependencies = [ "semver", ] +[[package]] +name = "rustix" +version = "0.38.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +dependencies = [ + "bitflags 2.5.0", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -525,24 +739,24 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.12" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2333e6df6d6598f2b1974829f853c2b4c5f4a6e503c10af918081aa6f8564e1" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "serde" -version = "1.0.138" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1578c6245786b9d168c5447eeacfb96856573ca56c9d68fdcf394be134882a47" +checksum = "0c9f6e76df036c77cd94996771fb40db98187f096dd0b9af39c6c6e452ba966a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.138" +version = "1.0.199" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "023e9b1467aef8a10fb88f25611870ada9800ef7e22afce356bb0d2387b6f27c" +checksum = "11bd257a6541e141e42ca6d24ae26f7714887b47e89aa739099104c7e4d3b7fc" dependencies = [ "proc-macro2", "quote", @@ -551,18 +765,18 @@ dependencies = [ [[package]] name = "serde_ignored" -version = "0.1.3" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1940036ca2411651a40012009d062087dfe62817b2191a03750fb569e11fa633" +checksum = "a8e319a36d1b52126a0d608f24e93b2d81297091818cd70625fcf50a15d84ddf" dependencies = [ "serde", ] [[package]] name = "serde_json" -version = "1.0.82" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82c2c1fdcd807d1098552c5b9a36e425e42e9fbd7c6a37a8425f390f781f7fa7" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -570,22 +784,19 @@ dependencies = [ ] [[package]] -name = "serde_yaml" -version = "0.8.25" +name = "serde_spanned" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec0091e1f5aa338283ce049bd9dfefd55e1f168ac233e85c1ffe0038fb48cbe" +checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" dependencies = [ - "indexmap", - "ryu", "serde", - "yaml-rust", ] [[package]] name = "sharded-slab" -version = "0.1.4" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" dependencies = [ "lazy_static", ] @@ -602,17 +813,36 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" +[[package]] +name = "signal-hook" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +dependencies = [ + "libc", + "signal-hook-registry", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +dependencies = [ + "libc", +] + [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "supports-color" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4872ced36b91d47bae8a214a683fe54e7078875b399dfa251df346c9b547d1f9" +checksum = "8ba6faf2ca7ee42fdd458f4347ae0a9bd6bcc445ad7cb57ad82b383f18870d6f" dependencies = [ "atty", "is_ci", @@ -620,9 +850,9 @@ dependencies = [ [[package]] name = "syn" -version = "1.0.98" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", @@ -631,47 +861,30 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.3.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", + "rustix", + "windows-sys", ] -[[package]] -name = "termcolor" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755" -dependencies = [ - "winapi-util", -] - -[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", @@ -680,38 +893,63 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.4" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ + "cfg-if", "once_cell", ] [[package]] name = "toml" -version = "0.5.9" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ + "indexmap", "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "cfg-if", "pin-project-lite", "tracing-core", ] [[package]] name = "tracing-core" -version = "0.1.28" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7358be39f2f274f322d2aaed611acc57f382e8eb1e5b48cb9ae30933495ce7" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" dependencies = [ "once_cell", "valuable", @@ -729,9 +967,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.14" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a713421342a5a666b7577783721d3117f1b69a393df803ee17bb73b1e122a59" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "sharded-slab", "thread_local", @@ -740,30 +978,29 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] -name = "valuable" -version = "0.1.0" +name = "utf8parse" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] -name = "version_check" -version = "0.9.4" +name = "valuable" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" [[package]] name = "walkdir" -version = "2.3.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "808cf2735cd4b6866113f648b791c6adc5714537bc222d9347bb203386ffda56" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", - "winapi", "winapi-util", ] @@ -773,17 +1010,78 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasm-bindgen" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +dependencies = [ + "bumpalo", + "log", + "once_cell", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.92" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" + [[package]] name = "which" -version = "4.2.5" +version = "4.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4fb54e6113b6a8772ee41c3404fb0301ac79604489467e0a9ce1f3e97c24ae" +checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" dependencies = [ "either", - "lazy_static", - "libc", + "home", + "once_cell", + "rustix", ] +[[package]] +name = "wildmatch" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "939e59c1bc731542357fdaad98b209ef78c8743d652bb61439d16b16a79eb025" + [[package]] name = "winapi" version = "0.3.9" @@ -802,11 +1100,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.5" +version = "0.1.8" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys", ] [[package]] @@ -815,10 +1113,102 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-core" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" + +[[package]] +name = "winnow" +version = "0.5.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" +dependencies = [ + "memchr", +] + [[package]] name = "xtask" version = "0.0.0-dev.0" dependencies = [ + "chrono", "clap", "color-eyre", "cross", @@ -827,16 +1217,9 @@ dependencies = [ "semver", "serde", "serde_json", - "serde_yaml", + "shell-words", + "toml", "walkdir", "which", -] - -[[package]] 
-name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", + "wildmatch", ] diff --git a/Cargo.toml b/Cargo.toml index 10eeeb09e..e8a10ae4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,12 +6,10 @@ keywords = ["cross", "compilation", "testing", "tool"] license = "MIT OR Apache-2.0" name = "cross" repository = "https://github.com/cross-rs/cross" -version = "0.2.4" +version = "0.2.5" edition = "2021" include = [ "src/**/*", - "docker/Dockerfile.*", - "docker/*.sh", "docs/*.md", "Cargo.toml", "Cargo.lock", @@ -19,6 +17,7 @@ include = [ "README.md", "assets/*", ] +rust-version = "1.77.2" [features] default = [] @@ -28,55 +27,53 @@ dev = [] members = ["xtask"] [dependencies] -atty = "0.2" -clap = { version = "3.2.2", features = ["derive", "unstable-v4"] } -color-eyre = { version = "0.6", default-features = false } -eyre = "0.6" -thiserror = "1" -home = "0.5" -rustc_version = "0.4" -toml = "0.5" -which = { version = "4", default_features = false } -shell-escape = "0.1" -serde = { version = "1", features = ["derive"] } -serde_json = "1" -serde_ignored = "0.1.2" +is-terminal = "0.4.2" +clap = { version = "4.1.4", features = ["derive"] } +color-eyre = { version = "0.6.2", default-features = false, features = ["track-caller"] } +eyre = "0.6.8" +thiserror = "1.0.38" +home = "0.5.4" +rustc_version = "0.4.0" +toml = "0.7.0" +which = { version = "4.4.0", default_features = false } +shell-escape = "0.1.5" +serde = { version = "1.0.152", features = ["derive"] } +serde_json = { version = "1.0.91", features = ["raw_value"] } +serde_ignored = "0.1.7" shell-words = "1.1.0" const-sha1 = "0.2.0" -ctrlc = { version = "3.2.2", features = ["termination"] } +signal-hook = { version = "0.3.15" } directories = "4.0.1" -walkdir = { version = "2", optional = true } +walkdir = { version = "2.3.2", optional = true } tempfile = "3.3.0" -owo-colors = { version = "3.4.0", features = ["supports-colors"] } +owo-colors = { version = "3.5.0", features = ["supports-colors"] } +semver = "1.0.16" +is_ci = "1.1.1" [target.'cfg(not(windows))'.dependencies] -nix = { version = "0.24", default-features = false, features = ["user"] } -libc = "0.2" +nix = { version = "0.26.2", default-features = false, features = ["user"] } +libc = "0.2.139" [target.'cfg(windows)'.dependencies] -winapi = { version = "0.3", features = ["winbase"] } -dunce = "1" +winapi = { version = "0.3.9", features = ["winbase"] } +dunce = "1.0.3" [profile.release] lto = true [dev-dependencies] -regex = "1" -once_cell = "1" -walkdir = "2" +regex = "1.7.1" +once_cell = "1.17.0" +ignore = "0.4.20" [package.metadata.release] -dev-version = false push = false publish = false tag = false +consolidate-commits = false +pre-release-hook = ["cargo", "xtask", "changelog", "build", "--release", "{{version}}"] pre-release-commit-message = "release version {{version}}" -[[package.metadata.release.pre-release-replacements]] -file = "CHANGELOG.md" -search = "Unreleased" -replace = "v{{version}}" - [[package.metadata.release.pre-release-replacements]] file = "CHANGELOG.md" search = "\\.\\.\\.HEAD" @@ -85,19 +82,14 @@ exactly = 1 [[package.metadata.release.pre-release-replacements]] file = "CHANGELOG.md" -search = "ReleaseDate" -replace = "{{date}}" - -[[package.metadata.release.pre-release-replacements]] -file = "CHANGELOG.md" -search = "" -replace = "\n\n## [Unreleased] - ReleaseDate" +search = "" +replace = 
"\n\n[Unreleased]: https://github.com/cross-rs/{{crate_name}}/compare/v{{version}}...HEAD" exactly = 1 [[package.metadata.release.pre-release-replacements]] -file = "CHANGELOG.md" -search = "" -replace = "\n\n[Unreleased]: https://github.com/cross-rs/{{crate_name}}/compare/v{{version}}...HEAD" +file = "docs/config_file.md" +search = "(# Translates to `.*?:).*?(-centos`)" +replace = "${1}{{version}}$2" exactly = 1 [package.metadata.binstall] diff --git a/README.md b/README.md index b84717f2a..6d0bb7298 100644 --- a/README.md +++ b/README.md @@ -36,27 +36,28 @@ New contributors are welcome! Please join our [Matrix room] and say hi. ## Dependencies -- [rustup](https://rustup.rs/) +See our [Getting Started](./docs/getting-started.md) guide for detailed +installation instructions. +- [rustup](https://rustup.rs/) - A Linux kernel with [binfmt_misc] support is required for cross testing. -[binfmt_misc]: https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html - One of these container engines is required. If both are installed, `cross` will default to `docker`. -- [Docker]. Note that on Linux non-sudo users need to be in the `docker` group. - Read the official [post-installation steps][post]. Requires version 1.24 or later. - -[post]: https://docs.docker.com/install/linux/linux-postinstall/ - -- [Podman]. Requires version 1.6.3 or later. +- [Docker]. Note that on Linux non-sudo users need to be in the `docker` group or use rootless docker. + Read the container engine [install guide][docker_install] for the required installation and post-installation steps. Requires version 20.10 (API 1.40) or later. +- [Podman]. Requires version 3.4.0 or later. ## Installation +```sh +cargo install cross --git https://github.com/cross-rs/cross ``` -$ cargo install cross --git https://github.com/cross-rs/cross -``` + +It's also possible to directly download the pre-compiled [release +binaries](https://github.com/cross-rs/cross/releases) or use +[cargo-binstall](https://github.com/cargo-bins/cargo-binstall). ## Usage @@ -80,98 +81,52 @@ $ cross test --target mips64-unknown-linux-gnuabi64 $ cross rustc --target powerpc-unknown-linux-gnu --release -- -C lto ``` -Additional documentation can be found on the [wiki](https://github.com/cross-rs/cross/wiki). +Additional documentation can be found on the +[wiki](https://github.com/cross-rs/cross/wiki) or the `docs/` subfolder. ## Configuration -You have three options to configure `cross`. All of these options use the TOML format for configuration and the possible configuration values are documented [here](docs/cross_toml.md). - -### Option 1: Configuring `cross` directly in your `Cargo.toml` - -You can directly set [configuration values](docs/cross_toml.md) in your `Cargo.toml` file, under the `[package.metadata.cross]` table, i.e. key prefix. -An example config snippet would look like this: - -```toml,cargo -[package.metadata.cross.target.aarch64-unknown-linux-gnu] -xargo = false -image = "test-image" -runner = "custom-runner" -``` - -### Option 2: Configuring `cross` via a `Cross.toml` file - -You can put your [configuration](docs/cross_toml.md) inside a `Cross.toml` file in your project root directory. - -### Option 3: Using `CROSS_CONFIG` to specify the location of your configuration +### Configuring cross behavior -By setting the `CROSS_CONFIG` environment variable, you can tell `cross` where it should search for the config file. This way you are not limited to a `Cross.toml` file in the project root. +You have four options to configure `cross`. 
All of these options use the TOML +format for configuration and the possible configuration values are documented +[here][config_file]. -### Custom Docker images +#### Option 1: Configuring `cross` directly in your `Cargo.toml` -`cross` provides default Docker images for the targets listed below. However, it -can't cover every single use case out there. For other targets, or when the -default image is not enough, you can use the `target.{{TARGET}}.image` field in -`Cross.toml` to use custom Docker image for a specific target: +You can directly set [configuration values][config_file] in your `Cargo.toml` +file, under the `[workspace.metadata.cross]` table, i.e. key prefix. An example +config snippet would look like this: -```toml -[target.aarch64-unknown-linux-gnu] -image = "my/image:tag" -``` - -In the example above, `cross` will use a image named `my/image:tag` instead of -the default one. Normal Docker behavior applies, so: - -- Docker will first look for a local image named `my/image:tag` - -- If it doesn't find a local image, then it will look in Docker Hub. - -- If only `image:tag` is specified, then Docker won't look in Docker Hub. - -- If only `tag` is omitted, then Docker will use the `latest` tag. - -#### Dockerfiles - -If you're using a custom Dockerfile, you can use `target.{{TARGET}}.dockerfile` to automatically build it - -```toml -[target.aarch64-unknown-linux-gnu] -dockerfile = "./path/to/where/the/Dockerfile/resides" +```toml,cargo +[workspace.metadata.cross.target.aarch64-unknown-linux-gnu] +# Install libssl-dev:arm64, see +pre-build = [ + "dpkg --add-architecture $CROSS_DEB_ARCH", + "apt-get update && apt-get --assume-yes install libssl-dev:$CROSS_DEB_ARCH" +] +[workspace.metadata.cross.target.armv7-unknown-linux-gnueabi] +image = "my/image:latest" +[workspace.metadata.cross.build] +env.volumes = ["A_DIRECTORY=/path/to/volume"] ``` -`cross` will build and use the image that was built instead of the default image. - -It's recommended to base your custom image on the default Docker image that -cross uses: `ghcr.io/cross-rs/{{TARGET}}:{{VERSION}}` (where `{{VERSION}}` is cross's version). -This way you won't have to figure out how to install a cross C toolchain in your -custom image. - - -``` Dockerfile -FROM ghcr.io/cross-rs/aarch64-unknown-linux-gnu:latest - -RUN dpkg --add-architecture arm64 && \ - apt-get update && \ - apt-get install --assume-yes libfoo:arm64 -``` +#### Option 2: Configuring `cross` via a `Cross.toml` file -If you want cross to provide the `FROM` instruction, you can do the following +You can put your [configuration][config_file] inside a `Cross.toml` file +in your project root directory. -``` Dockerfile -ARG CROSS_BASE_IMAGE -FROM $CROSS_BASE_IMAGE +#### Option 3: Using `CROSS_CONFIG` to specify the location of your configuration -RUN ... -``` +By setting the `CROSS_CONFIG` environment variable, you can tell `cross` where +it should search for the config file. This way you are not limited to a +`Cross.toml` file in the project root. -#### Pre-build hook +#### Option 4: Configuring `cross` through environment variables -`cross` enables you to add dependencies and run other necessary commands in the image before using it. -This action will be added to the used image, so it won't be ran/built every time you use `cross`. +Besides the TOML-based configuration files, config can be passed through +[environment variables][docs_env_vars], too. 
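+
+For example, a single target's image can be overridden straight from the
+environment (a minimal sketch: the image name below is hypothetical, and the
+`CROSS_TARGET_*_IMAGE` variable follows the same pattern our own CI scripts
+use):
+
+```sh
+# assumes `my/image:latest` is reachable by your container engine
+CROSS_TARGET_AARCH64_UNKNOWN_LINUX_GNU_IMAGE=my/image:latest \
+    cross build --target aarch64-unknown-linux-gnu
+```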
-```toml
-[target.x86_64-unknown-linux-gnu]
-pre-build = ["dpkg --add-architecture arm64 && apt-get update && apt-get install --assume-yes libfoo:arm64"]
-```
 
 ### Docker in Docker
 
@@ -187,11 +142,12 @@ $ docker run -v /var/run/docker.sock:/var/run/docker.sock -v .:/project \
 The image running `cross` requires the rust development tools to be installed.
 
 With this setup `cross` must find and mount the correct host paths into the
-container used for cross compilation. This includes the original project directory as
-well as the root path of the parent container to give access to the rust build
-tools.
+container used for cross compilation. This includes the original project
+directory as well as the root path of the parent container to give access to
+the rust build tools.
 
-To inform `cross` that it is running inside a container set `CROSS_CONTAINER_IN_CONTAINER=true`.
+To inform `cross` that it is running inside a container, set
+`CROSS_CONTAINER_IN_CONTAINER=true`.
 
 A development or CI container can be created like this:
 
@@ -225,79 +181,6 @@ environment variable. For example in case you want use [Podman], you can set
 `CROSS_CONTAINER_ENGINE=podman`.
 
-### Passing environment variables into the build environment
-
-By default, `cross` does not pass any environment variables into the build
-environment from the calling shell. This is chosen as a safe default as most use
-cases will not want the calling environment leaking into the inner execution
-environment.
-
-In the instances that you do want to pass through environment variables, this
-can be done via `build.env.passthrough` in your `Cross.toml`:
-
-```toml
-[build.env]
-passthrough = [
-    "RUST_BACKTRACE",
-    "RUST_LOG",
-    "TRAVIS",
-]
-```
-
-To pass variables through for one target but not others, you can use
-this syntax instead:
-
-```toml
-[target.aarch64-unknown-linux-gnu.env]
-passthrough = [
-    "RUST_DEBUG",
-]
-```
-
-### Unstable Features
-
-Certain unstable features can enable additional functionality useful to
-cross-compiling. Note that these are unstable, and may be removed at any
-time (particularly if the feature is stabilized or removed), and will
-only be used on a nightly channel.
-
-- `CROSS_UNSTABLE_ENABLE_DOCTESTS=true`: also run doctests.
-
-### Mounting volumes into the build environment
-
-In addition to passing environment variables, you can also specify environment
-variables pointing to paths which should be mounted into the container:
-
-```toml
-[target.aarch64-unknown-linux-gnu.env]
-volumes = [
-    "BUILD_DIR",
-]
-```
-
-### Use Xargo instead of Cargo
-
-By default, `cross` uses `xargo` to build your Cargo project only for all
-non-standard targets (i.e. something not reported by rustc/rustup). However,
-you can use the `build.xargo` or `target.{{TARGET}}.xargo` field in
-`Cross.toml` to force the use of `xargo`:
-
-```toml
-# all the targets will use `xargo`
-[build]
-xargo = true
-```
-
-Or,
-
-```toml
-# only this target will use `xargo`
-[target.aarch64-unknown-linux-gnu]
-xargo = true
-```
-
-`xargo = false` will work the opposite way (pick cargo always) and is useful
-when building for custom targets that you know to work with cargo.
 
 ## Supported targets
 
@@ -317,86 +200,100 @@ QEMU gets upset when you spawn multiple threads. This means that, if one of
 your unit tests spawns threads, then it's more likely to fail or, worse, never
 terminate.
 
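+
+If a test suite hangs under QEMU for this reason, forcing the tests onto a
+single thread is a common workaround (a sketch; `RUST_TEST_THREADS` is the
+standard libtest knob, and depending on your configuration it may need to be
+passed through to the container):
+
+```sh
+RUST_TEST_THREADS=1 cross test --target aarch64-unknown-linux-gnu
+```
+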
-| Target | libc | GCC | C++ | QEMU | `test` | -|--------------------------------------|-------:|--------:|:---:|------:|:------:| -| `aarch64-linux-android` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | -| `aarch64-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `aarch64-unknown-linux-musl` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `arm-linux-androideabi` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | -| `arm-unknown-linux-gnueabi` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `arm-unknown-linux-gnueabihf` | 2.17 | 8.3.0 | ✓ | 6.1.0 | ✓ | -| `arm-unknown-linux-musleabi` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `arm-unknown-linux-musleabihf` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `armv5te-unknown-linux-gnueabi` | 2.27 | 7.5.0 | ✓ | 6.1.0 | ✓ | -| `armv5te-unknown-linux-musleabi` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `armv7-linux-androideabi` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | -| `armv7-unknown-linux-gnueabi` | 2.27 | 7.5.0 | ✓ | 6.1.0 | ✓ | -| `armv7-unknown-linux-gnueabihf` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `armv7-unknown-linux-musleabi` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `armv7-unknown-linux-musleabihf` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `i586-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | N/A | ✓ | -| `i586-unknown-linux-musl` | 1.1.24 | 9.2.0 | ✓ | N/A | ✓ | -| `i686-unknown-freebsd` | 1.5 | 6.4.0 | ✓ | N/A | | -| `i686-linux-android` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | -| `i686-pc-windows-gnu` | N/A | 7.5 | ✓ | N/A | ✓ | -| `i686-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `i686-unknown-linux-musl` | 1.1.24 | 9.2.0 | ✓ | N/A | ✓ | -| `mips-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `mips-unknown-linux-musl` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `mips64-unknown-linux-gnuabi64` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `mips64-unknown-linux-muslabi64` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `mips64el-unknown-linux-gnuabi64` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `mips64el-unknown-linux-muslabi64` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `mipsel-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `mipsel-unknown-linux-musl` | 1.1.24 | 9.2.0 | ✓ | 6.1.0 | ✓ | -| `powerpc-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `powerpc64-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `powerpc64le-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `riscv64gc-unknown-linux-gnu` | 2.27 | 7.5.0 | ✓ | 6.1.0 | ✓ | -| `s390x-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `sparc64-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `sparcv9-sun-solaris` | 1.22.7 | 8.4.0 | ✓ | N/A | | -| `thumbv6m-none-eabi` [4] | 2.2.0 | 4.9.3 | | N/A | | -| `thumbv7em-none-eabi` [4] | 2.2.0 | 4.9.3 | | N/A | | -| `thumbv7em-none-eabihf` [4] | 2.2.0 | 4.9.3 | | N/A | | -| `thumbv7m-none-eabi` [4] | 2.2.0 | 4.9.3 | | N/A | | -| `thumbv7neon-linux-androideabi` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | -| `thumbv7neon-unknown-linux-gnueabihf`| 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `wasm32-unknown-emscripten` [6] | 3.1.14 | 15.0.0 | ✓ | N/A | ✓ | -| `x86_64-linux-android` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | -| `x86_64-pc-windows-gnu` | N/A | 7.3 | ✓ | N/A | ✓ | -| `x86_64-sun-solaris` | 1.22.7 | 8.4.0 | ✓ | N/A | | -| `x86_64-unknown-freebsd` | 1.5 | 6.4.0 | ✓ | N/A | | -| `x86_64-unknown-dragonfly` [2] [3] | 6.0.1 | 5.3.0 | ✓ | N/A | | -| `x86_64-unknown-illumos` | 1.20.4 | 8.4.0 | ✓ | N/A | | -| `x86_64-unknown-linux-gnu` | 2.23 | 5.4.0 | ✓ | 5.1.0 | ✓ | -| `x86_64-unknown-linux-gnu:centos` [5] | 2.17 | 4.8.5 | ✓ | 4.2.1 | ✓ | -| `x86_64-unknown-linux-musl` | 1.1.24 | 9.2.0 | ✓ | N/A | ✓ | -| 
`x86_64-unknown-netbsd` [3] | 9.2.0 | 9.4.0 | ✓ | N/A | | +| Target | libc | GCC | C++ | QEMU | `test` | +|----------------------------------------|-------:|-------:|:---:|------:|:------:| +| `aarch64-linux-android` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | +| `aarch64-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `aarch64-unknown-linux-gnu:centos` [7] | 2.17 | 4.8.5 | | 4.2.1 | ✓ | +| `aarch64-unknown-linux-musl` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `arm-linux-androideabi` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | +| `arm-unknown-linux-gnueabi` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `arm-unknown-linux-gnueabihf` | 2.31 | 8.5.0 | ✓ | 6.1.0 | ✓ | +| `arm-unknown-linux-musleabi` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `arm-unknown-linux-musleabihf` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `armv5te-unknown-linux-gnueabi` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `armv5te-unknown-linux-musleabi` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `armv7-linux-androideabi` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | +| `armv7-unknown-linux-gnueabi` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `armv7-unknown-linux-gnueabihf` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `armv7-unknown-linux-musleabi` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `armv7-unknown-linux-musleabihf` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `i586-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | N/A | ✓ | +| `i586-unknown-linux-musl` | 1.2.3 | 9.2.0 | ✓ | N/A | ✓ | +| `i686-unknown-freebsd` | 1.5 | 6.4.0 | ✓ | N/A | | +| `i686-linux-android` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | +| `i686-pc-windows-gnu` | N/A | 9.4 | ✓ | N/A | ✓ | +| `i686-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `loongarch64-unknown-linux-gnu` | 2.36 | 13.2.0 | ✓ | 8.2.2 | ✓ | +| `mips-unknown-linux-gnu` | 2.30 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `mips-unknown-linux-musl` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `mips64-unknown-linux-gnuabi64` | 2.30 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `mips64-unknown-linux-muslabi64` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `mips64el-unknown-linux-gnuabi64` | 2.30 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `mips64el-unknown-linux-muslabi64` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `mipsel-unknown-linux-gnu` | 2.30 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `mipsel-unknown-linux-musl` | 1.2.3 | 9.2.0 | ✓ | 6.1.0 | ✓ | +| `powerpc-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `powerpc64-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `powerpc64le-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `riscv64gc-unknown-linux-gnu` | 2.35 | 11.4.0 | ✓ | 8.2.2 | ✓ | +| `s390x-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `sparc64-unknown-linux-gnu` | 2.31 | 9.4.0 | ✓ | 6.1.0 | ✓ | +| `sparcv9-sun-solaris` | 1.22.7 | 8.4.0 | ✓ | N/A | | +| `thumbv6m-none-eabi` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `thumbv7em-none-eabi` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `thumbv7em-none-eabihf` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `thumbv7m-none-eabi` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `thumbv7neon-linux-androideabi` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | +| `thumbv7neon-unknown-linux-gnueabihf` | 2.31 | 9.4.0 | ✓ | N/A | ✓ | +| `thumbv8m.base-none-eabi` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `thumbv8m.main-none-eabi` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `thumbv8m.main-none-eabihf` [4] | 3.3.0 | 9.2.1 | | N/A | | +| `wasm32-unknown-emscripten` [6] | 3.1.14 | 15.0.0 | ✓ | N/A | ✓ | +| `x86_64-linux-android` [1] | 9.0.8 | 9.0.8 | ✓ | 6.1.0 | ✓ | +| `x86_64-pc-windows-gnu` | N/A | 9.3 | ✓ | N/A | ✓ | +| `x86_64-pc-solaris` | 1.22.7 | 8.4.0 | ✓ | N/A | | +| `x86_64-unknown-freebsd` | 1.5 | 6.4.0 | ✓ | N/A | | +| `x86_64-unknown-dragonfly` [2] 
[3]  | 6.0.1  | 10.3.0 | ✓   | N/A   |        |
+| `x86_64-unknown-illumos`                | 1.20.4 | 8.4.0  | ✓   | N/A   |        |
+| `x86_64-unknown-linux-gnu`              | 2.31   | 9.4.0  | ✓   | 6.1.0 | ✓      |
+| `x86_64-unknown-linux-gnu:centos` [5]   | 2.17   | 4.8.5  | ✓   | 4.2.1 | ✓      |
+| `x86_64-unknown-linux-musl`             | 1.2.3  | 9.2.0  | ✓   | N/A   | ✓      |
+| `x86_64-unknown-netbsd` [3]             | 9.2.0  | 9.4.0  | ✓   | N/A   |        |
 
-[1] libc = bionic; Only works with native tests, that is, tests that do not depends on the
-    Android Runtime. For i686 some tests may fails with the error `assertion
-    failed: signal(libc::SIGPIPE, libc::SIG_IGN) != libc::SIG_ERR`, see
-    [issue #140](https://github.com/cross-rs/cross/issues/140) for more
-    information.
+[1] libc = bionic; Only works with native tests, that is, tests that do not
+    depend on the Android Runtime. For i686 some tests may fail with the
+    error `assertion failed: signal(libc::SIGPIPE, libc::SIG_IGN) !=
+    libc::SIG_ERR`, see [issue
+    #140](https://github.com/cross-rs/cross/issues/140) for more information.
 
 [2] No `std` component available.
 
-[3] For some \*BSD and Solaris targets, the libc column indicates the OS release version
-    from which libc was extracted.
+[3] For some \*BSD and Solaris targets, the libc column indicates the OS
+    release version from which libc was extracted.
 
 [4] libc = newlib
 
-[5] Must change `image = "x86_64-unknown-linux-gnu:main-centos"` in `Cross.toml` for `[target.x86_64-unknown-linux-gnu]` to use the CentOS7-compatible target.
+[5] Must change
+    `image = "ghcr.io/cross-rs/x86_64-unknown-linux-gnu:main-centos"` in
+    `Cross.toml` for `[target.x86_64-unknown-linux-gnu]` to use the
+    CentOS7-compatible target.
 
 [6] libc = emscripten and GCC = clang
 
+[7] Must change
+    `image = "ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main-centos"` in
+    `Cross.toml` for `[target.aarch64-unknown-linux-gnu]` to use the
+    CentOS7-compatible target.
+
 
-Additional Dockerfiles for other targets can be found in [cross-toolchains](https://github.com/cross-rs/cross-toolchains).
-These include MSVC and Apple Darwin targets, which we cannot ship pre-built images of.
+Additional Dockerfiles for other targets can be found in
+[cross-toolchains](https://github.com/cross-rs/cross-toolchains). These include
+MSVC and Apple Darwin targets, which we cannot ship pre-built images of.
+
 
 ## Debugging
 
@@ -421,7 +318,7 @@ $ QEMU_STRACE=1 cross run --target aarch64-unknown-linux-gnu
 
 ## Minimum Supported Rust Version (MSRV)
 
-This crate is guaranteed to compile on stable Rust 1.58.1 and up. It *might*
+This crate is guaranteed to compile on stable Rust 1.77.2 and up. It *might*
 compile with older versions but that may change in any new patch release.
 
 Some cross-compilation targets require a later Rust version, and using Xargo
@@ -454,3 +351,7 @@ to intervene to uphold that code of conduct.
[Docker]: https://www.docker.com [Podman]: https://podman.io [Matrix room]: https://matrix.to/#/#cross-rs:matrix.org +[docker_install]: https://github.com/cross-rs/cross/wiki/Getting-Started#installing-a-container-engine +[binfmt_misc]: https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html +[config_file]: ./docs/config_file.md +[docs_env_vars]: ./docs/environment_variables.md diff --git a/ci/build_release.sh b/ci/build_release.sh index 858248d93..7e3b89927 100755 --- a/ci/build_release.sh +++ b/ci/build_release.sh @@ -19,7 +19,7 @@ rm -rf "${BUILD_BINARIESDIRECTORY}" mkdir "${BUILD_BINARIESDIRECTORY}" if [[ -f "target/${TARGET}/release/cross.exe" ]]; then - mv "target/${TARGET}/release/cross.exe" "${BUILD_BINARIESDIRECTORY}/" + mv "target/${TARGET}/release/cross.exe" "${BUILD_BINARIESDIRECTORY}/" else - mv "target/${TARGET}/release/cross" "${BUILD_BINARIESDIRECTORY}/" + mv "target/${TARGET}/release/cross" "${BUILD_BINARIESDIRECTORY}/" fi diff --git a/ci/shared.sh b/ci/shared.sh index 457861dd7..be112abd9 100755 --- a/ci/shared.sh +++ b/ci/shared.sh @@ -1,23 +1,67 @@ #!/usr/bin/env bash +ci_dir=$(dirname "${BASH_SOURCE[0]}") +ci_dir=$(realpath "${ci_dir}") +PROJECT_HOME=$(dirname "${ci_dir}") +export PROJECT_HOME +CARGO_TMP_DIR="${PROJECT_HOME}/target/tmp" +export CARGO_TMP_DIR + +if [[ -n "${CROSS_CONTAINER_ENGINE}" ]]; then + CROSS_ENGINE="${CROSS_CONTAINER_ENGINE}" +elif command -v docker >/dev/null 2>&1; then + CROSS_ENGINE=docker +else + CROSS_ENGINE=podman +fi +export CROSS_ENGINE + function retry { - local tries="${TRIES-5}" - local timeout="${TIMEOUT-1}" - local try=0 - local exit_code=0 - - while (( try < tries )); do - if "${@}"; then - return 0 - else - exit_code=$? - fi - - sleep "${timeout}" - echo "Retrying ..." 1>&2 - try=$(( try + 1 )) - timeout=$(( timeout * 2 )) - done - - return ${exit_code} + local tries="${TRIES-5}" + local timeout="${TIMEOUT-1}" + local try=0 + local exit_code=0 + + while (( try < tries )); do + if "${@}"; then + return 0 + else + exit_code=$? + fi + + sleep "${timeout}" + echo "Retrying ..." 1>&2 + try=$(( try + 1 )) + timeout=$(( timeout * 2 )) + done + + return ${exit_code} +} + +function mkcargotemp { + local td= + td="$CARGO_TMP_DIR"/$(mktemp -u "${@}" | xargs basename) + mkdir -p "$td" + echo '# Cargo.toml + [workspace] + members = ["'"$(basename "$td")"'"] + ' > "$CARGO_TMP_DIR"/Cargo.toml + echo "$td" +} + +function binary_path() { + local binary="${1}" + local home="${2}" + local build_mode="${3}" + local cross="${home}/target/${build_mode}/${binary}" + + case "$OSTYPE" in + msys*|cygwin*) + cross="${cross}.exe" + ;; + *) + ;; + esac + + echo "${cross}" } diff --git a/ci/test-bisect.sh b/ci/test-bisect.sh index 4db9a5acf..0b847c454 100755 --- a/ci/test-bisect.sh +++ b/ci/test-bisect.sh @@ -13,7 +13,7 @@ fi ci_dir=$(dirname "${BASH_SOURCE[0]}") ci_dir=$(realpath "${ci_dir}") . "${ci_dir}"/shared.sh -project_home=$(dirname "${ci_dir}") + main() { local td= @@ -22,7 +22,8 @@ main() { retry cargo fetch cargo build cargo install cargo-bisect-rustc --debug - export CROSS="${project_home}/target/debug/cross" + CROSS=$(binary_path cross "${PROJECT_HOME}" debug) + export CROSS td="$(mktemp -d)" git clone --depth 1 https://github.com/cross-rs/rust-cpp-hello-word "${td}" @@ -32,10 +33,12 @@ main() { # shellcheck disable=SC2016 echo '#!/usr/bin/env bash export CROSS_CUSTOM_TOOLCHAIN=1 -exec "${CROSS}" run --target '"${TARGET}" > bisect.sh +"${CROSS}" run --target '"${TARGET}"' +cargo -V | grep 2022-06 +' > bisect.sh chmod +x bisect.sh - if ! 
err=$(cargo bisect-rustc --script=./bisect.sh --target "${TARGET}" 2>&1 >/dev/null); then + if ! err=$(cargo-bisect-rustc --start 2022-07-01 --end 2022-07-03 --script=./bisect.sh --target "${TARGET}" 2>&1); then if [[ "${err}" != *"does not reproduce the regression"* ]]; then echo "${err}" exit 1 diff --git a/ci/test-cross-image.sh b/ci/test-cross-image.sh index 9fb57f51b..55c04b047 100755 --- a/ci/test-cross-image.sh +++ b/ci/test-cross-image.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# shellcheck disable=SC2086 +# shellcheck disable=SC2086,SC1091,SC1090 set -x set -eo pipefail @@ -20,6 +20,9 @@ if [[ -z "${CROSS_TARGET_CROSS_IMAGE}" ]]; then CROSS_TARGET_CROSS_IMAGE="ghcr.io/cross-rs/cross:main" fi +ci_dir=$(dirname "${BASH_SOURCE[0]}") +ci_dir=$(realpath "${ci_dir}") +. "${ci_dir}"/shared.sh main() { @@ -32,6 +35,17 @@ git clone --depth 1 https://github.com/cross-rs/rust-cpp-hello-word "${td}" cd "${td}" cross run --target "${TARGET}" ' +td="$(mkcargotemp -d)" +git clone --depth 1 https://github.com/cross-rs/rust-cpp-hello-word "${td}" +cd "${td}" +echo '# Cross.toml +[target.'${TARGET}'] +pre-build = ["exit 0"] +' > Cross.toml +docker run --rm -e TARGET -e CROSS_CONTAINER_IN_CONTAINER=1 -e "CROSS_TARGET_${TARGET_UPPER//-/_}_IMAGE" \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v $PWD:/mount -w /mount \ + "${CROSS_TARGET_CROSS_IMAGE}" cross build --target "${TARGET}" } main "${@}" diff --git a/ci/test-docker-in-docker.sh b/ci/test-docker-in-docker.sh index f7705c068..0fa89a07a 100755 --- a/ci/test-docker-in-docker.sh +++ b/ci/test-docker-in-docker.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# shellcheck disable=SC1004 +# shellcheck disable=SC1004,SC1091,SC1090 # test to see that running docker-in-docker works @@ -18,17 +18,17 @@ if [[ "${IMAGE}" ]]; then export "CROSS_TARGET_${TARGET_UPPER//-/_}_IMAGE"="${IMAGE}" fi -source=$(dirname "${BASH_SOURCE[0]}") -source=$(realpath "${source}") -home=$(dirname "${source}") +ci_dir=$(dirname "${BASH_SOURCE[0]}") +ci_dir=$(realpath "${ci_dir}") +. 
"${ci_dir}"/shared.sh main() { - docker run -v "${home}":"${home}" -w "${home}" \ - --rm -e TARGET -e RUSTFLAGS -e RUST_TEST_THREADS \ + docker run --platform linux/amd64 -v "${PROJECT_HOME}":"${PROJECT_HOME}" -w "${PROJECT_HOME}" \ + --rm -e TARGET -e TARGET_UPPER -e RUSTFLAGS -e RUST_TEST_THREADS \ -e LLVM_PROFILE_FILE -e CARGO_INCREMENTAL \ -e "CROSS_TARGET_${TARGET_UPPER//-/_}_IMAGE" \ -v /var/run/docker.sock:/var/run/docker.sock \ - docker:18.09-dind sh -c ' + docker:20.10-dind sh -c ' #!/usr/bin/env sh set -x set -euo pipefail @@ -55,10 +55,14 @@ git clone --depth 1 https://github.com/cross-rs/test-workspace "${td}" cd "${td}" cross build --target "${TARGET}" --workspace \ --manifest-path="./workspace/Cargo.toml" --verbose +eval CROSS_TARGET_${TARGET_UPPER//-/_}_PRE_BUILD="exit" cross build --target "${TARGET}" --workspace \ + --manifest-path="./workspace/Cargo.toml" --verbose cd workspace cross build --target "${TARGET}" --workspace --verbose +eval CROSS_TARGET_${TARGET_UPPER//-/_}_PRE_BUILD="exit" cross build --target "${TARGET}" --workspace --verbose cd binary cross run --target "${TARGET}" --verbose +eval CROSS_TARGET_${TARGET_UPPER//-/_}_PRE_BUILD="exit" cross run --target "${TARGET}" --verbose ' } diff --git a/ci/test-foreign-toolchain.sh b/ci/test-foreign-toolchain.sh new file mode 100755 index 000000000..ea5a6c4b5 --- /dev/null +++ b/ci/test-foreign-toolchain.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# shellcheck disable=SC1091,SC1090 + +# test to see that foreign toolchains work + +set -x +set -eo pipefail + +ci_dir=$(dirname "${BASH_SOURCE[0]}") +ci_dir=$(realpath "${ci_dir}") +. "${ci_dir}"/shared.sh + +main() { + local td= + + retry cargo fetch + cargo build + CROSS=$(binary_path cross "${PROJECT_HOME}" debug) + export CROSS + + td="$(mkcargotemp -d)" + + pushd "${td}" + cargo init --bin --name foreign_toolchain + # shellcheck disable=SC2016 + echo '# Cross.toml +[build] +default-target = "x86_64-unknown-linux-musl" + +[target."x86_64-unknown-linux-musl"] +image.name = "alpine:edge" +image.toolchain = ["x86_64-unknown-linux-musl"] +pre-build = ["apk add --no-cache gcc musl-dev"]' >"${CARGO_TMP_DIR}"/Cross.toml + + "${CROSS}" run -v + + local tmp_basename + tmp_basename=$(basename "${CARGO_TMP_DIR}") + "${CROSS_ENGINE}" images --format '{{.Repository}}:{{.Tag}}' --filter 'label=org.cross-rs.for-cross-target' | grep "cross-custom-${tmp_basename}" | xargs -t "${CROSS_ENGINE}" rmi + + echo '# Cross.toml +[build] +default-target = "x86_64-unknown-linux-gnu" + +[target.x86_64-unknown-linux-gnu] +pre-build = [ + "apt-get update && apt-get install -y libc6 g++-x86-64-linux-gnu libc6-dev-amd64-cross", +] + +[target.x86_64-unknown-linux-gnu.env] +passthrough = [ + "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=x86_64-linux-gnu-gcc", + "CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER=/qemu-runner x86_64", + "CC_x86_64_unknown_linux_gnu=x86_64-linux-gnu-gcc", + "CXX_x86_64_unknown_linux_gnu=x86_64-linux-gnu-g++", +] + +[target.x86_64-unknown-linux-gnu.image] +name = "ubuntu:20.04" +toolchain = ["aarch64-unknown-linux-gnu"] + ' >"${CARGO_TMP_DIR}"/Cross.toml + + "${CROSS}" build -v + + "${CROSS_ENGINE}" images --format '{{.Repository}}:{{.Tag}}' --filter 'label=org.cross-rs.for-cross-target' | grep "cross-custom-${tmp_basename}" | xargs "${CROSS_ENGINE}" rmi + + popd + + rm -rf "${td}" +} + +main diff --git a/ci/test-podman.sh b/ci/test-podman.sh new file mode 100755 index 000000000..4e49f7ab6 --- /dev/null +++ b/ci/test-podman.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash +# shellcheck 
disable=SC1091,SC1090 + +# test to see that running and building images with podman works. + +set -x +set -eo pipefail + +export CROSS_CONTAINER_ENGINE=podman +if [[ -z "${TARGET}" ]]; then + export TARGET="aarch64-unknown-linux-gnu" +fi + +ci_dir=$(dirname "${BASH_SOURCE[0]}") +ci_dir=$(realpath "${ci_dir}") +. "${ci_dir}"/shared.sh + +main() { + local td= + local parent= + local target="${TARGET}" + + retry cargo fetch + cargo build + CROSS=$(binary_path cross "${PROJECT_HOME}" debug) + export CROSS + + td="$(mkcargotemp -d)" + parent=$(dirname "${td}") + pushd "${td}" + cargo init --bin --name "hello" . + + echo '[build] +pre-build = ["apt-get update"]' > "${parent}/Cross.toml" + + CROSS_CONTAINER_ENGINE="${CROSS_ENGINE}" "${CROSS}" build --target "${target}" --verbose + + popd + rm -rf "${td}" +} + +main diff --git a/ci/test-remote.sh b/ci/test-remote.sh index 8fb3583ed..e9a4bec63 100755 --- a/ci/test-remote.sh +++ b/ci/test-remote.sh @@ -14,15 +14,16 @@ fi ci_dir=$(dirname "${BASH_SOURCE[0]}") ci_dir=$(realpath "${ci_dir}") . "${ci_dir}"/shared.sh -project_home=$(dirname "${ci_dir}") main() { local err= retry cargo fetch cargo build - export CROSS="${project_home}/target/debug/cross" - export CROSS_UTIL="${project_home}/target/debug/cross-util" + CROSS=$(binary_path cross "${PROJECT_HOME}" debug) + export CROSS + CROSS_UTIL=$(binary_path cross-util "${PROJECT_HOME}" debug) + export CROSS_UTIL # if the create volume fails, ensure it exists. if ! err=$("${CROSS_UTIL}" volumes create 2>&1 >/dev/null); then @@ -40,13 +41,15 @@ main() { cross_test_cpp() { local td= - td="$(mktemp -d)" + td="$(mkcargotemp -d)" git clone --depth 1 https://github.com/cross-rs/rust-cpp-hello-word "${td}" pushd "${td}" retry cargo fetch - "${CROSS}" run --target "${TARGET}" + "${CROSS}" run --target "${TARGET}" | grep "Hello, world!" + sed -i 's/Hello, world/Hello, test/g' hellopp.cc + "${CROSS}" run --target "${TARGET}" | grep "Hello, test!" popd rm -rf "${td}" diff --git a/ci/test-zig-image.sh b/ci/test-zig-image.sh new file mode 100755 index 000000000..2fdddc104 --- /dev/null +++ b/ci/test-zig-image.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2086,SC1091,SC1090 + +set -x +set -eo pipefail + +# NOTE: "${@}" is an unbound variable for bash 3.2, which is the +# installed version on macOS. likewise, "${var[@]}" is an unbound +# error if var is an empty array. + +ci_dir=$(dirname "${BASH_SOURCE[0]}") +ci_dir=$(realpath "${ci_dir}") +. "${ci_dir}"/shared.sh + +# zig cc is very slow: only use a few targets. +TARGETS=( + "aarch64-unknown-linux-gnu" + "aarch64-unknown-linux-musl" + # disabled, see https://github.com/cross-rs/cross/issues/1425 + #"i586-unknown-linux-gnu" + #"i586-unknown-linux-musl" +) + +# on CI, it sets `CROSS_TARGET_ZIG_IMAGE` rather than `CROSS_BUILD_ZIG_IMAGE` +if [[ -n "${CROSS_TARGET_ZIG_IMAGE}" ]]; then + export CROSS_BUILD_ZIG_IMAGE="${CROSS_TARGET_ZIG_IMAGE}" + unset CROSS_TARGET_ZIG_IMAGE +fi + +main() { + export CROSS_BUILD_ZIG=1 + + local td= + local target= + + retry cargo fetch + cargo build + CROSS=$(binary_path cross "${PROJECT_HOME}" debug) + export CROSS + + td="$(mktemp -d)" + git clone --depth 1 https://github.com/cross-rs/rust-cpp-hello-word "${td}" + pushd "${td}" + + for target in "${TARGETS[@]}"; do + CROSS_CONTAINER_ENGINE="${CROSS_ENGINE}" "${CROSS}" build --target "${target}" --verbose + # note: ensure #724 doesn't replicate during CI. 
+ # https://github.com/cross-rs/cross/issues/724 + cargo clean + done + + popd + rm -rf "${td}" +} + +main "${@}" diff --git a/ci/test.sh b/ci/test.sh index fbfaddaa5..147c6b06f 100755 --- a/ci/test.sh +++ b/ci/test.sh @@ -2,7 +2,7 @@ # shellcheck disable=SC2086,SC1091,SC1090 set -x -set -euo pipefail +set -eo pipefail # NOTE: "${@}" is an unbound variable for bash 3.2, which is the # installed version on macOS. likewise, "${var[@]}" is an unbound @@ -11,13 +11,12 @@ set -euo pipefail ci_dir=$(dirname "${BASH_SOURCE[0]}") ci_dir=$(realpath "${ci_dir}") . "${ci_dir}"/shared.sh -project_home=$(dirname "${ci_dir}") workspace_test() { - "${CROSS[@]}" build --target "${TARGET}" --workspace "$@" ${CROSS_FLAGS} - "${CROSS[@]}" run --target "${TARGET}" -p binary "$@" ${CROSS_FLAGS} - "${CROSS[@]}" run --target "${TARGET}" --bin dependencies \ - --features=dependencies "$@" ${CROSS_FLAGS} + "${CROSS[@]}" build --target "${TARGET}" --workspace "$@" ${CROSS_FLAGS} + "${CROSS[@]}" run --target "${TARGET}" -p binary "$@" ${CROSS_FLAGS} + "${CROSS[@]}" run --target "${TARGET}" --bin dependencies \ + --features=dependencies "$@" ${CROSS_FLAGS} } main() { @@ -32,14 +31,19 @@ main() { export QEMU_STRACE=1 # ensure we have the proper toolchain and optional rust flags - export CROSS=("${project_home}/target/debug/cross") - export CROSS_FLAGS="" + CROSS=$(binary_path cross "${PROJECT_HOME}" debug) + export CROSS=("${CROSS}") + export CROSS_FLAGS="-v" if (( ${BUILD_STD:-0} )); then # use build-std instead of xargo, due to xargo being # maintenance-only. build-std requires a nightly compiler rustup toolchain add nightly CROSS_FLAGS="${CROSS_FLAGS} -Zbuild-std" CROSS+=("+nightly") + if [[ "${TARGET}" == *"mips"* ]]; then + # workaround for https://github.com/cross-rs/cross/issues/1322 & https://github.com/rust-lang/rust/issues/108835 + [[ ! "$RUSTFLAGS" =~ opt-level ]] && export RUSTFLAGS="${RUSTFLAGS:+$RUSTFLAGS }-C opt-level=1" + fi elif ! (( ${STD:-0} )); then # don't use xargo: should have native support just from rustc rustup toolchain add nightly @@ -48,7 +52,7 @@ main() { if (( ${STD:-0} )); then # test `cross check` - td=$(mktemp -d) + td=$(mkcargotemp -d) cargo init --lib --name foo "${td}" pushd "${td}" echo '#![no_std]' > src/lib.rs @@ -57,7 +61,7 @@ main() { rm -rf "${td}" else # `cross build` test for targets where `std` is not available - td=$(mktemp -d) + td=$(mkcargotemp -d) git clone \ --depth 1 \ @@ -68,33 +72,32 @@ main() { retry cargo fetch # don't use xargo: should have native support just from rustc rustup toolchain add nightly - "${CROSS[@]}" build --lib --target "${TARGET}" ${CROSS_FLAGS} + cross_build --lib --target "${TARGET}" popd rm -rf "${td}" - - return fi # `cross build` test for the other targets if [[ "${TARGET}" == *-unknown-emscripten ]]; then - td=$(mktemp -d) + td=$(mkcargotemp -d) pushd "${td}" cargo init --lib --name foo . retry cargo fetch - "${CROSS[@]}" build --target "${TARGET}" ${CROSS_FLAGS} + cross_build --target "${TARGET}" popd rm -rf "${td}" + # thumb targets are tested in later steps elif [[ "${TARGET}" != thumb* ]]; then - td=$(mktemp -d) + td=$(mkcargotemp -d) pushd "${td}" # test that linking works cargo init --bin --name hello . retry cargo fetch - "${CROSS[@]}" build --target "${TARGET}" ${CROSS_FLAGS} + cross_build --target "${TARGET}" popd rm -rf "${td}" @@ -103,7 +106,7 @@ main() { if (( ${RUN:-0} )); then # `cross test` test if (( ${DYLIB:-0} )); then - td=$(mktemp -d) + td=$(mkcargotemp -d) pushd "${td}" cargo init --lib --name foo . 
@@ -117,7 +120,7 @@ main() { # `cross run` test case "${TARGET}" in thumb*-none-eabi*) - td=$(mktemp -d) + td=$(mkcargotemp -d) git clone \ --depth 1 \ @@ -131,7 +134,7 @@ main() { rm -rf "${td}" ;; *) - td=$(mktemp -d) + td=$(mkcargotemp -d) cargo init --bin --name hello "${td}" @@ -146,7 +149,7 @@ main() { popd rm -rf "${td}" - td=$(mktemp -d) + td=$(mkcargotemp -d) git clone \ --depth 1 \ --recursive \ @@ -166,9 +169,23 @@ main() { fi - # Test C++ support + # Test C++ support in a no_std context if (( ${CPP:-0} )); then - td="$(mktemp -d)" + td="$(mkcargotemp -d)" + + git clone --depth 1 https://github.com/cross-rs/rust-cpp-accumulate "${td}" + + pushd "${td}" + retry cargo fetch + cross_build --target "${TARGET}" + popd + + rm -rf "${td}" + fi + + # Test C++ support + if (( ${STD:-0} )) && (( ${CPP:-0} )); then + td="$(mkcargotemp -d)" git clone --depth 1 https://github.com/cross-rs/rust-cpp-hello-word "${td}" @@ -177,12 +194,92 @@ main() { if (( ${RUN:-0} )); then cross_run --target "${TARGET}" else - "${CROSS[@]}" build --target "${TARGET}" ${CROSS_FLAGS} + cross_build --target "${TARGET}" fi popd rm -rf "${td}" fi + + # special tests for a shared C runtime, since we disable the shared c++ runtime + # https://github.com/cross-rs/cross/issues/902 + if [[ "${TARGET}" == *-linux-musl* ]]; then + td=$(mkcargotemp -d) + + pushd "${td}" + cargo init --bin --name hello . + retry cargo fetch + RUSTFLAGS="$RUSTFLAGS -C target-feature=-crt-static" \ + cross_build --target "${TARGET}" + popd + + rm -rf "${td}" + fi + + # test cmake support + td="$(mkcargotemp -d)" + + git clone \ + --recursive \ + --depth 1 \ + https://github.com/cross-rs/rust-cmake-hello-world "${td}" + + pushd "${td}" + retry cargo fetch + if [[ "${TARGET}" == "arm-linux-androideabi" ]]; then + # ARMv5te isn't supported anymore by Android, which produces missing + # symbol errors with re2 like `__libcpp_signed_lock_free`. + cross_run --target "${TARGET}" --features=tryrun + elif (( ${STD:-0} )) && (( ${RUN:-0} )) && (( ${CPP:-0} )); then + cross_run --target "${TARGET}" --features=re2,tryrun + elif (( ${STD:-0} )) && (( ${CPP:-0} )); then + cross_build --target "${TARGET}" --features=re2 + elif (( ${STD:-0} )) && (( ${RUN:-0} )); then + cross_run --target "${TARGET}" --features=tryrun + elif (( ${STD:-0} )); then + cross_build --target "${TARGET}" --features=tryrun + else + cross_build --lib --target "${TARGET}" + fi + popd + + rm -rf "${td}" + + # test running binaries with cleared environment + # Command is not implemented for wasm32-unknown-emscripten + if (( ${RUN:-0} )) && [[ "${TARGET}" != "wasm32-unknown-emscripten" ]]; then + td="$(mkcargotemp -d)" + pushd "${td}" + cargo init --bin --name foo . 
+    mkdir src/bin
+    upper_target=$(echo "${TARGET}" | tr '[:lower:]' '[:upper:]' | tr '-' '_')
+    cat <<EOF > src/bin/launch.rs
+fn main() {
+    let runner = std::env::var("CARGO_TARGET_${upper_target}_RUNNER");
+    let mut command = if let Ok(runner) = runner {
+        runner.split(' ').map(str::to_string).collect()
+    } else {
+        vec![]
+    };
+    let executable = format!("/target/${TARGET}/debug/foo{}", std::env::consts::EXE_SUFFIX);
+    command.push(executable.to_string());
+    let status = dbg!(std::process::Command::new(&command[0])
+        .args(&command[1..])
+        .env_clear()) // drop all environment variables
+        .status()
+        .unwrap();
+    std::process::exit(status.code().unwrap());
+}
+EOF
+    cross_build --target "${TARGET}"
+    cross_run --target "${TARGET}" --bin launch
+    popd
+    rm -rf "${td}"
+    fi
+}
+
+cross_build() {
+    "${CROSS[@]}" build "$@" ${CROSS_FLAGS}
 }
 
 cross_run() {
@@ -190,7 +287,7 @@ cross_run() {
         "${CROSS[@]}" run "$@" ${CROSS_FLAGS}
     else
         for runner in ${RUNNERS}; do
-            echo -e "[target.${TARGET}]\nrunner = \"${runner}\"" > Cross.toml
+            echo -e "[target.${TARGET}]\nrunner = \"${runner}\"" > "${CARGO_TMP_DIR}"/Cross.toml
             "${CROSS[@]}" run "$@" ${CROSS_FLAGS}
         done
     fi
@@ -201,7 +298,7 @@ cross_test() {
         "${CROSS[@]}" test "$@" ${CROSS_FLAGS}
     else
         for runner in ${RUNNERS}; do
-            echo -e "[target.${TARGET}]\nrunner = \"${runner}\"" > Cross.toml
+            echo -e "[target.${TARGET}]\nrunner = \"${runner}\"" > "${CARGO_TMP_DIR}"/Cross.toml
             "${CROSS[@]}" test "$@" ${CROSS_FLAGS}
         done
     fi
@@ -212,7 +309,7 @@ cross_bench() {
         "${CROSS[@]}" bench "$@" ${CROSS_FLAGS}
     else
         for runner in ${RUNNERS}; do
-            echo -e "[target.${TARGET}]\nrunner = \"${runner}\"" > Cross.toml
+            echo -e "[target.${TARGET}]\nrunner = \"${runner}\"" > "${CARGO_TMP_DIR}"/Cross.toml
             "${CROSS[@]}" bench "$@" ${CROSS_FLAGS}
         done
     fi
diff --git a/clippy.toml b/clippy.toml
index 2b7937a77..fe7b17498 100644
--- a/clippy.toml
+++ b/clippy.toml
@@ -1,5 +1,5 @@
 disallowed-methods = [
     { path = "std::path::Path::display", reason = "incorrect handling of non-Unicode paths, use path.to_utf8() or debug (`{path:?}`) instead" },
 ]
-# needs clippy 1.61
-# allow-unwrap-in-tests = true
+allow-unwrap-in-tests = true
+msrv = "1.77.2"
diff --git a/crosstool-ng/arm-unknown-linux-gnueabihf.config.in b/crosstool-ng/arm-unknown-linux-gnueabihf.config.in
index 2fab6580c..2ec7d18f0 100644
--- a/crosstool-ng/arm-unknown-linux-gnueabihf.config.in
+++ b/crosstool-ng/arm-unknown-linux-gnueabihf.config.in
@@ -1,681 +1,21 @@
-#
-# Automatically generated file; DO NOT EDIT.
-# crosstool-NG Configuration -# -# This file was adapted from: -# https://github.com/rust-lang/rust/blob/0595ea1d12cf745e0a672d05341429ecb0917e66/src/ci/docker/host-x86_64/dist-armhf-linux/arm-linux-gnueabihf.config -CT_CONFIGURE_has_static_link=y -CT_CONFIGURE_has_cxx11=y -CT_CONFIGURE_has_wget=y -CT_CONFIGURE_has_curl=y -CT_CONFIGURE_has_make_3_81_or_newer=y -CT_CONFIGURE_has_make_4_0_or_newer=y -CT_CONFIGURE_has_libtool_2_4_or_newer=y -CT_CONFIGURE_has_libtoolize_2_4_or_newer=y -CT_CONFIGURE_has_autoconf_2_65_or_newer=y -CT_CONFIGURE_has_autoreconf_2_65_or_newer=y -CT_CONFIGURE_has_automake_1_15_or_newer=y -CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y -CT_CONFIGURE_has_python_3_4_or_newer=y -CT_CONFIGURE_has_bison_2_7_or_newer=y -CT_CONFIGURE_has_python=y -CT_CONFIGURE_has_git=y -CT_CONFIGURE_has_md5sum=y -CT_CONFIGURE_has_sha1sum=y -CT_CONFIGURE_has_sha256sum=y -CT_CONFIGURE_has_sha512sum=y -CT_CONFIGURE_has_install_with_strip_program=y -CT_CONFIG_VERSION_CURRENT="3" -CT_CONFIG_VERSION="3" -CT_MODULES=y - -# -# Paths and misc options -# - -# -# crosstool-NG behavior -# -# CT_OBSOLETE is not set -# CT_EXPERIMENTAL is not set -# CT_DEBUG_CT is not set - -# -# Paths -# -CT_LOCAL_TARBALLS_DIR="${HOME}/src" -CT_SAVE_TARBALLS=y -# CT_TARBALLS_BUILDROOT_LAYOUT is not set -CT_WORK_DIR="${CT_TOP_DIR}/.build" -CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_CONFIG_VERSION="4" CT_PREFIX_DIR="/x-tools/${CT_TARGET}" -CT_RM_RF_PREFIX_DIR=y -CT_REMOVE_DOCS=y -CT_INSTALL_LICENSES=y -CT_PREFIX_DIR_RO=y -CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y -# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set - -# -# Downloading -# -CT_DOWNLOAD_AGENT_WGET=y -# CT_DOWNLOAD_AGENT_CURL is not set -# CT_DOWNLOAD_AGENT_NONE is not set -# CT_FORBID_DOWNLOAD is not set -# CT_FORCE_DOWNLOAD is not set -CT_CONNECT_TIMEOUT=10 -CT_DOWNLOAD_WGET_OPTIONS="--passive-ftp --tries=3 -nc --progress=dot:binary" -# CT_ONLY_DOWNLOAD is not set -# CT_USE_MIRROR is not set -CT_VERIFY_DOWNLOAD_DIGEST=y -CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y -# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set -# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set -# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set -CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" -# CT_VERIFY_DOWNLOAD_SIGNATURE is not set - -# -# Extracting -# -# CT_FORCE_EXTRACT is not set -CT_OVERRIDE_CONFIG_GUESS_SUB=y -# CT_ONLY_EXTRACT is not set -CT_PATCH_BUNDLED=y -# CT_PATCH_BUNDLED_LOCAL is not set -CT_PATCH_ORDER="bundled" - -# -# Build behavior -# -CT_PARALLEL_JOBS=0 -CT_LOAD="" -CT_USE_PIPES=y -CT_EXTRA_CFLAGS_FOR_BUILD="" -CT_EXTRA_LDFLAGS_FOR_BUILD="" -CT_EXTRA_CFLAGS_FOR_HOST="" -CT_EXTRA_LDFLAGS_FOR_HOST="" -# CT_CONFIG_SHELL_SH is not set -# CT_CONFIG_SHELL_ASH is not set -CT_CONFIG_SHELL_BASH=y -# CT_CONFIG_SHELL_CUSTOM is not set -CT_CONFIG_SHELL="${bash}" - -# -# Logging -# -# CT_LOG_ERROR is not set -# CT_LOG_WARN is not set -# CT_LOG_INFO is not set -CT_LOG_EXTRA=y -# CT_LOG_ALL is not set -# CT_LOG_DEBUG is not set -CT_LOG_LEVEL_MAX="EXTRA" -# CT_LOG_SEE_TOOLS_WARN is not set -CT_LOG_PROGRESS_BAR=y -CT_LOG_TO_FILE=y -CT_LOG_FILE_COMPRESS=y - -# -# Target options -# -# CT_ARCH_ALPHA is not set -# CT_ARCH_ARC is not set +CT_DOWNLOAD_AGENT_CURL=y CT_ARCH_ARM=y -# CT_ARCH_AVR is not set -# CT_ARCH_M68K is not set -# CT_ARCH_MIPS is not set -# CT_ARCH_NIOS2 is not set -# CT_ARCH_POWERPC is not set -# CT_ARCH_S390 is not set -# CT_ARCH_SH is not set -# CT_ARCH_SPARC is not set -# CT_ARCH_X86 is not set -# CT_ARCH_XTENSA is not set -CT_ARCH="arm" 
-CT_ARCH_CHOICE_KSYM="ARM" -CT_ARCH_CPU="" -CT_ARCH_TUNE="" -CT_ARCH_ARM_SHOW=y - -# -# Options for arm -# -CT_ARCH_ARM_PKG_KSYM="" -CT_ARCH_ARM_MODE="arm" -CT_ARCH_ARM_MODE_ARM=y -# CT_ARCH_ARM_MODE_THUMB is not set -# CT_ARCH_ARM_INTERWORKING is not set -CT_ARCH_ARM_EABI_FORCE=y -CT_ARCH_ARM_EABI=y -CT_ARCH_ARM_TUPLE_USE_EABIHF=y -CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC RISCV S390 SH SPARC X86 XTENSA" -CT_ARCH_SUFFIX="" -# CT_OMIT_TARGET_VENDOR is not set - -# -# Generic target options -# -# CT_MULTILIB is not set -CT_DEMULTILIB=y -CT_ARCH_SUPPORTS_BOTH_MMU=y -CT_ARCH_DEFAULT_HAS_MMU=y -CT_ARCH_USE_MMU=y -CT_ARCH_SUPPORTS_FLAT_FORMAT=y -CT_ARCH_SUPPORTS_EITHER_ENDIAN=y -CT_ARCH_DEFAULT_LE=y -# CT_ARCH_BE is not set -CT_ARCH_LE=y -CT_ARCH_ENDIAN="little" -CT_ARCH_SUPPORTS_32=y -CT_ARCH_SUPPORTS_64=y -CT_ARCH_DEFAULT_32=y -CT_ARCH_BITNESS=32 -CT_ARCH_32=y -# CT_ARCH_64 is not set - -# -# Target optimisations -# -CT_ARCH_SUPPORTS_WITH_ARCH=y -CT_ARCH_SUPPORTS_WITH_CPU=y -CT_ARCH_SUPPORTS_WITH_TUNE=y -CT_ARCH_SUPPORTS_WITH_FLOAT=y -CT_ARCH_SUPPORTS_WITH_FPU=y -CT_ARCH_SUPPORTS_SOFTFP=y -CT_ARCH_EXCLUSIVE_WITH_CPU=y CT_ARCH_ARCH="armv6" CT_ARCH_FPU="vfp" -# CT_ARCH_FLOAT_AUTO is not set CT_ARCH_FLOAT_HW=y -# CT_ARCH_FLOAT_SOFTFP is not set -# CT_ARCH_FLOAT_SW is not set -CT_TARGET_CFLAGS="" -CT_TARGET_LDFLAGS="" -CT_ARCH_FLOAT="hard" - -# -# Toolchain options -# - -# -# General toolchain options -# -CT_FORCE_SYSROOT=y -CT_USE_SYSROOT=y -CT_SYSROOT_NAME="sysroot" -CT_SYSROOT_DIR_PREFIX="" -CT_WANTS_STATIC_LINK=y -CT_WANTS_STATIC_LINK_CXX=y -# CT_STATIC_TOOLCHAIN is not set -CT_SHOW_CT_VERSION=y -CT_TOOLCHAIN_PKGVERSION="" -CT_TOOLCHAIN_BUGURL="" - -# -# Tuple completion and aliasing -# -CT_TARGET_VENDOR="unknown" -CT_TARGET_ALIAS_SED_EXPR="" -CT_TARGET_ALIAS="" - -# -# Toolchain type -# -CT_CROSS=y -# CT_CANADIAN is not set -CT_TOOLCHAIN_TYPE="cross" - -# -# Build system -# -CT_BUILD="" -CT_BUILD_PREFIX="" -CT_BUILD_SUFFIX="" - -# -# Misc options -# -# CT_TOOLCHAIN_ENABLE_NLS is not set - -# -# Operating System -# -CT_KERNEL_SUPPORTS_SHARED_LIBS=y -# CT_KERNEL_BARE_METAL is not set CT_KERNEL_LINUX=y -CT_KERNEL="linux" -CT_KERNEL_CHOICE_KSYM="LINUX" -CT_KERNEL_LINUX_SHOW=y - -# -# Options for linux -# -CT_KERNEL_LINUX_PKG_KSYM="LINUX" -CT_LINUX_DIR_NAME="linux" -CT_LINUX_PKG_NAME="linux" -CT_LINUX_SRC_RELEASE=y -CT_LINUX_PATCH_ORDER="global" %CT_LINUX_V% -CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" -CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" -CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" %CT_LINUX% -CT_KERNEL_LINUX_VERBOSITY_0=y -# CT_KERNEL_LINUX_VERBOSITY_1 is not set -# CT_KERNEL_LINUX_VERBOSITY_2 is not set -CT_KERNEL_LINUX_VERBOSE_LEVEL=0 -CT_KERNEL_LINUX_INSTALL_CHECK=y -CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" - -# -# Common kernel options -# -CT_SHARED_LIBS=y - -# -# Binary utilities -# -CT_ARCH_BINFMT_ELF=y -CT_BINUTILS_BINUTILS=y -CT_BINUTILS="binutils" -CT_BINUTILS_CHOICE_KSYM="BINUTILS" -CT_BINUTILS_BINUTILS_SHOW=y - -# -# Options for binutils -# -CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" -CT_BINUTILS_DIR_NAME="binutils" -CT_BINUTILS_USE_GNU=y -CT_BINUTILS_USE="BINUTILS" -CT_BINUTILS_PKG_NAME="binutils" -CT_BINUTILS_SRC_RELEASE=y -CT_BINUTILS_PATCH_ORDER="global" CT_BINUTILS_V_2_32=y -# CT_BINUTILS_V_2_31 is not set -# CT_BINUTILS_V_2_30 is not set -# CT_BINUTILS_V_2_29 is not set -# CT_BINUTILS_V_2_28 is 
not set -# CT_BINUTILS_V_2_27 is not set -# CT_BINUTILS_V_2_26 is not set -# CT_BINUTILS_NO_VERSIONS is not set -CT_BINUTILS_VERSION="2.32" -CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" -CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" -CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" -CT_BINUTILS_later_than_2_30=y -CT_BINUTILS_2_30_or_later=y -CT_BINUTILS_later_than_2_27=y -CT_BINUTILS_2_27_or_later=y -CT_BINUTILS_later_than_2_25=y -CT_BINUTILS_2_25_or_later=y -CT_BINUTILS_later_than_2_23=y -CT_BINUTILS_2_23_or_later=y - -# -# GNU binutils -# -CT_BINUTILS_HAS_HASH_STYLE=y -CT_BINUTILS_HAS_GOLD=y -CT_BINUTILS_HAS_PLUGINS=y -CT_BINUTILS_HAS_PKGVERSION_BUGURL=y -CT_BINUTILS_GOLD_SUPPORTS_ARCH=y -CT_BINUTILS_GOLD_SUPPORT=y -CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y -CT_BINUTILS_LINKER_LD=y -# CT_BINUTILS_LINKER_LD_GOLD is not set -CT_BINUTILS_LINKERS_LIST="ld" -CT_BINUTILS_LINKER_DEFAULT="bfd" -# CT_BINUTILS_PLUGINS is not set -CT_BINUTILS_RELRO=m -CT_BINUTILS_EXTRA_CONFIG_ARRAY="" -# CT_BINUTILS_FOR_TARGET is not set -CT_ALL_BINUTILS_CHOICES="BINUTILS" - -# -# C-library -# -CT_LIBC_GLIBC=y -# CT_LIBC_UCLIBC is not set -CT_LIBC="glibc" -CT_LIBC_CHOICE_KSYM="GLIBC" -CT_THREADS="nptl" -CT_LIBC_GLIBC_SHOW=y - -# -# Options for glibc -# -CT_LIBC_GLIBC_PKG_KSYM="GLIBC" -CT_GLIBC_DIR_NAME="glibc" -CT_GLIBC_USE_GNU=y -CT_GLIBC_USE="GLIBC" -CT_GLIBC_PKG_NAME="glibc" -CT_GLIBC_SRC_RELEASE=y -CT_GLIBC_PATCH_ORDER="global" %CT_GLIBC_V% -CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)" -CT_GLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" -CT_GLIBC_SIGNATURE_FORMAT="packed/.sig" %CT_GLIBC% -CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y -CT_GLIBC_DEP_BINUTILS=y -CT_GLIBC_DEP_GCC=y -CT_GLIBC_DEP_PYTHON=y -CT_GLIBC_HAS_NPTL_ADDON=y -CT_GLIBC_HAS_PORTS_ADDON=y -CT_GLIBC_HAS_LIBIDN_ADDON=y -CT_GLIBC_USE_PORTS_ADDON=y -CT_GLIBC_USE_NPTL_ADDON=y -# CT_GLIBC_USE_LIBIDN_ADDON is not set -CT_GLIBC_HAS_OBSOLETE_RPC=y -CT_GLIBC_EXTRA_CONFIG_ARRAY="" -CT_GLIBC_CONFIGPARMS="" -CT_GLIBC_EXTRA_CFLAGS="" -CT_GLIBC_ENABLE_OBSOLETE_RPC=y -# CT_GLIBC_DISABLE_VERSIONING is not set -CT_GLIBC_OLDEST_ABI="" -CT_GLIBC_FORCE_UNWIND=y -# CT_GLIBC_LOCALES is not set -# CT_GLIBC_KERNEL_VERSION_NONE is not set -CT_GLIBC_KERNEL_VERSION_AS_HEADERS=y -# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set -CT_GLIBC_MIN_KERNEL="3.2.101" -CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC" -CT_LIBC_SUPPORT_THREADS_ANY=y -CT_LIBC_SUPPORT_THREADS_NATIVE=y - -# -# Common C library options -# -CT_THREADS_NATIVE=y -# CT_CREATE_LDSO_CONF is not set -CT_LIBC_XLDD=y - -# -# C compiler -# -CT_CC_CORE_PASSES_NEEDED=y -CT_CC_CORE_PASS_1_NEEDED=y -CT_CC_CORE_PASS_2_NEEDED=y -CT_CC_SUPPORT_CXX=y -CT_CC_SUPPORT_FORTRAN=y -CT_CC_SUPPORT_ADA=y -CT_CC_SUPPORT_OBJC=y -CT_CC_SUPPORT_OBJCXX=y -CT_CC_SUPPORT_GOLANG=y -CT_CC_GCC=y -CT_CC="gcc" -CT_CC_CHOICE_KSYM="GCC" -CT_CC_GCC_SHOW=y - -# -# Options for gcc -# -CT_CC_GCC_PKG_KSYM="GCC" -CT_GCC_DIR_NAME="gcc" -CT_GCC_USE_GNU=y -CT_GCC_USE="GCC" -CT_GCC_PKG_NAME="gcc" -CT_GCC_SRC_RELEASE=y -CT_GCC_PATCH_ORDER="global" %CT_GCC_V% -CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" -CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" 
-CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" -CT_GCC_SIGNATURE_FORMAT="" %CT_GCC% -CT_CC_GCC_HAS_LIBMPX=y -CT_CC_GCC_ENABLE_CXX_FLAGS="" -CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" -CT_CC_GCC_EXTRA_CONFIG_ARRAY="" -CT_CC_GCC_STATIC_LIBSTDCXX=y -# CT_CC_GCC_SYSTEM_ZLIB is not set -CT_CC_GCC_CONFIG_TLS=m - -# -# Optimisation features -# -CT_CC_GCC_USE_GRAPHITE=y -CT_CC_GCC_USE_LTO=y - -# -# Settings for libraries running on target -# -CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y -# CT_CC_GCC_LIBMUDFLAP is not set -# CT_CC_GCC_LIBGOMP is not set -# CT_CC_GCC_LIBSSP is not set -# CT_CC_GCC_LIBQUADMATH is not set -# CT_CC_GCC_LIBSANITIZER is not set - -# -# Misc. obscure options. -# -CT_CC_CXA_ATEXIT=y -# CT_CC_GCC_DISABLE_PCH is not set -CT_CC_GCC_SJLJ_EXCEPTIONS=m -CT_CC_GCC_LDBL_128=m -# CT_CC_GCC_BUILD_ID is not set -CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y -# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set -# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set -# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set -CT_CC_GCC_LNK_HASH_STYLE="" -CT_CC_GCC_DEC_FLOAT_AUTO=y -# CT_CC_GCC_DEC_FLOAT_BID is not set -# CT_CC_GCC_DEC_FLOAT_DPD is not set -# CT_CC_GCC_DEC_FLOATS_NO is not set -CT_ALL_CC_CHOICES="GCC" - -# -# Additional supported languages: -# CT_CC_LANG_CXX=y -# CT_CC_LANG_FORTRAN is not set - -# -# Debug facilities -# -# CT_DEBUG_DUMA is not set -# CT_DEBUG_GDB is not set -# CT_DEBUG_LTRACE is not set -# CT_DEBUG_STRACE is not set -CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" - -# -# Companion libraries -# -# CT_COMPLIBS_CHECK is not set -# CT_COMP_LIBS_CLOOG is not set -# CT_COMP_LIBS_EXPAT is not set -CT_COMP_LIBS_GETTEXT=y -CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" -CT_GETTEXT_DIR_NAME="gettext" -CT_GETTEXT_PKG_NAME="gettext" -CT_GETTEXT_SRC_RELEASE=y -CT_GETTEXT_PATCH_ORDER="global" CT_GETTEXT_V_0_19_8_1=y -# CT_GETTEXT_NO_VERSIONS is not set -CT_GETTEXT_VERSION="0.19.8.1" -CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" -CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.gz" -CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" -CT_COMP_LIBS_GMP=y -CT_COMP_LIBS_GMP_PKG_KSYM="GMP" -CT_GMP_DIR_NAME="gmp" -CT_GMP_PKG_NAME="gmp" -CT_GMP_SRC_RELEASE=y -CT_GMP_PATCH_ORDER="global" CT_GMP_V_6_1=y -# CT_GMP_NO_VERSIONS is not set -CT_GMP_VERSION="6.1.2" -CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" -CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" -CT_GMP_SIGNATURE_FORMAT="packed/.sig" -CT_GMP_later_than_5_1_0=y -CT_GMP_5_1_0_or_later=y -CT_GMP_later_than_5_0_0=y -CT_GMP_5_0_0_or_later=y -CT_GMP_REQUIRE_5_0_0_or_later=y -CT_COMP_LIBS_ISL=y -CT_COMP_LIBS_ISL_PKG_KSYM="ISL" -CT_ISL_DIR_NAME="isl" -CT_ISL_PKG_NAME="isl" -CT_ISL_SRC_RELEASE=y -CT_ISL_PATCH_ORDER="global" CT_ISL_V_0_20=y -# CT_ISL_V_0_19 is not set -# CT_ISL_V_0_18 is not set -# CT_ISL_V_0_17 is not set -# CT_ISL_V_0_16 is not set -# CT_ISL_V_0_15 is not set -# CT_ISL_NO_VERSIONS is not set -CT_ISL_VERSION="0.20" -CT_ISL_MIRRORS="https://ci-mirrors.rust-lang.org/rustc" -CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" -CT_ISL_SIGNATURE_FORMAT="" -CT_ISL_later_than_0_18=y -CT_ISL_0_18_or_later=y -CT_ISL_later_than_0_15=y -CT_ISL_0_15_or_later=y -CT_ISL_REQUIRE_0_15_or_later=y -CT_ISL_later_than_0_14=y -CT_ISL_0_14_or_later=y 
-CT_ISL_REQUIRE_0_14_or_later=y -CT_ISL_later_than_0_13=y -CT_ISL_0_13_or_later=y -CT_ISL_later_than_0_12=y -CT_ISL_0_12_or_later=y -CT_ISL_REQUIRE_0_12_or_later=y -# CT_COMP_LIBS_LIBELF is not set -CT_COMP_LIBS_LIBICONV=y -CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" -CT_LIBICONV_DIR_NAME="libiconv" -CT_LIBICONV_PKG_NAME="libiconv" -CT_LIBICONV_SRC_RELEASE=y -CT_LIBICONV_PATCH_ORDER="global" CT_LIBICONV_V_1_15=y -# CT_LIBICONV_NO_VERSIONS is not set -CT_LIBICONV_VERSION="1.15" -CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" -CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" -CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" -CT_COMP_LIBS_MPC=y -CT_COMP_LIBS_MPC_PKG_KSYM="MPC" -CT_MPC_DIR_NAME="mpc" -CT_MPC_PKG_NAME="mpc" -CT_MPC_SRC_RELEASE=y -CT_MPC_PATCH_ORDER="global" -CT_MPC_V_1_1=y -# CT_MPC_V_1_0 is not set -# CT_MPC_NO_VERSIONS is not set -CT_MPC_VERSION="1.1.0" -CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" -CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_MPC_ARCHIVE_FORMATS=".tar.gz" -CT_MPC_SIGNATURE_FORMAT="packed/.sig" -CT_MPC_1_1_0_or_later=y -CT_MPC_1_1_0_or_older=y -CT_COMP_LIBS_MPFR=y -CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" -CT_MPFR_DIR_NAME="mpfr" -CT_MPFR_PKG_NAME="mpfr" -CT_MPFR_SRC_RELEASE=y -CT_MPFR_PATCH_ORDER="global" -CT_MPFR_V_4_0=y -# CT_MPFR_V_3_1 is not set -# CT_MPFR_NO_VERSIONS is not set -CT_MPFR_VERSION="4.0.2" -CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" -CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" -CT_MPFR_SIGNATURE_FORMAT="packed/.asc" -CT_MPFR_later_than_4_0_0=y -CT_MPFR_4_0_0_or_later=y -CT_MPFR_later_than_3_0_0=y -CT_MPFR_3_0_0_or_later=y -CT_MPFR_REQUIRE_3_0_0_or_later=y -CT_COMP_LIBS_NCURSES=y -CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" -CT_NCURSES_DIR_NAME="ncurses" -CT_NCURSES_PKG_NAME="ncurses" -CT_NCURSES_SRC_RELEASE=y -CT_NCURSES_PATCH_ORDER="global" CT_NCURSES_V_6_1=y -# CT_NCURSES_V_6_0 is not set -# CT_NCURSES_NO_VERSIONS is not set -CT_NCURSES_VERSION="6.1" -CT_NCURSES_MIRRORS="ftp://invisible-island.net/ncurses $(CT_Mirrors GNU ncurses)" -CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" -CT_NCURSES_SIGNATURE_FORMAT="packed/.sig" -CT_NCURSES_HOST_CONFIG_ARGS="" -CT_NCURSES_HOST_DISABLE_DB=y -CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" -CT_NCURSES_TARGET_CONFIG_ARGS="" -# CT_NCURSES_TARGET_DISABLE_DB is not set -CT_NCURSES_TARGET_FALLBACKS="" -CT_COMP_LIBS_ZLIB=y -CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" -CT_ZLIB_DIR_NAME="zlib" -CT_ZLIB_PKG_NAME="zlib" -CT_ZLIB_SRC_RELEASE=y -CT_ZLIB_PATCH_ORDER="global" -CT_ZLIB_V_1_2_11=y -# CT_ZLIB_NO_VERSIONS is not set -CT_ZLIB_VERSION="1.2.11" -CT_ZLIB_MIRRORS="https://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION}" -CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" -CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" -CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP ISL LIBELF LIBICONV MPC MPFR NCURSES ZLIB" -CT_LIBICONV_NEEDED=y -CT_GETTEXT_NEEDED=y -CT_GMP_NEEDED=y -CT_MPFR_NEEDED=y -CT_ISL_NEEDED=y -CT_MPC_NEEDED=y -CT_NCURSES_NEEDED=y -CT_ZLIB_NEEDED=y -CT_LIBICONV=y 
-CT_GETTEXT=y -CT_GMP=y -CT_MPFR=y -CT_ISL=y -CT_MPC=y -CT_NCURSES=y -CT_ZLIB=y - -# -# Companion tools -# -# CT_COMP_TOOLS_FOR_HOST is not set -# CT_COMP_TOOLS_AUTOCONF is not set -# CT_COMP_TOOLS_AUTOMAKE is not set -# CT_COMP_TOOLS_BISON is not set -# CT_COMP_TOOLS_DTC is not set -# CT_COMP_TOOLS_LIBTOOL is not set -# CT_COMP_TOOLS_M4 is not set -# CT_COMP_TOOLS_MAKE is not set -CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" diff --git a/crosstool-ng/loongarch64-unknown-linux-gnu.config.in b/crosstool-ng/loongarch64-unknown-linux-gnu.config.in new file mode 100644 index 000000000..24d862768 --- /dev/null +++ b/crosstool-ng/loongarch64-unknown-linux-gnu.config.in @@ -0,0 +1,16 @@ +CT_CONFIG_VERSION="4" +CT_EXPERIMENTAL=y +CT_PREFIX_DIR="/x-tools/${CT_TARGET}" +CT_ARCH_LOONGARCH=y +# CT_DEMULTILIB is not set +CT_ARCH_USE_MMU=y +CT_ARCH_ARCH="loongarch64" +CT_KERNEL_LINUX=y +%CT_LINUX_V% +%CT_LINUX% +%CT_GLIBC_V% +%CT_GLIBC% +%CT_GCC_V% +%CT_GCC% +CT_CC_GCC_ENABLE_DEFAULT_PIE=y +CT_CC_LANG_CXX=y diff --git a/deny.toml b/deny.toml index 3f7755317..756088cc5 100644 --- a/deny.toml +++ b/deny.toml @@ -1,4 +1,4 @@ - +[graph] # only check for the targets we currently publish targets = [ { triple = "x86_64-apple-darwin" }, @@ -8,15 +8,19 @@ targets = [ ] [advisories] -vulnerability = "deny" -unmaintained = "deny" -notice = "deny" -unsound = "deny" -ignore = [] +version = 2 +# FIXME: remove this if/when clap changes to is-terminal, atty is +# patched, or we migrate to an MSRV of 1.66.0. +ignore = [ + "RUSTSEC-2021-0145", +] [bans] multiple-versions = "deny" deny = [] +skip-tree = [ + { name = "nix", version = "=0.26.4", depth = 2 }, +] [sources] unknown-registry = "deny" @@ -24,11 +28,9 @@ unknown-git = "deny" allow-git = [] [licenses] +version = 2 # need this to suppress errors in case we add crates with these allowed licenses unused-allowed-license = "allow" -unlicensed = "deny" -allow-osi-fsf-free = "neither" -copyleft = "deny" confidence-threshold = 0.93 allow = [ "Apache-2.0", @@ -38,7 +40,8 @@ allow = [ "0BSD", "BSD-2-Clause", "BSD-3-Clause", - "Unlicense" + "Unlicense", + "Unicode-DFS-2016", ] [licenses.private] diff --git a/docker/.dockerignore b/docker/.dockerignore new file mode 100644 index 000000000..cc0396c4b --- /dev/null +++ b/docker/.dockerignore @@ -0,0 +1,11 @@ +# don't copy any of the python artifacts to the docker context +__pycache__/ +.pytest_cache/ +*.py[cod] +*$py.class +**/*.egg-info/ +*.egg +.tox + +# also skip our test suite +android/tests/ diff --git a/docker/.gitattributes b/docker/.gitattributes new file mode 100644 index 000000000..fcadb2cf9 --- /dev/null +++ b/docker/.gitattributes @@ -0,0 +1 @@ +* text eol=lf diff --git a/docker/Dockerfile.aarch64-linux-android b/docker/Dockerfile.aarch64-linux-android index ea85d5d51..211b3c6e5 100644 --- a/docker/Dockerfile.aarch64-linux-android +++ b/docker/Dockerfile.aarch64-linux-android @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,31 +10,67 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +COPY qemu.sh / +RUN /qemu.sh aarch64 + +ARG ANDROID_NDK=r25b +ARG ANDROID_SDK=28 +ARG ANDROID_VERSION=9.0.0_r1 +ARG ANDROID_SYSTEM_NONE=0 +ARG ANDROID_SYSTEM_COMPLETE=0 +ARG PYTHON_TMPDIR=/tmp/android + +COPY validate-android-args.sh / +RUN /validate-android-args.sh arm64 + COPY android-ndk.sh / -RUN
/android-ndk.sh arm64 28 +RUN /android-ndk.sh arm64 ENV PATH=$PATH:/android-ndk/bin COPY android-system.sh / +RUN mkdir -p $PYTHON_TMPDIR +COPY android $PYTHON_TMPDIR RUN /android-system.sh arm64 -COPY qemu.sh / -RUN /qemu.sh aarch64 - -RUN cp /android-ndk/sysroot/usr/lib/aarch64-linux-android/28/libz.so /system/lib/ +ENV CROSS_TOOLCHAIN_PREFIX=aarch64-linux-android- +ENV CROSS_SYSROOT=/android-ndk/sysroot +ENV CROSS_ANDROID_SDK=$ANDROID_SDK +COPY android-symlink.sh / +RUN /android-symlink.sh aarch64 aarch64-linux-android COPY android-runner / +COPY android.cmake /opt/toolchain.cmake # Libz is distributed in the android ndk, but for some unknown reason it is not # found in the build process of some crates, so we explicitly set the DEP_Z_ROOT -ENV CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER=aarch64-linux-android-gcc \ - CARGO_TARGET_AARCH64_LINUX_ANDROID_RUNNER="/android-runner aarch64" \ - CC_aarch64_linux_android=aarch64-linux-android-gcc \ - CXX_aarch64_linux_android=aarch64-linux-android-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_aarch64_linux_android="--sysroot=/android-ndk/sysroot" \ - DEP_Z_INCLUDE=/android-ndk/sysroot/usr/include/ \ +ENV CROSS_TARGET_RUNNER="/android-runner aarch64" +ENV CARGO_TARGET_AARCH64_LINUX_ANDROID_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_AARCH64_LINUX_ANDROID_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"nm \ + OBJCOPY_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_aarch64_linux_android="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_aarch64_linux_android=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_aarch64_linux_android="--sysroot=$CROSS_SYSROOT" \ + DEP_Z_INCLUDE="$CROSS_SYSROOT/usr/include"/ \ RUST_TEST_THREADS=1 \ HOME=/tmp/ \ TMPDIR=/tmp/ \ ANDROID_DATA=/ \ ANDROID_DNS_MODE=local \ - ANDROID_ROOT=/system + ANDROID_ROOT=/system \ + CROSS_CMAKE_SYSTEM_NAME=Android \ + CROSS_CMAKE_SYSTEM_PROCESSOR=aarch64 \ + CROSS_CMAKE_CRT=android \ + CROSS_CMAKE_OBJECT_FLAGS="-DANDROID -ffunction-sections -fdata-sections -fPIC" diff --git a/docker/Dockerfile.aarch64-unknown-freebsd b/docker/Dockerfile.aarch64-unknown-freebsd new file mode 100644 index 000000000..8631717d4 --- /dev/null +++ b/docker/Dockerfile.aarch64-unknown-freebsd @@ -0,0 +1,44 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +RUN echo "export ARCH=aarch64" > /freebsd-arch.sh +COPY freebsd-common.sh / +COPY freebsd.sh / +RUN /freebsd.sh + +COPY freebsd-install.sh / +COPY freebsd-extras.sh / +RUN /freebsd-extras.sh + +ENV CROSS_TOOLCHAIN_PREFIX=aarch64-unknown-freebsd13- +ENV CROSS_SYSROOT=/usr/local/aarch64-unknown-freebsd13 + +COPY freebsd-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY toolchain.cmake /opt/toolchain.cmake + +COPY freebsd-fetch-best-mirror.sh / +COPY
freebsd-setup-packagesite.sh / +COPY freebsd-install-package.sh / + +ENV CARGO_TARGET_AARCH64_UNKNOWN_FREEBSD_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + AR_aarch64_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_aarch64_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_aarch64_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_aarch64_unknown_freebsd=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_aarch64_unknown_freebsd="--sysroot=$CROSS_SYSROOT" \ + AARCH64_UNKNOWN_FREEBSD_OPENSSL_DIR="$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=FreeBSD \ + CROSS_CMAKE_SYSTEM_PROCESSOR=aarch64 \ + CROSS_CMAKE_CRT=freebsd \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" diff --git a/docker/Dockerfile.aarch64-unknown-linux-gnu b/docker/Dockerfile.aarch64-unknown-linux-gnu index d9d823e23..85910e398 100644 --- a/docker/Dockerfile.aarch64-unknown-linux-gnu +++ b/docker/Dockerfile.aarch64-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-aarch64-linux-gnu \ + gfortran-aarch64-linux-gnu \ libc6-dev-arm64-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=arm64 /deny-debian-packages.sh \ + binutils \ + binutils-aarch64-linux-gnu + COPY qemu.sh / RUN /qemu.sh aarch64 softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh aarch64 -COPY linux-runner / - -ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner aarch64" \ - CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \ - CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_aarch64_unknown_linux_gnu="--sysroot=/usr/aarch64-linux-gnu" \ - QEMU_LD_PREFIX=/usr/aarch64-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=aarch64-linux-gnu- +ENV CROSS_SYSROOT=/usr/aarch64-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner aarch64" +ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_aarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_aarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_aarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_aarch64_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_aarch64_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=aarch64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.aarch64-unknown-linux-gnu.centos b/docker/Dockerfile.aarch64-unknown-linux-gnu.centos new file mode 100644 index 000000000..5b1c4e2f3 --- /dev/null +++ b/docker/Dockerfile.aarch64-unknown-linux-gnu.centos @@ -0,0 +1,50 @@ +FROM ubuntu:20.04 as
base + +COPY lib.sh / +COPY linux-image.sh / +RUN /linux-image.sh aarch64 + +FROM centos:7 + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +COPY qemu.sh / +RUN /qemu.sh aarch64 softmmu + +COPY dropbear.sh / +RUN /dropbear.sh + +COPY --from=0 /qemu /qemu + +COPY linux-runner base-runner.sh / + +COPY aarch64-linux-gnu-glibc.sh / +RUN /aarch64-linux-gnu-glibc.sh + +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=aarch64-linux-gnu- +ENV CROSS_SYSROOT=/usr/aarch64-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner aarch64" +ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_aarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_aarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_aarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_aarch64_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_aarch64_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=aarch64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.aarch64-unknown-linux-musl b/docker/Dockerfile.aarch64-unknown-linux-musl index cfb7e4d87..810e05272 100644 --- a/docker/Dockerfile.aarch64-unknown-linux-musl +++ b/docker/Dockerfile.aarch64-unknown-linux-musl @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,24 +10,37 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh aarch64 COPY musl.sh / RUN /musl.sh TARGET=aarch64-linux-musl -ENV CROSS_MUSL_SYSROOT=/usr/local/aarch64-linux-musl +ENV CROSS_TOOLCHAIN_PREFIX=aarch64-linux-musl- +ENV CROSS_SYSROOT=/usr/local/aarch64-linux-musl COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT aarch64 - -COPY aarch64-linux-musl-gcc.sh /usr/bin/ - -COPY qemu-runner / - -ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER=aarch64-linux-musl-gcc.sh \ - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUNNER="/qemu-runner aarch64" \ - CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc \ - CXX_aarch64_unknown_linux_musl=aarch64-linux-musl-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_aarch64_unknown_linux_musl="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT aarch64 + +COPY musl-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner aarch64" +ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_aarch64_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_aarch64_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_aarch64_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_aarch64_unknown_linux_musl=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_aarch64_unknown_linux_musl="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + 
CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=aarch64 \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" \ + CROSS_BUILTINS_PATCHED_MINOR_VERSION=48 + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.arm-linux-androideabi b/docker/Dockerfile.arm-linux-androideabi index 3faae941d..7e9d7da37 100644 --- a/docker/Dockerfile.arm-linux-androideabi +++ b/docker/Dockerfile.arm-linux-androideabi @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,31 +10,67 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +COPY qemu.sh / +RUN /qemu.sh arm + +ARG ANDROID_NDK=r25b +ARG ANDROID_SDK=28 +ARG ANDROID_VERSION=9.0.0_r1 +ARG ANDROID_SYSTEM_NONE=0 +ARG ANDROID_SYSTEM_COMPLETE=0 +ARG PYTHON_TMPDIR=/tmp/android + +COPY validate-android-args.sh / +RUN /validate-android-args.sh arm + COPY android-ndk.sh / -RUN /android-ndk.sh arm 28 +RUN /android-ndk.sh arm ENV PATH=$PATH:/android-ndk/bin COPY android-system.sh / +RUN mkdir -p $PYTHON_TMPDIR +COPY android $PYTHON_TMPDIR RUN /android-system.sh arm -COPY qemu.sh / -RUN /qemu.sh arm - -RUN cp /android-ndk/sysroot/usr/lib/arm-linux-androideabi/28/libz.so /system/lib/ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-androideabi- +ENV CROSS_SYSROOT=/android-ndk/sysroot +ENV CROSS_ANDROID_SDK=$ANDROID_SDK +COPY android-symlink.sh / +RUN /android-symlink.sh arm arm-linux-androideabi COPY android-runner / +COPY android.cmake /opt/toolchain.cmake # Libz is distributed in the android ndk, but for some unknown reason it is not # found in the build process of some crates, so we explicitly set the DEP_Z_ROOT -ENV CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER="/android-runner arm" \ - CC_arm_linux_androideabi=arm-linux-androideabi-gcc \ - CXX_arm_linux_androideabi=arm-linux-androideabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_arm_linux_androideabi="--sysroot=/android-ndk/sysroot" \ - DEP_Z_INCLUDE=/android-ndk/sysroot/usr/include/ \ +ENV CROSS_TARGET_RUNNER="/android-runner arm" +ENV CARGO_TARGET_ARM_LINUX_ANDROIDEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARM_LINUX_ANDROIDEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"nm \ + OBJCOPY_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_arm_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_arm_linux_androideabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_arm_linux_androideabi="--sysroot=$CROSS_SYSROOT" \ + DEP_Z_INCLUDE="$CROSS_SYSROOT/usr/include/" \ RUST_TEST_THREADS=1 \ HOME=/tmp/ \ TMPDIR=/tmp/ \ ANDROID_DATA=/ \ ANDROID_DNS_MODE=local \ - ANDROID_ROOT=/system + ANDROID_ROOT=/system \ +
CROSS_CMAKE_SYSTEM_NAME=Android \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv5te \ + CROSS_CMAKE_CRT=android \ + CROSS_CMAKE_OBJECT_FLAGS="--target=arm-linux-androideabi -DANDROID -ffunction-sections -fdata-sections -fPIC --target=arm-linux-androideabi" diff --git a/docker/Dockerfile.arm-unknown-linux-gnueabi b/docker/Dockerfile.arm-unknown-linux-gnueabi index 7d6dcd8b5..a6341617a 100644 --- a/docker/Dockerfile.arm-unknown-linux-gnueabi +++ b/docker/Dockerfile.arm-unknown-linux-gnueabi @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,19 +10,41 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-arm-linux-gnueabi \ - libc6-dev-armel-cross && \ - /qemu.sh arm + gfortran-arm-linux-gnueabi \ + libc6-dev-armel-cross + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=armel /deny-debian-packages.sh \ + binutils \ + binutils-arm-linux-gnueabi -COPY qemu-runner / +COPY qemu.sh / +RUN /qemu.sh arm + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake -ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABI_LINKER=arm-linux-gnueabi-gcc \ - CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABI_RUNNER="/qemu-runner arm" \ - CC_arm_unknown_linux_gnueabi=arm-linux-gnueabi-gcc \ - CXX_arm_unknown_linux_gnueabi=arm-linux-gnueabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_gnueabi="--sysroot=/usr/arm-linux-gnueabi" \ - QEMU_LD_PREFIX=/usr/arm-linux-gnueabi \ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-gnueabi- +ENV CROSS_SYSROOT=/usr/arm-linux-gnueabi +ENV CROSS_TARGET_RUNNER="/qemu-runner arm" +ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_arm_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_arm_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_arm_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_arm_unknown_linux_gnueabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_gnueabi="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabi/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabi/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv6 -marm -mfloat-abi=soft" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.arm-unknown-linux-gnueabihf b/docker/Dockerfile.arm-unknown-linux-gnueabihf index cc8aa8163..a4eaf1fe1 100644 --- a/docker/Dockerfile.arm-unknown-linux-gnueabihf +++ b/docker/Dockerfile.arm-unknown-linux-gnueabihf @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + ARG VERBOSE COPY crosstool-ng.sh / COPY crosstool-config/arm-unknown-linux-gnueabihf.config / @@ -17,15 +19,32 @@ RUN /crosstool-ng.sh arm-unknown-linux-gnueabihf.config 5 ENV PATH /x-tools/arm-unknown-linux-gnueabihf/bin/:$PATH +COPY 
deny-debian-packages.sh / +RUN TARGET_ARCH=armhf /deny-debian-packages.sh + COPY qemu.sh / RUN /qemu.sh arm -COPY qemu-runner / - -ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-unknown-linux-gnueabihf-gcc \ - CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_RUNNER="/qemu-runner arm" \ - CC_arm_unknown_linux_gnueabihf=arm-unknown-linux-gnueabihf-gcc \ - CXX_arm_unknown_linux_gnueabihf=arm-unknown-linux-gnueabihf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_gnueabihf="--sysroot=/x-tools/arm-unknown-linux-gnueabihf/arm-unknown-linux-gnueabihf/sysroot/" \ - QEMU_LD_PREFIX=/x-tools/arm-unknown-linux-gnueabihf/arm-unknown-linux-gnueabihf/sysroot/ \ - RUST_TEST_THREADS=1 +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=arm-unknown-linux-gnueabihf- +ENV CROSS_SYSROOT=/x-tools/arm-unknown-linux-gnueabihf/arm-unknown-linux-gnueabihf/sysroot/ +ENV CROSS_TARGET_RUNNER="/qemu-runner armhf" +ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARM_UNKNOWN_LINUX_GNUEABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_arm_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_arm_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_arm_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_arm_unknown_linux_gnueabihf=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_gnueabihf="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabihf/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv6 -marm -mfpu=vfp" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.arm-unknown-linux-musleabi b/docker/Dockerfile.arm-unknown-linux-musleabi index 2a3844be3..e84e44941 100644 --- a/docker/Dockerfile.arm-unknown-linux-musleabi +++ b/docker/Dockerfile.arm-unknown-linux-musleabi @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh arm @@ -20,16 +22,27 @@ RUN /musl.sh \ --with-float=soft \ --with-mode=arm" -ENV CROSS_MUSL_SYSROOT=/usr/local/arm-linux-musleabi +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-musleabi- +ENV CROSS_SYSROOT=/usr/local/arm-linux-musleabi COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT arm - -COPY qemu-runner / - -ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABI_LINKER=arm-linux-musleabi-gcc \ - CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABI_RUNNER="/qemu-runner arm" \ - CC_arm_unknown_linux_musleabi=arm-linux-musleabi-gcc \ - CXX_arm_unknown_linux_musleabi=arm-linux-musleabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_musleabi="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT arm + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner arm" +ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + 
AR_arm_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_arm_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_arm_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_arm_unknown_linux_musleabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_musleabi="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv6 -marm -mfloat-abi=soft" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.arm-unknown-linux-musleabihf b/docker/Dockerfile.arm-unknown-linux-musleabihf index d012d38b7..a677a87b7 100644 --- a/docker/Dockerfile.arm-unknown-linux-musleabihf +++ b/docker/Dockerfile.arm-unknown-linux-musleabihf @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh arm @@ -21,16 +23,27 @@ RUN /musl.sh \ --with-float=hard \ --with-mode=arm" -ENV CROSS_MUSL_SYSROOT=/usr/local/arm-linux-musleabihf +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-musleabihf- +ENV CROSS_SYSROOT=/usr/local/arm-linux-musleabihf COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT armhf - -COPY qemu-runner / - -ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_LINKER=arm-linux-musleabihf-gcc \ - CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_RUNNER="/qemu-runner arm" \ - CC_arm_unknown_linux_musleabihf=arm-linux-musleabihf-gcc \ - CXX_arm_unknown_linux_musleabihf=arm-linux-musleabihf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_musleabihf="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT armhf + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner armhf" +ENV CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARM_UNKNOWN_LINUX_MUSLEABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_arm_unknown_linux_musleabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_arm_unknown_linux_musleabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_arm_unknown_linux_musleabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_arm_unknown_linux_musleabihf=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_arm_unknown_linux_musleabihf="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv6 -marm -mfpu=vfp" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.armv5te-unknown-linux-gnueabi b/docker/Dockerfile.armv5te-unknown-linux-gnueabi index 3c9f6189f..878d9132b 100644 --- a/docker/Dockerfile.armv5te-unknown-linux-gnueabi +++ b/docker/Dockerfile.armv5te-unknown-linux-gnueabi @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,20 +10,42 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN 
apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-arm-linux-gnueabi \ + gfortran-arm-linux-gnueabi \ crossbuild-essential-armel \ - libc6-dev-armel-cross && \ - /qemu.sh arm + libc6-dev-armel-cross + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=armel /deny-debian-packages.sh \ + binutils \ + binutils-arm-linux-gnueabi -COPY qemu-runner / +COPY qemu.sh / +RUN /qemu.sh arm + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake -ENV CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_GNUEABI_LINKER=arm-linux-gnueabi-gcc \ - CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_GNUEABI_RUNNER="/qemu-runner arm" \ - CC_armv5te_unknown_linux_gnueabi=arm-linux-gnueabi-gcc \ - CXX_armv5te_unknown_linux_gnueabi=arm-linux-gnueabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv5te_unknown_linux_gnueabi="--sysroot=/usr/arm-linux-gnueabi" \ - QEMU_LD_PREFIX=/usr/arm-linux-gnueabi \ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-gnueabi- +ENV CROSS_SYSROOT=/usr/arm-linux-gnueabi +ENV CROSS_TARGET_RUNNER="/qemu-runner arm" +ENV CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_GNUEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_GNUEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv5te_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_armv5te_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv5te_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_armv5te_unknown_linux_gnueabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv5te_unknown_linux_gnueabi="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabi/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabi/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv5te -marm -mfloat-abi=soft" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.armv5te-unknown-linux-musleabi b/docker/Dockerfile.armv5te-unknown-linux-musleabi index a87d3fdb9..e2f773781 100644 --- a/docker/Dockerfile.armv5te-unknown-linux-musleabi +++ b/docker/Dockerfile.armv5te-unknown-linux-musleabi @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh arm @@ -20,16 +22,29 @@ RUN /musl.sh \ --with-float=soft \ --with-mode=arm" -ENV CROSS_MUSL_SYSROOT=/usr/local/arm-linux-musleabi +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-musleabi- +ENV CROSS_SYSROOT=/usr/local/arm-linux-musleabi COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT arm - -COPY qemu-runner / - -ENV CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_MUSLEABI_LINKER=arm-linux-musleabi-gcc \ - CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_MUSLEABI_RUNNER="/qemu-runner arm" \ - CC_armv5te_unknown_linux_musleabi=arm-linux-musleabi-gcc \ - CXX_armv5te_unknown_linux_musleabi=arm-linux-musleabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv5te_unknown_linux_musleabi="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT arm + +COPY musl-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY qemu-runner base-runner.sh / +COPY 
toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner arm" +ENV CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_MUSLEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + CARGO_TARGET_ARMV5TE_UNKNOWN_LINUX_MUSLEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv5te_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_armv5te_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv5te_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_armv5te_unknown_linux_musleabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv5te_unknown_linux_musleabi="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv5te -marm -mfloat-abi=soft" \ + CROSS_BUILTINS_PATCHED_MINOR_VERSION=65 + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.armv7-linux-androideabi b/docker/Dockerfile.armv7-linux-androideabi index 3b2b2760d..0ee304703 100644 --- a/docker/Dockerfile.armv7-linux-androideabi +++ b/docker/Dockerfile.armv7-linux-androideabi @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,31 +10,67 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +COPY qemu.sh / +RUN /qemu.sh arm + +ARG ANDROID_NDK=r25b +ARG ANDROID_SDK=28 +ARG ANDROID_VERSION=9.0.0_r1 +ARG ANDROID_SYSTEM_NONE=0 +ARG ANDROID_SYSTEM_COMPLETE=0 +ARG PYTHON_TMPDIR=/tmp/android + +COPY validate-android-args.sh / +RUN /validate-android-args.sh arm + COPY android-ndk.sh / -RUN /android-ndk.sh arm 28 +RUN /android-ndk.sh arm ENV PATH=$PATH:/android-ndk/bin COPY android-system.sh / +RUN mkdir -p $PYTHON_TMPDIR +COPY android $PYTHON_TMPDIR RUN /android-system.sh arm -COPY qemu.sh / -RUN /qemu.sh arm - -RUN cp /android-ndk/sysroot/usr/lib/arm-linux-androideabi/28/libz.so /system/lib/ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-androideabi- +ENV CROSS_SYSROOT=/android-ndk/sysroot +ENV CROSS_ANDROID_SDK=$ANDROID_SDK +COPY android-symlink.sh / +RUN /android-symlink.sh arm arm-linux-androideabi COPY android-runner / +COPY android.cmake /opt/toolchain.cmake # Libz is distributed in the android ndk, but for some unknown reason it is not # found in the build process of some crates, so we explicitly set the DEP_Z_ROOT -ENV CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_RUNNER="/android-runner arm" \ - CC_armv7_linux_androideabi=arm-linux-androideabi-gcc \ - CXX_armv7_linux_androideabi=arm-linux-androideabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv7_linux_androideabi="--sysroot=/android-ndk/sysroot" \ - DEP_Z_INCLUDE=/android-ndk/sysroot/usr/include/ \ +ENV CROSS_TARGET_RUNNER="/android-runner arm" +ENV CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARMV7_LINUX_ANDROIDEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"nm \ +
OBJCOPY_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_armv7_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_armv7_linux_androideabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv7_linux_androideabi="--sysroot=$CROSS_SYSROOT" \ + DEP_Z_INCLUDE="$CROSS_SYSROOT/usr/include/" \ RUST_TEST_THREADS=1 \ HOME=/tmp/ \ TMPDIR=/tmp/ \ ANDROID_DATA=/ \ ANDROID_DNS_MODE=local \ - ANDROID_ROOT=/system + ANDROID_ROOT=/system \ + CROSS_CMAKE_SYSTEM_NAME=Android \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv7-a \ + CROSS_CMAKE_CRT=android \ + CROSS_CMAKE_OBJECT_FLAGS="--target=arm-linux-androideabi -DANDROID -ffunction-sections -fdata-sections -fPIC --target=armv7-linux-androideabi" diff --git a/docker/Dockerfile.armv7-unknown-linux-gnueabi b/docker/Dockerfile.armv7-unknown-linux-gnueabi index c8bffaae0..c1201c759 100644 --- a/docker/Dockerfile.armv7-unknown-linux-gnueabi +++ b/docker/Dockerfile.armv7-unknown-linux-gnueabi @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,20 +10,36 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get install --assume-yes --no-install-recommends \ g++-arm-linux-gnueabi \ + gfortran-arm-linux-gnueabi \ libc6-dev-armel-cross COPY qemu.sh / RUN /qemu.sh arm -COPY qemu-runner / +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake -ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABI_LINKER=arm-linux-gnueabi-gcc \ - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABI_RUNNER="/qemu-runner armv7" \ - CC_armv7_unknown_linux_gnueabi=arm-linux-gnueabi-gcc \ - CXX_armv7_unknown_linux_gnueabi=arm-linux-gnueabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_gnueabi="--sysroot=/usr/arm-linux-gnueabi" \ - QEMU_LD_PREFIX=/usr/arm-linux-gnueabi \ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-gnueabi- +ENV CROSS_SYSROOT=/usr/arm-linux-gnueabi +ENV CROSS_TARGET_RUNNER="/qemu-runner armv7" +ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv7_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_armv7_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv7_unknown_linux_gnueabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_armv7_unknown_linux_gnueabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_gnueabi="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabi/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabi/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv7-a" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.armv7-unknown-linux-gnueabihf b/docker/Dockerfile.armv7-unknown-linux-gnueabihf index 8e9600805..37fac999c 100644 --- 
a/docker/Dockerfile.armv7-unknown-linux-gnueabihf +++ b/docker/Dockerfile.armv7-unknown-linux-gnueabihf @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-arm-linux-gnueabihf \ + gfortran-arm-linux-gnueabihf \ libc6-dev-armhf-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=armhf /deny-debian-packages.sh \ + binutils \ + binutils-arm-linux-gnueabihf + COPY qemu.sh / RUN /qemu.sh arm softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh armv7 -COPY linux-runner / - -ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER="/linux-runner armv7" \ - CC_armv7_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \ - CXX_armv7_unknown_linux_gnueabihf=arm-linux-gnueabihf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_gnueabihf="--sysroot=/usr/arm-linux-gnueabihf" \ - QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-gnueabihf- +ENV CROSS_SYSROOT=/usr/arm-linux-gnueabihf +ENV CROSS_TARGET_RUNNER="/linux-runner armv7hf" +ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv7_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_armv7_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv7_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_armv7_unknown_linux_gnueabihf=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_gnueabihf="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabihf/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabihf/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv7-a -mfpu=vfpv3-d16" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.armv7-unknown-linux-musleabi b/docker/Dockerfile.armv7-unknown-linux-musleabi index ab1a965a4..e4496c326 100644 --- a/docker/Dockerfile.armv7-unknown-linux-musleabi +++ b/docker/Dockerfile.armv7-unknown-linux-musleabi @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh arm @@ -21,16 +23,27 @@ RUN /musl.sh \ --with-mode=thumb \ --with-mode=arm" -ENV CROSS_MUSL_SYSROOT=/usr/local/arm-linux-musleabi +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-musleabi- +ENV CROSS_SYSROOT=/usr/local/arm-linux-musleabi COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT arm - -COPY qemu-runner / - -ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABI_LINKER=arm-linux-musleabi-gcc \ - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABI_RUNNER="/qemu-runner armv7" \ - 
CC_armv7_unknown_linux_musleabi=arm-linux-musleabi-gcc \ - CXX_armv7_unknown_linux_musleabi=arm-linux-musleabi-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_musleabi="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT arm + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner armv7" +ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv7_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_armv7_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv7_unknown_linux_musleabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_armv7_unknown_linux_musleabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_musleabi="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv7-a" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.armv7-unknown-linux-musleabihf b/docker/Dockerfile.armv7-unknown-linux-musleabihf index 3742465d4..e3c7aab63 100644 --- a/docker/Dockerfile.armv7-unknown-linux-musleabihf +++ b/docker/Dockerfile.armv7-unknown-linux-musleabihf @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh arm @@ -21,16 +23,27 @@ RUN /musl.sh \ --with-mode=thumb \ --with-fpu=vfp" -ENV CROSS_MUSL_SYSROOT=/usr/local/arm-linux-musleabihf +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-musleabihf- +ENV CROSS_SYSROOT=/usr/local/arm-linux-musleabihf COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT armhf - -COPY qemu-runner / - -ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER=arm-linux-musleabihf-gcc \ - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_RUNNER="/qemu-runner armv7" \ - CC_armv7_unknown_linux_musleabihf=arm-linux-musleabihf-gcc \ - CXX_armv7_unknown_linux_musleabihf=arm-linux-musleabihf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_musleabihf="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT armhf + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner armv7hf" +ENV CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_ARMV7_UNKNOWN_LINUX_MUSLEABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_armv7_unknown_linux_musleabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_armv7_unknown_linux_musleabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_armv7_unknown_linux_musleabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_armv7_unknown_linux_musleabihf=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_armv7_unknown_linux_musleabihf="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv7-a -mfpu=vfpv3-d16" + 
+RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.asmjs-unknown-emscripten b/docker/Dockerfile.asmjs-unknown-emscripten index 31781fcb5..2b0a725d5 100644 --- a/docker/Dockerfile.asmjs-unknown-emscripten +++ b/docker/Dockerfile.asmjs-unknown-emscripten @@ -1,6 +1,6 @@ FROM emscripten/emsdk:3.1.14 WORKDIR / -ARG DEBIAN_FRONTEND=noninteractive +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -11,9 +11,13 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -RUN apt-get update && apt-get update && apt-get install --assume-yes --no-install-recommends \ - libxml2 \ - python +RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ + libxml2 \ + python -ENV CARGO_TARGET_ASMJS_UNKNOWN_EMSCRIPTEN_RUNNER=node \ - BINDGEN_EXTRA_CLANG_ARGS_asmjs_unknown_emscripten="--sysroot=/emsdk/upstream/emscripten/cache/sysroot" +ENV CROSS_TOOLCHAIN_PREFIX=em +ENV CROSS_SYSROOT=/emsdk/upstream/emscripten/cache/sysroot +ENV CROSS_TARGET_RUNNER="node" +ENV CARGO_TARGET_ASMJS_UNKNOWN_EMSCRIPTEN_RUNNER="$CROSS_TARGET_RUNNER" \ + BINDGEN_EXTRA_CLANG_ARGS_asmjs_unknown_emscripten="--sysroot=$CROSS_SYSROOT" \ + CMAKE_TOOLCHAIN_FILE_asmjs_unknown_emscripten=/emsdk/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake diff --git a/docker/Dockerfile.cross b/docker/Dockerfile.cross index bb0fd55d7..ab2389afd 100644 --- a/docker/Dockerfile.cross +++ b/docker/Dockerfile.cross @@ -1,12 +1,12 @@ FROM ubuntu:20.04 as rust -ARG DEBIAN_FRONTEND=noninteractive +ENV DEBIAN_FRONTEND=noninteractive COPY docker/lib.sh docker/cross.sh / COPY ./ /project RUN /cross.sh /project # we build our images in 2 steps, to ensure we have a compact # image, since we want to add our current subdirectory -FROM ubuntu:20.04 +FROM ubuntu:20.04 as base COPY --from=rust /root/.cargo /root/.cargo COPY --from=rust /root/.rustup /root/.rustup diff --git a/docker/Dockerfile.i586-unknown-linux-gnu b/docker/Dockerfile.i586-unknown-linux-gnu index 1e5610f28..b003d0d85 100644 --- a/docker/Dockerfile.i586-unknown-linux-gnu +++ b/docker/Dockerfile.i586-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,13 +10,41 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ - g++-multilib + g++-i686-linux-gnu \ + gfortran-i686-linux-gnu \ + libc6-dev-i386-cross + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=i386 /deny-debian-packages.sh \ + binutils \ + binutils-i686-linux-gnu COPY qemu.sh / RUN /qemu.sh i386 -COPY qemu-runner / +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=i686-linux-gnu- +ENV CROSS_SYSROOT=/usr/i686-linux-gnu +ENV CROSS_TARGET_RUNNER="/qemu-runner i586" +ENV CARGO_TARGET_I586_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_I586_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_i586_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_i586_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_i586_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_i586_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i586_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + 
PKG_CONFIG_PATH="/usr/lib/i386-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m32 -march=pentium" -ENV CARGO_TARGET_I586_UNKNOWN_LINUX_GNU_RUNNER="/qemu-runner i586" \ - PKG_CONFIG_PATH="/usr/lib/i386-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.i586-unknown-linux-musl b/docker/Dockerfile.i586-unknown-linux-musl index ade3dd4d3..7be51b245 100644 --- a/docker/Dockerfile.i586-unknown-linux-musl +++ b/docker/Dockerfile.i586-unknown-linux-musl @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,21 +10,34 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY musl.sh / RUN /musl.sh TARGET=i586-linux-musl COPY qemu.sh / RUN /qemu.sh i386 -ENV CROSS_MUSL_SYSROOT=/usr/local/i586-linux-musl +ENV CROSS_TOOLCHAIN_PREFIX=i586-linux-musl- +ENV CROSS_SYSROOT=/usr/local/i586-linux-musl COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT i386 - -COPY qemu-runner / - -ENV CARGO_TARGET_I586_UNKNOWN_LINUX_MUSL_LINKER=i586-linux-musl-gcc \ - CARGO_TARGET_I586_UNKNOWN_LINUX_MUSL_RUNNER="/qemu-runner i586" \ - CC_i586_unknown_linux_musl=i586-linux-musl-gcc \ - CXX_i586_unknown_linux_musl=i586-linux-musl-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_i586_unknown_linux_musl="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT +RUN /musl-symlink.sh $CROSS_SYSROOT i386 + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner i586" +ENV CARGO_TARGET_I586_UNKNOWN_LINUX_MUSL_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_I586_UNKNOWN_LINUX_MUSL_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_i586_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_i586_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_i586_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_i586_unknown_linux_musl=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i586_unknown_linux_musl="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86 \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m32 -march=pentium -Wl,-melf_i386" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.i686-linux-android b/docker/Dockerfile.i686-linux-android index 14e987e02..411780b16 100644 --- a/docker/Dockerfile.i686-linux-android +++ b/docker/Dockerfile.i686-linux-android @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,12 +10,7 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY android-ndk.sh / -RUN /android-ndk.sh x86 28 -ENV PATH=$PATH:/android-ndk/bin - -COPY android-system.sh / -RUN /android-system.sh x86 +FROM cross-base as build # We could supposedly directly run i686 binaries like we do for x86_64, but # doing so generates an assertion failure: @@ -23,26 +18,68 @@ RUN /android-system.sh x86 # ... 
src/libstd/sys/unix/mod.rs # fatal runtime error: failed to initiate panic, error 5 # -# Running with qemu works as expected +# Running with qemu works as expected. It also ensures that we're +# running on a CPU that only supports 32-bit x86 systems. COPY qemu.sh / RUN /qemu.sh i386 -RUN cp /android-ndk/sysroot/usr/lib/i686-linux-android/28/libz.so /system/lib/ +ARG ANDROID_NDK=r25b +ARG ANDROID_SDK=28 +ARG ANDROID_VERSION=9.0.0_r1 +ARG ANDROID_SYSTEM_NONE=0 +ARG ANDROID_SYSTEM_COMPLETE=0 +ARG PYTHON_TMPDIR=/tmp/android + +COPY validate-android-args.sh / +RUN /validate-android-args.sh x86 + +COPY android-ndk.sh / +RUN /android-ndk.sh x86 +ENV PATH=$PATH:/android-ndk/bin + +COPY android-system.sh / +RUN mkdir -p $PYTHON_TMPDIR +COPY android $PYTHON_TMPDIR +RUN /android-system.sh x86 + +ENV CROSS_TOOLCHAIN_PREFIX=i686-linux-android- +ENV CROSS_SYSROOT=/android-ndk/sysroot +ENV CROSS_ANDROID_SDK=$ANDROID_SDK +COPY android-symlink.sh / +RUN /android-symlink.sh i386 i686-linux-android COPY android-runner / +COPY android.cmake /opt/toolchain.cmake # Libz is distributed in the android ndk, but for some unknown reason it is not # found in the build process of some crates, so we explicitly set DEP_Z_INCLUDE -ENV CARGO_TARGET_I686_LINUX_ANDROID_LINKER=i686-linux-android-gcc \ - CARGO_TARGET_I686_LINUX_ANDROID_RUNNER="/android-runner i686" \ - CC_i686_linux_android=i686-linux-android-gcc \ - CXX_i686_linux_android=i686-linux-android-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_i686_linux_android="--sysroot=/android-ndk/sysroot" \ - DEP_Z_INCLUDE=/android-ndk/sysroot/usr/include/ \ +ENV CROSS_TARGET_RUNNER="/android-runner i686" +ENV CARGO_TARGET_I686_LINUX_ANDROID_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_I686_LINUX_ANDROID_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"nm \ + OBJCOPY_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_i686_linux_android="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_i686_linux_android=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i686_linux_android="--sysroot=$CROSS_SYSROOT" \ + DEP_Z_INCLUDE="$CROSS_SYSROOT/usr/include/" \ LIBZ_SYS_STATIC=1 \ RUST_TEST_THREADS=1 \ HOME=/tmp/ \ TMPDIR=/tmp/ \ ANDROID_DATA=/ \ ANDROID_DNS_MODE=local \ - ANDROID_ROOT=/system + ANDROID_ROOT=/system \ + CROSS_CMAKE_SYSTEM_NAME=Android \ + CROSS_CMAKE_SYSTEM_PROCESSOR=i686 \ + CROSS_CMAKE_CRT=android \ + CROSS_CMAKE_OBJECT_FLAGS="--target=i686-linux-android -DANDROID -ffunction-sections -fdata-sections -fPIC" diff --git a/docker/Dockerfile.i686-pc-windows-gnu b/docker/Dockerfile.i686-pc-windows-gnu index 761f48d96..f6d9ed517 100644 --- a/docker/Dockerfile.i686-pc-windows-gnu +++ b/docker/Dockerfile.i686-pc-windows-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,8 @@ COPY
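For the i686-linux-android image above, the new ARG block makes the NDK release, SDK API level, and Android version overridable when the image is rebuilt, with validate-android-args.sh rejecting unsupported combinations. One plausible local rebuild, with illustrative values and tag:

    docker build -f docker/Dockerfile.i686-linux-android \
        --build-arg ANDROID_NDK=r25b \
        --build-arg ANDROID_SDK=30 \
        --build-arg ANDROID_VERSION=11.0.0_r1 \
        -t example/i686-linux-android:custom docker/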
xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN dpkg --add-architecture i386 && apt-get update && \ apt-get install --assume-yes --no-install-recommends libz-mingw-w64-dev @@ -34,8 +36,22 @@ RUN mkdir -p /usr/lib/binfmt-support/ && \ COPY windows-entry.sh / ENTRYPOINT ["/windows-entry.sh"] -ENV CARGO_TARGET_I686_PC_WINDOWS_GNU_LINKER=i686-w64-mingw32-gcc \ - CARGO_TARGET_I686_PC_WINDOWS_GNU_RUNNER=wine \ - CC_i686_pc_windows_gnu=i686-w64-mingw32-gcc-posix \ - CXX_i686_pc_windows_gnu=i686-w64-mingw32-g++-posix \ - BINDGEN_EXTRA_CLANG_ARGS_i686_pc_windows_gnu="--sysroot=/usr/i686-w64-mingw32" +COPY toolchain.cmake /opt/toolchain.cmake + +# for why we always link with pthread support, see: +# https://github.com/cross-rs/cross/pull/1123#issuecomment-1312287148 +ENV CROSS_TOOLCHAIN_PREFIX=i686-w64-mingw32- +ENV CROSS_TOOLCHAIN_SUFFIX=-posix +ENV CROSS_SYSROOT=/usr/i686-w64-mingw32 +ENV CROSS_TARGET_RUNNER="env -u CARGO_TARGET_I686_PC_WINDOWS_GNU_RUNNER wine" +ENV CARGO_TARGET_I686_PC_WINDOWS_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc"$CROSS_TOOLCHAIN_SUFFIX" \ + CARGO_TARGET_I686_PC_WINDOWS_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_i686_pc_windows_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_i686_pc_windows_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc"$CROSS_TOOLCHAIN_SUFFIX" \ + CXX_i686_pc_windows_gnu="$CROSS_TOOLCHAIN_PREFIX"g++"$CROSS_TOOLCHAIN_SUFFIX" \ + CMAKE_TOOLCHAIN_FILE_i686_pc_windows_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i686_pc_windows_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + CROSS_CMAKE_SYSTEM_NAME=Windows \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -m32" diff --git a/docker/Dockerfile.i686-unknown-freebsd b/docker/Dockerfile.i686-unknown-freebsd index af4600a9c..dce7ba815 100644 --- a/docker/Dockerfile.i686-unknown-freebsd +++ b/docker/Dockerfile.i686-unknown-freebsd @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,15 +10,35 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +RUN echo "export ARCH=i686" > /freebsd-arch.sh COPY freebsd-common.sh / COPY freebsd.sh / -RUN /freebsd.sh i686 +RUN /freebsd.sh +COPY freebsd-install.sh / COPY freebsd-extras.sh / -RUN /freebsd-extras.sh i686 +RUN /freebsd-extras.sh + +ENV CROSS_TOOLCHAIN_PREFIX=i686-unknown-freebsd13- +ENV CROSS_SYSROOT=/usr/local/i686-unknown-freebsd13 + +COPY freebsd-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY toolchain.cmake /opt/toolchain.cmake + +COPY freebsd-fetch-best-mirror.sh / +COPY freebsd-setup-packagesite.sh / +COPY freebsd-install-package.sh / -ENV CARGO_TARGET_I686_UNKNOWN_FREEBSD_LINKER=i686-unknown-freebsd12-gcc \ - CC_i686_unknown_freebsd=i686-unknown-freebsd12-gcc \ - CXX_i686_unknown_freebsd=i686-unknown-freebsd12-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_i686_unknown_freebsd="--sysroot=/usr/local/i686-unknown-freebsd12" \ - I686_UNKNOWN_FREEBSD_OPENSSL_DIR=/usr/local/i686-unknown-freebsd12/ +ENV CARGO_TARGET_I686_UNKNOWN_FREEBSD_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + AR_i686_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_i686_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_i686_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_i686_unknown_freebsd=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i686_unknown_freebsd="--sysroot=$CROSS_SYSROOT" \ + I686_UNKNOWN_FREEBSD_OPENSSL_DIR="$CROSS_SYSROOT" \ + 
CROSS_CMAKE_SYSTEM_NAME=FreeBSD \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86 \ + CROSS_CMAKE_CRT=freebsd \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m32" diff --git a/docker/Dockerfile.i686-unknown-linux-gnu b/docker/Dockerfile.i686-unknown-linux-gnu index a9cd692a8..ff634f6b6 100644 --- a/docker/Dockerfile.i686-unknown-linux-gnu +++ b/docker/Dockerfile.i686-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,8 +10,17 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ - g++-multilib + g++-i686-linux-gnu \ + gfortran-i686-linux-gnu \ + libc6-dev-i386-cross + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=i386 /deny-debian-packages.sh \ + binutils \ + binutils-i686-linux-gnu COPY qemu.sh / RUN /qemu.sh i386 softmmu @@ -22,7 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh i686 -COPY linux-runner / - -ENV CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner i686" \ - PKG_CONFIG_PATH="/usr/lib/i386-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=i686-linux-gnu- +ENV CROSS_SYSROOT=/usr/i686-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner i686" +ENV CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_i686_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_i686_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_i686_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_i686_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i686_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + PKG_CONFIG_PATH="/usr/lib/i386-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m32 -march=i686" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.i686-unknown-linux-musl b/docker/Dockerfile.i686-unknown-linux-musl index 05821dff2..d4eb3f646 100644 --- a/docker/Dockerfile.i686-unknown-linux-musl +++ b/docker/Dockerfile.i686-unknown-linux-musl @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,21 +10,34 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY musl.sh / RUN /musl.sh TARGET=i686-linux-musl COPY qemu.sh / RUN /qemu.sh i386 -ENV CROSS_MUSL_SYSROOT=/usr/local/i686-linux-musl +ENV CROSS_TOOLCHAIN_PREFIX=i686-linux-musl- +ENV CROSS_SYSROOT=/usr/local/i686-linux-musl COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT i386 - -COPY qemu-runner / - -ENV CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_LINKER=i686-linux-musl-gcc \ - CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_RUNNER="/qemu-runner i686" \ - CC_i686_unknown_linux_musl=i686-linux-musl-gcc \ - CXX_i686_unknown_linux_musl=i686-linux-musl-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_i686_unknown_linux_musl="--sysroot=$CROSS_MUSL_SYSROOT" \ - 
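The PKG_CONFIG_PATH plus PKG_CONFIG_ALLOW_CROSS=1 pair added to the gnu images matches what the pkg-config crate expects when host and target differ: it refuses cross probes unless explicitly allowed, and the multiarch path is where the target's .pc files live. An illustrative probe from inside the i686 gnu image:

    # zlib's .pc file sits under the i386 multiarch directory in this image
    PKG_CONFIG_ALLOW_CROSS=1 \
    PKG_CONFIG_PATH=/usr/lib/i386-linux-gnu/pkgconfig/ \
        pkg-config --cflags --libs zlib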
QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT +RUN /musl-symlink.sh $CROSS_SYSROOT i386 + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner i686" +ENV CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_I686_UNKNOWN_LINUX_MUSL_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_i686_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_i686_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_i686_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_i686_unknown_linux_musl=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_i686_unknown_linux_musl="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86 \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m32 -march=i686 -Wl,-melf_i386" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.loongarch64-unknown-linux-gnu b/docker/Dockerfile.loongarch64-unknown-linux-gnu new file mode 100644 index 000000000..58340b4ae --- /dev/null +++ b/docker/Dockerfile.loongarch64-unknown-linux-gnu @@ -0,0 +1,49 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +ARG VERBOSE +COPY crosstool-ng.sh / +COPY crosstool-config/loongarch64-unknown-linux-gnu.config / +RUN /crosstool-ng.sh loongarch64-unknown-linux-gnu.config 5 + +ENV PATH /x-tools/loongarch64-unknown-linux-gnu/bin/:$PATH + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=loong64 /deny-debian-packages.sh + +COPY qemu.sh / +RUN /qemu.sh loongarch64 + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=loongarch64-unknown-linux-gnu- +ENV CROSS_SYSROOT=/x-tools/loongarch64-unknown-linux-gnu/loongarch64-unknown-linux-gnu/sysroot/ +ENV CROSS_TARGET_RUNNER="/qemu-runner loongarch64" +ENV CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_LOONGARCH64_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_loongarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_loongarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_loongarch64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_loongarch64_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_loongarch64_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=loongarch64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN mv $CROSS_SYSROOT/lib/* $CROSS_SYSROOT/lib64/ +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.mips-unknown-linux-gnu b/docker/Dockerfile.mips-unknown-linux-gnu index 8f03a28d7..85ad3917c 100644 --- a/docker/Dockerfile.mips-unknown-linux-gnu +++ b/docker/Dockerfile.mips-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,26 +10,36 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get install --assume-yes --no-install-recommends \ 
g++-mips-linux-gnu \ + gfortran-mips-linux-gnu \ libc6-dev-mips-cross COPY qemu.sh / -RUN /qemu.sh mips softmmu - -COPY dropbear.sh / -RUN /dropbear.sh - -COPY linux-image.sh / -RUN /linux-image.sh mips - -COPY linux-runner / - -ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER=mips-linux-gnu-gcc \ - CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner mips" \ - CC_mips_unknown_linux_gnu=mips-linux-gnu-gcc \ - CXX_mips_unknown_linux_gnu=mips-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mips_unknown_linux_gnu="--sysroot=/usr/mips-linux-gnu" \ - QEMU_LD_PREFIX=/usr/mips-linux-gnu \ +RUN /qemu.sh mips + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=mips-linux-gnu- +ENV CROSS_SYSROOT=/usr/mips-linux-gnu +ENV CROSS_TARGET_RUNNER="/qemu-runner mips" +ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_MIPS_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mips_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mips_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mips_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mips_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mips_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/mips-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/mips-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.mips-unknown-linux-musl b/docker/Dockerfile.mips-unknown-linux-musl index 4ba198442..ce5f3c02c 100644 --- a/docker/Dockerfile.mips-unknown-linux-musl +++ b/docker/Dockerfile.mips-unknown-linux-musl @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,24 +10,39 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh mips +# this is a soft-float target for the mips32r2 architecture +# https://github.com/rust-lang/rust/blob/75d3027fb5ce1af6712e4503c9574802212101bd/compiler/rustc_target/src/spec/mips_unknown_linux_musl.rs#L7 COPY musl.sh / RUN /musl.sh \ TARGET=mips-linux-muslsf \ "COMMON_CONFIG += --with-arch=mips32r2" -ENV CROSS_MUSL_SYSROOT=/usr/local/mips-linux-muslsf +ENV CROSS_TOOLCHAIN_PREFIX=mips-linux-muslsf- +ENV CROSS_SYSROOT=/usr/local/mips-linux-muslsf COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT mips-sf - -COPY qemu-runner / - -ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER=mips-linux-muslsf-gcc \ - CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_RUNNER="/qemu-runner mips" \ - CC_mips_unknown_linux_musl=mips-linux-muslsf-gcc \ - CXX_mips_unknown_linux_musl=mips-linux-muslsf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mips_unknown_linux_musl="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT mips-sf + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner mips" +ENV CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ +
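In the mips-unknown-linux-musl image above, musl.sh drives musl-cross-make, so the TARGET and COMMON_CONFIG arguments plausibly end up as config.mak entries, with COMMON_CONFIG flags appended to every nested ./configure invocation:

    # illustrative: the musl.sh arguments above amount to a config.mak like this
    printf 'TARGET = mips-linux-muslsf\nCOMMON_CONFIG += --with-arch=mips32r2\n' \
        > musl-cross-make/config.mak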
CARGO_TARGET_MIPS_UNKNOWN_LINUX_MUSL_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mips_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mips_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mips_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mips_unknown_linux_musl=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mips_unknown_linux_musl="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.mips64-unknown-linux-gnuabi64 b/docker/Dockerfile.mips64-unknown-linux-gnuabi64 index 4b95382ef..aff54da06 100644 --- a/docker/Dockerfile.mips64-unknown-linux-gnuabi64 +++ b/docker/Dockerfile.mips64-unknown-linux-gnuabi64 @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,19 +10,41 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-mips64-linux-gnuabi64 \ - libc6-dev-mips64-cross && \ - /qemu.sh mips64 + gfortran-mips64-linux-gnuabi64 \ + libc6-dev-mips64-cross + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=mips64 /deny-debian-packages.sh \ + binutils \ + binutils-mips64-linux-gnuabi64 -COPY qemu-runner / +COPY qemu.sh / +RUN /qemu.sh mips64 + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake -ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER=mips64-linux-gnuabi64-gcc \ - CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_RUNNER="/qemu-runner mips64" \ - CC_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-gcc \ - CXX_mips64_unknown_linux_gnuabi64=mips64-linux-gnuabi64-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mips64_unknown_linux_gnuabi64="--sysroot=/usr/mips64-linux-gnuabi64" \ - QEMU_LD_PREFIX=/usr/mips64-linux-gnuabi64 \ +ENV CROSS_TOOLCHAIN_PREFIX=mips64-linux-gnuabi64- +ENV CROSS_SYSROOT=/usr/mips64-linux-gnuabi64 +ENV CROSS_TARGET_RUNNER="/qemu-runner mips64" +ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_MIPS64_UNKNOWN_LINUX_GNUABI64_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mips64_unknown_linux_gnuabi64="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mips64_unknown_linux_gnuabi64="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mips64_unknown_linux_gnuabi64="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mips64_unknown_linux_gnuabi64=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mips64_unknown_linux_gnuabi64="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/mips64-linux-gnuabi64/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/mips64-linux-gnuabi64/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.mips64-unknown-linux-muslabi64 b/docker/Dockerfile.mips64-unknown-linux-muslabi64 index 76f0c2001..db764f1c8 100644 --- a/docker/Dockerfile.mips64-unknown-linux-muslabi64 +++ 
b/docker/Dockerfile.mips64-unknown-linux-muslabi64 @@ -1,4 +1,5 @@ -FROM ubuntu:18.04 +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -9,24 +10,45 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh mips64 +# this is a hard-float target for the mips64r2 architecture +# https://github.com/rust-lang/rust/blob/75d3027fb5ce1af6712e4503c9574802212101bd/compiler/rustc_target/src/spec/mips64_unknown_linux_muslabi64.rs#L7 COPY musl.sh / RUN /musl.sh \ - TARGET=mips64-linux-muslsf \ + TARGET=mips64-linux-musl \ "COMMON_CONFIG += --with-arch=mips64r2" -ENV CROSS_MUSL_SYSROOT=/usr/local/mips64-linux-muslsf +ENV CROSS_TOOLCHAIN_PREFIX=mips64-linux-musl- +ENV CROSS_SYSROOT=/usr/local/mips64-linux-musl COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT mips64-sf - -COPY qemu-runner / - -ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_MUSLABI64_LINKER=mips64-linux-muslsf-gcc \ - CARGO_TARGET_MIPS64_UNKNOWN_LINUX_MUSLABI64_RUNNER="/qemu-runner mips64" \ - CC_mips64_unknown_linux_muslabi64=mips64-linux-muslsf-gcc \ - CXX_mips64_unknown_linux_muslabi64=mips64-linux-muslsf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mips64_unknown_linux_muslabi64="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT mips64 +RUN mkdir -p $CROSS_SYSROOT/usr/lib64 +# needed for the C/C++ runners +RUN ln -s $CROSS_SYSROOT/usr/lib/libc.so $CROSS_SYSROOT/usr/lib64/libc.so +RUN ln -s $CROSS_SYSROOT/usr/lib/libc.so.1 $CROSS_SYSROOT/usr/lib64/libc.so.1 + +COPY musl-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner mips64" +ENV CARGO_TARGET_MIPS64_UNKNOWN_LINUX_MUSLABI64_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + CARGO_TARGET_MIPS64_UNKNOWN_LINUX_MUSLABI64_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mips64_unknown_linux_muslabi64="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mips64_unknown_linux_muslabi64="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mips64_unknown_linux_muslabi64="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mips64_unknown_linux_muslabi64=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mips64_unknown_linux_muslabi64="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips64 \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" \ + CROSS_BUILTINS_PATCHED_MINOR_VERSION=65 + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.mips64el-unknown-linux-gnuabi64 b/docker/Dockerfile.mips64el-unknown-linux-gnuabi64 index 03d436a8d..2e86f1a62 100644 --- a/docker/Dockerfile.mips64el-unknown-linux-gnuabi64 +++ b/docker/Dockerfile.mips64el-unknown-linux-gnuabi64 @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-mips64el-linux-gnuabi64 \ + gfortran-mips64el-linux-gnuabi64 \ libc6-dev-mips64el-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=mips64el /deny-debian-packages.sh \ + binutils \ + binutils-mips64el-linux-gnuabi64 + COPY qemu.sh / RUN /qemu.sh
mips64el softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh mips64el -COPY linux-runner / - -ENV CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_LINKER=mips64el-linux-gnuabi64-gcc \ - CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_RUNNER="/linux-runner mips64el" \ - CC_mips64el_unknown_linux_gnuabi64=mips64el-linux-gnuabi64-gcc \ - CXX_mips64el_unknown_linux_gnuabi64=mips64el-linux-gnuabi64-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mips64el_unknown_linux_gnuabi64="--sysroot=/usr/mips64el-linux-gnuabi64" \ - QEMU_LD_PREFIX=/usr/mips64el-linux-gnuabi64 \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=mips64el-linux-gnuabi64- +ENV CROSS_SYSROOT=/usr/mips64el-linux-gnuabi64 +ENV CROSS_TARGET_RUNNER="/linux-runner mips64el" +ENV CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_GNUABI64_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mips64el_unknown_linux_gnuabi64="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mips64el_unknown_linux_gnuabi64="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mips64el_unknown_linux_gnuabi64="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mips64el_unknown_linux_gnuabi64=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mips64el_unknown_linux_gnuabi64="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/mips64el-linux-gnuabi64/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/mips64el-linux-gnuabi64/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.mips64el-unknown-linux-muslabi64 b/docker/Dockerfile.mips64el-unknown-linux-muslabi64 index a65d9f39a..93d466e7b 100644 --- a/docker/Dockerfile.mips64el-unknown-linux-muslabi64 +++ b/docker/Dockerfile.mips64el-unknown-linux-muslabi64 @@ -1,4 +1,5 @@ -FROM ubuntu:18.04 +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -9,24 +10,45 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh mips64el +# this is a hard-float target for the mips64r2 architecture +# https://github.com/rust-lang/rust/blob/75d3027fb5ce1af6712e4503c9574802212101bd/compiler/rustc_target/src/spec/mips64el_unknown_linux_muslabi64.rs#L6 COPY musl.sh / RUN /musl.sh \ - TARGET=mips64el-linux-muslsf \ - "COMMON_CONFIG += --with-arch=mips64" + TARGET=mips64el-linux-musl \ + "COMMON_CONFIG += --with-arch=mips64r2" -ENV CROSS_MUSL_SYSROOT=/usr/local/mips64el-linux-muslsf +ENV CROSS_TOOLCHAIN_PREFIX=mips64el-linux-musl- +ENV CROSS_SYSROOT=/usr/local/mips64el-linux-musl COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT mips64el-sf - -COPY qemu-runner / - -ENV CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_MUSLABI64_LINKER=mips64el-linux-muslsf-gcc \ - CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_MUSLABI64_RUNNER="/qemu-runner mips64el" \ - CC_mips64el_unknown_linux_muslabi64=mips64el-linux-muslsf-gcc \ - CXX_mips64el_unknown_linux_muslabi64=mips64el-linux-muslsf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mips64el_unknown_linux_muslabi64="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT
mips64el +RUN mkdir -p $CROSS_SYSROOT/usr/lib64 +# needed for the C/C++ runners +RUN ln -s $CROSS_SYSROOT/usr/lib/libc.so $CROSS_SYSROOT/usr/lib64/libc.so +RUN ln -s $CROSS_SYSROOT/usr/lib/libc.so.1 $CROSS_SYSROOT/usr/lib64/libc.so.1 + +COPY musl-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner mips64el" +ENV CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_MUSLABI64_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + CARGO_TARGET_MIPS64EL_UNKNOWN_LINUX_MUSLABI64_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mips64el_unknown_linux_muslabi64="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mips64el_unknown_linux_muslabi64="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mips64el_unknown_linux_muslabi64="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mips64el_unknown_linux_muslabi64=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mips64el_unknown_linux_muslabi64="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips64 \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" \ + CROSS_BUILTINS_PATCHED_MINOR_VERSION=65 + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.mipsel-unknown-linux-gnu b/docker/Dockerfile.mipsel-unknown-linux-gnu index 6322411f1..39672a862 100644 --- a/docker/Dockerfile.mipsel-unknown-linux-gnu +++ b/docker/Dockerfile.mipsel-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-mipsel-linux-gnu \ + gfortran-mipsel-linux-gnu \ libc6-dev-mipsel-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=mipsel /deny-debian-packages.sh \ + binutils \ + binutils-mipsel-linux-gnu + COPY qemu.sh / RUN /qemu.sh mipsel softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh mipsel -COPY linux-runner / - -ENV CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_GNU_LINKER=mipsel-linux-gnu-gcc \ - CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner mipsel" \ - CC_mipsel_unknown_linux_gnu=mipsel-linux-gnu-gcc \ - CXX_mipsel_unknown_linux_gnu=mipsel-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mipsel_unknown_linux_gnu="--sysroot=/usr/mipsel-linux-gnu" \ - QEMU_LD_PREFIX=/usr/mipsel-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=mipsel-linux-gnu- +ENV CROSS_SYSROOT=/usr/mipsel-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner mipsel" +ENV CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mipsel_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mipsel_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mipsel_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mipsel_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mipsel_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/mipsel-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + 
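Every image now exports the full set of per-target tool variables (AR_*, CC_*, CXX_*, and the whole binutils family for Android), so build scripts such as cc-rs resolve the cross tools from the environment rather than probing PATH. An illustrative check from inside the mipsel image whose ENV block continues below:

    # both resolve through the per-target variables, not a bare `cc`
    printenv CC_mipsel_unknown_linux_gnu CXX_mipsel_unknown_linux_gnu
    "$CC_mipsel_unknown_linux_gnu" --version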
PKG_CONFIG_PATH="/usr/lib/mipsel-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.mipsel-unknown-linux-musl b/docker/Dockerfile.mipsel-unknown-linux-musl index 864676215..ba02e65a8 100644 --- a/docker/Dockerfile.mipsel-unknown-linux-musl +++ b/docker/Dockerfile.mipsel-unknown-linux-musl @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,24 +10,39 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY qemu.sh / RUN /qemu.sh mipsel +# this is a soft-float target for the mips32r2 architecture +# https://github.com/rust-lang/rust/blob/75d3027fb5ce1af6712e4503c9574802212101bd/compiler/rustc_target/src/spec/mipsel_unknown_linux_musl.rs#L6 COPY musl.sh / RUN /musl.sh \ TARGET=mipsel-linux-muslsf \ - "COMMON_CONFIG += -with-arch=mips32" + "COMMON_CONFIG += -with-arch=mips32r2" -ENV CROSS_MUSL_SYSROOT=/usr/local/mipsel-linux-muslsf +ENV CROSS_TOOLCHAIN_PREFIX=mipsel-linux-muslsf- +ENV CROSS_SYSROOT=/usr/local/mipsel-linux-muslsf COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT mipsel-sf - -COPY qemu-runner / - -ENV CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER=mipsel-linux-muslsf-gcc \ - CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_RUNNER="/qemu-runner mipsel" \ - CC_mipsel_unknown_linux_musl=mipsel-linux-muslsf-gcc \ - CXX_mipsel_unknown_linux_musl=mipsel-linux-muslsf-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_mipsel_unknown_linux_musl="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT \ - RUST_TEST_THREADS=1 +RUN /musl-symlink.sh $CROSS_SYSROOT mipsel-sf + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner mipsel" +ENV CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_MIPSEL_UNKNOWN_LINUX_MUSL_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_mipsel_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_mipsel_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_mipsel_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_mipsel_unknown_linux_musl=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_mipsel_unknown_linux_musl="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=mips \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.native b/docker/Dockerfile.native new file mode 100644 index 000000000..56594460a --- /dev/null +++ b/docker/Dockerfile.native @@ -0,0 +1,38 @@ +# This dockerfile is used when the target matches the images platform in `build-docker-image` +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +ARG TARGETARCH +ARG TARGETVARIANT +ARG CROSS_TARGET_TRIPLE + +COPY qemu.sh native-qemu.sh / +RUN /native-qemu.sh + +COPY dropbear.sh / +RUN /dropbear.sh + +COPY linux-image.sh 
native-linux-image.sh / +RUN /native-linux-image.sh + +COPY linux-runner native-linux-runner base-runner.sh / + +ENV CROSS_TARGETARCH=$TARGETARCH +ENV CROSS_TARGETVARIANT=$TARGETVARIANT +ENV CROSS_TOOLCHAIN_PREFIX= +ENV CROSS_SYSROOT=/ +ENV CARGO_TARGET_${CROSS_TARGET_TRIPLE}_RUNNER="/native-linux-runner" + +RUN sed -e "s#@DEFAULT_CROSS_TARGETARCH@#$CROSS_TARGETARCH#g" -i /native-linux-runner +RUN sed -e "s#@DEFAULT_CROSS_TARGETVARIANT@#$CROSS_TARGETVARIANT#g" -i /native-linux-runner diff --git a/docker/Dockerfile.native.centos b/docker/Dockerfile.native.centos new file mode 100644 index 000000000..ff80e9215 --- /dev/null +++ b/docker/Dockerfile.native.centos @@ -0,0 +1,45 @@ +FROM ubuntu:20.04 as base +ENV DEBIAN_FRONTEND=noninteractive + +ARG TARGETARCH +ARG TARGETVARIANT +ARG CROSS_TARGET_TRIPLE + +COPY lib.sh / +COPY linux-image.sh native-linux-image.sh / +RUN /native-linux-image.sh + +FROM centos:7 + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +# these need to be present in **both** FROM sections +ARG TARGETARCH +ARG TARGETVARIANT +ARG CROSS_TARGET_TRIPLE + +COPY qemu.sh native-qemu.sh / +RUN /native-qemu.sh + +COPY dropbear.sh / +RUN /dropbear.sh + +COPY --from=0 /qemu /qemu + +COPY linux-runner native-linux-runner base-runner.sh / + +ENV CROSS_TARGETARCH=$TARGETARCH +ENV CROSS_TARGETVARIANT=$TARGETVARIANT +ENV CROSS_TOOLCHAIN_PREFIX= +ENV CROSS_SYSROOT=/ +ENV CARGO_TARGET_${CROSS_TARGET_TRIPLE}_RUNNER="/native-linux-runner" + +RUN sed -e "s#@DEFAULT_CROSS_TARGETARCH@#$CROSS_TARGETARCH#g" -i /native-linux-runner +RUN sed -e "s#@DEFAULT_CROSS_TARGETVARIANT@#$CROSS_TARGETVARIANT#g" -i /native-linux-runner diff --git a/docker/Dockerfile.powerpc-unknown-linux-gnu b/docker/Dockerfile.powerpc-unknown-linux-gnu index f1076d609..e3c688c54 100644 --- a/docker/Dockerfile.powerpc-unknown-linux-gnu +++ b/docker/Dockerfile.powerpc-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-powerpc-linux-gnu \ + gfortran-powerpc-linux-gnu \ libc6-dev-powerpc-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=powerpc /deny-debian-packages.sh \ + binutils \ + binutils-powerpc-linux-gnu + COPY qemu.sh / RUN /qemu.sh ppc softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh powerpc -COPY linux-runner / - -ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER=powerpc-linux-gnu-gcc \ - CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner powerpc" \ - CC_powerpc_unknown_linux_gnu=powerpc-linux-gnu-gcc \ - CXX_powerpc_unknown_linux_gnu=powerpc-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_powerpc_unknown_linux_gnu="--sysroot=/usr/powerpc-linux-gnu" \ - QEMU_LD_PREFIX=/usr/powerpc-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=powerpc-linux-gnu- +ENV CROSS_SYSROOT=/usr/powerpc-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner powerpc" +ENV CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_POWERPC_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_powerpc_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_powerpc_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + 
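For the Dockerfile.native variants above, TARGETARCH and TARGETVARIANT are filled in automatically by BuildKit from --platform, while CROSS_TARGET_TRIPLE must be passed as the uppercased, underscored triple because it is spliced into the CARGO_TARGET_*_RUNNER variable name. A plausible invocation (image tag is illustrative):

    docker buildx build -f docker/Dockerfile.native \
        --platform linux/arm64 \
        --build-arg CROSS_TARGET_TRIPLE=AARCH64_UNKNOWN_LINUX_GNU \
        -t example/cross-native:aarch64 docker/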
CXX_powerpc_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_powerpc_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_powerpc_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/powerpc-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/powerpc-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=ppc \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.powerpc64-unknown-linux-gnu b/docker/Dockerfile.powerpc64-unknown-linux-gnu index e7116ad8a..faea35f15 100644 --- a/docker/Dockerfile.powerpc64-unknown-linux-gnu +++ b/docker/Dockerfile.powerpc64-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-powerpc64-linux-gnu \ + gfortran-powerpc64-linux-gnu \ libc6-dev-ppc64-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=ppc64 /deny-debian-packages.sh \ + binutils \ + binutils-powerpc64-linux-gnu + COPY qemu.sh / RUN /qemu.sh ppc64 softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh powerpc64 -COPY linux-runner / - -ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER=powerpc64-linux-gnu-gcc \ - CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner powerpc64" \ - CC_powerpc64_unknown_linux_gnu=powerpc64-linux-gnu-gcc \ - CXX_powerpc64_unknown_linux_gnu=powerpc64-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_powerpc64_unknown_linux_gnu="--sysroot=/usr/powerpc64-linux-gnu" \ - QEMU_LD_PREFIX=/usr/powerpc64-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=powerpc64-linux-gnu- +ENV CROSS_SYSROOT=/usr/powerpc64-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner powerpc64" +ENV CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_POWERPC64_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_powerpc64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_powerpc64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_powerpc64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_powerpc64_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_powerpc64_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/powerpc64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/powerpc64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=ppc64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.powerpc64le-unknown-linux-gnu b/docker/Dockerfile.powerpc64le-unknown-linux-gnu index 3bdde5f03..728a5894d 100644 --- a/docker/Dockerfile.powerpc64le-unknown-linux-gnu +++ 
b/docker/Dockerfile.powerpc64le-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-powerpc64le-linux-gnu \ + gfortran-powerpc64le-linux-gnu \ libc6-dev-ppc64el-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=ppc64el /deny-debian-packages.sh \ + binutils \ + binutils-powerpc64le-linux-gnu + COPY qemu.sh / RUN /qemu.sh ppc64le softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh powerpc64le -COPY linux-runner / - -ENV CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_LINKER=powerpc64le-linux-gnu-gcc \ - CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner powerpc64le" \ - CC_powerpc64le_unknown_linux_gnu=powerpc64le-linux-gnu-gcc \ - CXX_powerpc64le_unknown_linux_gnu=powerpc64le-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_powerpc64le_unknown_linux_gnu="--sysroot=/usr/powerpc64le-linux-gnu" \ - QEMU_LD_PREFIX=/usr/powerpc64le-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=powerpc64le-linux-gnu- +ENV CROSS_SYSROOT=/usr/powerpc64le-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner powerpc64le" +ENV CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_POWERPC64LE_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_powerpc64le_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_powerpc64le_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_powerpc64le_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_powerpc64le_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_powerpc64le_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/powerpc64le-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/powerpc64le-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=ppc64le \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.riscv64gc-unknown-linux-gnu b/docker/Dockerfile.riscv64gc-unknown-linux-gnu index d2d260c06..d5ea70a17 100644 --- a/docker/Dockerfile.riscv64gc-unknown-linux-gnu +++ b/docker/Dockerfile.riscv64gc-unknown-linux-gnu @@ -1,29 +1,56 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:22.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh -# COPY cmake.sh / -# RUN /cmake.sh +COPY cmake.sh / +RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install -y --no-install-recommends \ g++-riscv64-linux-gnu \ + gfortran-riscv64-linux-gnu \ libc6-dev-riscv64-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=riscv64 /deny-debian-packages.sh \ + binutils \ + binutils-riscv64-linux-gnu + COPY qemu.sh / -RUN /qemu.sh riscv64 +RUN /qemu.sh riscv64 softmmu + +COPY dropbear.sh / +RUN /dropbear.sh -COPY linux-runner / +COPY linux-image.sh / +RUN /linux-image.sh riscv64 -ENV 
CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER=riscv64-linux-gnu-gcc \ - CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner riscv64" \ - CC_riscv64gc_unknown_linux_gnu=riscv64-linux-gnu-gcc \ - CXX_riscv64gc_unknown_linux_gnu=riscv64-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_riscv64gc_unknown_linux_gnu="--sysroot=/usr/riscv64-linux-gnu" \ - QEMU_LD_PREFIX=/usr/riscv64-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=riscv64-linux-gnu- +ENV CROSS_SYSROOT=/usr/riscv64-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner riscv64" +ENV CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_RISCV64GC_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_riscv64gc_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_riscv64gc_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_riscv64gc_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_riscv64gc_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_riscv64gc_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/riscv64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/riscv64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=riscv64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=rv64gc -mabi=lp64d -mcmodel=medany" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.s390x-unknown-linux-gnu b/docker/Dockerfile.s390x-unknown-linux-gnu index 39f695fb4..043afd7e5 100644 --- a/docker/Dockerfile.s390x-unknown-linux-gnu +++ b/docker/Dockerfile.s390x-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-s390x-linux-gnu \ + gfortran-s390x-linux-gnu \ libc6-dev-s390x-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=s390x /deny-debian-packages.sh \ + binutils \ + binutils-s390x-linux-gnu + COPY qemu.sh / RUN /qemu.sh s390x softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh s390x -COPY linux-runner / - -ENV CARGO_TARGET_S390X_UNKNOWN_LINUX_GNU_LINKER=s390x-linux-gnu-gcc \ - CARGO_TARGET_S390X_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner s390x" \ - CC_s390x_unknown_linux_gnu=s390x-linux-gnu-gcc \ - CXX_s390x_unknown_linux_gnu=s390x-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_s390x_unknown_linux_gnu="--sysroot=/usr/s390x-linux-gnu" \ - QEMU_LD_PREFIX=/usr/s390x-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=s390x-linux-gnu- +ENV CROSS_SYSROOT=/usr/s390x-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner s390x" +ENV CARGO_TARGET_S390X_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_S390X_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_s390x_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_s390x_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_s390x_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + 
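The per-target CMAKE_TOOLCHAIN_FILE_* variables (such as the s390x one continuing below) all point at the /opt/toolchain.cmake copied into each image, which is presumably where the CROSS_CMAKE_SYSTEM_NAME, CROSS_CMAKE_SYSTEM_PROCESSOR, CROSS_CMAKE_CRT, and CROSS_CMAKE_OBJECT_FLAGS values get consumed. Configuring a CMake project by hand inside an image would go through the same file:

    # illustrative manual configure using the image's toolchain file
    cmake -S . -B build -DCMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake
    cmake --build build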
CMAKE_TOOLCHAIN_FILE_s390x_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_s390x_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/s390x-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/s390x-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=s390x \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.sparc64-unknown-linux-gnu b/docker/Dockerfile.sparc64-unknown-linux-gnu index ea3f3b448..cb620e61d 100644 --- a/docker/Dockerfile.sparc64-unknown-linux-gnu +++ b/docker/Dockerfile.sparc64-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-sparc64-linux-gnu \ + gfortran-sparc64-linux-gnu \ libc6-dev-sparc64-cross +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=sparc64 /deny-debian-packages.sh \ + binutils \ + binutils-sparc64-linux-gnu + COPY qemu.sh / RUN /qemu.sh sparc64 softmmu @@ -23,13 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh sparc64 -COPY linux-runner / - -ENV CARGO_TARGET_SPARC64_UNKNOWN_LINUX_GNU_LINKER=sparc64-linux-gnu-gcc \ - CARGO_TARGET_SPARC64_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner sparc64" \ - CC_sparc64_unknown_linux_gnu=sparc64-linux-gnu-gcc \ - CXX_sparc64_unknown_linux_gnu=sparc64-linux-gnu-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_sparc64_unknown_linux_gnu="--sysroot=/usr/sparc64-linux-gnu" \ - QEMU_LD_PREFIX=/usr/sparc64-linux-gnu \ +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=sparc64-linux-gnu- +ENV CROSS_SYSROOT=/usr/sparc64-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner sparc64" +ENV CARGO_TARGET_SPARC64_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_SPARC64_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_sparc64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_sparc64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_sparc64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_sparc64_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_sparc64_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/sparc64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/sparc64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=sparc64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.sparcv9-sun-solaris b/docker/Dockerfile.sparcv9-sun-solaris index 8d9c9c49a..814444b8a 100644 --- a/docker/Dockerfile.sparcv9-sun-solaris +++ b/docker/Dockerfile.sparcv9-sun-solaris @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV 
DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,22 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY solaris.sh / -RUN /solaris.sh sparcv9 +RUN /solaris.sh sparcv9 sun + +COPY toolchain.cmake /opt/toolchain.cmake -ENV CARGO_TARGET_SPARCV9_SUN_SOLARIS_LINKER=sparcv9-sun-solaris2.10-gcc \ - CC_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-gcc \ - CXX_sparcv9_sun_solaris=sparcv9-sun-solaris2.10-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_sparcv9_sun_solaris="--sysroot=/usr/local/sparcv9-sun-solaris2.10" +ENV CROSS_TOOLCHAIN_PREFIX=sparcv9-sun-solaris2.10- +ENV CROSS_SYSROOT=/usr/local/sparcv9-sun-solaris2.10 +ENV CARGO_TARGET_SPARCV9_SUN_SOLARIS_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + AR_sparcv9_sun_solaris="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_sparcv9_sun_solaris="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_sparcv9_sun_solaris="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_sparcv9_sun_solaris=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_sparcv9_sun_solaris="--sysroot=$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=SunOS \ + CROSS_CMAKE_SYSTEM_PROCESSOR=sparc64 \ + CROSS_CMAKE_CRT=solaris \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC" diff --git a/docker/Dockerfile.thumbv6m-none-eabi b/docker/Dockerfile.thumbv6m-none-eabi index 40474559a..0e30601be 100644 --- a/docker/Dockerfile.thumbv6m-none-eabi +++ b/docker/Dockerfile.thumbv6m-none-eabi @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,11 +10,28 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ gcc-arm-none-eabi \ - libnewlib-arm-none-eabi && \ - /qemu.sh arm + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake /opt/toolchain.cmake -ENV QEMU_CPU=cortex-m3 \ - CARGO_TARGET_THUMBV6M_NONE_EABI_RUNNER=qemu-arm +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm +ENV QEMU_CPU=cortex-m1 \ + AR_thumbv6m_none_eabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv6m_none_eabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv6m_none_eabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_thumbv6m_none_eabi=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV6M_NONE_EABI_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv6-m \ + CROSS_CMAKE_CRT=newlib \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -march=armv6s-m" diff --git a/docker/Dockerfile.thumbv7em-none-eabi b/docker/Dockerfile.thumbv7em-none-eabi index f5bc0b002..90adf9396 100644 --- a/docker/Dockerfile.thumbv7em-none-eabi +++ b/docker/Dockerfile.thumbv7em-none-eabi @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,11 +10,28 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ gcc-arm-none-eabi \ - libnewlib-arm-none-eabi && \ - /qemu.sh arm + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake /opt/toolchain.cmake +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV 
CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm ENV QEMU_CPU=cortex-m4 \ - CARGO_TARGET_THUMBV7EM_NONE_EABI_RUNNER=qemu-arm + AR_thumbv7em_none_eabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv7em_none_eabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv7em_none_eabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_thumbv7em_none_eabi=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV7EM_NONE_EABI_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv7e-m \ + CROSS_CMAKE_CRT=newlib \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -march=armv7e-m" diff --git a/docker/Dockerfile.thumbv7em-none-eabihf b/docker/Dockerfile.thumbv7em-none-eabihf index 0a60cf361..ad29d6e1d 100644 --- a/docker/Dockerfile.thumbv7em-none-eabihf +++ b/docker/Dockerfile.thumbv7em-none-eabihf @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,11 +10,28 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ gcc-arm-none-eabi \ - libnewlib-arm-none-eabi && \ - /qemu.sh arm + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake /opt/toolchain.cmake +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm ENV QEMU_CPU=cortex-m4 \ - CARGO_TARGET_THUMBV7EM_NONE_EABIHF_RUNNER=qemu-arm + AR_thumbv7em_none_eabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv7em_none_eabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv7em_none_eabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_thumbv7em_none_eabihf=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV7EM_NONE_EABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv7e-m \ + CROSS_CMAKE_CRT=newlib \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -mfloat-abi=hard -march=armv7e-m -mfpu=fpv4-sp-d16" diff --git a/docker/Dockerfile.thumbv7m-none-eabi b/docker/Dockerfile.thumbv7m-none-eabi index 37f1802af..63d7122b8 100644 --- a/docker/Dockerfile.thumbv7m-none-eabi +++ b/docker/Dockerfile.thumbv7m-none-eabi @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,11 +10,28 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh -COPY qemu.sh / +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ gcc-arm-none-eabi \ - libnewlib-arm-none-eabi && \ - /qemu.sh arm + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake /opt/toolchain.cmake +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm ENV QEMU_CPU=cortex-m3 \ - CARGO_TARGET_THUMBV7M_NONE_EABI_RUNNER=qemu-arm + AR_thumbv7m_none_eabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv7m_none_eabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv7m_none_eabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_thumbv7m_none_eabi=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV7M_NONE_EABI_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv7-m \ + CROSS_CMAKE_CRT=newlib \ + 
CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -march=armv7-m" diff --git a/docker/Dockerfile.thumbv7neon-linux-androideabi b/docker/Dockerfile.thumbv7neon-linux-androideabi index 203392d52..1cb12968e 100644 --- a/docker/Dockerfile.thumbv7neon-linux-androideabi +++ b/docker/Dockerfile.thumbv7neon-linux-androideabi @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,46 +10,70 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +COPY qemu.sh / +RUN /qemu.sh arm + +ARG ANDROID_NDK=r25b +ARG ANDROID_SDK=28 +ARG ANDROID_VERSION=9.0.0_r1 +ARG ANDROID_SYSTEM_NONE=0 +ARG ANDROID_SYSTEM_COMPLETE=0 +ARG PYTHON_TMPDIR=/tmp/android + +COPY validate-android-args.sh / +RUN /validate-android-args.sh arm + COPY android-ndk.sh / -RUN /android-ndk.sh arm 28 +RUN /android-ndk.sh arm ENV PATH=$PATH:/android-ndk/bin COPY android-system.sh / +RUN mkdir -p $PYTHON_TMPDIR +COPY android $PYTHON_TMPDIR RUN /android-system.sh arm -COPY qemu.sh / -RUN /qemu.sh arm - -RUN cp /android-ndk/sysroot/usr/lib/arm-linux-androideabi/28/libz.so /system/lib/ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-androideabi- +ENV CROSS_SYSROOT=/android-ndk/sysroot +ENV CROSS_ANDROID_SDK=$ANDROID_SDK +COPY android-symlink.sh / +RUN /android-symlink.sh arm arm-linux-androideabi COPY android-runner / +COPY android.cmake /opt/toolchain.cmake # Libz is distributed in the android ndk, but for some unknown reason it is not # found in the build process of some crates, so we explicit set the DEP_Z_ROOT # likewise, the toolchains expect the prefix `thumbv7neon-linux-androideabi`, # which we don't have, so just export every possible variable, such as AR. # Also export all target binutils just in case required. 
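The comment above explains why every binutils variable is exported even for a prefix the installed toolchain doesn't literally provide. As background, a minimal sketch of how cc-rs-style build tooling typically resolves these per-target variables (the lookup order shown follows cc-rs conventions and is an assumption, not taken from this diff):

```bash
#!/usr/bin/env bash
# Sketch: resolve a per-target C compiler the way cc-rs-style tools do.
# The Rust triple is mangled into an env-var suffix by replacing `-`
# with `_`, so CC_thumbv7neon_linux_androideabi is consulted before the
# generic CC, which is why these Dockerfiles export the full tool set.
target="thumbv7neon-linux-androideabi"
var="CC_${target//-/_}"      # e.g. CC_thumbv7neon_linux_androideabi
cc="${!var:-${CC:-cc}}"      # per-target var, then CC, then a default
echo "resolved compiler: ${cc}"
```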
-ENV CARGO_TARGET_THUMBV7NEON_LINUX_ANDROIDEABI_LINKER=arm-linux-androideabi-gcc \ - CARGO_TARGET_THUMBV7NEON_LINUX_ANDROIDEABI_RUNNER="/android-runner arm" \ - AR_thumbv7neon_linux_androideabi=arm-linux-androideabi-ar \ - AS_thumbv7neon_linux_androideabi=arm-linux-androideabi-as \ - CC_thumbv7neon_linux_androideabi=arm-linux-androideabi-gcc \ - CXX_thumbv7neon_linux_androideabi=arm-linux-androideabi-g++ \ - LD_thumbv7neon_linux_androideabi=arm-linux-androideabi-ld \ - NM_thumbv7neon_linux_androideabi=arm-linux-androideabi-nm \ - OBJCOPY_thumbv7neon_linux_androideabi=arm-linux-androideabi-objcopy \ - OBJDUMP_thumbv7neon_linux_androideabi=arm-linux-androideabi-objdump \ - RANLIB_thumbv7neon_linux_androideabi=arm-linux-androideabi-ranlib \ - READELF_thumbv7neon_linux_androideabi=arm-linux-androideabi-readelf \ - SIZE_thumbv7neon_linux_androideabi=arm-linux-androideabi-size \ - STRINGS_thumbv7neon_linux_androideabi=arm-linux-androideabi-strings \ - STRIP_thumbv7neon_linux_androideabi=arm-linux-androideabi-strip \ - BINDGEN_EXTRA_CLANG_ARGS_thumbv7neon_linux_androideabi="--sysroot=/android-ndk/sysroot" \ - DEP_Z_INCLUDE=/android-ndk/sysroot/usr/include/ \ +ENV CROSS_TARGET_RUNNER="/android-runner arm" +ENV CARGO_TARGET_THUMBV7NEON_LINUX_ANDROIDEABI_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_THUMBV7NEON_LINUX_ANDROIDEABI_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"nm \ + OBJCOPY_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_thumbv7neon_linux_androideabi="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_thumbv7neon_linux_androideabi=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_thumbv7neon_linux_androideabi="--sysroot=$CROSS_SYSROOT" \ + DEP_Z_INCLUDE="$CROSS_SYSROOT/usr/include/" \ RUST_TEST_THREADS=1 \ HOME=/tmp/ \ TMPDIR=/tmp/ \ ANDROID_DATA=/ \ ANDROID_DNS_MODE=local \ - ANDROID_ROOT=/system - + ANDROID_ROOT=/system \ + CROSS_CMAKE_SYSTEM_NAME=Android \ + CROSS_CMAKE_SYSTEM_PROCESSOR=armv7-a \ + CROSS_CMAKE_CRT=android \ + CROSS_CMAKE_OBJECT_FLAGS="--target=thumbv7neon-linux-androideabi -DANDROID -ffunction-sections -fdata-sections -fPIC" diff --git a/docker/Dockerfile.thumbv7neon-unknown-linux-gnueabihf b/docker/Dockerfile.thumbv7neon-unknown-linux-gnueabihf index 7468bdac4..03ea06a36 100644 --- a/docker/Dockerfile.thumbv7neon-unknown-linux-gnueabihf +++ b/docker/Dockerfile.thumbv7neon-unknown-linux-gnueabihf @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,8 +10,11 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ g++-arm-linux-gnueabihf \ +
gfortran-arm-linux-gnueabihf \ libc6-dev-armhf-cross COPY qemu.sh / @@ -23,25 +26,37 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh armv7 -COPY linux-runner / +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake # Export all target binutils just in case required. -ENV CARGO_TARGET_THUMBV7NEON_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \ - CARGO_TARGET_THUMBV7NEON_UNKNOWN_LINUX_GNUEABIHF_RUNNER="/linux-runner armv7" \ - AR_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-ar \ - AS_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-as \ - CC_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \ - CXX_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-g++ \ - LD_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-ld \ - NM_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-nm \ - OBJCOPY_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-objcopy \ - OBJDUMP_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-objdump \ - RANLIB_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-ranlib \ - READELF_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-readelf \ - SIZE_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-size \ - STRINGS_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-strings \ - STRIP_thumbv7neon_unknown_linux_gnueabihf=arm-linux-gnueabihf-strip \ - BINDGEN_EXTRA_CLANG_ARGS_thumbv7neon_unknown_linux_gnueabihf="--sysroot=/usr/arm-linux-gnueabihf" \ - QEMU_LD_PREFIX=/usr/arm-linux-gnueabihf \ +ENV CROSS_TOOLCHAIN_PREFIX=arm-linux-gnueabihf- +ENV CROSS_SYSROOT=/usr/arm-linux-gnueabihf +ENV CROSS_TARGET_RUNNER="/linux-runner armv7hf" +ENV CARGO_TARGET_THUMBV7NEON_UNKNOWN_LINUX_GNUEABIHF_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_THUMBV7NEON_UNKNOWN_LINUX_GNUEABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"nm \ + OBJCOPY_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_thumbv7neon_unknown_linux_gnueabihf="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_thumbv7neon_unknown_linux_gnueabihf=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_thumbv7neon_unknown_linux_gnueabihf="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ RUST_TEST_THREADS=1 \ - PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabihf/pkgconfig/:${PKG_CONFIG_PATH}" + PKG_CONFIG_PATH="/usr/lib/arm-linux-gnueabihf/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=arm \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -march=armv7-a -mfpu=neon-vfpv4 -mthumb -mfloat-abi=hard" + +RUN sed -e
"s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.thumbv8m.base-none-eabi b/docker/Dockerfile.thumbv8m.base-none-eabi new file mode 100644 index 000000000..beb905a04 --- /dev/null +++ b/docker/Dockerfile.thumbv8m.base-none-eabi @@ -0,0 +1,38 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ + gcc-arm-none-eabi \ + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm +ENV QEMU_CPU=cortex-m23 \ + AR_thumbv8m.base_none_eabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv8m.base_none_eabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv8m.base_none_eabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + # cmake-rs does not accept CMAKE_TOOLCHAIN_FILE_thumbv8m.base_none_eabi + TARGET_CMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV8M.BASE_NONE_EABI_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR="armv8-m.base" \ + CROSS_CMAKE_CRT=newlib \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -march=armv8-m.base" diff --git a/docker/Dockerfile.thumbv8m.main-none-eabi b/docker/Dockerfile.thumbv8m.main-none-eabi new file mode 100644 index 000000000..720e40304 --- /dev/null +++ b/docker/Dockerfile.thumbv8m.main-none-eabi @@ -0,0 +1,38 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ + gcc-arm-none-eabi \ + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm +ENV QEMU_CPU=cortex-m33 \ + AR_thumbv8m.main_none_eabi="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv8m.main_none_eabi="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv8m.main_none_eabi="$CROSS_TOOLCHAIN_PREFIX"g++ \ + # cmake-rs does not accept CMAKE_TOOLCHAIN_FILE_thumbv8m.main_none_eabi + TARGET_CMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV8M.BASE_NONE_EABI_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR="armv8-m.main" \ + CROSS_CMAKE_CRT=newlib \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -march=armv8-m.main" diff --git a/docker/Dockerfile.thumbv8m.main-none-eabihf b/docker/Dockerfile.thumbv8m.main-none-eabihf new file mode 100644 index 000000000..03af2f415 --- /dev/null +++ b/docker/Dockerfile.thumbv8m.main-none-eabihf @@ -0,0 +1,38 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ + gcc-arm-none-eabi \ + libnewlib-arm-none-eabi \ + libstdc++-arm-none-eabi-newlib + +COPY qemu.sh / +RUN /qemu.sh arm + +COPY toolchain.cmake 
/opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=arm-none-eabi- +ENV CROSS_SYSROOT="/usr/lib/arm-none-eabi" +ENV CROSS_TARGET_RUNNER=qemu-arm +ENV QEMU_CPU=cortex-m33 \ + AR_thumbv8m.main_none_eabihf="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_thumbv8m.main_none_eabihf="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_thumbv8m.main_none_eabihf="$CROSS_TOOLCHAIN_PREFIX"g++ \ + # cmake-rs does not accept CMAKE_TOOLCHAIN_FILE_thumbv8m.main_none_eabihf + TARGET_CMAKE_TOOLCHAIN_FILE=/opt/toolchain.cmake \ + CARGO_TARGET_THUMBV8M.MAIN_NONE_EABIHF_RUNNER="$CROSS_TARGET_RUNNER" \ + CROSS_CMAKE_SYSTEM_NAME=Generic \ + CROSS_CMAKE_SYSTEM_PROCESSOR="armv8-m.main" \ + CROSS_CMAKE_CRT=newlib \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -mthumb -mfloat-abi=hard -march=armv8-m.main -mfpu=fpv5-sp-d16" diff --git a/docker/Dockerfile.wasm32-unknown-emscripten b/docker/Dockerfile.wasm32-unknown-emscripten index 2d040a7f0..65b6f9095 100644 --- a/docker/Dockerfile.wasm32-unknown-emscripten +++ b/docker/Dockerfile.wasm32-unknown-emscripten @@ -1,6 +1,6 @@ FROM emscripten/emsdk:3.1.14 WORKDIR / -ARG DEBIAN_FRONTEND=noninteractive +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -15,5 +15,9 @@ RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ libxml2 \ python -ENV CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_RUNNER=node \ - BINDGEN_EXTRA_CLANG_ARGS_wasm32_unknown_emscripten="--sysroot=/emsdk/upstream/emscripten/cache/sysroot" +ENV CROSS_TOOLCHAIN_PREFIX=em +ENV CROSS_SYSROOT=/emsdk/upstream/emscripten/cache/sysroot +ENV CROSS_TARGET_RUNNER="node" +ENV CARGO_TARGET_WASM32_UNKNOWN_EMSCRIPTEN_RUNNER="$CROSS_TARGET_RUNNER" \ + BINDGEN_EXTRA_CLANG_ARGS_wasm32_unknown_emscripten="--sysroot=$CROSS_SYSROOT" \ + CMAKE_TOOLCHAIN_FILE_wasm32_unknown_emscripten=/emsdk/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake diff --git a/docker/Dockerfile.x86_64-linux-android b/docker/Dockerfile.x86_64-linux-android index 00efc61d1..923bfa6e9 100644 --- a/docker/Dockerfile.x86_64-linux-android +++ b/docker/Dockerfile.x86_64-linux-android @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,32 +10,68 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +# Using qemu allows older host cpus (without sse4) to execute the target binaries +COPY qemu.sh / +RUN /qemu.sh x86_64 + +ARG ANDROID_NDK=r25b +ARG ANDROID_SDK=28 +ARG ANDROID_VERSION=9.0.0_r1 +ARG ANDROID_SYSTEM_NONE=0 +ARG ANDROID_SYSTEM_COMPLETE=0 +ARG PYTHON_TMPDIR=/tmp/android + +COPY validate-android-args.sh / +RUN /validate-android-args.sh x86_64 + COPY android-ndk.sh / -RUN /android-ndk.sh x86_64 28 +RUN /android-ndk.sh x86_64 ENV PATH=$PATH:/android-ndk/bin COPY android-system.sh / +RUN mkdir -p $PYTHON_TMPDIR +COPY android $PYTHON_TMPDIR RUN /android-system.sh x86_64 -# Using qemu allows older host cpus (without sse4) to execute the target binaries -COPY qemu.sh / -RUN /qemu.sh x86_64 - -RUN cp /android-ndk/sysroot/usr/lib/x86_64-linux-android/28/libz.so /system/lib/ +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-linux-android- +ENV CROSS_SYSROOT=/android-ndk/sysroot +ENV CROSS_ANDROID_SDK=$ANDROID_SDK +COPY android-symlink.sh / +RUN /android-symlink.sh x86_64 x86_64-linux-android COPY android-runner / +COPY android.cmake /opt/toolchain.cmake # Libz is distributed in the android ndk, but for some unknown reason it is not # found in the build process of some crates, so we
explicit set the DEP_Z_ROOT -ENV CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER=x86_64-linux-android-gcc \ - CARGO_TARGET_X86_64_LINUX_ANDROID_RUNNER="/android-runner x86_64" \ - CC_x86_64_linux_android=x86_64-linux-android-gcc \ - CXX_x86_64_linux_android=x86_64-linux-android-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_linux_android="--sysroot=/android-ndk/sysroot" \ - DEP_Z_INCLUDE=/android-ndk/sysroot/usr/include/ \ +ENV CROSS_TARGET_RUNNER="/android-runner x86_64" +ENV CARGO_TARGET_X86_64_LINUX_ANDROID_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_X86_64_LINUX_ANDROID_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"ar \ + AS_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"as \ + CC_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"g++ \ + LD_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"ld \ + NM_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"nm \ + OBJCOPY_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"objcopy \ + OBJDUMP_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"objdump \ + RANLIB_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"ranlib \ + READELF_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"readelf \ + SIZE_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"size \ + STRINGS_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"strings \ + STRIP_x86_64_linux_android="$CROSS_TOOLCHAIN_PREFIX"strip \ + CMAKE_TOOLCHAIN_FILE_x86_64_linux_android=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_linux_android="--sysroot=$CROSS_SYSROOT" \ + DEP_Z_INCLUDE="$CROSS_SYSROOT/usr/include/" \ RUST_TEST_THREADS=1 \ HOME=/tmp/ \ TMPDIR=/tmp/ \ ANDROID_DATA=/ \ ANDROID_DNS_MODE=local \ - ANDROID_ROOT=/system + ANDROID_ROOT=/system \ + CROSS_CMAKE_SYSTEM_NAME=Android \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=android \ + CROSS_CMAKE_OBJECT_FLAGS="--target=x86_64-linux-android -DANDROID -ffunction-sections -fdata-sections -fPIC" diff --git a/docker/Dockerfile.x86_64-pc-solaris b/docker/Dockerfile.x86_64-pc-solaris new file mode 100644 index 000000000..8103d62f8 --- /dev/null +++ b/docker/Dockerfile.x86_64-pc-solaris @@ -0,0 +1,31 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +COPY solaris.sh / +RUN /solaris.sh x86_64 pc + +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-pc-solaris2.10- +ENV CROSS_SYSROOT=/usr/local/x86_64-pc-solaris2.10 +ENV CARGO_TARGET_X86_64_PC_SOLARIS_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + AR_x86_64_pc_solaris="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_pc_solaris="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_pc_solaris="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_pc_solaris=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_pc_solaris="--sysroot=$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=SunOS \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=solaris \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" diff --git a/docker/Dockerfile.x86_64-pc-windows-gnu b/docker/Dockerfile.x86_64-pc-windows-gnu index 6397e8c7b..a3fa0d50a 100644 --- a/docker/Dockerfile.x86_64-pc-windows-gnu +++ b/docker/Dockerfile.x86_64-pc-windows-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:18.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,13
+10,15 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + RUN dpkg --add-architecture i386 && apt-get update && \ apt-get install --assume-yes --no-install-recommends libz-mingw-w64-dev COPY wine.sh / RUN /wine.sh -RUN apt-get update && apt-get install --assume-yes --no-install-recommends g++-mingw-w64-x86-64 +RUN apt-get update && apt-get install --assume-yes --no-install-recommends g++-mingw-w64-x86-64 gfortran-mingw-w64-x86-64 # run-detectors are responsible for calling the correct interpreter for exe # files. For some reason it does not work inside a docker container (it works @@ -32,8 +34,22 @@ RUN mkdir -p /usr/lib/binfmt-support/ && \ COPY windows-entry.sh / ENTRYPOINT ["/windows-entry.sh"] -ENV CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc \ - CARGO_TARGET_X86_64_PC_WINDOWS_GNU_RUNNER=wine \ - CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc-posix \ - CXX_x86_64_pc_windows_gnu=x86_64-w64-mingw32-g++-posix \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_pc_windows_gnu="--sysroot=/usr/x86_64-w64-mingw32" +COPY toolchain.cmake /opt/toolchain.cmake + +# for why we always link with pthread support, see: +# https://github.com/cross-rs/cross/pull/1123#issuecomment-1312287148 +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-w64-mingw32- +ENV CROSS_TOOLCHAIN_SUFFIX=-posix +ENV CROSS_SYSROOT=/usr/x86_64-w64-mingw32 +ENV CROSS_TARGET_RUNNER="env -u CARGO_TARGET_X86_64_PC_WINDOWS_GNU_RUNNER wine" +ENV CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc"$CROSS_TOOLCHAIN_SUFFIX" \ + CARGO_TARGET_X86_64_PC_WINDOWS_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_x86_64_pc_windows_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_pc_windows_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc"$CROSS_TOOLCHAIN_SUFFIX" \ + CXX_x86_64_pc_windows_gnu="$CROSS_TOOLCHAIN_PREFIX"g++"$CROSS_TOOLCHAIN_SUFFIX" \ + CMAKE_TOOLCHAIN_FILE_x86_64_pc_windows_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_pc_windows_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + CROSS_CMAKE_SYSTEM_NAME=Windows \ + CROSS_CMAKE_SYSTEM_PROCESSOR=AMD64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -m64" diff --git a/docker/Dockerfile.x86_64-sun-solaris b/docker/Dockerfile.x86_64-sun-solaris deleted file mode 100644 index 4e766d6d6..000000000 --- a/docker/Dockerfile.x86_64-sun-solaris +++ /dev/null @@ -1,19 +0,0 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive - -COPY common.sh lib.sh / -RUN /common.sh - -COPY cmake.sh / -RUN /cmake.sh - -COPY xargo.sh / -RUN /xargo.sh - -COPY solaris.sh / -RUN /solaris.sh x86_64 - -ENV CARGO_TARGET_X86_64_SUN_SOLARIS_LINKER=x86_64-sun-solaris2.10-gcc \ - CC_x86_64_sun_solaris=x86_64-sun-solaris2.10-gcc \ - CXX_x86_64_sun_solaris=x86_64-sun-solaris2.10-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_sun_solaris="--sysroot=/usr/local/x86_64-sun-solaris2.10" diff --git a/docker/Dockerfile.x86_64-unknown-dragonfly b/docker/Dockerfile.x86_64-unknown-dragonfly index 92caa19cf..fbea02e68 100644 --- a/docker/Dockerfile.x86_64-unknown-dragonfly +++ b/docker/Dockerfile.x86_64-unknown-dragonfly @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,22 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY dragonfly.sh / RUN /dragonfly.sh 5 -ENV CARGO_TARGET_X86_64_UNKNOWN_DRAGONFLY_LINKER=x86_64-unknown-dragonfly-gcc \ - CC_x86_64_unknown_dragonfly=x86_64-unknown-dragonfly-gcc \ - 
CXX_x86_64_unknown_dragonfly=x86_64-unknown-dragonfly-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_dragonfly="--sysroot=/usr/local/x86_64-unknown-dragonfly" +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-unknown-dragonfly- +ENV CROSS_SYSROOT=/usr/local/x86_64-unknown-dragonfly +ENV CARGO_TARGET_X86_64_UNKNOWN_DRAGONFLY_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + AR_x86_64_unknown_dragonfly="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_unknown_dragonfly="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_unknown_dragonfly="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_unknown_dragonfly=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_dragonfly="--sysroot=$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=DragonFly \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=dragonfly \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" diff --git a/docker/Dockerfile.x86_64-unknown-freebsd b/docker/Dockerfile.x86_64-unknown-freebsd index 4f53fb297..89a04fdbc 100644 --- a/docker/Dockerfile.x86_64-unknown-freebsd +++ b/docker/Dockerfile.x86_64-unknown-freebsd @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,15 +10,37 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +RUN echo "export ARCH=x86_64" > /freebsd-arch.sh COPY freebsd-common.sh / COPY freebsd.sh / -RUN /freebsd.sh x86_64 +RUN /freebsd.sh +COPY freebsd-install.sh / COPY freebsd-extras.sh / -RUN /freebsd-extras.sh x86_64 +RUN /freebsd-extras.sh + +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-unknown-freebsd13- +ENV CROSS_SYSROOT=/usr/local/x86_64-unknown-freebsd13 + +COPY freebsd-gcc.sh /usr/bin/"$CROSS_TOOLCHAIN_PREFIX"gcc.sh +COPY toolchain.cmake /opt/toolchain.cmake + +COPY freebsd-fetch-best-mirror.sh / +COPY freebsd-setup-packagesite.sh / +COPY freebsd-install-package.sh / -ENV CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER=x86_64-unknown-freebsd12-gcc \ - CC_x86_64_unknown_freebsd=x86_64-unknown-freebsd12-gcc \ - CXX_x86_64_unknown_freebsd=x86_64-unknown-freebsd12-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_freebsd="--sysroot=/usr/local/x86_64-unknown-freebsd12" \ - X86_64_UNKNOWN_FREEBSD_OPENSSL_DIR=/usr/local/x86_64-unknown-freebsd12/ +ENV CARGO_TARGET_X86_64_UNKNOWN_FREEBSD_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc.sh \ + AR_x86_64_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_unknown_freebsd="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_unknown_freebsd=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_freebsd="--sysroot=$CROSS_SYSROOT" \ + X86_64_UNKNOWN_FREEBSD_OPENSSL_DIR="$CROSS_SYSROOT" \ + PKG_CONFIG_PATH="${CROSS_SYSROOT}/libdata/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=FreeBSD \ + CROSS_CMAKE_SYSTEM_PROCESSOR=amd64 \ + CROSS_CMAKE_CRT=freebsd \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" diff --git a/docker/Dockerfile.x86_64-unknown-illumos b/docker/Dockerfile.x86_64-unknown-illumos index 922cf526f..aaafae7bf 100644 --- a/docker/Dockerfile.x86_64-unknown-illumos +++ b/docker/Dockerfile.x86_64-unknown-illumos @@ -1,4 +1,5 @@ -FROM ubuntu:20.04 +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -9,12 +10,23 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as 
build + COPY illumos.sh / RUN /illumos.sh x86_64 +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-unknown-illumos- +ENV CROSS_SYSROOT=/usr/local/x86_64-unknown-illumos/sysroot ENV PATH=$PATH:/usr/local/x86_64-unknown-illumos/bin/ \ - CARGO_TARGET_X86_64_UNKNOWN_ILLUMOS_LINKER=x86_64-unknown-illumos-gcc \ - AR_x86_64_unknown_illumos=x86_64-unknown-illumos-ar \ - CC_x86_64_unknown_illumos=x86_64-unknown-illumos-gcc \ - CXX_x86_64_unknown_illumos=x86_64-unknown-illumos-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_sparcv9_sun_solaris="--sysroot=/usr/local/x86_64-unknown-illumos/sysroot" + CARGO_TARGET_X86_64_UNKNOWN_ILLUMOS_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + AR_x86_64_unknown_illumos="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_unknown_illumos="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_unknown_illumos="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_unknown_illumos=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_illumos="--sysroot=$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=illumos \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=solaris \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" diff --git a/docker/Dockerfile.x86_64-unknown-linux-gnu b/docker/Dockerfile.x86_64-unknown-linux-gnu index accb0944b..719b51916 100644 --- a/docker/Dockerfile.x86_64-unknown-linux-gnu +++ b/docker/Dockerfile.x86_64-unknown-linux-gnu @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,6 +10,18 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + +RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ + g++-x86-64-linux-gnu \ + gfortran-x86-64-linux-gnu \ + libc6-dev-amd64-cross + +COPY deny-debian-packages.sh / +RUN TARGET_ARCH=amd64 /deny-debian-packages.sh \ + binutils \ + binutils-x86-64-linux-gnu + COPY qemu.sh / RUN /qemu.sh x86_64 softmmu @@ -19,6 +31,26 @@ RUN /dropbear.sh COPY linux-image.sh / RUN /linux-image.sh x86_64 -COPY linux-runner / - -ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner x86_64" +COPY linux-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-linux-gnu- +ENV CROSS_SYSROOT=/usr/x86_64-linux-gnu +ENV CROSS_TARGET_RUNNER="/linux-runner x86_64" +ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_x86_64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_unknown_linux_gnu="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_unknown_linux_gnu=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_linux_gnu="--sysroot=$CROSS_SYSROOT -idirafter/usr/include" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + RUST_TEST_THREADS=1 \ + PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig/:${PKG_CONFIG_PATH}" \ + PKG_CONFIG_ALLOW_CROSS=1 \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=gnu \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /linux-runner diff --git a/docker/Dockerfile.x86_64-unknown-linux-gnu.centos b/docker/Dockerfile.x86_64-unknown-linux-gnu.centos index acccac373..f32bb04db 100644 --- a/docker/Dockerfile.x86_64-unknown-linux-gnu.centos +++
b/docker/Dockerfile.x86_64-unknown-linux-gnu.centos @@ -1,29 +1,3 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive - -COPY lib.sh / -COPY linux-image.sh / -RUN /linux-image.sh x86_64 - -FROM centos:7 - -COPY common.sh lib.sh / -RUN /common.sh - -COPY cmake.sh / -RUN /cmake.sh - -COPY xargo.sh / -RUN /xargo.sh - -COPY qemu.sh / -RUN /qemu.sh x86_64 softmmu - -COPY dropbear.sh / -RUN /dropbear.sh - -COPY --from=0 /qemu /qemu - -COPY linux-runner / - -ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUNNER="/linux-runner x86_64" +# HACK: this file is currently never used and only exists for usage with `Dockerfile.native.centos` +# it will be supported for aarch64 hosts, see #751 and #975 +FROM ubuntu:20.04 as base diff --git a/docker/Dockerfile.x86_64-unknown-linux-musl b/docker/Dockerfile.x86_64-unknown-linux-musl index 1e99b626f..702b4d57a 100644 --- a/docker/Dockerfile.x86_64-unknown-linux-musl +++ b/docker/Dockerfile.x86_64-unknown-linux-musl @@ -1,5 +1,5 @@ -FROM ubuntu:20.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,21 +10,34 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY musl.sh / RUN /musl.sh TARGET=x86_64-linux-musl COPY qemu.sh / RUN /qemu.sh x86_64 -ENV CROSS_MUSL_SYSROOT=/usr/local/x86_64-linux-musl +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-linux-musl- +ENV CROSS_SYSROOT=/usr/local/x86_64-linux-musl COPY musl-symlink.sh / -RUN /musl-symlink.sh $CROSS_MUSL_SYSROOT x86_64 - -COPY qemu-runner / - -ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER=x86_64-linux-musl-gcc \ - CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUNNER="/qemu-runner x86_64" \ - CC_x86_64_unknown_linux_musl=x86_64-linux-musl-gcc \ - CXX_x86_64_unknown_linux_musl=x86_64-linux-musl-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_linux_musl="--sysroot=$CROSS_MUSL_SYSROOT" \ - QEMU_LD_PREFIX=$CROSS_MUSL_SYSROOT +RUN /musl-symlink.sh $CROSS_SYSROOT x86_64 + +COPY qemu-runner base-runner.sh / +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TARGET_RUNNER="/qemu-runner x86_64" +ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_RUNNER="$CROSS_TARGET_RUNNER" \ + AR_x86_64_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_unknown_linux_musl="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_unknown_linux_musl=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_linux_musl="--sysroot=$CROSS_SYSROOT" \ + QEMU_LD_PREFIX="$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=Linux \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=musl \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" + +RUN sed -e "s#@DEFAULT_QEMU_LD_PREFIX@#$QEMU_LD_PREFIX#g" -i /qemu-runner diff --git a/docker/Dockerfile.x86_64-unknown-netbsd b/docker/Dockerfile.x86_64-unknown-netbsd index f404df3c6..ba7a331ac 100644 --- a/docker/Dockerfile.x86_64-unknown-netbsd +++ b/docker/Dockerfile.x86_64-unknown-netbsd @@ -1,5 +1,5 @@ -FROM ubuntu:16.04 -ARG DEBIAN_FRONTEND=noninteractive +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive COPY common.sh lib.sh / RUN /common.sh @@ -10,10 +10,22 @@ RUN /cmake.sh COPY xargo.sh / RUN /xargo.sh +FROM cross-base as build + COPY netbsd.sh / RUN /netbsd.sh -ENV CARGO_TARGET_X86_64_UNKNOWN_NETBSD_LINKER=x86_64-unknown-netbsd-gcc \ - CC_x86_64_unknown_netbsd=x86_64-unknown-netbsd-gcc \ 
- CXX_x86_64_unknown_netbsd=x86_64-unknown-netbsd-g++ \ - BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_netbsd="--sysroot=/usr/local/x86_64-unknown-netbsd" +COPY toolchain.cmake /opt/toolchain.cmake + +ENV CROSS_TOOLCHAIN_PREFIX=x86_64-unknown-netbsd- +ENV CROSS_SYSROOT=/usr/local/x86_64-unknown-netbsd +ENV CARGO_TARGET_X86_64_UNKNOWN_NETBSD_LINKER="$CROSS_TOOLCHAIN_PREFIX"gcc \ + AR_x86_64_unknown_netbsd="$CROSS_TOOLCHAIN_PREFIX"ar \ + CC_x86_64_unknown_netbsd="$CROSS_TOOLCHAIN_PREFIX"gcc \ + CXX_x86_64_unknown_netbsd="$CROSS_TOOLCHAIN_PREFIX"g++ \ + CMAKE_TOOLCHAIN_FILE_x86_64_unknown_netbsd=/opt/toolchain.cmake \ + BINDGEN_EXTRA_CLANG_ARGS_x86_64_unknown_netbsd="--sysroot=$CROSS_SYSROOT" \ + CROSS_CMAKE_SYSTEM_NAME=NetBSD \ + CROSS_CMAKE_SYSTEM_PROCESSOR=x86_64 \ + CROSS_CMAKE_CRT=netbsd \ + CROSS_CMAKE_OBJECT_FLAGS="-ffunction-sections -fdata-sections -fPIC -m64" diff --git a/docker/Dockerfile.zig b/docker/Dockerfile.zig new file mode 100644 index 000000000..ef243ecef --- /dev/null +++ b/docker/Dockerfile.zig @@ -0,0 +1,24 @@ +FROM ubuntu:20.04 as cross-base +ENV DEBIAN_FRONTEND=noninteractive + +COPY common.sh lib.sh / +RUN /common.sh + +COPY cmake.sh / +RUN /cmake.sh + +COPY xargo.sh / +RUN /xargo.sh + +FROM cross-base as build + +ARG TARGETPLATFORM +COPY zig.sh / +RUN /zig.sh $TARGETPLATFORM + +# we don't export `BINDGEN_EXTRA_CLANG_ARGS`, `QEMU_LD_PREFIX`, or +# `PKG_CONFIG_PATH` since zig doesn't have a traditional sysroot structure, +# and we're not using standard, shared packages. none of the packages +# have runners either, since they do not ship with the required +# dynamic linker (`ld-linux-${arch}.so`). +ENV PATH=$PATH:/opt/zig diff --git a/docker/aarch64-linux-gnu-glibc.sh b/docker/aarch64-linux-gnu-glibc.sh new file mode 100755 index 000000000..adfe47070 --- /dev/null +++ b/docker/aarch64-linux-gnu-glibc.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +set -x +set -euo pipefail + +# shellcheck disable=SC1091 +. 
lib.sh + +unpack_rpm() { + local package="${1}" + curl --retry 3 "http://mirror.centos.org/altarch/7/os/aarch64/Packages/${package}" -O + rpm2cpio "${package}" | cpio -idmv +} + +symlink_gcc_lib() { + local prefix="${1}" + shift + local srcfile="${1}" + shift + local dstdir="/usr/lib/gcc/aarch64-linux-gnu" + + ln -s "${prefix}/lib/${srcfile}" "${dstdir}/4.8.2/${srcfile}" + ln -s "${prefix}/lib/${srcfile}" "${dstdir}/4.8.5/${srcfile}" + + local dstfile + for dstfile in "${@}"; do + ln -s "${prefix}/lib/${srcfile}" "${dstdir}/4.8.2/${dstfile}" + ln -s "${prefix}/lib/${srcfile}" "${dstdir}/4.8.5/${dstfile}" + done +} + +cp_gcc_archive() { + local name="${1}" + local srcdir="usr/lib/gcc/aarch64-redhat-linux/" + local dstdir="/usr/lib/gcc/aarch64-linux-gnu/" + cp "${srcdir}/4.8.2/${name}" "${dstdir}/4.8.2/${name}" + cp "${srcdir}/4.8.5/${name}" "${dstdir}/4.8.5/${name}" +} + +main() { + set_centos_ulimit + yum install -y epel-release + yum install -y gcc-aarch64-linux-gnu gcc-c++-aarch64-linux-gnu gcc-gfortran-aarch64-linux-gnu binutils-aarch64-linux-gnu binutils gcc-c++ glibc-devel + yum clean all + + local td + td="$(mktemp -d)" + + pushd "${td}" + + local target=aarch64-linux-gnu + local prefix="/usr/${target}" + local kernel_v4="4.18.20" + + curl --retry 3 "https://mirrors.edge.kernel.org/pub/linux/kernel/v4.x/linux-${kernel_v4}.tar.xz" -O + tar -xvf "linux-${kernel_v4}.tar.xz" + pushd "linux-${kernel_v4}" + make ARCH=arm64 INSTALL_HDR_PATH="${prefix}" headers_install + popd + + curl --retry 3 http://ftp.gnu.org/gnu/glibc/glibc-2.17.tar.xz -O + tar -xvf glibc-2.17.tar.xz + mkdir build + pushd build + CC=/usr/bin/aarch64-linux-gnu-gcc \ + CXX=/usr/bin/aarch64-linux-gnu-g++ \ + LD=/usr/bin/aarch64-linux-gnu-ld \ + AR=/usr/bin/aarch64-linux-gnu-ar \ + RANLIB=/usr/bin/aarch64-linux-gnu-ranlib \ + ../glibc-2.17/configure \ + --prefix="${prefix}" \ + --build="${MACHTYPE}" \ + --host="${target}" \ + --target="${target}" \ + --with-arch=aarch64 \ + --with-headers="${prefix}/include" \ + --libdir="${prefix}/lib" \ + --libexecdir="${prefix}/lib" + + make -j && make install + popd + + mkdir -p "${prefix}"/{include,lib} + mkdir -p "/usr/lib/gcc/aarch64-linux-gnu"/{4.8.2,4.8.5} + + mkdir libgcc + pushd libgcc + unpack_rpm "libgcc-4.8.5-44.el7.aarch64.rpm" + mv lib64/* "${prefix}/lib" + # C++ support needs `libgcc.so`, even though it warns about `libgcc_s.so` + symlink_gcc_lib "${prefix}" "libgcc_s.so.1" "libgcc_s.so" "libgcc.so" + popd + + mkdir libstdcpp + pushd libstdcpp + unpack_rpm "libstdc++-4.8.5-44.el7.aarch64.rpm" + unpack_rpm "libstdc++-devel-4.8.5-44.el7.aarch64.rpm" + unpack_rpm "libstdc++-static-4.8.5-44.el7.aarch64.rpm" + mv usr/include/* "${prefix}/include" + mv usr/lib64/* "${prefix}/lib" + symlink_gcc_lib "${prefix}" "libstdc++.so.6" "libstdc++.so" + cp_gcc_archive "libstdc++.a" + cp_gcc_archive "libsupc++.a" + popd + + local cpp_include=/usr/aarch64-linux-gnu/include/c++ + local cpp_482="${cpp_include}/4.8.2" + local cpp_485="${cpp_include}/4.8.5" + local redhat_482="${cpp_482}/aarch64-redhat-linux" + local redhat_485="${cpp_485}/aarch64-redhat-linux" + mv "${redhat_482}/bits"/* "${cpp_482}/bits" + mv "${redhat_482}/ext"/* "${cpp_482}/ext" + # these are currently empty, but might contain content later + mv "${redhat_485}/bits"/* "${cpp_485}/bits" || true + mv "${redhat_485}/ext"/* "${cpp_485}/ext" || true + + popd + + rm -rf "${td}" + rm "${0}" +} + +main "${@}" diff --git a/docker/aarch64-linux-musl-gcc.sh b/docker/aarch64-linux-musl-gcc.sh deleted file mode 100755 index
8cfe320db..000000000 --- a/docker/aarch64-linux-musl-gcc.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -# this linker wrapper works around issue https://github.com/rust-lang/rust/issues/46651 -# which affects toolchains older than 1.48 -# older toolchains require the `-lgcc` linker flag otherwise they fail to link - -set -x -set -euo pipefail - -main() { - local release= - release=$(rustc -Vv | grep '^release:' | cut -d ':' -f2) - # NOTE we assume `major` is always "1" - local minor= - minor=$(echo "$release" | cut -d '.' -f2) - - if (( minor >= 48 )); then - # no workaround - aarch64-linux-musl-gcc "${@}" - else - # apply workaround - aarch64-linux-musl-gcc "${@}" -lgcc - fi -} - -main "${@}" diff --git a/docker/android-ndk.sh b/docker/android-ndk.sh index ff1b7e052..6f4852236 100755 --- a/docker/android-ndk.sh +++ b/docker/android-ndk.sh @@ -6,49 +6,77 @@ set -euo pipefail # shellcheck disable=SC1091 . lib.sh -NDK_URL=https://dl.google.com/android/repository/android-ndk-r21d-linux-x86_64.zip - main() { - local arch="${1}" \ api="${2}" + local arch="${1}" - install_packages curl unzip python + # python3 is still needed for newer NDK versions, since it + # simplifies making symlinks even though the toolchain is prebuilt + install_packages curl python python3 + get_ndk_info + if [[ "${NDK_VERSION}" -le 9 ]]; then + install_packages bzip2 + else + install_packages unzip + fi local td td="$(mktemp -d)" pushd "${td}" curl --retry 3 -sSfL "${NDK_URL}" -O - unzip -q android-ndk-*.zip - rm android-ndk-*.zip - pushd android-ndk-* - ./build/tools/make_standalone_toolchain.py \ - --install-dir /android-ndk \ - --arch "${arch}" \ - --api "${api}" + if [[ "${NDK_VERSION}" -le 9 ]]; then + tar -xjf "${NDK_FILENAME}" + else + unzip -q "${NDK_FILENAME}" + fi + rm "${NDK_FILENAME}" + pushd "android-ndk-${ANDROID_NDK}" + # android NDK versions <= 13 error without the verbose flag + local build_cmd= + local api= + if [[ "${NDK_VERSION}" -le 11 ]]; then + build_cmd=make-standalone-toolchain.sh + api=--platform="android-${ANDROID_SDK}" + else + build_cmd=make_standalone_toolchain.py + api=--api="${ANDROID_SDK}" + fi + "./build/tools/${build_cmd}" \ + --install-dir=/android-ndk \ + --arch="${arch}" \ + "${api}" \ + --verbose + + # the android bash script installs the executables with 750, not 755 + # permissions, and the other files without read permissions. + if [[ "${NDK_VERSION}" -le 11 ]]; then + chmod -R 755 /android-ndk/bin + chmod -R 755 /android-ndk/libexec + chmod -R +r /android-ndk + fi # clean up unused toolchains to reduce image size local triple local triples local triple_arch="${arch}" case "${arch}" in - arm64) - triple_arch="aarch64" - ;; - x86) - triple_arch="i686" - ;; + arm64) + triple_arch="aarch64" + ;; + x86) + triple_arch="i686" + ;; esac triples=( - "aarch64-linux-android" - "arm-linux-androideabi" - "i686-linux-android" - "x86_64-linux-android" + "aarch64-linux-android" + "arm-linux-androideabi" + "i686-linux-android" + "x86_64-linux-android" ) for triple in "${triples[@]}"; do - if ! [[ "${triple}" =~ ^"${triple_arch}".* ]]; then - rm -rf "/android-ndk/sysroot/usr/lib/${triple}" - fi + if ! [[ "${triple}" =~ ^"${triple_arch}".* ]]; then + rm -rf "/android-ndk/sysroot/usr/lib/${triple}" + fi done purge_packages @@ -60,4 +88,27 @@ main() { rm "${0}" } +get_ndk_info() { + local ndk_os=linux + local ndk_platform="${ndk_os}-x86_64" + # the format is generally r21d, r25b, etc.; it can, however, be r24, for example.
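The digit-only version extraction that follows is worth a quick illustration; a hedged sketch (the sample tags are only examples):

```bash
# `tr -dc '0-9'` deletes every non-digit, so NDK release tags with or
# without a letter suffix reduce to an integer that `-le`/`-ge` can compare.
for ndk in r9d r21d r24 r25b; do
    printf '%s -> %s\n' "${ndk}" "$(echo "${ndk}" | tr -dc '0-9')"
done
# r9d -> 9, r21d -> 21, r24 -> 24, r25b -> 25
```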
+ NDK_VERSION=$(echo "${ANDROID_NDK}" | tr -dc '0-9') + # android NDK 23 and higher moved from `linux-x86_64` to `linux` + if [[ "${NDK_VERSION}" -ge 23 ]]; then + NDK_FILENAME="android-ndk-${ANDROID_NDK}-${ndk_os}.zip" + elif [[ "${NDK_VERSION}" -le 9 ]]; then + NDK_FILENAME="android-ndk-${ANDROID_NDK}-${ndk_platform}.tar.bz2" + else + NDK_FILENAME="android-ndk-${ANDROID_NDK}-${ndk_platform}.zip" + fi + if [[ "${NDK_VERSION}" -le 9 ]]; then + NDK_URL="https://dl.google.com/android/ndk/${NDK_FILENAME}" + else + NDK_URL="https://dl.google.com/android/repository/${NDK_FILENAME}" + fi + export NDK_VERSION + export NDK_FILENAME + export NDK_URL +} + main "${@}" diff --git a/docker/android-runner b/docker/android-runner index 476e310ac..02570746f 100755 --- a/docker/android-runner +++ b/docker/android-runner @@ -31,7 +31,10 @@ case "${arch}" in esac libdir="/android-ndk/sysroot/usr/lib/${android_abi}" -export LD_PRELOAD="${libdir}/libc++_shared.so" +# Android 5.x doesn't support C++. +if [[ -f "${libdir}/libc++_shared.so" ]]; then + export LD_PRELOAD="${libdir}/libc++_shared.so" +fi case "${CROSS_RUNNER}" in native) exec "${@}" diff --git a/docker/android-symlink.sh b/docker/android-symlink.sh new file mode 100755 index 000000000..5995b0904 --- /dev/null +++ b/docker/android-symlink.sh @@ -0,0 +1,81 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2125,SC2207 + +set -x +set -euo pipefail + +main() { + local arch="${1}" + local target="${2}" + local libdir="/android-ndk/lib64/clang/"*"/lib/linux/${arch}/" + local expanded=($(echo "/android-ndk/lib64/clang/"*"/lib/linux/${arch}/")) + + if [[ "${#expanded[@]}" == "1" ]] && [[ "${expanded[0]}" != "${libdir}" ]]; then + libdir=$(realpath "/android-ndk/lib64/clang/"*"/lib/linux/${arch}/") + + # As of Android NDK r23-beta3, libgcc has been replaced by libunwind. + # Older Rust versions always link to libgcc, so we need a symlink. + # https://github.com/rust-lang/rust/pull/85806 + if [[ -f "${libdir}/libunwind.a" ]]; then + ln -s "${libdir}/libunwind.a" "${libdir}/libgcc.a" + fi + fi + + # older SDK versions install the libraries directly in the lib directory. + local sysroot=/android-ndk/sysroot + local ndk_libdir="${sysroot}/usr/lib/${target}/" + if [[ "${ANDROID_SYSTEM_NONE}" != "1" ]]; then + if [[ -d "${ndk_libdir}/" ]]; then + cp "${ndk_libdir}/${ANDROID_SDK}/libz.so" /system/lib/ + else + cp "${sysroot}/usr/lib/libz.so" /system/lib/ + fi + fi + + # later NDK versions switch to using `llvm-${tool}` rather than `${target}-${tool}`; + # we want to ensure backwards-compatible aliases exist. + local tool= + local tool_src= + local tool_dst= + for tool in ar as ld nm objcopy objdump ranlib readelf size strings strip; do + tool_src="/android-ndk/bin/llvm-${tool}" + tool_dst="/android-ndk/bin/${target}-${tool}" + if [[ ! -f "${tool_dst}" ]] && [[ -f "${tool_src}" ]]; then + ln -s "${tool_src}" "${tool_dst}" + elif [[ "${tool}" == "ld" ]] && [[ ! -f "${tool_dst}" ]]; then + ln -s "/android-ndk/bin/${tool}" "${tool_dst}" + fi + done + + # this is required for CMake builds, since the first pass doesn't + # add on the SDK API level to the linker search path. for example, + # it will set the linker search path to `${sysroot}/usr/lib/${target}/`, + # but not to `${sysroot}/usr/lib/${target}/${ANDROID_SDK}`.
this isn't + # fixable seemingly with **any** environment variable or CMake option: + # cmake with `CMAKE_ANDROID_STANDALONE_TOOLCHAIN` seemingly ignores: + # - `LD_LIBRARY_PATH` + # - `CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES` + # - `CMAKE_C_COMPILER` + # - `CMAKE_CXX_COMPILER` + # + # running the cmake configuration a second time works, but this isn't + # adequate. the resulting config sets `CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES` + # but this is ignored in our toolchain file. likewise, not testing the + # compiler via `set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)` fails + # because during the build it will not add the API level to the linker + # search path. + local lib= + local libname= + if [[ -d "${ndk_libdir}" ]] && [[ -d "${ndk_libdir}/${ANDROID_SDK}" ]]; then + for lib in "${ndk_libdir}/${ANDROID_SDK}"/*; do + libname=$(basename "${lib}") + if [[ ! -f "${ndk_libdir}/${libname}" ]]; then + ln -s "${lib}" "${ndk_libdir}/${libname}" + fi + done + fi + + rm "${0}" +} + +main "${@}" diff --git a/docker/android-system.sh b/docker/android-system.sh index 9a5ddb6b1..412758fdb 100755 --- a/docker/android-system.sh +++ b/docker/android-system.sh @@ -1,4 +1,31 @@ #!/usr/bin/env bash +# The API level details are mentioned here: +# https://developer.android.com/studio/releases/platforms +# These are controlled by `ANDROID_VERSION` and `ANDROID_SDK`, +# for example, `ANDROID_SDK=30` and `ANDROID_VERSION=11.0.0_r48`. +# +# You can also build the entire Android source tree with +# `ANDROID_SYSTEM_COMPLETE`, or skip it altogether with +# `ANDROID_SYSTEM_NONE`. Note that runners will not be +# available if the Android system is not built. +# +# The versions are: +# 5.0: 21 (tested at NDK r10e and r13b, 5.0.0_r1) +# 5.1: 22 (tested at NDK r21d, 5.1.1_r38, unused DT) +# 6.0: 23 (tested at NDK r21d, 6.0.1_r81) +# 7.0: 24 (tested at NDK r21d, 7.0.0_r36) +# 7.1: 25 (tested at NDK r21d, 7.1.2_r39, not supported) +# 8.0: 26 (tested at NDK r21d, 8.0.0_r51) +# 8.1: 27 (tested at NDK r21d, 8.1.0_r81) +# 9.0: 28 (tested at NDK r21d and r25b, 9.0.0_r1) +# 10.0: 29 (tested at NDK r25b, 10.0.0_r47) +# 11.0: 30 (tested at NDK r25b, 11.0.0_r48) +# 12.0: 31 (unable to build at 12.0.0_r34) +# 12.1: 32 (unable to build at 12.1.0_r27) +# 13.0: 33 +# +# API level 25 seems to be missing from Android NDK versions, +# and therefore is not supported. set -x set -euo pipefail @@ -7,32 +34,39 @@ . lib.sh main() { - local arch="${1}" - local td - td="$(mktemp -d)" - pushd "${td}" + export ARCH="${1}" + MAJOR_VERSION=$(echo "${ANDROID_VERSION}" | cut -d '.' -f 1) + MINOR_VERSION=$(echo "${ANDROID_VERSION}" | cut -d '.' -f 2) + TAG="android-${ANDROID_VERSION}" - # fake java and javac, it is not necessary for what we build, but the build - # script ask for it - cat << EOF > /usr/bin/java -#!/usr/bin/env bash -echo "java version \"1.7.0\"" -echo "OpenJDK Runtime Environment (IcedTea 2.6.9)" -echo "OpenJDK 64-Bit Server VM (build 24.131-b00, mixed mode)" -EOF + export MAJOR_VERSION + export MINOR_VERSION + export TAG - cat << EOF > /usr/bin/javac -#!/usr/bin/env bash -echo "javac 1.7.0" -EOF + if [[ "${ANDROID_SYSTEM_NONE}" == "1" ]]; then + rm -rf "${PYTHON_TMPDIR}" + rm "${0}" + return + fi - chmod +x /usr/bin/java - chmod +x /usr/bin/javac + if [[ "${ANDROID_SYSTEM_COMPLETE}" != "1" ]] && [[ "${MAJOR_VERSION}" -ge 12 ]]; then + echo "Android versions 12 and higher couple APEX tightly into the build system." 1>&2 + echo "These are currently unsupported, and are unlikely to ever be supported."
1>&2 + echo "Try using a complete Android system build or disable building Android system." 1>&2 + echo "Note that a complete Android system build is slow and creates massive images." 1>&2 + echo "Disabling the Android system build will prevent the use of Android runners." 1>&2 + echo "If you want support for newer versions, contributions are always appreciated." 1>&2 + exit 1 + elif [[ "${MAJOR_VERSION}" -eq 7 ]] && [[ "${MINOR_VERSION}" -eq 1 ]]; then + echo "Android version 7.1 is not supported." 1>&2 + exit 1 + fi - # more faking - export ANDROID_JAVA_HOME=/tmp - mkdir /tmp/lib/ - touch /tmp/lib/tools.jar + local td + td="$(mktemp -d)" + pushd "${td}" + + fake_java install_packages ca-certificates \ curl \ @@ -46,64 +80,481 @@ EOF python \ python3 \ xz-utils - purge_list+=(default-jre) curl --retry 3 -sSfL https://storage.googleapis.com/git-repo-downloads/repo -O chmod +x repo + python3 ./repo init -u https://android.googlesource.com/platform/manifest -b "${TAG}" + + local tools=( + cat chmod chown cmp cp ctrlaltdel date df dmesg du hd id ifconfig + iftop insmod ioctl ionice kill ln log ls lsmod lsof lsusb md5 mkdir + mount mv nandread netstat notify printenv ps reboot renice rm rmdir + rmmod route schedtop sendevent setconsole setprop sleep smd start + stop sync top touch umount uptime vmstat watchprops wipe + ) + if [[ "${ANDROID_SYSTEM_COMPLETE}" == "1" ]]; then + android_repo_complete + else + case "${MAJOR_VERSION}" in + 5) + android_repo_v5 + tools+=(dd getevent getprop grep newfs_msdos) + ;; + 6) + android_repo_v6 + ;; + 7) + android_repo_v7 + ;; + 8) + android_repo_v8 + ;; + 9) + android_repo_v9 + ;; + 10) + android_repo_v10 + ;; + 11) + android_repo_v11 + ;; + *) + echo "Currently unsupported Android version ${MAJOR_VERSION}." 1>&2 + echo "Please submit a feature request if you need support." 1>&2 + exit 1 + ;; + esac + fi + + build_android + install_android "${tools[@]}" + + remove_java + purge_packages + + popd + + rm -rf "${td}" + rm -rf "${PYTHON_TMPDIR}" + rm "${0}" +} + +# java isn't required for the build, but the build expects to +# find a java compiler. 
the supported android versions are: +# https://source.android.com/docs/setup/start/older-versions +# Android 7: OpenJDK-8 +fake_java() { + local java_type= + local java_version= + local jre_info= + local build_info= + + case "${MAJOR_VERSION}" in + 5|6) + java_type=java + java_version=1.7.0 + jre_info="IcedTea 2.6.9" + build_info="build 24.131-b00, mixed mode" + ;; + *) + java_type=openjdk + java_version=1.8.0_342 + jre_info="build 1.8.0_342-8u342-b07-0ubuntu1~20.04-b07" + build_info="build 25.342-b07, mixed mode" + ;; + esac + + cat << EOF > /usr/bin/java +#!/usr/bin/env bash +echo "${java_type} version \"${java_version}\"" +echo "OpenJDK Runtime Environment (${jre_info})" +echo "OpenJDK 64-Bit Server VM (${build_info})" +EOF + + cat << EOF > /usr/bin/javac +#!/usr/bin/env bash +echo "javac ${java_version}" +EOF + + chmod +x /usr/bin/java + chmod +x /usr/bin/javac + + # more faking + export ANDROID_JAVA_HOME=/tmp + mkdir -p /tmp/lib/ + touch /tmp/lib/tools.jar +} + +remove_java() { + rm /usr/bin/java + rm /usr/bin/javac + rm /tmp/lib/tools.jar +} + +build_android() { + if [[ "${ANDROID_SYSTEM_COMPLETE}" != "1" ]]; then + export ALLOW_MISSING_DEPENDENCIES=true + fi + + set +u + # shellcheck disable=SC1091 + source build/envsetup.sh + lunch "aosp_${ARCH}-user" + if [[ "${ANDROID_SYSTEM_COMPLETE}" != "1" ]]; then + mmma bionic/ + mmma external/mksh/ + mmma system/core/toolbox/ + else + mma + fi + if [[ "${ANDROID_SYSTEM_COMPLETE}" != "1" ]] && [[ "${MAJOR_VERSION}" -ge 11 ]]; then + # for some reason, building bionic doesn't build linker64 on the first pass; + # doing a partial build and a rebuild is just as fast though. + rm -rf out/target/product/generic + mmma bionic/ + fi + set -u +} + +install_android() { + local outdir= + if [[ "${ARCH}" = "arm" ]]; then + outdir=out/target/product/generic + else + outdir="out/target/product/generic_${ARCH}" + fi + mv "${outdir}/system/" / + if [[ "${ANDROID_SYSTEM_COMPLETE}" == "1" ]] && [[ -d "${outdir}/apex" ]]; then + # can use the APEX linker, no need to use the bootstrap one + mv "${outdir}/apex/" / + elif [[ "${MAJOR_VERSION}" -ge 10 ]]; then + symlink_bootstrap + fi + + # list from https://elinux.org/Android_toolbox + local tool= + for tool in "${@}"; do + if [[ ! -f "/system/bin/${tool}" ]]; then + ln -s /system/bin/toolbox "/system/bin/${tool}" + fi + done + + echo "127.0.0.1 localhost" > /system/etc/hosts +} + +symlink_bootstrap() { + # for Android 10+, we need to use the bootstrap linker rather than + # the APEX linker, which is gigantic. we also symlink the ASAN + # linker to the bootstrapped one, just in case. + local linker + local file + + if compgen -G /system/bin/bootstrap/* >/dev/null 2>&1; then + for linker in /system/bin/bootstrap/*; do + file=$(basename "${linker}") + unlink "/system/bin/${file}" + ln -s "/system/bin/bootstrap/${file}" "/system/bin/${file}" + done + fi + + # also need to ensure the shared libraries aren't symlinks + local lib + local libdir + for libdir in /system/lib{,64}; do + if compgen -G "${libdir}/bootstrap/"* >/dev/null 2>&1; then + for lib in "${libdir}/bootstrap/"*; do + file=$(basename "${lib}") + unlink "${libdir}/${file}" + ln -s "${libdir}/bootstrap/${file}" "${libdir}/${file}" + done + fi + done +} + +# these are the minimum modules needed to build bionic +# this was created by trial and error. this is based on the minimum +# set of modules required for each android version, starting with +# a minimal number of dependencies.
for android 10+ versions, we use +# the bootstrap linker rather than the APEX linker for non-complete +# system builds, as the APEX linker drags in nearly the entire Android +# runtime, requiring 60+GB images. for minimal builds, we need to avoid +# APEX altogether, and this gets trickier starting in Android 10 +# and much more difficult in newer versions. + +android_repo_complete() { + python3 ./repo sync -c +} + +# tested on 5.0.0_r1 (SDK 21) +# tested on 5.1.1_r38 (SDK 22) +android_repo_v5() { + sync bionic + sync build + sync external/compiler-rt + sync external/jemalloc + sync external/libcxx + sync external/libcxxabi + sync external/libselinux + sync external/mksh + sync external/openssl + sync external/pcre + sync external/stlport + sync prebuilts/clang/linux-x86/host/3.5 + sync system/core + + case "${ARCH}" in + arm) + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.8 + ;; + arm64) + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.8 + sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 + ;; + x86) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.8 + ;; + x86_64) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.8 + ;; + esac + + # avoid build tests + rm bionic/linker/tests/Android.mk + rm bionic/tests/Android.mk + rm bionic/benchmarks/Android.mk + + # patch the linker to avoid the error + # FATAL: kernel did not supply AT_SECURE + sed -i -e 's/if (!kernel_supplied_AT_SECURE)/if (false)/g' bionic/linker/linker_environ.cpp +} + +# tested on 6.0.1_r81 (SDK 23) +android_repo_v6() { + sync bionic + sync build + sync external/compiler-rt + sync external/elfutils + sync external/jemalloc + sync external/libcxx + sync external/libcxxabi + sync external/libselinux + sync external/mksh + sync external/pcre + sync external/safe-iop + sync external/zlib + sync libnativehelper + sync prebuilts/clang/linux-x86/host/3.6 + sync prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 + sync prebuilts/misc + sync system/core + + case "${ARCH}" in + arm) + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + ;; + arm64) + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 + ;; + x86) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + ;; + x86_64) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + ;; + esac + + # avoid build tests + rm bionic/linker/tests/Android.mk + rm bionic/tests/Android.mk + rm bionic/benchmarks/Android.mk + # we don't need the relocation packer, and removing + # the unittests from it is a bit of work. 
+    rm bionic/tools/relocation_packer/Android.mk
+}
+
+# tested on 7.0.0_r36 (SDK 24)
+# tested on 7.1.2_r39 (SDK 25, not supported)
+# API level 25, required for Android 7.1, is not provided in NDKs
+android_repo_v7() {
+    sync bionic
+    sync build
+    sync build/kati
+    sync external/boringssl
+    sync external/compiler-rt
+    sync external/elfutils
+    sync external/jemalloc
+    sync external/libcxx
+    sync external/libcxxabi
+    sync external/libselinux
+    sync external/libunwind
+    sync external/libunwind_llvm
+    sync external/llvm
+    sync external/mksh
+    sync external/pcre
+    sync external/safe-iop
+    sync external/zlib
+    sync prebuilts/clang/host/linux-x86
+    sync prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8
+    sync prebuilts/misc
+    sync prebuilts/ndk
+    sync prebuilts/ninja/linux-x86
+    sync system/core
+
+    case "${ARCH}" in
+        arm)
+            sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
+            ;;
+        arm64)
+            sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
+            sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9
+            ;;
+        x86)
+            sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9
+            ;;
+        x86_64)
+            sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9
+            ;;
+    esac
+
+    # avoid build tests
+    rm bionic/linker/tests/Android.mk
+    rm bionic/tests/Android.mk
+    rm bionic/benchmarks/Android.mk
+    rm prebuilts/misc/common/android-support-test/Android.mk
+    # we don't need the relocation packer, and removing
+    # the unittests from it is a bit of work.
+    rm bionic/tools/relocation_packer/Android.mk
+
+    remove_tests
+}
+
+# tested on 8.0.0_r51 (SDK 26)
+# tested on 8.1.0_r81 (SDK 27)
+android_repo_v8() {
+    # need to build LLVM components, or libLLVM is disabled.
+    export FORCE_BUILD_LLVM_COMPONENTS=true
+
+    sync bionic
+    sync build/blueprint
+    sync build/make
+    sync build/soong
+    sync external/boringssl
+    sync external/clang
+    sync external/compiler-rt
+    sync external/elfutils
+    sync external/jemalloc
+    sync external/libcxx
+    sync external/libcxxabi
+    sync external/libevent
+    sync external/libunwind
+    sync external/libunwind_llvm
+    sync external/llvm
+    sync external/lzma
+    sync external/mksh
+    sync external/pcre
+    sync external/safe-iop
+    sync external/selinux
+    sync external/zlib
+    sync libnativehelper
+    sync prebuilts/build-tools
+    sync prebuilts/clang/host/linux-x86
+    sync prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8
+    sync prebuilts/go/linux-x86
+    # we only need the relocation packer binary. everything else
+    # interferes with the build, so we remove the makefiles below.
+ sync prebuilts/misc + sync prebuilts/ndk + sync system/core + sync toolchain/binutils + + case "${ARCH}" in + arm) + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + ;; + arm64) + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 + ;; + x86) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + ;; + x86_64) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + ;; + esac + + # avoid build tests + rm bionic/linker/tests/Android.mk + rm bionic/tests/Android.mk + rm bionic/tests/Android.bp + rm bionic/benchmarks/Android.bp + rm bionic/tests/libs/Android.bp - # this is the minimum set of modules that are need to build bionic - # this was created by trial and error - python3 ./repo init -u https://android.googlesource.com/platform/manifest -b android-9.0.0_r1 - - python3 ./repo sync -c art - python3 ./repo sync -c bionic - python3 ./repo sync -c build/make - python3 ./repo sync -c build/blueprint - python3 ./repo sync -c build/soong - python3 ./repo sync -c external/clang - python3 ./repo sync -c external/compiler-rt - python3 ./repo sync -c external/elfutils - python3 ./repo sync -c external/jemalloc - python3 ./repo sync -c external/libcxx - python3 ./repo sync -c external/libcxxabi - python3 ./repo sync -c external/libunwind - python3 ./repo sync -c external/libunwind_llvm - python3 ./repo sync -c external/llvm - python3 ./repo sync -c external/lzma - python3 ./repo sync -c external/mksh - python3 ./repo sync -c external/safe-iop - python3 ./repo sync -c external/valgrind - python3 ./repo sync -c external/vixl - python3 ./repo sync -c external/zlib - python3 ./repo sync -c frameworks/hardware/interfaces - python3 ./repo sync -c hardware/interfaces - python3 ./repo sync -c libnativehelper - python3 ./repo sync -c prebuilts/build-tools - python3 ./repo sync -c prebuilts/clang/host/linux-x86 - python3 ./repo sync -c prebuilts/clang-tools - #python3 ./repo sync -c prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 - #python3 ./repo sync -c prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 - python3 ./repo sync -c prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 - python3 ./repo sync -c prebuilts/go/linux-x86 - python3 ./repo sync -c prebuilts/misc - python3 ./repo sync -c prebuilts/sdk - python3 ./repo sync -c system/core - python3 ./repo sync -c system/libhidl - python3 ./repo sync -c system/tools/hidl - - case "${arch}" in + # remove extra utilities + rm system/core/libgrallocusage/Android.bp + rm system/core/libmemtrack/Android.bp + rm system/core/libsysutils/Android.bp + local path= + find prebuilts/misc/ -name 'Android.mk' | while IFS= read -r path; do + rm "${path}" + done + + # avoid java dependencies + rm external/lzma/Java/Tukaani/Android.mk + + remove_tests +} + +# tested on 9.0.0_r1 (SDK 28) +android_repo_v9() { + sync art + sync bionic + sync build/blueprint + sync build/make + sync build/soong + sync external/clang + sync external/compiler-rt + sync external/elfutils + sync external/jemalloc + sync external/libcxx + sync external/libcxxabi + sync external/libunwind + sync external/libunwind_llvm + sync external/llvm + sync external/lzma + sync external/mksh + sync external/safe-iop + sync external/valgrind + sync external/vixl + sync external/zlib + sync frameworks/hardware/interfaces + sync hardware/interfaces + sync libnativehelper + sync prebuilts/build-tools + sync prebuilts/clang-tools + sync prebuilts/clang/host/linux-x86 + sync 
prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.15-4.8 + sync prebuilts/go/linux-x86 + sync prebuilts/misc + sync prebuilts/sdk + sync system/core + sync system/libhidl + sync system/tools/hidl + + case "${ARCH}" in arm) - python3 ./repo sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 ;; arm64) - python3 ./repo sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 - python3 ./repo sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 ;; x86) - python3 ./repo sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 ;; x86_64) - python3 ./repo sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 ;; esac @@ -116,49 +567,264 @@ EOF rm bionic/tests/headers/Android.bp rm bionic/tests/headers/posix/Android.bp - sed -i -z -e 's/cc_test {.*}//g' bionic/libc/malloc_debug/Android.bp - sed -i -z -e 's/cc_test {.*}//g' bionic/libc/malloc_hooks/Android.bp - sed -i -z -e 's/cc_test_host {.*}//g' bionic/tools/relocation_packer/Android.bp + remove_tests +} - export ALLOW_MISSING_DEPENDENCIES=true +# tested on 10.0.0_r47 (SDK 29) +android_repo_v10() { + sync art + sync bionic + sync build/blueprint + sync build/make + sync build/soong + sync external/clang + sync external/compiler-rt + sync external/elfutils + sync external/golang-protobuf + sync external/jemalloc + sync external/jemalloc_new + sync external/libcxx + sync external/libcxxabi + sync external/libunwind + sync external/libunwind_llvm + sync external/llvm + sync external/lzma + sync external/mksh + sync external/vixl + sync external/zlib + sync libnativehelper + sync prebuilts/build-tools + sync prebuilts/clang-tools + sync prebuilts/clang/host/linux-x86 + sync prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8 + sync prebuilts/go/linux-x86 + sync prebuilts/ndk + sync prebuilts/sdk + sync prebuilts/vndk/v28 + sync system/core + sync system/sepolicy - # patch the linker to avoid the error - # FATAL: kernel did not supply AT_SECURE - #sed -i -e 's/if (!kernel_supplied_AT_SECURE)/if (false)/g' bionic/linker/linker_environ.cpp + case "${ARCH}" in + arm) + sync external/arm-optimized-routines + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + ;; + arm64) + sync external/arm-optimized-routines + sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9 + sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9 + ;; + x86) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + ;; + x86_64) + sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9 + ;; + esac - set +u - # shellcheck disable=SC1091 - source build/envsetup.sh - lunch "aosp_${arch}-user" - mmma bionic/ - mmma external/mksh/ - mmma system/core/toolbox/ - set -u + # avoid build tests + rm bionic/tests/Android.mk + rm bionic/tests/Android.bp + rm bionic/benchmarks/Android.bp + rm bionic/tests/libs/Android.bp + rm bionic/tests/headers/Android.bp + rm bionic/tests/headers/posix/Android.bp - if [[ "${arch}" = "arm" ]]; then - mv out/target/product/generic/system/ / - else - mv "out/target/product/generic_${arch}/system"/ / - fi + remove_tests +} - # list from https://elinux.org/Android_toolbox - for tool in cat chmod chown cmp cp ctrlaltdel date df dmesg du \ - hd id ifconfig iftop insmod ioctl ionice kill ln log ls \ - lsmod lsof 
lsusb md5 mkdir mount mv nandread netstat notify \
-        printenv ps reboot renice rm rmdir rmmod route schedtop sendevent \
-        setconsole setprop sleep smd start stop sync top touch umount \
-        uptime vmstat watchprops wipe; do
-        ln -s /system/bin/toolbox "/system/bin/${tool}"
-    done
+android_repo_v11() {
+    sync art
+    sync bionic
+    sync bootable/recovery
+    sync build/blueprint
+    sync build/make
+    sync build/soong
+    sync external/clang
+    sync external/compiler-rt
+    sync external/elfutils
+    sync external/fmtlib
+    sync external/golang-protobuf
+    sync external/gwp_asan
+    sync external/jemalloc
+    sync external/jemalloc_new
+    sync external/libcxx
+    sync external/libcxxabi
+    sync external/libunwind
+    sync external/libunwind_llvm
+    sync external/llvm
+    sync external/lzma
+    sync external/mksh
+    sync external/scudo
+    sync external/zlib
+    sync prebuilts/build-tools
+    sync prebuilts/clang-tools
+    sync prebuilts/clang/host/linux-x86
+    sync prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8
+    sync prebuilts/go/linux-x86
+    sync prebuilts/sdk
+    sync prebuilts/vndk/v28
+    sync prebuilts/vndk/v29
+    sync system/core
+    sync system/sepolicy
-    echo "127.0.0.1 localhost" > /system/etc/hosts
+    case "${ARCH}" in
+        arm)
+            sync external/arm-optimized-routines
+            sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
+            ;;
+        arm64)
+            sync external/arm-optimized-routines
+            sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
+            sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9
+            ;;
+        x86)
+            sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9
+            ;;
+        x86_64)
+            sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9
+            ;;
+    esac
-    purge_packages
+    # avoid build tests
+    rm bionic/tests/Android.mk
+    rm bionic/tests/Android.bp
+    rm bionic/benchmarks/Android.bp
+    rm bionic/tests/libs/Android.bp
+    rm bionic/tests/headers/Android.bp
+    rm bionic/tests/headers/posix/Android.bp
-    popd
+    # make sure we don't build benchmarks or apex
+    rm -r bionic/apex
+    rm -r bionic/benchmarks/
-    rm -rf "${td}"
-    rm "${0}"
+    # libziparchive has tests in the header, remove them
+    local libziparchive_h="system/core/libziparchive/include/ziparchive/zip_writer.h"
+    sed -i -e 's/#include <gtest\/gtest_prod.h>//g' "${libziparchive_h}"
+    sed -i -e 's/FRIEND_TEST(zipwriter, WriteToUnseekableFile);//g' "${libziparchive_h}"
+
+    remove_tests
+}
+
+android_repo_v12() {
+    # FIXME: this is a work in progress, and is unlikely to ever
+    # be completed, since apex is now heavily integrated into the
+    # build system. `external/mksh` and `system/core/toolbox` build,
+    # however, `bionic`, the most important module, does not.
+    #
+    # the error messages look like the following:
+    # internal error: panic in GenerateBuildActions for module "com.android.example.apex" variant "android_common_com.android.example.apex_image"
+    # fixing this requires either a comprehensive removal of APEX from the build
+    # or adding numerous APEX dependencies, which defeats the purpose of a
+    # minimal bionic build.
+    sync art
+    sync bionic
+    sync build/blueprint
+    sync build/make
+    sync build/soong
+    sync external/apache-xml
+    sync external/bouncycastle
+    sync external/clang
+    sync external/compiler-rt
+    sync external/conscrypt
+    sync external/elfutils
+    sync external/fmtlib
+    sync external/golang-protobuf
+    sync external/gwp_asan
+    sync external/icu
+    sync external/jemalloc
+    sync external/jemalloc_new
+    sync external/libcxx
+    sync external/libcxxabi
+    sync external/libunwind
+    sync external/libunwind_llvm
+    sync external/llvm
+    sync external/lzma
+    sync external/mksh
+    sync external/okhttp
+    sync external/scudo
+    sync external/starlark-go
+    sync external/zlib
+    sync libcore
+    sync prebuilts/build-tools
+    sync prebuilts/clang-tools
+    sync prebuilts/clang/host/linux-x86
+    sync prebuilts/gcc/linux-x86/host/x86_64-linux-glibc2.17-4.8
+    sync prebuilts/go/linux-x86
+    sync prebuilts/sdk
+    sync prebuilts/vndk/v28
+    sync prebuilts/vndk/v29
+    sync prebuilts/vndk/v30
+    sync system/core
+    sync system/libbase
+    sync system/linkerconfig
+    sync system/logging
+    sync system/sepolicy
+    sync system/tools/xsdc
+    sync tools/metalava
+    # these tools also seem to be required, since apex is now tightly
+    # coupled with the bionic build. unfortunately, we want to avoid
+    # building apex at all costs.
+    #sync system/apex
+    #sync system/tools/aidl
+
+    case "${ARCH}" in
+        arm)
+            sync external/arm-optimized-routines
+            sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
+            ;;
+        arm64)
+            sync external/arm-optimized-routines
+            sync prebuilts/gcc/linux-x86/arm/arm-linux-androideabi-4.9
+            sync prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9
+            ;;
+        x86)
+            sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9
+            ;;
+        x86_64)
+            sync prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9
+            ;;
+    esac
+
+    # avoid build tests
+    rm bionic/tests/Android.mk
+    rm bionic/tests/Android.bp
+    rm bionic/benchmarks/Android.bp
+    rm bionic/tests/libs/Android.bp
+    rm bionic/tests/headers/Android.bp
+    rm bionic/tests/headers/posix/Android.bp
+
+    # make sure we don't build benchmarks or apex
+    rm -r bionic/apex
+    rm -r bionic/benchmarks/
+    rm -r bionic/tests/
+    rm -r system/linkerconfig/testmodules
+
+    remove_tests
+}
+
+remove_tests() {
+    install_packages python3-pip
+
+    local version=
+    version=$(python3 -c 'import sys
+major = sys.version_info.major
+minor = sys.version_info.minor
+print(f"{major}.{minor}")')
+    set +u
+    export PYTHONPATH="${PYTHON_TMPDIR}/lib/python${version}/site-packages/:${PYTHONPATH}"
+    set -u
+    mkdir -p "${PYTHON_TMPDIR}"
+    python3 -m pip install sly==0.4.0 --prefix "${PYTHON_TMPDIR}"
+    python3 -m pip install google-re2==1.0 --prefix "${PYTHON_TMPDIR}"
+
+    python3 "${PYTHON_TMPDIR}/scripts/build-system.py" \
+        --remove-tests \
+        --verbose
+}
+
+sync() {
+    python3 ./repo sync -c --no-clone-bundle "${1}"
 }
 
 main "${@}"
diff --git a/docker/android.cmake b/docker/android.cmake
new file mode 100644
index 000000000..9403b8c93
--- /dev/null
+++ b/docker/android.cmake
@@ -0,0 +1,25 @@
+# toolchain file for android targets, see #1110
+
+set(CMAKE_SYSTEM_NAME "$ENV{CROSS_CMAKE_SYSTEM_NAME}")
+set(CMAKE_SYSTEM_PROCESSOR "$ENV{CROSS_CMAKE_SYSTEM_PROCESSOR}")
+set(CMAKE_ANDROID_STANDALONE_TOOLCHAIN /android-ndk)
+set(CMAKE_ANDROID_API "$ENV{CROSS_ANDROID_SDK}")
+if(DEFINED ENV{CROSS_TARGET_RUNNER})
+    set(runner "$ENV{CROSS_TARGET_RUNNER}")
+    separate_arguments(runner)
+    set(CMAKE_CROSSCOMPILING_EMULATOR ${runner})
+endif()
+
+# these are cached so any build performed outside of the rust
+# build system, such as a
third-party cmake build and install of a shared
+# library, will still work. however, cmake-rs can override these values
+if(DEFINED ENV{CROSS_CMAKE_OBJECT_FLAGS})
+    set(CMAKE_C_FLAGS "$ENV{CROSS_CMAKE_OBJECT_FLAGS}" CACHE STRING "C Compiler options")
+    set(CMAKE_CXX_FLAGS "$ENV{CROSS_CMAKE_OBJECT_FLAGS}" CACHE STRING "C++ Compiler options")
+    set(CMAKE_ASM_FLAGS "$ENV{CROSS_CMAKE_OBJECT_FLAGS}" CACHE STRING "ASM Compiler options")
+endif()
+
+set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY BOTH)
+set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)
+set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE BOTH)
diff --git a/docker/android/README.md b/docker/android/README.md
new file mode 100644
index 000000000..f433c2ad7
--- /dev/null
+++ b/docker/android/README.md
@@ -0,0 +1,4 @@
+android
+=======
+
+Utilities for working with the Android project, particularly for modifying the Android build system. These utilities edit both Soong and Make build files to remove unittests, which speeds up builds and minimizes the number of dependencies.
diff --git a/docker/android/android/__init__.py b/docker/android/android/__init__.py
new file mode 100644
index 000000000..4b7224d20
--- /dev/null
+++ b/docker/android/android/__init__.py
@@ -0,0 +1,15 @@
+import sys
+
+# we run this script once every build, and we'd rather
+# have much smaller image sizes, so copying without
+# any bytecode is a better idea.
+sys.dont_write_bytecode = True
+
+__version__ = '0.0.0-dev.0'
+__version_info__ = (0, 0, 0, 'dev.0')
+__license__ = 'MIT OR Apache-2.0'
+
+__all__ = [
+    "make",
+    "soong",
+]
diff --git a/docker/android/android/make.py b/docker/android/android/make.py
new file mode 100644
index 000000000..428a51af6
--- /dev/null
+++ b/docker/android/android/make.py
@@ -0,0 +1,513 @@
+'''
+    make
+    ====
+
+    utilities to process makefiles. this parser is neither sophisticated
+    nor correct, but it tries to avoid a few common pitfalls when
+    handling conditional blocks: it first separates all conditional
+    blocks into sections, and then parses comment blocks within those
+    sections.
+
+    valid conditional directives are:
+    - ifeq
+    - ifneq
+    - ifdef
+    - ifndef
+    - else
+    - endif
+
+    makefiles are whitespace-sensitive, but not with leading whitespace
+    for conditional directives. for example, this is valid (replacing the
+    spaces with tabs):
+
+    # ---------------
+    # Section 1.
+    # ---------------
+    ifneq ($(USE_A),)
+        # -----------
+        # Section 2.
+        # -----------
+        ifneq ($(USE_B),)
+            SOURCES=b.cc
+        else
+            SOURCES=a.cc
+        endif
+    else
+        SOURCES=c.cc
+    endif
+
+    our goals are fairly different from a regular parser: we want to detect
+    and excise sections based on the comments, while ensuring that we do
+    not produce invalid output. other than unbalanced conditional directives,
+    we do not care about the actual contents.
+
+    for this, we use a 3-step parsing approach:
+    1. break up the document into blocks separated by directives
+        - each block can be a regular or directive block
+        - directive blocks have a start and end directive as well as contents
+        - directives can be infinitely nested: the contents can also be a list
+    2. break each text block based on comment sections
+    3. group blocks within comment sections
+
+    for example, in the above, we want the entire makefile to be inside the
+    section 1 comment block, so removing it would remove that whole tree.
+    similarly, the inner directive block should be inside the section 2
+    comment block.
we would therefore produce something like this: + + CommentBlock: Section 1 + Directive Block: + start=ifneq ($(USE_A),) + end=endif + children: + CommentBlock: Section 2 + Directive Block: + start=ifneq ($(USE_B),) + end=endif + children: + Block: `SOURCES=b.cc\nelse\nSOURCES=a.cc` + Block: `else\nSOURCES=c.cc` +''' + +import re2 as re + +from . import util + + +def loads(contents, *_, **__): + return Makefile.loads(contents) + + +def load(fp, *_, **__): + return Makefile.load(fp) + + +def dumps(makefile, *_, **__): + return makefile.dumps() + + +def dump(makefile, fp, *_, **__): + return makefile.dump(fp) + + +class Makefile(list): + @staticmethod + def loads(contents, *_, **__): + directives = _split_directives(iter(contents.splitlines()))[0] + blocks = directives.split_comments() + blocks.group_comments() + + return Makefile(blocks) + + @staticmethod + def load(fp, *_, **__): + return Makefile.loads(fp.read()) + + def dumps(self, *_, **__): + return str(self) + + def dump(self, fp, *_, **__): + fp.write(self.dumps() + '\n') + + def filter(self, op): + return _filter_list(self, op) + + def recurse(self, max_depth=-1, depth=0): + yield from _recurse_list(self, max_depth, depth) + + def __repr__(self): + return f'Makefile({str(self)})' + + def __str__(self): + return '\n'.join([str(i) for i in self]) + + +class Node: + def is_block(self): + return False + + def is_block_list(self): + return False + + def is_comment(self): + return False + + def is_directive(self): + return False + + def is_test(self): + return False + + def is_benchmark(self): + return False + + def is_dev(self): + return self.is_test() or self.is_benchmark() + + def has_block_list(self): + return False + + def filter(self, op): + raise NotImplementedError + + def recurse(self, max_depth=-1, depth=0): + raise NotImplementedError + + +class Block(str, Node): + @property + def child(self): + return str(self) + + def __repr__(self): + return f'Block({str(self)})' + + def __str__(self): + return super().__str__() + + def is_block(self): + return True + + def split_comments(self): + return _split_comments(str(self)) + + def group_comments(self): + pass + + def filter(self, op): + return op(self) + + +class BlockList(list, Node): + def __init__(self, *args, **kwds): + super().__init__(*args, **kwds) + assert all([isinstance(i, Node) for i in self]) + + @property + def child(self): + return self + + def __repr__(self): + return f'BlockList({str(self)})' + + def __str__(self): + return '\n'.join([str(i) for i in self]) + + def is_block_list(self): + return True + + def split_comments(self): + return BlockList(util.flatten([i.split_comments() for i in self])) + + def group_comments(self): + self[:] = _group_comments(self) + + def filter(self, op): + return _filter_list(self, op) + + def recurse(self, max_depth=-1, depth=0): + yield from _recurse_list(self, max_depth, depth) + + +class CommentBlock(Node): + # the child is either a Block or BlockList + def __init__(self, comment, title, child): + assert isinstance(child, Node) + + self.comment = comment + self.title = title + self.child = child + + def __eq__(self, other): + return (self.comment, self.title, self.child) == (other.comment, other.title, other.child) + + def __repr__(self): + return f'CommentBlock({str(self)})' + + def __str__(self): + return f'{self.comment}\n{str(self.child)}' + + def is_comment(self): + return True + + def is_test(self): + return self.title is not None and util.is_test(self.title) + + def is_benchmark(self): + return self.title is not None and 
util.is_benchmark(self.title) + + def has_block_list(self): + return self.child.is_block_list() + + def split_comments(self): + raise NotImplementedError('cannot split comments in split comment block') + + def group_comments(self): + raise NotImplementedError('grouping comments should be done outside a comment block') + + def flatten_single(self): + if isinstance(self.child, list) and len(self.child) == 1: + self.child = self.child[0] + + def filter(self, op): + return op(self) and self.child.filter(op) + + +class DirectiveBlock(Node): + # the child is either a Block or BlockList + def __init__(self, start, end, child): + assert isinstance(child, Node) + if isinstance(child, list) and len(child) == 1: + child = child[0] + + self.start = start + self.end = end + self.child = child + + def __eq__(self, other): + return (self.start, self.end, self.child) == (other.start, other.end, other.child) + + def __repr__(self): + return f'DirectiveBlock({str(self)})' + + def __str__(self): + result = f'{self.start}\n{str(self.child)}' + if self.end is not None: + result += f'\n{self.end}' + return result + + def is_directive(self): + return True + + def has_block_list(self): + return self.child.is_block_list() + + def split_comments(self): + child = self.child.split_comments() + # every caller expects a list, so we return a single-element list + return BlockList([DirectiveBlock(self.start, self.end, child)]) + + def group_comments(self): + self.child.group_comments() + self.flatten_single() + + def flatten_single(self): + if isinstance(self.child, list) and len(self.child) == 1: + self.child = self.child[0] + + def filter(self, op): + return op(self) and self.child.filter(op) + + +# split on comment sections, for example the below will split on the +# benchmarks section. +# +# LOCAL_PATH := $(call my-dir) +# +# # ----------------------------------------------------------------------------- +# # Benchmarks. +# # ----------------------------------------------------------------------------- +# +# test_tags := tests +def _split_comments(contents): + def new_comment(match, nxt=None): + comment = match.group(1) + groups = match.groups()[1:] + lines = [i for i in groups if i is not None] + title = '\n'.join([re.sub(r'[ \t]*#[ \t]*', '', i) for i in lines]) + if nxt is None: + data = contents[match.end():] + else: + data = contents[match.end():nxt.start()] + if nxt is not None: + assert data.endswith('\n') + data = data[:-1] + return CommentBlock(comment, title, Block(data)) + + # if we just have 1 or 2 characters, can falsely match. + # headers can be `# -----`, `# ======`, or `########`. + # the title can be prefixed, suffixed, or sandwiched by the header. + def title_pattern(): + line = fr'{sp}*#{sp}*{comment}' + return fr'(?:(?:{line}{nl})*{line})' + + def sandwich_pattern(sep): + # matches header-title-header + title = title_pattern() + return fr'{sp}*{sep}{nl}({title}){nl}{sp}*{sep}' + + def suffix_pattern(sep): + # matches title-header + title = title_pattern() + return fr'({title}){nl}{sp}*{sep}' + + def prefix_pattern(sep): + # matches header-title, needs to be last due to greedy regex + title = title_pattern() + return fr'{sp}*{sep}{nl}({title})' + + def sep_pattern(sep): + sandwich = sandwich_pattern(sep) + suffix = suffix_pattern(sep) + prefix = prefix_pattern(sep) + return fr'(?:{sandwich})|(?:{prefix})|(?:{suffix})' + + def create_pattern(*seps): + groups = [] + for sep in seps: + groups.append(fr'(?:{sep_pattern(sep)})') + return fr'(?m)^({"|".join(groups)}){nl}?' 
+    sep1 = r'#\s+={5,}'
+    sep2 = r'#\s+-{5,}'
+    sep3 = r'#{6,}'
+    sp = r'[ \t]'
+    nl = r'(?:\r\n|\r|\n)'
+    # can have empty headers, such as `#####\n#`
+    comment = r'[^\x00-\x08\x0A-\x1F]*'
+    pattern = create_pattern(sep1, sep2, sep3)
+
+    blocks = BlockList()
+    if not contents:
+        return blocks
+
+    matches = list(re.finditer(pattern, contents))
+    if len(matches) == 0:
+        blocks.append(Block(contents))
+    else:
+        first = matches[0]
+        last = matches[-1]
+        if first.start() != 0:
+            assert contents[first.start() - 1] == '\n'
+            blocks.append(Block(contents[:first.start() - 1]))
+        for (match, nxt) in util.windows(matches, 2):
+            blocks.append(new_comment(match, nxt))
+        blocks.append(new_comment(last))
+
+    return blocks
+
+
+# lines is an iterable over each line in the content. this splits content
+# like the example below into a start token of `ifneq ($(ENV2),)`, an end
+# of `endif`, and the internal contents as a `Block`.
+#
+# ifneq ($(ENV2),)
+# benchmark_src_files += bench1.cc
+# else
+# benchmark_src_files += bench2.cc
+# endif
+def _split_directives(lines, in_scope=False):
+    def add_current(blocks, current):
+        if current:
+            blocks.append(Block('\n'.join(current)))
+
+    # we ignore else since removing it won't actually affect the code
+    start_directives = ('ifeq', 'ifneq', 'ifdef', 'ifndef')
+    end_directives = ('endif',)
+
+    blocks = BlockList()
+    current = []
+    for line in lines:
+        trimmed = line.lstrip()
+        if trimmed.startswith(start_directives):
+            start = line
+            add_current(blocks, current)
+            child, end = _split_directives(lines, True)
+            directive = DirectiveBlock(start, end, child)
+            directive.flatten_single()
+            blocks.append(directive)
+            current = []
+        elif in_scope and trimmed.startswith(end_directives):
+            end = line
+            add_current(blocks, current)
+            return blocks, end
+        else:
+            current.append(line)
+
+    add_current(blocks, current)
+
+    return blocks, None
+
+
+# this groups directives and comments so any directives within a
+# comment block are properly grouped. say i have the following:
+#
+# LOCAL_PATH := $(call my-dir)
+#
+# # -----------------------------------------------------------------------------
+# # Section 1.
+# # -----------------------------------------------------------------------------
+# LOCAL_SRC_FILES := src.c
+# ifneq ($(ENV2),)
+# benchmark_src_files += bench1.cc
+# else
+# benchmark_src_files += bench2.cc
+# endif
+#
+# # -----------------------------------------------------------------------------
+# # Section 2.
+# # -----------------------------------------------------------------------------
+# LOCAL_CFLAGS := $(test_c_flags)
+#
+# normally, we'd have 5 sections: block, comment, directive, block, comment.
+# however, we want to group it into block, comment, comment, where the directive
+# and subsequent block are in the comment.
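+#
+# as a sketch (contents abbreviated), the grouped result for the example
+# above would be:
+#
+#   Block: `LOCAL_PATH := $(call my-dir)`
+#   CommentBlock: Section 1
+#       children: Block(`LOCAL_SRC_FILES := src.c`), DirectiveBlock(ifneq/endif)
+#   CommentBlock: Section 2
+#       children: Block(`LOCAL_CFLAGS := $(test_c_flags)`)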
+def _group_comments(blocks):
+    def add_current(result, current):
+        if isinstance(current.child, list) and len(current.child) == 1:
+            current.child = current.child[0]
+        result.append(current)
+
+    def new_comment(block):
+        current = CommentBlock(block.comment, block.title, BlockList())
+        if block.child:
+            current.child.append(block.child)
+        return current
+
+    result = BlockList()
+    current = BlockList()
+    for block in blocks:
+        # any comments cannot have been grouped already, so we assume str values
+        assert not block.is_comment() or isinstance(block.child, str)
+        assert not block.is_block_list()
+        if not block.is_comment():
+            block.group_comments()
+
+        if current.is_comment() and block.is_comment():
+            # new comment replaces the old one
+            current.flatten_single()
+            result.append(current)
+            current = new_comment(block)
+        elif block.is_comment():
+            # first comment block seen in the file
+            result += current
+            current = new_comment(block)
+        elif current.is_comment():
+            # regular block after a comment block
+            current.child.append(block)
+        else:
+            # regular block before any comment blocks
+            current.append(block)
+
+    if current.is_comment():
+        current.flatten_single()
+        result.append(current)
+    else:
+        result += current
+
+    return result
+
+
+# retain all items matching the condition in a list
+def _filter_list(lst, op):
+    # use slice assignment to ensure this happens in-place
+    lst[:] = [i for i in lst if i.filter(op)]
+    return lst
+
+
+# yield iteratively all child blocks
+def _recurse_list(lst, max_depth=-1, depth=0):
+    if depth != max_depth:
+        for node in lst:
+            yield node
+            if node.has_block_list():
+                yield from node.child.recurse(max_depth, depth + 1)
diff --git a/docker/android/android/soong.py b/docker/android/android/soong.py
new file mode 100644
index 000000000..0788209a9
--- /dev/null
+++ b/docker/android/android/soong.py
@@ -0,0 +1,654 @@
+'''
+    soong
+    =====
+
+    utilities to process soong blueprint files. these are a go-like,
+    json-like data file format. they support nested maps, lists,
+    bools, strings, and the use of variables. for example:
+
+    array = ["..."]
+    cc_defaults {
+        name: "target",
+        options: array,
+        flags: ["..."],
+    }
+    cc_test {
+        name: "test",
+        defaults: ["target"],
+        srcs: ["test.cc"],
+        nested: {
+            array: {
+                option: false,
+            },
+        },
+    }
+
+    the specification can be found below:
+    https://source.android.com/docs/core/tests/development/blueprints
+    https://android.googlesource.com/platform/build/soong/+/refs/heads/master/README.md
+
+    they also support single-line C++-style and multiline C-style comments.
+    the valid types are:
+    - bool (`true`, `false`)
+    - int
+    - string
+    - list (of strings)
+    - map
+
+    both lists and maps support optional trailing commas. any value type
+    can be present in a map, while only strings are allowed in lists.
+    integers, strings, arrays, and maps also support the `+` operator,
+    where `+` sums up integers. for strings and arrays, it appends the new
+    data. for maps, it produces the union of both keys, and for keys present
+    in both, it appends the value of the right operand to the value of the
+    left one.
+
+    variable assignment produces immutable types, except for the `+=` operator.
+    `+=` performs the operation described above in-place.
+
+    this parser doesn't need to be exactly correct: it does not need to reject
+    subtly invalid input. for example, `name = { }` may or may not be correct,
+    but it's fine to accept it as long as we output it identically.
this is
+    supposed to handle all correct input and output it as correct output:
+    it doesn't need to validate type correctness.
+
+    this uses LALR parsing since it makes the grammar very easy to define and
+    the parsing simple. since the build step and repository synchronization
+    are much slower, the performance here is practically irrelevant.
+'''
+
+import json
+import sys
+
+import sly
+
+from . import util
+
+# dictionaries got insertion order in 3.6, guaranteed in 3.7
+assert sys.version_info >= (3, 6)
+
+# base character defs
+_H = r'[0-9a-f]'
+_NL = r'\n|\r\n|\r|\f'
+_UNICODE = fr'\\{_H}{{1,6}}(\r\n|[ \t\r\n\f])?'
+_ESCAPE = fr'{_UNICODE}|\\[^\r\n\f0-9a-f]'
+_SINGLELINE_COMMENT = r'\/\/.*'
+# can't use reflags without setting them for all, so do manual dotall
+_MULTILINE_COMMENT = r'\/\*[\u0000-\U0010FFFF]*?\*\/'
+_COMMENT = fr'(?:{_SINGLELINE_COMMENT})|(?:{_MULTILINE_COMMENT})'
+
+
+def loads(contents, *_, **__):
+    return Ast.loads(contents)
+
+
+def load(fp, *_, **__):
+    return Ast.load(fp)
+
+
+def dumps(soong, pretty=True, indent=4, *_, **__):
+    return soong.dumps(pretty, indent)
+
+
+def dump(soong, fp, pretty=True, indent=4, *_, **__):
+    return soong.dump(fp, pretty, indent)
+
+
+class Lexer(sly.Lexer):
+    tokens = {
+        BOOL,
+        INTEGER,
+        IDENT,
+        STRING,
+        LBRACKET,
+        RBRACKET,
+        LBRACE,
+        RBRACE,
+        COLON,
+        COMMA,
+        EQUALS,
+        PLUS,
+    }
+    ignore = ' \t'
+    ignore_comment = _COMMENT
+
+    # Tokens
+    # this uses a string regex based on the CSS2.1 grammar
+    STRING = fr'"([^\n\r\f\\"]|\\{_NL}|{_ESCAPE})*"'
+    INTEGER = r'\d+'
+    BOOL = '(?:true)|(?:false)'
+    IDENT = r'[a-zA-Z_][a-zA-Z0-9_]*'
+    LBRACKET = r'\['
+    RBRACKET = r'\]'
+    LBRACE = r'\{'
+    RBRACE = r'\}'
+    COLON = r':'
+    COMMA = r','
+    EQUALS = r'='
+    PLUS = r'\+'
+
+    @_(r'\n+')
+    def newline(self, token):
+        self.lineno += token.value.count('\n')
+
+    def error(self, token):
+        raise ValueError(f'Illegal character \'{token.value[0]}\'')
+
+
+class Parser(sly.Parser):
+    tokens = Lexer.tokens
+
+    precedence = (
+        ('left', PLUS),
+    )
+
+    @_('rules')
+    def ast(self, prod):
+        return Ast(prod.rules)
+
+    @_('empty')
+    def ast(self, prod):
+        return Ast()
+
+    @_('rules rule')
+    def rules(self, prod):
+        return prod.rules + [prod.rule]
+
+    @_('rule')
+    def rules(self, prod):
+        return [prod.rule]
+
+    @_('assignment', 'binary_operator_assignment', 'scope')
+    def rule(self, prod):
+        return prod[0]
+
+    @_('ident EQUALS expr')
+    def assignment(self, prod):
+        return Assignment(prod.ident, prod.expr)
+
+    @_('ident PLUS EQUALS expr')
+    def binary_operator_assignment(self, prod):
+        return BinaryOperatorAssignment(
+            prod.ident,
+            f'{prod[1]}{prod[2]}',
+            prod.expr,
+        )
+
+    @_('expr PLUS expr')
+    def binary_operator(self, prod):
+        return BinaryOperator(prod[0], prod[1], prod[2])
+
+    @_('ident map')
+    def scope(self, prod):
+        return Scope(prod.ident, prod.map)
+
+    @_('LBRACE pairs RBRACE', 'LBRACE pairs COMMA RBRACE')
+    def map(self, prod):
+        return Map(prod.pairs)
+
+    @_('LBRACE RBRACE')
+    def map(self, prod):
+        return Map()
+
+    @_('pairs COMMA pair')
+    def pairs(self, prod):
+        return prod.pairs + [prod.pair]
+
+    @_('pair')
+    def pairs(self, prod):
+        return [prod.pair]
+
+    @_('ident COLON expr', 'ident EQUALS expr')
+    def pair(self, prod):
+        return (prod.ident, MapValue(prod[1], prod.expr))
+
+    @_('ident', 'binary_operator', 'map', 'list', 'string', 'integer', 'bool')
+    def expr(self, prod):
+        return prod[0]
+
+    @_('LBRACKET sequence RBRACKET', 'LBRACKET sequence COMMA RBRACKET')
+    def list(self, prod):
+        return List(prod.sequence)
+
+    @_('LBRACKET
RBRACKET') + def list(self, prod): + return List() + + @_('sequence COMMA list_item') + def sequence(self, prod): + return prod.sequence + [prod.list_item] + + @_('list_item') + def sequence(self, prod): + return [prod.list_item] + + @_('list_item PLUS list_item') + def list_item(self, prod): + return BinaryOperator(prod[0], '+', prod[2]) + + @_('string', 'ident', 'map') + def list_item(self, prod): + return prod[0] + + @_('IDENT') + def ident(self, prod): + return Ident(prod.IDENT) + + @_('STRING') + def string(self, prod): + return String(prod.STRING) + + @_('INTEGER') + def integer(self, prod): + return Integer(prod.INTEGER) + + @_('BOOL') + def bool(self, prod): + return Bool(json.loads(prod.BOOL)) + + # needed in case no tokens are produced + @_('') + def empty(self, p): + pass + + def error(self, token): + raise ValueError(f'Illegal token {repr(token)}') + + +class Node: + def is_assignment(self): + return False + + def is_binary_operator_assignment(self): + return False + + def is_binary_operator(self): + return False + + def is_scope(self): + return False + + def is_map(self): + return False + + def is_list(self): + return False + + def is_map_value(self): + return False + + def is_ident(self): + return False + + def is_string(self): + return False + + def is_integer(self): + return False + + def is_bool(self): + return False + + +class Ast(list, Node): + def __init__(self, values=None): + if values is None: + values = [] + valid_nodes = (Assignment, BinaryOperatorAssignment, Scope) + assert all(isinstance(i, valid_nodes) for i in values) + super().__init__(values) + + def __repr__(self): + return f'Ast({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + assert depth == 0 + return '\n'.join([i.to_str(pretty, indent, depth) for i in self]) + + @staticmethod + def loads(contents, *_, **__): + lexer = Lexer() + tokens = lexer.tokenize(contents) + parser = Parser() + return parser.parse(tokens) + + @staticmethod + def load(fp, *_, **__): + return Ast.loads(fp.read()) + + def dumps(self, pretty=True, indent=4, *_, **__): + return self.to_str(pretty, indent) + + def dump(self, fp, pretty=True, indent=4, *_, **__): + # always write a trailing newline + fp.write(self.dumps(pretty, indent) + '\n') + + def filter(self, op): + # use slice assignment to ensure this happens in-place + self[:] = [i for i in self if op(i)] + + +class Assignment(Node): + def __init__(self, name, expr): + self.name = name + self.expr = expr + + def __repr__(self): + return f'Assignment({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + return f'{str(self.name)} = {self.expr.to_str(pretty, indent, depth)}' + + def is_assignment(self): + return True + + def __eq__(self, other): + return (self.name, self.expr) == (other.name, other.expr) + + +class BinaryOperatorAssignment(Node): + def __init__(self, name, op, expr): + self.name = name + self.op = op + self.expr = expr + + def __repr__(self): + return f'BinaryOperatorAssignment({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + expr = self.expr.to_str(pretty, indent, depth) + return f'{str(self.name)} {self.op} {expr}' + + def is_binary_operator_assignment(self): + return True + + def __eq__(self, other): + return (self.name, self.op, self.expr) == (other.name, other.op, other.expr) + + +class BinaryOperator(Node): + def __init__(self, lhs, 
op, rhs): + self.lhs = lhs + self.op = op + self.rhs = rhs + + def __repr__(self): + return f'BinaryOperator({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + lhs = self.lhs.to_str(pretty, indent, depth) + rhs = self.rhs.to_str(pretty, indent, depth) + return f'{lhs} {self.op} {rhs}' + + def is_binary_operator(self): + return True + + def str_op(self, cmp): + return ( + (self.lhs.is_string() and self.lhs.str_op(cmp)) + or (self.rhs.is_string() and self.rhs.str_op(cmp)) + ) + + def __eq__(self, other): + return (self.lhs, self.op, self.rhs) == (other.lhs, other.op, other.rhs) + + +class Scope(Node): + def __init__(self, name, map): + self.name = name + self.map = map + + def __repr__(self): + return f'Scope({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + return f'{str(self.name)} {self.map.to_str(pretty, indent, depth)}' + + def is_scope(self): + return True + + def __eq__(self, other): + return (self.name, self.map) == (other.name, other.map) + + def is_art_check(self): + return 'art-check' in self.name.lower() or self.map.is_art_check() + + def is_test(self): + return util.is_test(self.name) or self.map.is_test() + + def is_benchmark(self): + return util.is_benchmark(self.name) or self.map.is_benchmark() + + def is_dev(self): + return self.is_art_check() or self.is_test() or self.is_benchmark() + + +class Map(dict, Node): + def __repr__(self): + return f'Map({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + fmt = lambda x: x.to_str(pretty, indent, depth + 1) + result = '{' + pairs = [f'{fmt(k)}{fmt(v)}' for k, v in self.items()] + if len(self) == 0: + result += '}' + elif pretty: + result += '\n' + for pair in pairs: + result += _indent(indent, depth + 1) + f'{pair},\n' + result += _indent(indent, depth) + '}' + else: + result += ', '.join(pairs) + '}' + + return result + + def is_map(self): + return True + + def is_art_check(self): + name = self.get('name') + if name is None: + return False + return 'art-check' in name.value.lower() + + def is_test(self): + name = self.get('name') + if name is None: + return False + # cannot remove `py2-c-module-_ctypes_test` type tests, + # since they're needed to be linked in the final binary. 
+ lower = name.value.lower() + return util.is_test(lower) and 'py2-c-module' not in lower + + def is_benchmark(self): + name = self.get('name') + if name is None: + return False + return util.is_benchmark(name.value) + + def is_dev(self): + return self.is_test() or self.is_benchmark() + + def filter(self, op): + filtered = {k: v for k, v in self.items() if op(k, v)} + self.clear() + self.update(filtered) + + def recurse(self, max_depth=-1, depth=0): + # recursively find all key/value pairs the current and any submaps + if depth != max_depth: + for key, value in self.items(): + yield (key, value, depth + 1, self) + if value.value.is_map(): + yield from value.value.recurse(max_depth, depth + 1) + + +class List(list, Node): + def __repr__(self): + return f'List({str(self)})' + + def __str__(self): + return self.to_str(pretty=False) + + def to_str(self, pretty=True, indent=4, depth=0): + def fmt(x): + if x.is_map(): + return x.to_str(pretty, indent, depth) + return x.to_str(pretty, indent, depth + 1) + result = '[' + if len(self) <= 1 or not pretty: + result += ', '.join([fmt(i) for i in self]) + ']' + else: + result += '\n' + for element in self: + result += _indent(indent, depth + 1) + f'{fmt(element)},\n' + result += _indent(indent, depth) + ']' + + return result + + def is_list(self): + return True + + def filter(self, op): + # use slice assignment to ensure this happens in-place + self[:] = [i for i in self if op(i)] + + +class MapValue(Node): + def __init__(self, delimiter, value): + # map key/value separators can be `:` or `=`. + assert delimiter in (':', '=') + self.delimiter = delimiter + self.value = value + + def __repr__(self): + return f'MapValue({str(self)})' + + def __str__(self): + return self.to_str(False) + + def __eq__(self, other): + # delimiter doesn't matter for equality comparison + if isinstance(other, MapValue): + return self.value == other.value + return self.value == other + + def __len__(self): + return len(self.value) + + def to_str(self, pretty=True, indent=4, depth=0): + value = self.value.to_str(pretty, indent, depth) + if self.delimiter == '=': + return f' = {value}' + return f': {value}' + + def str_op(self, cmp): + return self.value.str_op(cmp) + + def is_map_value(self): + return True + + def filter(self, op): + self.value.filter(op) + + +class Ident(str, Node): + def __repr__(self): + return f'Ident({str(self)})' + + def __str__(self): + return super().__str__() + + def to_str(self, *_, **__): + return str(self) + + def is_ident(self): + return True + + +class String(str, Node): + def __repr__(self): + return f'String({self.to_str()})' + + def to_str(self, *_, **__): + return f'{super().__str__()}' + + def str_op(self, cmp): + return cmp(self) + + def __str__(self): + # `"target"` should be shown as `'target'`, not `'"target"'` + return super().__str__()[1:-1] + + def __eq__(self, other): + if type(other) is String: + return str(self) == str(other) + # we want to be compare equal to the string's value + return str(self) == other + + def __ne__(self, other): + # need to override `__ne__` which normally uses a pyslot + return not self.__eq__(other) + + def is_string(self): + return True + + +class Integer(int, Node): + def __repr__(self): + return f'Integer({str(self)})' + + def __str__(self): + return str(int(self)) + + def to_str(self, *_, **__): + return str(self) + + def is_integer(self): + return True + + +class Bool(Node): + def __init__(self, value=False): + self.value = value + + def __bool__(self): + return self.value + + def __repr__(self): + return 
f'Bool({json.dumps(self.value)})'
+
+    def __str__(self):
+        return json.dumps(self.value)
+
+    def to_str(self, *_, **__):
+        return str(self)
+
+    def is_bool(self):
+        return True
+
+    def __eq__(self, other):
+        return self.value == other.value
+
+
+def _indent(indent=4, depth=0, char=' '):
+    return char * indent * depth
diff --git a/docker/android/android/util.py b/docker/android/android/util.py
new file mode 100644
index 000000000..143810c17
--- /dev/null
+++ b/docker/android/android/util.py
@@ -0,0 +1,28 @@
+import re2 as re
+
+
+def windows(sequence, count):
+    for i in range(len(sequence) - count + 1):
+        yield sequence[i:i + count]
+
+
+def flatten(lst):
+    return [i for sublist in lst for i in sublist]
+
+
+def _is_match(pattern, string):
+    return re.search(pattern, string) is not None
+
+
+def is_test(string):
+    # need to consider that words like `latest` exist.
+    # also need to consider `non-test` for `fmtlib`.
+    if 'non-test' in string.lower():
+        return False
+    pattern = r'(?i)(?:^|[^A-Za-z0-9]|g)test'
+    return _is_match(pattern, string)
+
+
+def is_benchmark(string):
+    pattern = r'(?i)(?:^|[^A-Za-z0-9])benchmark'
+    return _is_match(pattern, string)
diff --git a/docker/android/pyproject.toml b/docker/android/pyproject.toml
new file mode 100644
index 000000000..a60ed4bc5
--- /dev/null
+++ b/docker/android/pyproject.toml
@@ -0,0 +1,12 @@
+[project]
+name = "android"
+version = "0.0.0-dev.0"
+license = { text = "MIT OR Apache-2.0" }
+dependencies = ["sly==0.4", "google-re2==1.0"]
+
+[build-system]
+requires = [
+    "setuptools >= 35.0.2",
+    "setuptools_scm >= 2.0.0, <3"
+]
+build-backend = "setuptools.build_meta"
diff --git a/docker/android/scripts/build-system.py b/docker/android/scripts/build-system.py
new file mode 100644
index 000000000..1a79ab311
--- /dev/null
+++ b/docker/android/scripts/build-system.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+'''
+    Remove most unittests from Android soong blueprint
+    files, most of which are identified via a `cc_test*`
+    scope identifier, as well as some additional `subdirs`
+    identifiers and Makefile specifiers.
+
+    This also allows you to back up and restore these build files.
+    The build files are automatically backed up by default.
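+
+    For reference, the image build scripts invoke this as:
+
+        python3 scripts/build-system.py --remove-tests --verbose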
+'''
+
+import argparse
+import glob
+import os
+import shutil
+import subprocess
+import sys
+
+SCRIPTS_DIR = os.path.dirname(os.path.realpath(__file__))
+PROJECT_DIR = os.path.dirname(SCRIPTS_DIR)
+sys.path.insert(0, PROJECT_DIR)
+
+import android
+import android.make
+import android.soong
+
+
+def print_verbose(message, verbose):
+    if verbose:
+        print(message)
+
+
+def backup(src, args, *_):
+    dst = src + '.bak'
+    print_verbose(f'creating backup of file "{src}" at "{dst}"', args.verbose)
+    shutil.copy2(src, dst)
+
+
+def restore(dst, args, *_):
+    src = dst + '.bak'
+    if os.path.exists(src):
+        print_verbose(f'restoring from backup "{src}" to "{dst}"', args.verbose)
+        shutil.copy2(src, dst)
+
+
+def filter_map(map, remove):
+    keys = list(map)
+    for key in keys:
+        if not item_op(map[key].value, remove):
+            del map[key]
+    return True
+
+
+def filter_list(lst, remove):
+    lst.filter(lambda x: item_op(x, remove))
+    return True
+
+
+def item_op(item, remove):
+    if item.is_map():
+        return filter_map(item, remove)
+    elif item.is_list():
+        return filter_list(item, remove)
+    elif item.is_string() or item.is_binary_operator():
+        return item.str_op(lambda y: not any(i in y.lower() for i in remove))
+    raise TypeError(f'got unexpected type of {type(item)}')
+
+
+def remove_soong_tests(path, args, *_):
+    print_verbose(f'removing soong tests from "{path}"', args.verbose)
+    with open(path) as file:
+        ast = android.soong.load(file)
+    # remove the test or benchmark scopes, i.e., those with `cc_test`
+    # or those with `{name: "test"}`, etc.
+    ast.filter(lambda x: not (x.is_scope() and x.is_dev()))
+    # need to remove test and benchmark subdirs
+    test_names = ('test', 'benchmark')
+    subdirs = [i for i in ast if i.name == 'subdirs']
+    for sub in subdirs:
+        assert type(sub.expr) is android.soong.List
+        filter_list(sub.expr, test_names)
+    # remove gtest dependencies from regular targets.
+    for node in ast:
+        map = None
+        if not node.is_scope() and not node.expr.is_map():
+            continue
+        if node.is_scope():
+            map = node.map
+        else:
+            map = node.expr
+        test_names = ('libgtest', 'test-proto', 'starlarktest')
+        for key, value, *_ in map.recurse():
+            if value.value.is_list():
+                if key == 'testSrcs':
+                    value.value.clear()
+                else:
+                    filter_list(value, test_names)
+
+    with open(path, 'w') as file:
+        ast.dump(file)
+
+
+def remove_makefile_tests(path, args, *_):
+    print_verbose(f'removing makefile tests from "{path}"', args.verbose)
+    with open(path) as file:
+        makefile = android.make.load(file)
+    makefile.filter(lambda x: not x.is_dev())
+    with open(path, 'w') as file:
+        makefile.dump(file)
+
+
+def remove_tests(path, args, processor):
+    if os.path.exists(path + '.bak'):
+        restore(path, args)
+    elif not args.disable_backup:
+        backup(path, args)
+    processor(path, args)
+
+
+def stash(root):
+    git_glob = f'{root}/**/.git'
+    for path in glob.iglob(git_glob, recursive=True):
+        os.chdir(os.path.dirname(path))
+        subprocess.check_call(['git', 'stash'])
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    action_group = parser.add_mutually_exclusive_group(required=True)
+    action_group.add_argument(
+        '--backup',
+        help='backup build files',
+        action='store_true',
+    )
+    action_group.add_argument(
+        '--restore',
+        help='restore build files',
+        action='store_true',
+    )
+    action_group.add_argument(
+        '--remove-tests',
+        help='remove most tests from the build system.',
+        action='store_true',
+    )
+    action_group.add_argument(
+        '--stash',
+        help='stash all local changes.',
+        action='store_true',
+    )
+    parser.add_argument(
+        '--disable-backup',
+        help='disable automatic backup of build files during processing.',
+        # store_true: backups stay enabled unless this flag is passed.
+        action='store_true',
+    )
+    flags_group = parser.add_mutually_exclusive_group()
+    flags_group.add_argument(
+        '--soong-only',
+        help='only process soong build files.',
+        action='store_true',
+    )
+    flags_group.add_argument(
+        '--makefile-only',
+        help='only process makefiles.',
+        action='store_true',
+    )
+    parser.add_argument(
+        '-V',
+        '--version',
+        action='version',
+        version=android.__version__
+    )
+    parser.add_argument(
+        '-v',
+        '--verbose',
+        help='display verbose diagnostic info.',
+        action='store_true',
+    )
+    args = parser.parse_args()
+    if args.backup:
+        action = backup
+    elif args.restore:
+        action = restore
+    elif args.remove_tests:
+        action = remove_tests
+    elif args.stash:
+        action = stash
+
+    # root_dir is only available 3.10+
+    root = os.environ.get('ANDROID_ROOT')
+    if root is None:
+        root = os.getcwd()
+    if args.stash:
+        return stash(root)
+
+    if not args.makefile_only:
+        soong_glob = f'{root}/**/Android.bp'
+        for path in glob.iglob(soong_glob, recursive=True):
+            action(path, args, remove_soong_tests)
+
+    if not args.soong_only:
+        make_glob = f'{root}/**/Android.mk'
+        for path in glob.iglob(make_glob, recursive=True):
+            action(path, args, remove_makefile_tests)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/docker/android/tests/Addition.bp b/docker/android/tests/Addition.bp
new file mode 100644
index 000000000..03c2fb001
--- /dev/null
+++ b/docker/android/tests/Addition.bp
@@ -0,0 +1,62 @@
+// special file testing `+` and `+=` operators
+// this is assignment + add assignment
+list = ["value1"]
+list += ["value2"]
+number = 1
+number += 2
+string = "string"
+string += "_suffix"
+scope {
+    name: "target",
+}
+scope += {
+    name: "_suffix",
+    srcs: [
+        // sequence items just have to evaluate to strings
+        "tree.cc" + string,
+        "lib.cc",
+    ],
+}
+// this is addition
with lhs idents +lhs_sum = number + 4 +lhs_string = string + "_suffix" +lhs_list = list + ["value3"] +lhs_scope = scope + { + name: "_suffix", + cflags: [ + "-Wall", + ], +} +// this is addition with rhs idents +rhs_sum = 4 + number +rhs_string = "prefix_" + string +rhs_list = ["value0"] + list +rhs_scope = { + name: "_suffix", + cflags: [ + "-Wall", + ], +} + scope +// this is addition with both being non-idents +expr_sum = 4 + 1 +expr_string = "prefix_" + "suffix" +expr_list = ["value0"] + ["value1"] +expr_scope = {} + { + name: "target", +} +// test multiple binary ops +tri_sum = 4 + 1 + 2 +tri_string = "prefix_" + "middle" + "_suffix" +tri_list = ["value0"] + ["value1"] + ["value2"] +tri_scope = {} + { + name: "target", +} + {} +// test sequence lhs and rhs strings +home = "dir/" +test = "test.c" + +files = [ + home + "file.c", + "test/" + test, + home + test, +] diff --git a/docker/android/tests/Android.bp b/docker/android/tests/Android.bp new file mode 100644 index 000000000..c0e5cf6ea --- /dev/null +++ b/docker/android/tests/Android.bp @@ -0,0 +1,80 @@ +// sample heading comment +sample_array = [ + "value1", + "value2", +] +/** + sample + multiline + comment + */ +cc_defaults { + name: "target", + cflags: [ + "-Wall", + "-fstrict-aliasing", + ], + option: true, + tidy_checks: sample_array, + tidy_checks_as_errors: sample_array, + array: [ + "-short", + "--root='/path/to/dir'", + ], +} +cc_library_static { + name: "static_lib", + srcs: [ + "tree.cc", + "lib.cc", + ], + include_dirs: ["bionic/libc"], + export_include_dirs: ["."], +} +cc_library { + name: "lib", + srcs: [ + "tree.cc", + "lib.cc", + ], + include_dirs: ["bionic/libc"], + export_include_dirs: ["."], +} +cc_test { + name: "test", + defaults: ["target"], + srcs: ["test.cc"], + nested: { + array: { + option: false, + }, + }, +} +cc_test_host { + name: "host_test", + include_dirs: ["path/to/lib"], + compile_multilib: "64", + static_libs: [ + "libm", + "libz", + ], + host_ldlibs: [ + "-ldl", + "-lzstd", + "-l" + "z", + ], + shared_libs: [], + cflags = [ + "-Wall", + "-fstrict-aliasing", + ], +} +cc_defaults { + name: "custom", + shared_libs: ["libcustom"], + whole_static_libs: [ + "libz", + "libgtest_main", + ], + host_ldlibs: ["-lgtest"], +} diff --git a/docker/android/tests/Android.mk b/docker/android/tests/Android.mk new file mode 100644 index 000000000..3a87ec550 --- /dev/null +++ b/docker/android/tests/Android.mk @@ -0,0 +1,101 @@ +LOCAL_PATH := $(call my-dir) + +include $(CLEAR_VARS) + +LOCAL_SRC_FILES := config.c +LOCAL_MODULE := config +LOCAL_SHARED_LIBRARIES := libcutils +LOCAL_CFLAGS := -Werror + +include $(BUILD_HOST_EXECUTABLE) + +LOCAL_PATH := $(call my-dir) + +# ----------------------------------------------------------------------------- +# Benchmarks. +# ----------------------------------------------------------------------------- + +test_tags := tests + +benchmark_c_flags := \ + -Wall -Wextra \ + -Werror \ + -fno-builtin \ + +benchmark_src_files := \ + benchmark_main.cc \ + bench.cc + +# Build benchmarks. +include $(CLEAR_VARS) +LOCAL_MODULE := benchmarks +LOCAL_MODULE_TAGS := tests +LOCAL_CFLAGS += $(benchmark_c_flags) +LOCAL_SHARED_LIBRARIES += libm libdl +LOCAL_SRC_FILES := $(benchmark_src_files) + +# ----------------------------------------------------------------------------- +# Unit tests. 
+# ----------------------------------------------------------------------------- + +test_c_flags := \ + -g \ + -Wall \ + -Werror + +################################## +# test executable +LOCAL_MODULE := module +LOCAL_SRC_FILES := src.c +LOCAL_SHARED_LIBRARIES := libcutils +LOCAL_CFLAGS := $(test_c_flags) +LOCAL_MODULE_RELATIVE_PATH := config-tests + +# Unit tests. +# ========================================================= + +include $(CLEAR_VARS) +LOCAL_MODULE := init_tests +LOCAL_SRC_FILES := \ + init_parser_test.cc \ + property_service_test.cc \ + service_test.cc \ + util_test.cc \ + +################################## +# test executable +LOCAL_MODULE := module +LOCAL_SRC_FILES := src.c +LOCAL_SHARED_LIBRARIES := libcutils +LOCAL_CFLAGS := $(test_c_flags) +LOCAL_MODULE_RELATIVE_PATH := config-tests +LOCAL_SHARED_LIBRARIES += \ + libcutils \ + libbase \ + +LOCAL_STATIC_LIBRARIES := libinit +LOCAL_SANITIZE := integer +LOCAL_CLANG := true +LOCAL_CPPFLAGS := -Wall -Wextra -Werror +include $(BUILD_NATIVE_TEST) + +# Other section. +# ========================================================= +include $(call all-makefiles-under,$(LOCAL_PATH)) + +# ============================================================================= +# Unit tests. +# ============================================================================= + +test_c_flags := \ + -g \ + -Wall \ + -Werror + +################################## +# test executable +LOCAL_MODULE := mod2 +LOCAL_SRC_FILES := mod.c +LOCAL_SHARED_LIBRARIES := libcutils +LOCAL_CFLAGS := $(test_c_flags) +LOCAL_MODULE_RELATIVE_PATH := mod2-tests diff --git a/docker/android/tests/Comments.mk b/docker/android/tests/Comments.mk new file mode 100644 index 000000000..e89ae9c22 --- /dev/null +++ b/docker/android/tests/Comments.mk @@ -0,0 +1,5 @@ +# 1) sample grouping: +# - text + suffix +# - some more text (the format) +# - API and policy info +# - more API + policy info diff --git a/docker/android/tests/Empty.bp b/docker/android/tests/Empty.bp new file mode 100644 index 000000000..30e5411e8 --- /dev/null +++ b/docker/android/tests/Empty.bp @@ -0,0 +1 @@ +// this file only has comments diff --git a/docker/android/tests/Empty.mk b/docker/android/tests/Empty.mk new file mode 100644 index 000000000..8b1378917 --- /dev/null +++ b/docker/android/tests/Empty.mk @@ -0,0 +1 @@ + diff --git a/docker/android/tests/FakeTitle.mk b/docker/android/tests/FakeTitle.mk new file mode 100644 index 000000000..40b366221 --- /dev/null +++ b/docker/android/tests/FakeTitle.mk @@ -0,0 +1,3 @@ +######################################################################## +# +LOCAL_PATH := $(call my-dir) diff --git a/docker/android/tests/Grouped.mk b/docker/android/tests/Grouped.mk new file mode 100644 index 000000000..d50eebe98 --- /dev/null +++ b/docker/android/tests/Grouped.mk @@ -0,0 +1,22 @@ +LOCAL_PATH := $(call my-dir) +# ----------------------------------------------------------------------------- +# Section 1. +# ----------------------------------------------------------------------------- +LOCAL_SRC_FILES := src.c +ifneq ($(ENV1),) + # ----------------------------------------------------------------------------- + # Section 2. + # ----------------------------------------------------------------------------- + ifneq ($(ENV2),) + benchmark_src_files += bench1.cc + else + benchmark_src_files += bench2.cc + endif +else + benchmark_src_files += bench3.cc +endif + +# ----------------------------------------------------------------------------- +# Section 3. 
+# -----------------------------------------------------------------------------
+LOCAL_CFLAGS := $(test_c_flags)
diff --git a/docker/android/tests/ListMap.bp b/docker/android/tests/ListMap.bp
new file mode 100644
index 000000000..bf8449e2f
--- /dev/null
+++ b/docker/android/tests/ListMap.bp
@@ -0,0 +1,9 @@
+// this contains a list of maps
+scope {
+    key: [
+        {
+            name: "art",
+            deps: ["dependency"],
+        },
+    ],
+}
diff --git a/docker/android/tests/Multiline.mk b/docker/android/tests/Multiline.mk
new file mode 100644
index 000000000..ca0196c49
--- /dev/null
+++ b/docker/android/tests/Multiline.mk
@@ -0,0 +1,15 @@
+# this is a special makefile checking support for multiline comments
+
+LOCAL_PATH := $(call my-dir)
+
+ifneq ($(ENV1),)
+
+###########################################################
+# new rules
+# $(1): rule 1
+# $(2): rule 2
+###########################################################
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
+endif
diff --git a/docker/android/tests/Nested.mk b/docker/android/tests/Nested.mk
new file mode 100644
index 000000000..98908ea71
--- /dev/null
+++ b/docker/android/tests/Nested.mk
@@ -0,0 +1,58 @@
+# this is a special makefile checking we handle nested
+# conditionals properly, so that removing sections won't
+# cause unbalanced conditional blocks. it may still lead
+# to missing definitions, but it won't fail due to
+# unmatched if and endif directives.
+
+LOCAL_PATH := $(call my-dir)
+
+ifneq ($(ENV1),)
+
+# -----------------------------------------------------------------------------
+# Benchmarks.
+# -----------------------------------------------------------------------------
+
+test_tags := tests
+
+benchmark_c_flags := \
+    -Wall -Wextra \
+    -Werror \
+    -fno-builtin \
+
+benchmark_src_files := benchmark_main.cc
+ifneq ($(ENV2),)
+    benchmark_src_files += bench1.cc
+else
+    benchmark_src_files += bench2.cc
+endif
+
+# Build benchmarks.
+include $(CLEAR_VARS)
+LOCAL_MODULE := benchmarks
+LOCAL_MODULE_TAGS := tests
+LOCAL_CFLAGS += $(benchmark_c_flags)
+LOCAL_SHARED_LIBRARIES += libm libdl
+LOCAL_SRC_FILES := $(benchmark_src_files)
+
+endif
+
+# Other section.
+# =========================================================
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
+# =============================================================================
+# Unit tests.
+# =============================================================================
+
+test_c_flags := \
+    -g \
+    -Wall \
+    -Werror
+
+##################################
+# test executable
+LOCAL_MODULE := mod2
+LOCAL_SRC_FILES := mod.c
+LOCAL_SHARED_LIBRARIES := libcutils
+LOCAL_CFLAGS := $(test_c_flags)
+LOCAL_MODULE_RELATIVE_PATH := mod2-tests
diff --git a/docker/android/tests/NonTest.bp b/docker/android/tests/NonTest.bp
new file mode 100644
index 000000000..81a7f594f
--- /dev/null
+++ b/docker/android/tests/NonTest.bp
@@ -0,0 +1,6 @@
+cc_defaults {
+    name: "lib-non-test-defaults",
+    cflags: ["-Wall"],
+    srcs: ["src/libc.cc"],
+    min_sdk_version: "29",
+}
diff --git a/docker/android/tests/README.md b/docker/android/tests/README.md
new file mode 100644
index 000000000..1cf494adb
--- /dev/null
+++ b/docker/android/tests/README.md
@@ -0,0 +1,23 @@
+android
+=======
+
+Contains sample Soong blueprint files and Makefiles for testing the removal of unit tests from build configurations.
+
+These tests require a Python3 interpreter and are therefore not run as part of the core test suite.
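+Instead, the bundled `tox.ini` drives it; a minimal sketch of a standalone run
+(assuming your working directory is `docker/android`) is:
+
+```sh
+# run the full suite via tox, which installs the dependencies listed below
+tox
+# or, with the dependencies already installed, invoke pytest directly
+python3 -m pytest tests
+```
+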
Running the test suite requires: +- sly >= 0.4 +- google-re2 >= 1.0 +- pytest >= 7 +- toml >= 0.10 + +The module itself and the scripts only require: +- python >= 3.6 +- sly >= 0.4 +- google-re2 >= 1.0 + +google-re2 is needed to avoid backtracking regexes, which destroy performance on near-misses for section headers. The below example, if provided with 10,000 characters after the header, will likely never complete. With re2, this completes nearly instantly. + +```Makefile +######################################################################## +# +.... +``` diff --git a/docker/android/tests/Single.mk b/docker/android/tests/Single.mk new file mode 100644 index 000000000..e2e303279 --- /dev/null +++ b/docker/android/tests/Single.mk @@ -0,0 +1,22 @@ +# this is a special makefile without any blocks + +LOCAL_PATH := $(call my-dir) + +test_tags := tests + +benchmark_c_flags := \ + -Wall -Wextra \ + -Werror \ + -fno-builtin \ + +benchmark_src_files := \ + benchmark_main.cc \ + bench.cc + +# Build benchmarks. +include $(CLEAR_VARS) +LOCAL_MODULE := benchmarks +LOCAL_MODULE_TAGS := tests +LOCAL_CFLAGS += $(benchmark_c_flags) +LOCAL_SHARED_LIBRARIES += libm libdl +LOCAL_SRC_FILES := $(benchmark_src_files) diff --git a/docker/android/tests/test_make.py b/docker/android/tests/test_make.py new file mode 100644 index 000000000..b96f4fb1b --- /dev/null +++ b/docker/android/tests/test_make.py @@ -0,0 +1,358 @@ +import copy +import os +import sys + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +PROJECT_DIR = os.path.dirname(TEST_DIR) +sys.path.insert(0, PROJECT_DIR) + +from android import make + + +def test(): + path = os.path.join(TEST_DIR, 'Android.mk') + contents = open(path).read() + makefile = make.loads(contents) + stripped = contents[:-1] + assert repr(makefile) == f'Makefile({stripped})' + assert str(makefile) == stripped + assert len(makefile) == 9 + + assert not makefile[0].is_dev() + assert makefile[1].is_dev() + assert makefile[1].is_benchmark() + assert makefile[2].is_dev() + assert makefile[2].is_test() + assert makefile[6].title == 'Other section.' + + filtered = copy.deepcopy(makefile) + filtered.filter(lambda x: not x.is_dev()) + assert type(filtered) is make.Makefile + assert len(filtered) == 2 + assert not filtered[0].is_comment() + assert filtered[1].title == 'Other section.' + + assert makefile == make.load(open(path)) + assert contents == makefile.dumps() + '\n' + + +def test_nested(): + path = os.path.join(TEST_DIR, 'Nested.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + assert len(makefile) == 6 + + assert makefile[0].is_block() + assert makefile[0].child.startswith('# this is a special makefile') + + assert makefile[1].is_directive() + assert len(makefile[1].child) == 2 + assert makefile[1].child[0].is_block() + assert makefile[1].child[1].is_comment() + assert makefile[1].child[1].title == 'Benchmarks.' 
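+
+    # makefile[1].child[1] is the 'Benchmarks.' comment block from Nested.mk;
+    # its body should nest as [plain block, inner directive, plain block],
+    # which the assertions below walk through.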
+ + outer = makefile[1].child[1] + assert len(outer.child) == 3 + assert outer.child[0].is_block() + assert outer.child[1].is_directive() + assert outer.child[2].is_block() + + inner = outer.child[1] + assert inner.child.is_block() + + +def test_comments(): + path = os.path.join(TEST_DIR, 'Comments.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + assert len(makefile) == 1 + + assert makefile[0].is_block() + assert makefile[0].child.startswith('# 1) sample grouping:') + + +def test_grouped(): + path = os.path.join(TEST_DIR, 'Grouped.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + assert len(makefile) == 3 + + assert makefile[0].is_block() + assert makefile[0].child.startswith('LOCAL_PATH := $(call my-dir)') + + comment = makefile[1] + assert comment.is_comment() + assert len(comment.child) == 3 + assert comment.child[0].child.startswith('LOCAL_SRC_FILES := src.c') + assert comment.child[1].is_directive() + assert len(comment.child[2].child) == 0 + + directives = comment.child[1] + inner_comment = directives.child + assert inner_comment.is_comment() + assert len(inner_comment.child) == 2 + assert inner_comment.child[0].is_directive() + assert inner_comment.child[1].child.startswith('else') + + inner = inner_comment.child[0] + assert inner.child.lstrip().startswith('benchmark_src_files') + + assert makefile[2].is_comment() + + +def test_recurse(): + path = os.path.join(TEST_DIR, 'Nested.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + nodes = list(makefile.recurse()) + assert len(nodes) == 11 + + assert nodes[0] == makefile[0] + assert nodes[1] == makefile[1] + assert nodes[2] == makefile[1].child[0] + assert nodes[3] == makefile[1].child[1] + assert nodes[4] == makefile[1].child[1].child[0] + assert nodes[5] == makefile[1].child[1].child[1] + assert nodes[6] == makefile[1].child[1].child[2] + assert nodes[7] == makefile[2] + assert nodes[8] == makefile[3] + assert nodes[9] == makefile[4] + assert nodes[10] == makefile[5] + + +def test_multiline(): + path = os.path.join(TEST_DIR, 'Multiline.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + assert len(makefile) == 2 + + assert makefile[0].is_block() + assert makefile[0].child.startswith('# this is a special makefile') + + assert makefile[1].is_directive() + comment = makefile[1].child[1] + assert comment.is_comment() + assert comment.title == 'new rules\n$(1): rule 1\n$(2): rule 2' + assert str(comment.child).startswith('\ninclude') + + +def test_fake_title(): + path = os.path.join(TEST_DIR, 'FakeTitle.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + assert len(makefile) == 1 + + comment = makefile[0] + assert comment.is_comment() + assert comment.title == '' + assert str(comment.child).startswith('LOCAL_PATH := $(call my-dir)') + + +def test_filter(): + path = os.path.join(TEST_DIR, 'Nested.mk') + contents = open(path).read() + makefile = make.loads(contents) + assert str(makefile) + '\n' == contents + assert len(makefile) == 6 + assert makefile[1].is_directive() + assert len(makefile[1].child) == 2 + + filtered = copy.deepcopy(makefile) + filtered.filter(lambda x: not x.is_dev()) + assert len(filtered) == 4 + assert filtered[0].is_block() + assert filtered[1].is_directive() + assert filtered[2].is_block() + assert 
filtered[3].is_comment() + + directive = filtered[1] + assert len(directive.child) == 1 + assert directive.child[0].is_block() + + assert filtered[3].title.lstrip().startswith('Other section.') + + +def test_split_directives(): + path = os.path.join(TEST_DIR, 'Nested.mk') + contents = open(path).read() + iterable = iter(contents.splitlines()) + blocks = make._split_directives(iterable)[0] + assert len(blocks) == 3 + + assert blocks[0].is_block() + assert blocks[0].startswith('# this is a special makefile') + + assert blocks[2].is_block() + assert blocks[2].lstrip().startswith('# Other section.') + + assert not blocks[1].is_comment() + assert blocks[1].is_directive() + assert blocks[1].has_block_list() + + directives = blocks[1].child + assert len(directives) == 3 + assert directives[0].is_block() + assert directives[1].is_directive() + assert directives[2].is_block() + + assert not directives[1].child.has_block_list() + assert directives[1].child.lstrip().startswith('benchmark_src_files') + + path = os.path.join(TEST_DIR, 'Grouped.mk') + contents = open(path).read() + iterable = iter(contents.splitlines()) + blocks = make._split_directives(iterable)[0] + assert len(blocks) == 3 + + assert blocks[0].is_block() + assert blocks[1].is_directive() + assert blocks[2].is_block() + + directives = blocks[1].child + assert len(directives) == 3 + assert directives[0].is_block() + assert directives[1].is_directive() + assert directives[2].is_block() + + +def test_split_comments(): + path = os.path.join(TEST_DIR, 'Android.mk') + contents = open(path).read() + blocks = make._split_comments(contents) + assert repr(blocks) == f'BlockList({contents})' + assert str(blocks) == contents + assert len(blocks) == 9 + + assert not blocks[0].is_dev() + assert blocks[1].is_dev() + assert blocks[1].is_benchmark() + assert blocks[1].title == 'Benchmarks.' + assert blocks[2].is_dev() + assert blocks[2].is_test() + assert blocks[2].title == 'Unit tests.' + assert blocks[3].is_test() + assert blocks[3].title == 'test executable' + assert blocks[4].is_test() + assert blocks[4].title == 'Unit tests.' + assert blocks[5].is_test() + assert blocks[5].title == 'test executable' + assert not blocks[6].is_dev() + assert blocks[6].title == 'Other section.' + assert blocks[7].is_test() + assert blocks[7].title == 'Unit tests.' 
+ assert blocks[8].is_test() + assert blocks[8].title == 'test executable' + + path = os.path.join(TEST_DIR, 'Empty.mk') + contents = open(path).read() + blocks = make._split_comments(contents) + assert len(blocks) == 1 + assert repr(blocks) == 'BlockList(\n)' + assert str(blocks) == '\n' + assert str(blocks[0]) == '\n' + + blocks = make._split_comments('') + assert len(blocks) == 0 + assert repr(blocks) == 'BlockList()' + assert str(blocks) == '' + + +def test_block(): + data = '''LOCAL_PATH := $(call my-dir) +include $(CLEAR_VARS)''' + block = make.Block(data) + assert repr(block) == f'Block({data})' + assert str(block) == data + assert block.is_block() + assert not block.is_block_list() + assert not block.is_comment() + assert not block.is_directive() + assert not block.is_dev() + + +def test_block_list(): + data1 = 'LOCAL_PATH := $(call my-dir)' + data2 = 'test_tags := tests' + blocks = make.BlockList([make.Block(data1), make.Block(data2)]) + assert repr(blocks) == f'BlockList({data1}\n{data2})' + assert str(blocks) == f'{data1}\n{data2}' + assert not blocks.is_block() + assert blocks.is_block_list() + assert not blocks.is_comment() + assert not blocks.is_directive() + assert not blocks.is_dev() + + +def test_comment_block(): + # single block + comment = '''# ----------------------------------------------------------------------------- +# Benchmarks. +# ----------------------------------------------------------------------------- +''' + title = 'Benchmarks.' + data = 'test_tags := tests' + block = make.CommentBlock(comment, title, make.Block(data)) + assert repr(block) == f'CommentBlock({comment}\n{data})' + assert str(block) == f'{comment}\n{data}' + assert not block.is_block() + assert not block.is_block_list() + assert block.is_comment() + assert not block.is_directive() + assert block.is_dev() + + title = 'Other Section.' 
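+    # same dashed header, but with a non-test title and a multi-block body;
+    # the 'Other Section.' title is not dev-related, so is_dev() should be
+    # False below.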
+ blocks = make.BlockList([ + make.Block('LOCAL_PATH := $(call my-dir)'), + make.Block('test_tags := tests'), + ]) + block = make.CommentBlock(comment, title, blocks) + assert repr(block) == f'CommentBlock({comment}\n{str(blocks)})' + assert str(block) == f'{comment}\n{str(blocks)}' + assert not block.is_block() + assert not block.is_block_list() + assert block.is_comment() + assert not block.is_directive() + assert not block.is_dev() + + +def test_directive_block(): + start_inner = ' ifneq ($(USE_B),)' + end_inner = ' endif' + data_inner = ''' SOURCES=b.cc + else + SOURCES=a.cc''' + inner = make.DirectiveBlock(start_inner, end_inner, make.Block(data_inner)) + str_inner = f'{start_inner}\n{data_inner}\n{end_inner}' + assert repr(inner) == f'DirectiveBlock({str_inner})' + assert str(inner) == str_inner + assert not inner.is_block() + assert not inner.is_block_list() + assert not inner.is_comment() + assert inner.is_directive() + assert not inner.is_dev() + + data_else = '''else + SOURCES=c.cc''' + else_block = make.Block(data_else) + blocks = make.BlockList([inner, else_block]) + str_blocks = '\n'.join([str(i) for i in blocks]) + assert repr(blocks) == f'BlockList({str_blocks})' + assert str(blocks) == str_blocks + + start = 'ifneq ($(USE_A),)' + end = 'endif' + block = make.DirectiveBlock(start, end, blocks) + str_block = f'{start}\n{str_blocks}\n{end}' + assert repr(block) == f'DirectiveBlock({str_block})' + assert str(block) == str_block + assert not block.is_block() + assert not block.is_block_list() + assert not block.is_comment() + assert block.is_directive() + assert not block.is_dev() diff --git a/docker/android/tests/test_metadata.py b/docker/android/tests/test_metadata.py new file mode 100644 index 000000000..b12a38c3e --- /dev/null +++ b/docker/android/tests/test_metadata.py @@ -0,0 +1,24 @@ +import os +import sys + +import toml + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +PROJECT_DIR = os.path.dirname(TEST_DIR) +sys.path.insert(0, PROJECT_DIR) + +import android + + +# ensure our pyproject and module metadata don't go out-of-date +def test_metadata(): + pyproject_path = open(os.path.join(PROJECT_DIR, 'pyproject.toml')) + pyproject = toml.load(pyproject_path) + project = pyproject['project'] + assert project['name'] == android.__name__ + assert project['version'] == android.__version__ + assert project['license']['text'] == android.__license__ + + version, dev = android.__version__.split('-') + major, minor, patch = [int(i) for i in version.split('.')] + assert (major, minor, patch, dev) == android.__version_info__ diff --git a/docker/android/tests/test_soong.py b/docker/android/tests/test_soong.py new file mode 100644 index 000000000..1d49dc45f --- /dev/null +++ b/docker/android/tests/test_soong.py @@ -0,0 +1,366 @@ +import copy +import os +import sys + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +PROJECT_DIR = os.path.dirname(TEST_DIR) +sys.path.insert(0, PROJECT_DIR) + +from android import soong + + +def test(): + path = os.path.join(TEST_DIR, 'Android.bp') + contents = open(path).read() + lexer = soong.Lexer() + tokens = list(lexer.tokenize(contents)) + assert (tokens[0].type, tokens[0].value) == ('IDENT', 'sample_array') + assert (tokens[51].type, tokens[51].value) == ('IDENT', 'srcs') + assert (tokens[52].type, tokens[52].value) == ('COLON', ':') + assert (tokens[53].type, tokens[53].value) == ('LBRACKET', '[') + assert (tokens[54].type, tokens[54].value) == ('STRING', '"tree.cc"') + + parser = soong.Parser() + result = parser.parse(iter(tokens)) + 
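+    # the Android.bp fixture holds 7 top-level nodes: the sample_array
+    # assignment plus six module scopes.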
assert len(result) == 7 + + assert result[0].is_assignment() + assert result[0].to_str() == '''sample_array = [ + "value1", + "value2", +]''' + + assert result[1].is_scope() + assert result[1].name == 'cc_defaults' + assert result[1].name.is_ident() + assert result[1].map['name'] == 'target' + assert result[1].map['tidy_checks'] == 'sample_array' + assert result[1].map.get('srcs') is None + assert result[1].map.is_map() + + assert result[2].is_scope() + assert result[2].name == 'cc_library_static' + assert result[2].map['name'] == 'static_lib' + + ast = soong.loads(contents) + assert ast == result + ast = soong.load(open(path)) + assert ast == result + lines = contents.splitlines() + assert ast.dumps() == '\n'.join(lines[1:5] + lines[10:]) + + assert ast[4].is_test() + assert ast[4].map.is_test() + + filtered = copy.deepcopy(ast) + filtered.filter(lambda x: not (x.is_scope() and x.is_dev())) + assert type(filtered) is soong.Ast + assert len(filtered) == 5 + assert filtered == ast[:4] + [ast[6]] + + map = filtered[1].map + assert 'cflags' in map + map.filter(lambda k, v: k != 'cflags') + assert 'cflags' not in map + assert len(map['array']) == 2 + map['array'].filter(lambda x: x != '-short') + assert len(map['array']) == 1 + + custom = filtered[4].map + assert 'whole_static_libs' in custom + custom['whole_static_libs'].filter(lambda x: x.str_op(lambda y: 'gtest' not in y.lower())) + assert custom['whole_static_libs'] == ['libz'] + + assert 'host_ldlibs' in custom + custom['host_ldlibs'].filter(lambda x: x.str_op(lambda y: 'gtest' not in y.lower())) + assert custom['host_ldlibs'] == [] + + +def test_addition(): + path = os.path.join(TEST_DIR, 'Addition.bp') + ast = soong.load(open(path)) + assert len(ast) == 27 + assert ast[0].is_assignment() + assert ast[1].is_binary_operator_assignment() + assert ast[2].is_assignment() + assert ast[3].is_binary_operator_assignment() + assert ast[4].is_assignment() + assert ast[5].is_binary_operator_assignment() + assert ast[6].is_scope() + assert ast[7].is_binary_operator_assignment() + assert ast[8].expr.is_binary_operator() + + assert ast[0].name == 'list' + assert ast[0].expr == ['value1'] + assert ast[1].name == 'list' + assert ast[1].op == '+=' + assert ast[1].expr == ['value2'] + + assert ast[8].expr.lhs == 'number' + assert ast[8].expr.op == '+' + assert ast[8].expr.rhs == 4 + assert ast[11].expr.lhs == 'scope' + assert ast[11].expr.op == '+' + assert ast[11].expr.rhs.is_map() + + assert ast[12].expr.lhs == 4 + assert ast[12].expr.op == '+' + assert ast[12].expr.rhs == 'number' + assert ast[15].expr.lhs.is_map() + assert ast[15].expr.op == '+' + assert ast[15].expr.rhs == 'scope' + + assert ast[16].expr.lhs == 4 + assert ast[16].expr.op == '+' + assert ast[16].expr.rhs == 1 + assert ast[19].expr.lhs == {} + assert ast[19].expr.op == '+' + assert ast[19].expr.rhs == {'name': 'target'} + + assert ast[20].expr.lhs.is_binary_operator() + assert ast[20].expr.lhs.lhs == 4 + assert ast[20].expr.lhs.rhs == 1 + assert ast[20].expr.op == '+' + assert ast[20].expr.rhs == 2 + + assert ast[26].name == 'files' + assert ast[26].expr.is_list() + assert len(ast[26].expr) == 3 + + assert ast[26].expr[0].lhs == 'home' + assert ast[26].expr[0].lhs.is_ident() + assert ast[26].expr[0].rhs == 'file.c' + assert ast[26].expr[0].rhs.is_string() + + assert ast[26].expr[1].lhs == 'test/' + assert ast[26].expr[1].lhs.is_string() + assert ast[26].expr[1].rhs == 'test' + assert ast[26].expr[1].rhs.is_ident() + + assert ast[26].expr[2].lhs == 'home' + assert 
ast[26].expr[2].lhs.is_ident() + assert ast[26].expr[2].rhs == 'test' + assert ast[26].expr[2].rhs.is_ident() + + # test a few binops, just in case + binop = ast[26].expr[1] + assert binop.str_op(lambda x: 'test' in x.lower()) + assert binop.lhs.str_op(lambda x: 'test' in x.lower()) + + +def test_empty(): + path = os.path.join(TEST_DIR, 'Empty.bp') + ast = soong.load(open(path)) + assert len(ast) == 0 + + +def test_list_map_parse(): + path = os.path.join(TEST_DIR, 'ListMap.bp') + ast = soong.load(open(path)) + assert len(ast) == 1 + + scope = ast[0] + assert scope.is_scope() + assert scope.name == 'scope' + map = scope.map['key'] + + assert map.value.is_list() + assert len(map.value) == 1 + assert map.value[0].is_map() + + inner = map.value[0] + assert len(inner) == 2 + assert inner['name'] == 'art' + assert inner['deps'].value == soong.List([soong.String('"dependency"')]) + + +def test_is_non_test(): + path = os.path.join(TEST_DIR, 'NonTest.bp') + ast = soong.load(open(path)) + assert len(ast) == 1 + + scope = ast[0] + assert scope.is_scope() + assert scope.name == 'cc_defaults' + assert scope.map['name'].value == 'lib-non-test-defaults' + + +def test_ast(): + array = soong.List([soong.String('"value1"'), soong.String('"value2"')]) + assignment = soong.Assignment(soong.Ident('name'), array) + value = soong.MapValue('=', soong.String('"value"')) + map = soong.Map({soong.Ident('key'): value}) + scope = soong.Scope(soong.Ident('name'), map) + ast = soong.Ast([assignment, scope]) + assert repr(ast) == '''Ast(name = ["value1", "value2"] +name {key = "value"})''' + assert str(ast) == '''name = ["value1", "value2"] +name {key = "value"}''' + assert ast.to_str() == '''name = [ + "value1", + "value2", +] +name { + key = "value", +}''' + + +def test_assignment(): + array = soong.List([soong.String('"value1"'), soong.String('"value2"')]) + assignment = soong.Assignment(soong.Ident('name'), array) + assert repr(assignment) == 'Assignment(name = ["value1", "value2"])' + assert str(assignment) == 'name = ["value1", "value2"]' + assert assignment.to_str(pretty=False) == 'name = ["value1", "value2"]' + assert assignment.to_str() == '''name = [ + "value1", + "value2", +]''' + assert assignment.to_str(depth=1) == '''name = [ + "value1", + "value2", + ]''' + + +def test_binary_operator_assignment(): + ident = soong.Ident('name') + expr = soong.Integer('1') + assignment = soong.BinaryOperatorAssignment(ident, '+=', expr) + assert repr(assignment) == 'BinaryOperatorAssignment(name += 1)' + assert str(assignment) == 'name += 1' + assert assignment.to_str(pretty=False) == 'name += 1' + assert assignment.to_str() == 'name += 1' + + +def test_binary_operator(): + ident = soong.Ident('name') + expr = soong.Integer('1') + operator = soong.BinaryOperator(ident, '+', expr) + assert repr(operator) == 'BinaryOperator(name + 1)' + assert str(operator) == 'name + 1' + assert operator.to_str(pretty=False) == 'name + 1' + assert operator.to_str() == 'name + 1' + + +def test_scope(): + value = soong.MapValue(':', soong.String('"value"')) + map = soong.Map({soong.Ident('key'): value}) + scope = soong.Scope(soong.Ident('name'), map) + assert repr(scope) == 'Scope(name {key: "value"})' + assert str(scope) == 'name {key: "value"}' + assert scope.to_str(pretty=False) == 'name {key: "value"}' + assert scope.to_str() == '''name { + key: "value", +}''' + assert scope.to_str(depth=1) == '''name { + key: "value", + }''' + + +def test_map(): + value = soong.MapValue(':', soong.String('"value"')) + map = soong.Map({soong.Ident('key'): 
value}) + assert repr(map) == 'Map({key: "value"})' + assert str(map) == '{key: "value"}' + assert map.to_str(pretty=False) == '{key: "value"}' + assert map.to_str() == '''{ + key: "value", +}''' + assert map.to_str(depth=1) == '''{ + key: "value", + }''' + + map = soong.Map() + assert str(map) == '{}' + assert map.to_str() == '{}' + + +def test_recurse(): + path = os.path.join(TEST_DIR, 'Android.bp') + ast = soong.load(open(path)) + cc_defaults = ast[1] + assert cc_defaults.name == 'cc_defaults' + for (key, value, depth, parent) in cc_defaults.map.recurse(): + assert depth == 1 + + cc_test = ast[4] + assert cc_test.name == 'cc_test' + seen = [] + for (key, value, depth, parent) in cc_test.map.recurse(): + if depth > 1 and parent.is_map(): + seen.append(key) + assert seen == ['array', 'option'] + + +def test_list(): + sequence = soong.List([soong.String('"value1"'), soong.String('"value2"')]) + assert repr(sequence) == 'List(["value1", "value2"])' + assert str(sequence) == '["value1", "value2"]' + assert sequence.to_str(pretty=False) == '["value1", "value2"]' + assert sequence.to_str() == '''[ + "value1", + "value2", +]''' + assert sequence.to_str(depth=1) == '''[ + "value1", + "value2", + ]''' + + sequence = soong.List([soong.String('"value"')]) + assert repr(sequence) == 'List(["value"])' + assert str(sequence) == '["value"]' + assert sequence.to_str() == '["value"]' + + sequence = soong.List([]) + assert sequence.to_str() == '[]' + + +def test_map_value(): + value = soong.MapValue(':', soong.String('"value"')) + assert repr(value) == 'MapValue(: "value")' + assert str(value) == ': "value"' + assert value.to_str() == ': "value"' + + value = soong.MapValue('=', soong.String('"value"')) + assert repr(value) == 'MapValue( = "value")' + assert str(value) == ' = "value"' + assert value.to_str() == ' = "value"' + + +def test_list_map(): + value = soong.MapValue(':', soong.String('"value"')) + map = soong.Map({soong.Ident('key'): value}) + sequence = soong.List([map]) + assert repr(sequence) == 'List([{key: "value"}])' + assert str(sequence) == '[{key: "value"}]' + assert sequence.to_str(pretty=False) == '[{key: "value"}]' + assert sequence.to_str() == '''[{ + key: "value", +}]''' + + +def test_ident(): + ident = soong.Ident('name') + assert repr(ident) == 'Ident(name)' + assert str(ident) == 'name' + assert ident.to_str() == 'name' + + +def test_string(): + string = soong.String('"value1"') + assert repr(string) == 'String("value1")' + assert str(string) == 'value1' + assert string.to_str() == '"value1"' + + +def test_integer(): + number = soong.Integer('3') + assert repr(number) == 'Integer(3)' + assert str(number) == '3' + assert number.to_str() == '3' + + +def test_bool(): + boolean = soong.Bool(True) + assert repr(boolean) == 'Bool(true)' + assert str(boolean) == 'true' + assert boolean.to_str() == 'true' diff --git a/docker/android/tests/test_util.py b/docker/android/tests/test_util.py new file mode 100644 index 000000000..05f9b018f --- /dev/null +++ b/docker/android/tests/test_util.py @@ -0,0 +1,23 @@ +import os +import sys + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +PROJECT_DIR = os.path.dirname(TEST_DIR) +sys.path.insert(0, PROJECT_DIR) + +from android import util + + +def test_is_test(): + assert not util.is_test('lib-non-test-defaults') + assert util.is_test('art-tests') + assert util.is_test('libgtest') + assert util.is_test('libgtest_main') + assert util.is_test('extra-tests') + + +def test_is_benchmark(): + assert util.is_benchmark('benchmark') + assert 
util.is_benchmark('benchmarks')
+    assert util.is_benchmark('-benchmarks')
+    assert not util.is_benchmark('gbenchmarks')
diff --git a/docker/android/tox.ini b/docker/android/tox.ini
new file mode 100644
index 000000000..8d14c40ad
--- /dev/null
+++ b/docker/android/tox.ini
@@ -0,0 +1,31 @@
+[tox]
+envlist = py36,py3
+skip_missing_interpreters = True
+isolated_build = True
+
+[testenv]
+deps =
+    sly >= 0.4
+    google-re2 >= 1.0
+    pytest
+    toml
+commands = pytest -o cache_dir={toxworkdir}/.pytest_cache
+passenv =
+    PYTHONDONTWRITEBYTECODE
+    PYTHONPYCACHEPREFIX
+    PYTHON_EGG_CACHE
+
+[flake8]
+max-line-length = 100
+ignore =
+    # we use lambdas for short, one-line conditions and formatters
+    E731
+    # opt-in to new behavior with operators after line breaks
+    W503
+per-file-ignores =
+    # the sly grammar uses variables before they are defined via a metaclass
+    # likewise, it uses redefinitions to extend parsers via SLR grammar
+    android/soong.py: F811 F821
+    # need to add the project to the path for our tests and scripts
+    tests/*.py: E402
+    scripts/*.py: E402
diff --git a/docker/base-runner.sh b/docker/base-runner.sh
new file mode 100644
index 000000000..5bf523640
--- /dev/null
+++ b/docker/base-runner.sh
@@ -0,0 +1,142 @@
+#!/usr/bin/env bash
+
+host_architecture() {
+    # there are numerous compatibility modes, so we want
+    # to ensure that these are valid. we also want to
+    # use dpkg if it's available since it gives hard-float
+    # information on compatible architectures
+    local host
+    local arch
+    if command -v dpkg >/dev/null 2>&1; then
+        host=$(dpkg --print-architecture)
+        arch="${host}"
+    else
+        host=$(uname -m)
+        arch="${host}"
+
+        case "${arch}" in
+            aarch64|armv8b|armv8l)
+                arch=arm64
+                ;;
+            aarch64_be)
+                arch=arm64be
+                ;;
+            arm*)
+                arch=unknown
+                ;;
+            ppc)
+                arch=powerpc
+                ;;
+            ppc64le)
+                arch=ppc64el
+                ;;
+            s390)
+                arch=s390x
+                ;;
+            i?86)
+                arch=i386
+                ;;
+            x64|x86_64)
+                arch=amd64
+                ;;
+            *)
+                ;;
+        esac
+    fi
+
+    echo "${arch}"
+}
+
+normalize_arch() {
+    local arch="${1}"
+    local debian
+
+    debian="${arch}"
+    case "${arch}" in
+        aarch64)
+            debian=arm64
+            ;;
+        x86_64)
+            debian=amd64
+            ;;
+        arm)
+            debian=armel
+            ;;
+        armv7)
+            debian=arm
+            ;;
+        armv7hf)
+            debian=armhf
+            ;;
+        i?86)
+            debian=i386
+            ;;
+        powerpc64)
+            debian=ppc64
+            ;;
+        powerpc64le)
+            debian=ppc64el
+            ;;
+        riscv64*)
+            debian=riscv64
+            ;;
+        *)
+            ;;
+    esac
+
+    echo "${debian}"
+}
+
+is_native_binary() {
+    # determines if the binary can run natively on the host
+    local arch="${1}"
+    local host
+    local target
+    host=$(host_architecture)
+    target=$(normalize_arch "${arch}")
+
+    # FIXME: this is not comprehensive. add more compatible architectures.
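+    # (sketch) likely additions are 32-bit arm userspace (armel/armhf) on
+    # arm64 hosts and s390 on s390x hosts, but both depend on the host
+    # kernel being built with compat support, so they are omitted until
+    # verified.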
+ case "${host}" in + amd64) + if [[ "${target}" == i386 ]] || [[ "${target}" == amd64 ]]; then + return 0 + fi + ;; + *) + if [[ "${host}" == "${target}" ]]; then + return 0 + fi + ;; + esac + + return 1 +} + +qemu_arch() { + # select qemu arch + local arch="${1}" + local qarch="${arch}" + case "${arch}" in + arm|armhf|armv7|armv7hf) + qarch="arm" + ;; + i?86) + qarch="i386" + ;; + powerpc) + qarch="ppc" + ;; + powerpc64) + qarch="ppc64" + ;; + powerpc64le) + if [ "${CROSS_RUNNER}" = "qemu-user" ]; then + qarch="ppc64le" + else + qarch="ppc64" + fi + ;; + esac + + echo "${qarch}" +} diff --git a/docker/common.sh b/docker/common.sh index 24d56194d..aca4d7ab3 100755 --- a/docker/common.sh +++ b/docker/common.sh @@ -30,11 +30,13 @@ install_packages \ if_centos install_packages \ clang-devel \ gcc-c++ \ + gcc-gfortran \ glibc-devel \ pkgconfig if_ubuntu install_packages \ g++ \ + gfortran \ libc6-dev \ libclang-dev \ pkg-config diff --git a/docker/crosstool-config/arm-unknown-linux-gnueabihf.config b/docker/crosstool-config/arm-unknown-linux-gnueabihf.config index 0b4038819..f5b04faaa 100644 --- a/docker/crosstool-config/arm-unknown-linux-gnueabihf.config +++ b/docker/crosstool-config/arm-unknown-linux-gnueabihf.config @@ -1,470 +1,30 @@ -# -# Automatically generated file; DO NOT EDIT. -# crosstool-NG Configuration -# -# This file was adapted from: -# https://github.com/rust-lang/rust/blob/0595ea1d12cf745e0a672d05341429ecb0917e66/src/ci/docker/host-x86_64/dist-armhf-linux/arm-linux-gnueabihf.config -CT_CONFIGURE_has_static_link=y -CT_CONFIGURE_has_cxx11=y -CT_CONFIGURE_has_wget=y -CT_CONFIGURE_has_curl=y -CT_CONFIGURE_has_make_3_81_or_newer=y -CT_CONFIGURE_has_make_4_0_or_newer=y -CT_CONFIGURE_has_libtool_2_4_or_newer=y -CT_CONFIGURE_has_libtoolize_2_4_or_newer=y -CT_CONFIGURE_has_autoconf_2_65_or_newer=y -CT_CONFIGURE_has_autoreconf_2_65_or_newer=y -CT_CONFIGURE_has_automake_1_15_or_newer=y -CT_CONFIGURE_has_gnu_m4_1_4_12_or_newer=y -CT_CONFIGURE_has_python_3_4_or_newer=y -CT_CONFIGURE_has_bison_2_7_or_newer=y -CT_CONFIGURE_has_python=y -CT_CONFIGURE_has_git=y -CT_CONFIGURE_has_md5sum=y -CT_CONFIGURE_has_sha1sum=y -CT_CONFIGURE_has_sha256sum=y -CT_CONFIGURE_has_sha512sum=y -CT_CONFIGURE_has_install_with_strip_program=y -CT_CONFIG_VERSION_CURRENT="3" -CT_CONFIG_VERSION="3" -CT_MODULES=y - -# -# Paths and misc options -# - -# -# crosstool-NG behavior -# -# CT_OBSOLETE is not set -# CT_EXPERIMENTAL is not set -# CT_DEBUG_CT is not set - -# -# Paths -# -CT_LOCAL_TARBALLS_DIR="${HOME}/src" -CT_SAVE_TARBALLS=y -# CT_TARBALLS_BUILDROOT_LAYOUT is not set -CT_WORK_DIR="${CT_TOP_DIR}/.build" -CT_BUILD_TOP_DIR="${CT_WORK_DIR:-${CT_TOP_DIR}/.build}/${CT_HOST:+HOST-${CT_HOST}/}${CT_TARGET}" +CT_CONFIG_VERSION="4" CT_PREFIX_DIR="/x-tools/${CT_TARGET}" -CT_RM_RF_PREFIX_DIR=y -CT_REMOVE_DOCS=y -CT_INSTALL_LICENSES=y -CT_PREFIX_DIR_RO=y -CT_STRIP_HOST_TOOLCHAIN_EXECUTABLES=y -# CT_STRIP_TARGET_TOOLCHAIN_EXECUTABLES is not set - -# -# Downloading -# -CT_DOWNLOAD_AGENT_WGET=y -# CT_DOWNLOAD_AGENT_CURL is not set -# CT_DOWNLOAD_AGENT_NONE is not set -# CT_FORBID_DOWNLOAD is not set -# CT_FORCE_DOWNLOAD is not set -CT_CONNECT_TIMEOUT=10 -CT_DOWNLOAD_WGET_OPTIONS="--passive-ftp --tries=3 -nc --progress=dot:binary" -# CT_ONLY_DOWNLOAD is not set -# CT_USE_MIRROR is not set -CT_VERIFY_DOWNLOAD_DIGEST=y -CT_VERIFY_DOWNLOAD_DIGEST_SHA512=y -# CT_VERIFY_DOWNLOAD_DIGEST_SHA256 is not set -# CT_VERIFY_DOWNLOAD_DIGEST_SHA1 is not set -# CT_VERIFY_DOWNLOAD_DIGEST_MD5 is not set -CT_VERIFY_DOWNLOAD_DIGEST_ALG="sha512" -# 
CT_VERIFY_DOWNLOAD_SIGNATURE is not set - -# -# Extracting -# -# CT_FORCE_EXTRACT is not set -CT_OVERRIDE_CONFIG_GUESS_SUB=y -# CT_ONLY_EXTRACT is not set -CT_PATCH_BUNDLED=y -# CT_PATCH_BUNDLED_LOCAL is not set -CT_PATCH_ORDER="bundled" - -# -# Build behavior -# -CT_PARALLEL_JOBS=0 -CT_LOAD="" -CT_USE_PIPES=y -CT_EXTRA_CFLAGS_FOR_BUILD="" -CT_EXTRA_LDFLAGS_FOR_BUILD="" -CT_EXTRA_CFLAGS_FOR_HOST="" -CT_EXTRA_LDFLAGS_FOR_HOST="" -# CT_CONFIG_SHELL_SH is not set -# CT_CONFIG_SHELL_ASH is not set -CT_CONFIG_SHELL_BASH=y -# CT_CONFIG_SHELL_CUSTOM is not set -CT_CONFIG_SHELL="${bash}" - -# -# Logging -# -# CT_LOG_ERROR is not set -# CT_LOG_WARN is not set -# CT_LOG_INFO is not set -CT_LOG_EXTRA=y -# CT_LOG_ALL is not set -# CT_LOG_DEBUG is not set -CT_LOG_LEVEL_MAX="EXTRA" -# CT_LOG_SEE_TOOLS_WARN is not set -CT_LOG_PROGRESS_BAR=y -CT_LOG_TO_FILE=y -CT_LOG_FILE_COMPRESS=y - -# -# Target options -# -# CT_ARCH_ALPHA is not set -# CT_ARCH_ARC is not set +CT_DOWNLOAD_AGENT_CURL=y CT_ARCH_ARM=y -# CT_ARCH_AVR is not set -# CT_ARCH_M68K is not set -# CT_ARCH_MIPS is not set -# CT_ARCH_NIOS2 is not set -# CT_ARCH_POWERPC is not set -# CT_ARCH_S390 is not set -# CT_ARCH_SH is not set -# CT_ARCH_SPARC is not set -# CT_ARCH_X86 is not set -# CT_ARCH_XTENSA is not set -CT_ARCH="arm" -CT_ARCH_CHOICE_KSYM="ARM" -CT_ARCH_CPU="" -CT_ARCH_TUNE="" -CT_ARCH_ARM_SHOW=y - -# -# Options for arm -# -CT_ARCH_ARM_PKG_KSYM="" -CT_ARCH_ARM_MODE="arm" -CT_ARCH_ARM_MODE_ARM=y -# CT_ARCH_ARM_MODE_THUMB is not set -# CT_ARCH_ARM_INTERWORKING is not set -CT_ARCH_ARM_EABI_FORCE=y -CT_ARCH_ARM_EABI=y -CT_ARCH_ARM_TUPLE_USE_EABIHF=y -CT_ALL_ARCH_CHOICES="ALPHA ARC ARM AVR M68K MICROBLAZE MIPS MOXIE MSP430 NIOS2 POWERPC RISCV S390 SH SPARC X86 XTENSA" -CT_ARCH_SUFFIX="" -# CT_OMIT_TARGET_VENDOR is not set - -# -# Generic target options -# -# CT_MULTILIB is not set -CT_DEMULTILIB=y -CT_ARCH_SUPPORTS_BOTH_MMU=y -CT_ARCH_DEFAULT_HAS_MMU=y -CT_ARCH_USE_MMU=y -CT_ARCH_SUPPORTS_FLAT_FORMAT=y -CT_ARCH_SUPPORTS_EITHER_ENDIAN=y -CT_ARCH_DEFAULT_LE=y -# CT_ARCH_BE is not set -CT_ARCH_LE=y -CT_ARCH_ENDIAN="little" -CT_ARCH_SUPPORTS_32=y -CT_ARCH_SUPPORTS_64=y -CT_ARCH_DEFAULT_32=y -CT_ARCH_BITNESS=32 -CT_ARCH_32=y -# CT_ARCH_64 is not set - -# -# Target optimisations -# -CT_ARCH_SUPPORTS_WITH_ARCH=y -CT_ARCH_SUPPORTS_WITH_CPU=y -CT_ARCH_SUPPORTS_WITH_TUNE=y -CT_ARCH_SUPPORTS_WITH_FLOAT=y -CT_ARCH_SUPPORTS_WITH_FPU=y -CT_ARCH_SUPPORTS_SOFTFP=y -CT_ARCH_EXCLUSIVE_WITH_CPU=y CT_ARCH_ARCH="armv6" CT_ARCH_FPU="vfp" -# CT_ARCH_FLOAT_AUTO is not set CT_ARCH_FLOAT_HW=y -# CT_ARCH_FLOAT_SOFTFP is not set -# CT_ARCH_FLOAT_SW is not set -CT_TARGET_CFLAGS="" -CT_TARGET_LDFLAGS="" -CT_ARCH_FLOAT="hard" - -# -# Toolchain options -# - -# -# General toolchain options -# -CT_FORCE_SYSROOT=y -CT_USE_SYSROOT=y -CT_SYSROOT_NAME="sysroot" -CT_SYSROOT_DIR_PREFIX="" -CT_WANTS_STATIC_LINK=y -CT_WANTS_STATIC_LINK_CXX=y -# CT_STATIC_TOOLCHAIN is not set -CT_SHOW_CT_VERSION=y -CT_TOOLCHAIN_PKGVERSION="" -CT_TOOLCHAIN_BUGURL="" - -# -# Tuple completion and aliasing -# -CT_TARGET_VENDOR="unknown" -CT_TARGET_ALIAS_SED_EXPR="" -CT_TARGET_ALIAS="" - -# -# Toolchain type -# -CT_CROSS=y -# CT_CANADIAN is not set -CT_TOOLCHAIN_TYPE="cross" - -# -# Build system -# -CT_BUILD="" -CT_BUILD_PREFIX="" -CT_BUILD_SUFFIX="" - -# -# Misc options -# -# CT_TOOLCHAIN_ENABLE_NLS is not set - -# -# Operating System -# -CT_KERNEL_SUPPORTS_SHARED_LIBS=y -# CT_KERNEL_BARE_METAL is not set CT_KERNEL_LINUX=y -CT_KERNEL="linux" -CT_KERNEL_CHOICE_KSYM="LINUX" -CT_KERNEL_LINUX_SHOW=y - -# -# 
Options for linux -# -CT_KERNEL_LINUX_PKG_KSYM="LINUX" -CT_LINUX_DIR_NAME="linux" -CT_LINUX_PKG_NAME="linux" -CT_LINUX_SRC_RELEASE=y -CT_LINUX_PATCH_ORDER="global" CT_LINUX_V_4_19=y # CT_LINUX_NO_VERSIONS is not set -CT_LINUX_VERSION="4.19.21" - -CT_LINUX_MIRRORS="$(CT_Mirrors kernel.org linux ${CT_LINUX_VERSION})" -CT_LINUX_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_LINUX_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_LINUX_ARCHIVE_FORMATS=".tar.xz .tar.gz" -CT_LINUX_SIGNATURE_FORMAT="unpacked/.sign" +CT_LINUX_VERSION="4.19.287" CT_LINUX_later_than_4_8=y CT_LINUX_4_8_or_later=y CT_LINUX_later_than_3_7=y CT_LINUX_3_7_or_later=y CT_LINUX_later_than_3_2=y CT_LINUX_3_2_or_later=y - -CT_KERNEL_LINUX_VERBOSITY_0=y -# CT_KERNEL_LINUX_VERBOSITY_1 is not set -# CT_KERNEL_LINUX_VERBOSITY_2 is not set -CT_KERNEL_LINUX_VERBOSE_LEVEL=0 -CT_KERNEL_LINUX_INSTALL_CHECK=y -CT_ALL_KERNEL_CHOICES="BARE_METAL LINUX WINDOWS" - -# -# Common kernel options -# -CT_SHARED_LIBS=y - -# -# Binary utilities -# -CT_ARCH_BINFMT_ELF=y -CT_BINUTILS_BINUTILS=y -CT_BINUTILS="binutils" -CT_BINUTILS_CHOICE_KSYM="BINUTILS" -CT_BINUTILS_BINUTILS_SHOW=y - -# -# Options for binutils -# -CT_BINUTILS_BINUTILS_PKG_KSYM="BINUTILS" -CT_BINUTILS_DIR_NAME="binutils" -CT_BINUTILS_USE_GNU=y -CT_BINUTILS_USE="BINUTILS" -CT_BINUTILS_PKG_NAME="binutils" -CT_BINUTILS_SRC_RELEASE=y -CT_BINUTILS_PATCH_ORDER="global" CT_BINUTILS_V_2_32=y -# CT_BINUTILS_V_2_31 is not set -# CT_BINUTILS_V_2_30 is not set -# CT_BINUTILS_V_2_29 is not set -# CT_BINUTILS_V_2_28 is not set -# CT_BINUTILS_V_2_27 is not set -# CT_BINUTILS_V_2_26 is not set -# CT_BINUTILS_NO_VERSIONS is not set -CT_BINUTILS_VERSION="2.32" -CT_BINUTILS_MIRRORS="$(CT_Mirrors GNU binutils) $(CT_Mirrors sourceware binutils/releases)" -CT_BINUTILS_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_BINUTILS_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_BINUTILS_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" -CT_BINUTILS_SIGNATURE_FORMAT="packed/.sig" -CT_BINUTILS_later_than_2_30=y -CT_BINUTILS_2_30_or_later=y -CT_BINUTILS_later_than_2_27=y -CT_BINUTILS_2_27_or_later=y -CT_BINUTILS_later_than_2_25=y -CT_BINUTILS_2_25_or_later=y -CT_BINUTILS_later_than_2_23=y -CT_BINUTILS_2_23_or_later=y - -# -# GNU binutils -# -CT_BINUTILS_HAS_HASH_STYLE=y -CT_BINUTILS_HAS_GOLD=y -CT_BINUTILS_HAS_PLUGINS=y -CT_BINUTILS_HAS_PKGVERSION_BUGURL=y -CT_BINUTILS_GOLD_SUPPORTS_ARCH=y -CT_BINUTILS_GOLD_SUPPORT=y -CT_BINUTILS_FORCE_LD_BFD_DEFAULT=y -CT_BINUTILS_LINKER_LD=y -# CT_BINUTILS_LINKER_LD_GOLD is not set -CT_BINUTILS_LINKERS_LIST="ld" -CT_BINUTILS_LINKER_DEFAULT="bfd" -# CT_BINUTILS_PLUGINS is not set -CT_BINUTILS_RELRO=m -CT_BINUTILS_EXTRA_CONFIG_ARRAY="" -# CT_BINUTILS_FOR_TARGET is not set -CT_ALL_BINUTILS_CHOICES="BINUTILS" - -# -# C-library -# -CT_LIBC_GLIBC=y -# CT_LIBC_UCLIBC is not set -CT_LIBC="glibc" -CT_LIBC_CHOICE_KSYM="GLIBC" -CT_THREADS="nptl" -CT_LIBC_GLIBC_SHOW=y - -# -# Options for glibc -# -CT_LIBC_GLIBC_PKG_KSYM="GLIBC" -CT_GLIBC_DIR_NAME="glibc" -CT_GLIBC_USE_GNU=y -CT_GLIBC_USE="GLIBC" -CT_GLIBC_PKG_NAME="glibc" -CT_GLIBC_SRC_RELEASE=y -CT_GLIBC_PATCH_ORDER="global" -CT_GLIBC_V_2_17=y +CT_GLIBC_V_2_31=y # CT_GLIBC_NO_VERSIONS is not set -CT_GLIBC_VERSION="2.17" - -CT_GLIBC_MIRRORS="$(CT_Mirrors GNU glibc)" -CT_GLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" -CT_GLIBC_SIGNATURE_FORMAT="packed/.sig" -CT_GLIBC_2_29_or_older=y -CT_GLIBC_older_than_2_29=y -CT_GLIBC_2_27_or_older=y 
-CT_GLIBC_older_than_2_27=y -CT_GLIBC_2_26_or_older=y -CT_GLIBC_older_than_2_26=y -CT_GLIBC_2_25_or_older=y -CT_GLIBC_older_than_2_25=y -CT_GLIBC_2_24_or_older=y -CT_GLIBC_older_than_2_24=y -CT_GLIBC_2_23_or_older=y -CT_GLIBC_older_than_2_23=y -CT_GLIBC_2_20_or_older=y -CT_GLIBC_older_than_2_20=y +CT_GLIBC_VERSION="2.31" CT_GLIBC_2_17_or_later=y -CT_GLIBC_2_17_or_older=y CT_GLIBC_later_than_2_14=y CT_GLIBC_2_14_or_later=y - -CT_GLIBC_DEP_KERNEL_HEADERS_VERSION=y -CT_GLIBC_DEP_BINUTILS=y -CT_GLIBC_DEP_GCC=y -CT_GLIBC_DEP_PYTHON=y -CT_GLIBC_HAS_NPTL_ADDON=y -CT_GLIBC_HAS_PORTS_ADDON=y -CT_GLIBC_HAS_LIBIDN_ADDON=y -CT_GLIBC_USE_PORTS_ADDON=y -CT_GLIBC_USE_NPTL_ADDON=y -# CT_GLIBC_USE_LIBIDN_ADDON is not set -CT_GLIBC_HAS_OBSOLETE_RPC=y -CT_GLIBC_EXTRA_CONFIG_ARRAY="" -CT_GLIBC_CONFIGPARMS="" -CT_GLIBC_EXTRA_CFLAGS="" -CT_GLIBC_ENABLE_OBSOLETE_RPC=y -# CT_GLIBC_DISABLE_VERSIONING is not set -CT_GLIBC_OLDEST_ABI="" -CT_GLIBC_FORCE_UNWIND=y -# CT_GLIBC_LOCALES is not set -# CT_GLIBC_KERNEL_VERSION_NONE is not set -CT_GLIBC_KERNEL_VERSION_AS_HEADERS=y -# CT_GLIBC_KERNEL_VERSION_CHOSEN is not set -CT_GLIBC_MIN_KERNEL="3.2.101" -CT_ALL_LIBC_CHOICES="AVR_LIBC BIONIC GLIBC MINGW_W64 MOXIEBOX MUSL NEWLIB NONE UCLIBC" -CT_LIBC_SUPPORT_THREADS_ANY=y -CT_LIBC_SUPPORT_THREADS_NATIVE=y - -# -# Common C library options -# -CT_THREADS_NATIVE=y -# CT_CREATE_LDSO_CONF is not set -CT_LIBC_XLDD=y - -# -# C compiler -# -CT_CC_CORE_PASSES_NEEDED=y -CT_CC_CORE_PASS_1_NEEDED=y -CT_CC_CORE_PASS_2_NEEDED=y -CT_CC_SUPPORT_CXX=y -CT_CC_SUPPORT_FORTRAN=y -CT_CC_SUPPORT_ADA=y -CT_CC_SUPPORT_OBJC=y -CT_CC_SUPPORT_OBJCXX=y -CT_CC_SUPPORT_GOLANG=y -CT_CC_GCC=y -CT_CC="gcc" -CT_CC_CHOICE_KSYM="GCC" -CT_CC_GCC_SHOW=y - -# -# Options for gcc -# -CT_CC_GCC_PKG_KSYM="GCC" -CT_GCC_DIR_NAME="gcc" -CT_GCC_USE_GNU=y -CT_GCC_USE="GCC" -CT_GCC_PKG_NAME="gcc" -CT_GCC_SRC_RELEASE=y -CT_GCC_PATCH_ORDER="global" CT_GCC_V_8=y # CT_GCC_NO_VERSIONS is not set -CT_GCC_VERSION="8.3.0" - -CT_GCC_MIRRORS="$(CT_Mirrors GNU gcc/gcc-${CT_GCC_VERSION}) $(CT_Mirrors sourceware gcc/releases/gcc-${CT_GCC_VERSION})" -CT_GCC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GCC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GCC_ARCHIVE_FORMATS=".tar.xz .tar.gz" -CT_GCC_SIGNATURE_FORMAT="" +CT_GCC_VERSION="8.5.0" CT_GCC_later_than_7=y CT_GCC_7_or_later=y CT_GCC_later_than_6=y @@ -475,250 +35,10 @@ CT_GCC_later_than_4_9=y CT_GCC_4_9_or_later=y CT_GCC_later_than_4_8=y CT_GCC_4_8_or_later=y - -CT_CC_GCC_HAS_LIBMPX=y -CT_CC_GCC_ENABLE_CXX_FLAGS="" -CT_CC_GCC_CORE_EXTRA_CONFIG_ARRAY="" -CT_CC_GCC_EXTRA_CONFIG_ARRAY="" -CT_CC_GCC_STATIC_LIBSTDCXX=y -# CT_CC_GCC_SYSTEM_ZLIB is not set -CT_CC_GCC_CONFIG_TLS=m - -# -# Optimisation features -# -CT_CC_GCC_USE_GRAPHITE=y -CT_CC_GCC_USE_LTO=y - -# -# Settings for libraries running on target -# -CT_CC_GCC_ENABLE_TARGET_OPTSPACE=y -# CT_CC_GCC_LIBMUDFLAP is not set -# CT_CC_GCC_LIBGOMP is not set -# CT_CC_GCC_LIBSSP is not set -# CT_CC_GCC_LIBQUADMATH is not set -# CT_CC_GCC_LIBSANITIZER is not set - -# -# Misc. obscure options. 
-# -CT_CC_CXA_ATEXIT=y -# CT_CC_GCC_DISABLE_PCH is not set -CT_CC_GCC_SJLJ_EXCEPTIONS=m -CT_CC_GCC_LDBL_128=m -# CT_CC_GCC_BUILD_ID is not set -CT_CC_GCC_LNK_HASH_STYLE_DEFAULT=y -# CT_CC_GCC_LNK_HASH_STYLE_SYSV is not set -# CT_CC_GCC_LNK_HASH_STYLE_GNU is not set -# CT_CC_GCC_LNK_HASH_STYLE_BOTH is not set -CT_CC_GCC_LNK_HASH_STYLE="" -CT_CC_GCC_DEC_FLOAT_AUTO=y -# CT_CC_GCC_DEC_FLOAT_BID is not set -# CT_CC_GCC_DEC_FLOAT_DPD is not set -# CT_CC_GCC_DEC_FLOATS_NO is not set -CT_ALL_CC_CHOICES="GCC" - -# -# Additional supported languages: -# CT_CC_LANG_CXX=y -# CT_CC_LANG_FORTRAN is not set - -# -# Debug facilities -# -# CT_DEBUG_DUMA is not set -# CT_DEBUG_GDB is not set -# CT_DEBUG_LTRACE is not set -# CT_DEBUG_STRACE is not set -CT_ALL_DEBUG_CHOICES="DUMA GDB LTRACE STRACE" - -# -# Companion libraries -# -# CT_COMPLIBS_CHECK is not set -# CT_COMP_LIBS_CLOOG is not set -# CT_COMP_LIBS_EXPAT is not set -CT_COMP_LIBS_GETTEXT=y -CT_COMP_LIBS_GETTEXT_PKG_KSYM="GETTEXT" -CT_GETTEXT_DIR_NAME="gettext" -CT_GETTEXT_PKG_NAME="gettext" -CT_GETTEXT_SRC_RELEASE=y -CT_GETTEXT_PATCH_ORDER="global" CT_GETTEXT_V_0_19_8_1=y -# CT_GETTEXT_NO_VERSIONS is not set -CT_GETTEXT_VERSION="0.19.8.1" -CT_GETTEXT_MIRRORS="$(CT_Mirrors GNU gettext)" -CT_GETTEXT_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GETTEXT_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GETTEXT_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.gz" -CT_GETTEXT_SIGNATURE_FORMAT="packed/.sig" -CT_COMP_LIBS_GMP=y -CT_COMP_LIBS_GMP_PKG_KSYM="GMP" -CT_GMP_DIR_NAME="gmp" -CT_GMP_PKG_NAME="gmp" -CT_GMP_SRC_RELEASE=y -CT_GMP_PATCH_ORDER="global" CT_GMP_V_6_1=y -# CT_GMP_NO_VERSIONS is not set -CT_GMP_VERSION="6.1.2" -CT_GMP_MIRRORS="https://gmplib.org/download/gmp https://gmplib.org/download/gmp/archive $(CT_Mirrors GNU gmp)" -CT_GMP_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_GMP_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_GMP_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2" -CT_GMP_SIGNATURE_FORMAT="packed/.sig" -CT_GMP_later_than_5_1_0=y -CT_GMP_5_1_0_or_later=y -CT_GMP_later_than_5_0_0=y -CT_GMP_5_0_0_or_later=y -CT_GMP_REQUIRE_5_0_0_or_later=y -CT_COMP_LIBS_ISL=y -CT_COMP_LIBS_ISL_PKG_KSYM="ISL" -CT_ISL_DIR_NAME="isl" -CT_ISL_PKG_NAME="isl" -CT_ISL_SRC_RELEASE=y -CT_ISL_PATCH_ORDER="global" CT_ISL_V_0_20=y -# CT_ISL_V_0_19 is not set -# CT_ISL_V_0_18 is not set -# CT_ISL_V_0_17 is not set -# CT_ISL_V_0_16 is not set -# CT_ISL_V_0_15 is not set -# CT_ISL_NO_VERSIONS is not set -CT_ISL_VERSION="0.20" -CT_ISL_MIRRORS="https://ci-mirrors.rust-lang.org/rustc" -CT_ISL_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_ISL_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_ISL_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz" -CT_ISL_SIGNATURE_FORMAT="" -CT_ISL_later_than_0_18=y -CT_ISL_0_18_or_later=y -CT_ISL_later_than_0_15=y -CT_ISL_0_15_or_later=y -CT_ISL_REQUIRE_0_15_or_later=y -CT_ISL_later_than_0_14=y -CT_ISL_0_14_or_later=y -CT_ISL_REQUIRE_0_14_or_later=y -CT_ISL_later_than_0_13=y -CT_ISL_0_13_or_later=y -CT_ISL_later_than_0_12=y -CT_ISL_0_12_or_later=y -CT_ISL_REQUIRE_0_12_or_later=y -# CT_COMP_LIBS_LIBELF is not set -CT_COMP_LIBS_LIBICONV=y -CT_COMP_LIBS_LIBICONV_PKG_KSYM="LIBICONV" -CT_LIBICONV_DIR_NAME="libiconv" -CT_LIBICONV_PKG_NAME="libiconv" -CT_LIBICONV_SRC_RELEASE=y -CT_LIBICONV_PATCH_ORDER="global" CT_LIBICONV_V_1_15=y -# CT_LIBICONV_NO_VERSIONS is not set -CT_LIBICONV_VERSION="1.15" -CT_LIBICONV_MIRRORS="$(CT_Mirrors GNU libiconv)" -CT_LIBICONV_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_LIBICONV_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" 
-CT_LIBICONV_ARCHIVE_FORMATS=".tar.gz" -CT_LIBICONV_SIGNATURE_FORMAT="packed/.sig" -CT_COMP_LIBS_MPC=y -CT_COMP_LIBS_MPC_PKG_KSYM="MPC" -CT_MPC_DIR_NAME="mpc" -CT_MPC_PKG_NAME="mpc" -CT_MPC_SRC_RELEASE=y -CT_MPC_PATCH_ORDER="global" -CT_MPC_V_1_1=y -# CT_MPC_V_1_0 is not set -# CT_MPC_NO_VERSIONS is not set -CT_MPC_VERSION="1.1.0" -CT_MPC_MIRRORS="http://www.multiprecision.org/downloads $(CT_Mirrors GNU mpc)" -CT_MPC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_MPC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_MPC_ARCHIVE_FORMATS=".tar.gz" -CT_MPC_SIGNATURE_FORMAT="packed/.sig" -CT_MPC_1_1_0_or_later=y -CT_MPC_1_1_0_or_older=y -CT_COMP_LIBS_MPFR=y -CT_COMP_LIBS_MPFR_PKG_KSYM="MPFR" -CT_MPFR_DIR_NAME="mpfr" -CT_MPFR_PKG_NAME="mpfr" -CT_MPFR_SRC_RELEASE=y -CT_MPFR_PATCH_ORDER="global" -CT_MPFR_V_4_0=y -# CT_MPFR_V_3_1 is not set -# CT_MPFR_NO_VERSIONS is not set -CT_MPFR_VERSION="4.0.2" -CT_MPFR_MIRRORS="http://www.mpfr.org/mpfr-${CT_MPFR_VERSION} $(CT_Mirrors GNU mpfr)" -CT_MPFR_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_MPFR_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_MPFR_ARCHIVE_FORMATS=".tar.xz .tar.bz2 .tar.gz .zip" -CT_MPFR_SIGNATURE_FORMAT="packed/.asc" -CT_MPFR_later_than_4_0_0=y -CT_MPFR_4_0_0_or_later=y -CT_MPFR_later_than_3_0_0=y -CT_MPFR_3_0_0_or_later=y -CT_MPFR_REQUIRE_3_0_0_or_later=y -CT_COMP_LIBS_NCURSES=y -CT_COMP_LIBS_NCURSES_PKG_KSYM="NCURSES" -CT_NCURSES_DIR_NAME="ncurses" -CT_NCURSES_PKG_NAME="ncurses" -CT_NCURSES_SRC_RELEASE=y -CT_NCURSES_PATCH_ORDER="global" CT_NCURSES_V_6_1=y -# CT_NCURSES_V_6_0 is not set -# CT_NCURSES_NO_VERSIONS is not set -CT_NCURSES_VERSION="6.1" -CT_NCURSES_MIRRORS="ftp://invisible-island.net/ncurses $(CT_Mirrors GNU ncurses)" -CT_NCURSES_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_NCURSES_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_NCURSES_ARCHIVE_FORMATS=".tar.gz" -CT_NCURSES_SIGNATURE_FORMAT="packed/.sig" -CT_NCURSES_HOST_CONFIG_ARGS="" -CT_NCURSES_HOST_DISABLE_DB=y -CT_NCURSES_HOST_FALLBACKS="linux,xterm,xterm-color,xterm-256color,vt100" -CT_NCURSES_TARGET_CONFIG_ARGS="" -# CT_NCURSES_TARGET_DISABLE_DB is not set -CT_NCURSES_TARGET_FALLBACKS="" -CT_COMP_LIBS_ZLIB=y -CT_COMP_LIBS_ZLIB_PKG_KSYM="ZLIB" -CT_ZLIB_DIR_NAME="zlib" -CT_ZLIB_PKG_NAME="zlib" -CT_ZLIB_SRC_RELEASE=y -CT_ZLIB_PATCH_ORDER="global" -CT_ZLIB_V_1_2_11=y -# CT_ZLIB_NO_VERSIONS is not set -CT_ZLIB_VERSION="1.2.11" -CT_ZLIB_MIRRORS="https://downloads.sourceforge.net/project/libpng/zlib/${CT_ZLIB_VERSION}" -CT_ZLIB_ARCHIVE_FILENAME="@{pkg_name}-@{version}" -CT_ZLIB_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" -CT_ZLIB_ARCHIVE_FORMATS=".tar.xz .tar.gz" -CT_ZLIB_SIGNATURE_FORMAT="packed/.asc" -CT_ALL_COMP_LIBS_CHOICES="CLOOG EXPAT GETTEXT GMP ISL LIBELF LIBICONV MPC MPFR NCURSES ZLIB" -CT_LIBICONV_NEEDED=y -CT_GETTEXT_NEEDED=y -CT_GMP_NEEDED=y -CT_MPFR_NEEDED=y -CT_ISL_NEEDED=y -CT_MPC_NEEDED=y -CT_NCURSES_NEEDED=y -CT_ZLIB_NEEDED=y -CT_LIBICONV=y -CT_GETTEXT=y -CT_GMP=y -CT_MPFR=y -CT_ISL=y -CT_MPC=y -CT_NCURSES=y -CT_ZLIB=y -# -# Companion tools -# -# CT_COMP_TOOLS_FOR_HOST is not set -# CT_COMP_TOOLS_AUTOCONF is not set -# CT_COMP_TOOLS_AUTOMAKE is not set -# CT_COMP_TOOLS_BISON is not set -# CT_COMP_TOOLS_DTC is not set -# CT_COMP_TOOLS_LIBTOOL is not set -# CT_COMP_TOOLS_M4 is not set -# CT_COMP_TOOLS_MAKE is not set -CT_ALL_COMP_TOOLS_CHOICES="AUTOCONF AUTOMAKE BISON DTC LIBTOOL M4 MAKE" diff --git a/docker/crosstool-config/loongarch64-unknown-linux-gnu.config b/docker/crosstool-config/loongarch64-unknown-linux-gnu.config new file mode 100644 index 000000000..f45344d87 
--- /dev/null +++ b/docker/crosstool-config/loongarch64-unknown-linux-gnu.config @@ -0,0 +1,39 @@ +CT_CONFIG_VERSION="4" +CT_EXPERIMENTAL=y +CT_PREFIX_DIR="/x-tools/${CT_TARGET}" +CT_ARCH_LOONGARCH=y +# CT_DEMULTILIB is not set +CT_ARCH_USE_MMU=y +CT_ARCH_ARCH="loongarch64" +CT_KERNEL_LINUX=y +CT_LINUX_V_5_19=y +# CT_LINUX_NO_VERSIONS is not set +CT_LINUX_VERSION="5.19.16" +CT_LINUX_later_than_4_8=y +CT_LINUX_4_8_or_later=y +CT_LINUX_later_than_3_7=y +CT_LINUX_3_7_or_later=y +CT_LINUX_later_than_3_2=y +CT_LINUX_3_2_or_later=y +CT_GLIBC_V_2_36=y +# CT_GLIBC_NO_VERSIONS is not set +CT_GLIBC_VERSION="2.36" +CT_GLIBC_2_17_or_later=y +CT_GLIBC_later_than_2_14=y +CT_GLIBC_2_14_or_later=y +CT_GCC_V_13=y +# CT_GCC_NO_VERSIONS is not set +CT_GCC_VERSION="13.2.0" +CT_GCC_later_than_7=y +CT_GCC_7_or_later=y +CT_GCC_later_than_6=y +CT_GCC_6_or_later=y +CT_GCC_later_than_5=y +CT_GCC_5_or_later=y +CT_GCC_later_than_4_9=y +CT_GCC_4_9_or_later=y +CT_GCC_later_than_4_8=y +CT_GCC_4_8_or_later=y +CT_CC_GCC_ENABLE_DEFAULT_PIE=y +CT_CC_LANG_CXX=y + diff --git a/docker/crosstool-ng.sh b/docker/crosstool-ng.sh index 6c76bd01c..fd5a5c8b8 100755 --- a/docker/crosstool-ng.sh +++ b/docker/crosstool-ng.sh @@ -17,8 +17,8 @@ silence_stdout() { main() { local config="${1}" local nproc="${2}" - local ctng_version=1.24.0 - local ctng_url="https://ci-mirrors.rust-lang.org/rustc/crosstool-ng-${ctng_version}.tar.gz" + local ctng_version=1.26.0 + local ctng_url="https://github.com/crosstool-ng/crosstool-ng/archive/crosstool-ng-${ctng_version}.tar.gz" local username=crosstool local crosstooldir=/opt/crosstool local buildir @@ -39,6 +39,7 @@ main() { python3 \ python3-dev \ python3-pip \ + rsync \ texinfo \ wget \ unzip \ @@ -70,6 +71,7 @@ main() { pushd "${buildir}" cp /"${config}" .config chown "${username}":"${username}" .config + su "${username}" -c "${crosstooldir}/bin/ct-ng olddefconfig" # the download steps can stall indefinitely, so we want to set a timeout to # ensure it always completes. 
we therefore attempt to download until diff --git a/docker/deny-debian-packages.sh b/docker/deny-debian-packages.sh new file mode 100755 index 000000000..6aa0f5343 --- /dev/null +++ b/docker/deny-debian-packages.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -x +set -euo pipefail + +deny_package() { + local package="${1}" + local filename="${2}" + echo "Package: ${package}:${TARGET_ARCH} +Pin: release * +Pin-Priority: -1" > "/etc/apt/preferences.d/${filename}" +} + +main() { + if [[ $# -eq 0 ]]; then + deny_package '*' "all-packages" + else + local package + for package in "${@}"; do + deny_package "${package}" "${package}" + echo "${package}" + done + fi + + rm "${0}" +} + +main "${@}" diff --git a/docker/dragonfly.sh b/docker/dragonfly.sh index 167aff4ad..b61ac9e47 100755 --- a/docker/dragonfly.sh +++ b/docker/dragonfly.sh @@ -9,15 +9,14 @@ set -euo pipefail main() { local nproc= local binutils=2.32 \ - dragonfly=6.0.1_REL \ - gcc=5.3.0 \ - target=x86_64-unknown-dragonfly \ - url="https://mirror-master.dragonflybsd.org/iso-images" + dragonfly=6.0.1_REL \ + gcc=10.3.0 \ + target=x86_64-unknown-dragonfly if [[ $# != "0" ]]; then nproc="${1}" fi - install_packages bsdtar \ + install_packages libarchive-tools \ bzip2 \ ca-certificates \ curl \ @@ -33,60 +32,28 @@ main() { pushd "${td}" mkdir "${td}"/{binutils,gcc}{,-build} "${td}/dragonfly" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/binutils/binutils-${binutils}.tar.bz2" -O + download_binutils "${binutils}" "bz2" tar -C "${td}/binutils" --strip-components=1 -xjf "binutils-${binutils}.tar.bz2" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/gcc/gcc-${gcc}/gcc-${gcc}.tar.gz" -O + download_gcc "${gcc}" "gz" tar -C "${td}/gcc" --strip-components=1 -xf "gcc-${gcc}.tar.gz" cd gcc sed -i -e 's/ftp:/https:/g' ./contrib/download_prerequisites ./contrib/download_prerequisites - patch -p0 <<'EOF' ---- libatomic/configure.tgt.orig 2015-07-09 16:08:55 UTC -+++ libatomic/configure.tgt -@@ -110,7 +110,7 @@ case "${target}" in - ;; - - *-*-linux* | *-*-gnu* | *-*-k*bsd*-gnu \ -- | *-*-netbsd* | *-*-freebsd* | *-*-openbsd* \ -+ | *-*-netbsd* | *-*-freebsd* | *-*-openbsd* | *-*-dragonfly* \ - | *-*-solaris2* | *-*-sysv4* | *-*-irix6* | *-*-osf* | *-*-hpux11* \ - | *-*-darwin* | *-*-aix* | *-*-cygwin*) - # POSIX system. The OS is supported. -EOF - - patch -p0 <<'EOF' ---- libstdc++-v3/config/os/bsd/dragonfly/os_defines.h.orig 2015-07-09 16:08:54 UTC -+++ libstdc++-v3/config/os/bsd/dragonfly/os_defines.h -@@ -29,4 +29,9 @@ - // System-specific #define, typedefs, corrections, etc, go here. This - // file will come before all others. - -+#define _GLIBCXX_USE_C99_CHECK 1 -+#define _GLIBCXX_USE_C99_DYNAMIC (!(__ISO_C_VISIBLE >= 1999)) -+#define _GLIBCXX_USE_C99_LONG_LONG_CHECK 1 -+#define _GLIBCXX_USE_C99_LONG_LONG_DYNAMIC (_GLIBCXX_USE_C99_DYNAMIC || !defined __LONG_LONG_SUPPORTED) -+ - #endif -EOF - - patch -p0 <<'EOF' ---- libstdc++-v3/configure.orig 2016-05-26 18:34:47.163132921 +0200 -+++ libstdc++-v3/configure 2016-05-26 18:35:29.594590648 +0200 -@@ -52013,7 +52013,7 @@ - - ;; - -- *-freebsd*) -+ *-freebsd* | *-dragonfly*) - SECTION_FLAGS='-ffunction-sections -fdata-sections' - - + patch libstdc++-v3/configure <<'EOF' +47159c47159 +< *-freebsd*) +--- +> *-freebsd* | *-dragonfly*) EOF cd .. 
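# (the libstdc++ hunk above is in "normal" diff format rather than a
# unified diff: `47159c47159` means "replace line 47159", with `<` giving
# the old text and `>` the replacement. a minimal example of the format:
#
#   3c3
#   < old line
#   ---
#   > new line
#
# plain `patch FILE` applied to an explicit file accepts this without any
# `-p` strip level.)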
- curl --retry 3 -sSfL "${url}/dfly-x86_64-${dragonfly}.iso.bz2" -O + local mirrors=( + "https://mirror-master.dragonflybsd.org/iso-images" + "https://avalon.dragonflybsd.org/iso-images/" + ) + download_mirrors "" "dfly-x86_64-${dragonfly}.iso.bz2" "${mirrors[@]}" bzcat "dfly-x86_64-${dragonfly}.iso.bz2" | bsdtar xf - -C "${td}/dragonfly" ./usr/include ./usr/lib ./lib cd binutils-build @@ -105,7 +72,7 @@ EOF cp "${td}/dragonfly/usr/lib/libexecinfo.so.1" "${destdir}/lib" cp "${td}/dragonfly/usr/lib/libpthread.so" "${destdir}/lib/libpthread.so" cp "${td}/dragonfly/usr/lib/librt.so.0" "${destdir}/lib" - cp "${td}"/dragonfly/usr/lib/lib{c,m,util}.a "${destdir}/lib" + cp "${td}"/dragonfly/usr/lib/lib{c,m,util,kvm}.a "${destdir}/lib" cp "${td}/dragonfly/usr/lib/thread/libthread_xu.so.2" "${destdir}/lib/libpthread.so.0" cp "${td}"/dragonfly/usr/lib/{crt1,Scrt1,crti,crtn}.o "${destdir}/lib/" @@ -128,7 +95,7 @@ EOF --disable-lto \ --disable-multilib \ --disable-nls \ - --enable-languages=c,c++ \ + --enable-languages=c,c++,fortran \ --target="${target}" make "-j${nproc}" make install diff --git a/docker/dropbear.sh b/docker/dropbear.sh index 097814e86..5351cc8f4 100755 --- a/docker/dropbear.sh +++ b/docker/dropbear.sh @@ -8,6 +8,10 @@ set -euo pipefail main() { local version=2022.82 + local mirrors=( + "https://matt.ucc.asn.au/dropbear/releases" + "https://mirror.dropbear.nl/mirror" + ) install_packages \ autoconf \ @@ -24,7 +28,7 @@ main() { pushd "${td}" - curl --retry 3 -sSfL "https://matt.ucc.asn.au/dropbear/releases/dropbear-${version}.tar.bz2" -O + download_mirrors "" "dropbear-${version}.tar.bz2" "${mirrors[@]}" tar --strip-components=1 -xjf "dropbear-${version}.tar.bz2" # Remove some unwanted message @@ -32,15 +36,15 @@ main() { sed -i '/failed to identify current user/d' cli-runopts.c ./configure \ - --disable-syslog \ - --disable-shadow \ - --disable-lastlog \ - --disable-utmp \ - --disable-utmpx \ - --disable-wtmp \ - --disable-wtmpx \ - --disable-pututline \ - --disable-pututxline + --disable-syslog \ + --disable-shadow \ + --disable-lastlog \ + --disable-utmp \ + --disable-utmpx \ + --disable-wtmp \ + --disable-wtmpx \ + --disable-pututline \ + --disable-pututxline make "-j$(nproc)" PROGRAMS=dbclient cp dbclient /usr/local/bin/ diff --git a/docker/freebsd-common.sh b/docker/freebsd-common.sh index 514ff524e..a1e8665f6 100755 --- a/docker/freebsd-common.sh +++ b/docker/freebsd-common.sh @@ -3,14 +3,20 @@ set -x set -euo pipefail -export BSD_ARCH= +# shellcheck disable=SC1091 +. /freebsd-arch.sh + +export FREEBSD_ARCH= case "${ARCH}" in + aarch64) # releases are under http://ftp.freebsd.org/pub/FreeBSD/releases/ + FREEBSD_ARCH=arm64 # http://ftp.freebsd.org/pub/FreeBSD/releases/arm64/ + ;; x86_64) - BSD_ARCH=amd64 + FREEBSD_ARCH=amd64 ;; i686) - BSD_ARCH=i386 + FREEBSD_ARCH=i386 ;; esac -export BSD_HOME="ftp.freebsd.org/pub/FreeBSD/releases" -export BSD_MAJOR=12 + +export FREEBSD_MAJOR=13 diff --git a/docker/freebsd-extras.sh b/docker/freebsd-extras.sh index c3f4f2348..09efa6907 100755 --- a/docker/freebsd-extras.sh +++ b/docker/freebsd-extras.sh @@ -3,55 +3,31 @@ set -x set -euo pipefail -export ARCH="${1}" # shellcheck disable=SC1091 -. lib.sh +. /lib.sh # shellcheck disable=SC1091 -. 
freebsd-common.sh - -main() { - local pkg_source="https://pkg.freebsd.org/FreeBSD:${BSD_MAJOR}:${BSD_ARCH}/quarterly" - install_packages curl jq xz-utils - - local td - td="$(mktemp -d)" - - mkdir "${td}"/{openssl,sqlite,packagesite} - - pushd "${td}" - - curl --retry 3 -sSfL "${pkg_source}/packagesite.txz" -O - tar -C "${td}/packagesite" -xJf packagesite.txz - local openssl_ver - local sqlite_ver - openssl_ver=$(jq -c '. | select ( .name == "openssl" ) | .version' "${td}/packagesite/packagesite.yaml") - sqlite_ver=$(jq -c '. | select ( .name == "sqlite3" ) | .version' "${td}/packagesite/packagesite.yaml") - openssl_ver=${openssl_ver//'"'/} - sqlite_ver=${sqlite_ver//'"'/} - - local target="${ARCH}-unknown-freebsd${BSD_MAJOR}" - - # Adding openssl lib - curl --retry 3 -sSfL "${pkg_source}/All/openssl-${openssl_ver}.txz" -O - tar -C "${td}/openssl" -xJf "openssl-${openssl_ver}.txz" /usr/local/lib /usr/local/include/ - - # Adding sqlite3 - curl --retry 3 -sSfL "${pkg_source}/All/sqlite3-${sqlite_ver}.txz" -O - tar -C "${td}/sqlite" -xJf "sqlite3-${sqlite_ver}.txz" /usr/local/lib +. /freebsd-common.sh +# shellcheck disable=SC1091 +. /freebsd-install.sh - # Copy the linked library - local destdir="/usr/local/${target}" - cp -r "${td}/openssl/usr/local/include" "${destdir}" - cp "${td}/openssl/usr/local/lib"/lib{crypto,ssl}.a "${destdir}/lib" - cp "${td}/openssl/usr/local/lib"/lib{crypto,ssl}.so* "${destdir}/lib" - cp "${td}/sqlite/usr/local/lib"/libsqlite3.so* "${destdir}/lib" +case "${FREEBSD_ARCH}" in + arm64) # extras mirrors are under https://pkg.freebsd.org/ + FREEBSD_ARCH=aarch64 # https://pkg.freebsd.org/FreeBSD:13:aarch64/ + ;; +esac - purge_packages +main() { + apt-get update && apt-get install --assume-yes --no-install-recommends \ + curl \ + dnsutils \ + jq \ + xz-utils - # clean up - popd + local url= + url=$(fetch_best_freebsd_mirror) + FREEBSD_MIRROR="${url}" setup_freebsd_packagesite + FREEBSD_MIRROR="${url}" install_freebsd_package openssl sqlite3 - rm -rf "${td}" rm "${0}" } diff --git a/docker/freebsd-fetch-best-mirror.sh b/docker/freebsd-fetch-best-mirror.sh new file mode 100755 index 000000000..41f263db6 --- /dev/null +++ b/docker/freebsd-fetch-best-mirror.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +# shellcheck disable=SC1091 +. /freebsd-install.sh +fetch_best_freebsd_mirror "$@" diff --git a/docker/freebsd-gcc.sh b/docker/freebsd-gcc.sh new file mode 100755 index 000000000..751d0ef07 --- /dev/null +++ b/docker/freebsd-gcc.sh @@ -0,0 +1,17 @@ +#!/bin/bash + +# the freebsd images need libstdc++ to be linked as well +# otherwise, we get `undefined reference to `std::ios_base::Init::Init()'` + +set -x +set -euo pipefail + +main() { + if [[ $# -eq 0 ]]; then + exec "${CROSS_TOOLCHAIN_PREFIX}gcc" "${@}" + else + exec "${CROSS_TOOLCHAIN_PREFIX}gcc" "${@}" -lc++ -lstdc++ + fi +} + +main "${@}" diff --git a/docker/freebsd-install-package.sh b/docker/freebsd-install-package.sh new file mode 100755 index 000000000..c9afe10e9 --- /dev/null +++ b/docker/freebsd-install-package.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +# shellcheck disable=SC1091 +. /freebsd-install.sh +install_freebsd_package "$@" diff --git a/docker/freebsd-install.sh b/docker/freebsd-install.sh new file mode 100755 index 000000000..11f409e94 --- /dev/null +++ b/docker/freebsd-install.sh @@ -0,0 +1,169 @@ +#!/usr/bin/env bash + +set -x +set -euo pipefail + +# shellcheck disable=SC1091 +. 
/freebsd-common.sh + +# list of SRV records to query if the default mirror fails +FREEBSD_HTTP_TCP_SOURCES=( + # these return all mirrors, including local ones + "_http._tcp.pkg.all.freebsd.org" + # this only returns geodns mirrors + "_http._tcp.pkg.freebsd.org" +) +FREEBSD_PACKAGEDIR="/opt/freebsd-packagesite" +FREEBSD_PACKAGESITE="${FREEBSD_PACKAGEDIR}/packagesite.yaml" +FREEBSD_TARGET="${ARCH}-unknown-freebsd${FREEBSD_MAJOR}" +FREEBSD_DEFAULT_MIRROR="pkg.freebsd.org" +# NOTE: these mirrors were known to work as of 2022-11-28. +# no availability guarantees are made for any of them. +FREEBSD_BACKUP_MIRRORS=( + "pkg0.syd.freebsd.org" + "pkg0.bme.freebsd.org" + "pkg0.bra.freebsd.org" + "pkg0.fra.freebsd.org" + "pkg0.jinx.freebsd.org" + "pkg0.kul.freebsd.org" + "pkg0.kwc.freebsd.org" + "pkg0.nyi.freebsd.org" + "pkg0.tuk.freebsd.org" + "pkg0.twn.freebsd.org" +) + +# NOTE: out of convention, we use `url` for mirrors with the scheme, +# and `mirror` for those without the scheme for consistent naming. +freebsd_package_source() { + local url="${1}" + echo "${url}/FreeBSD:${FREEBSD_MAJOR}:${FREEBSD_ARCH}/quarterly" +} + +freebsd_mirror_works() { + local mirror="${1}" + local scheme="${2}" + local pkg_source= + + # meta.conf is a small file for quick confirmation the mirror works + pkg_source=$(freebsd_package_source "${scheme}://${mirror}") + local path="${pkg_source}/meta.conf" + + timeout 20s curl --retry 3 -sSfL "${path}" >/dev/null 2>&1 +} + +_fetch_best_freebsd_mirror() { + # in case if the default mirror is down, we can use various known + # fallbacks, or at worst, SRV fallbacks to find the ideal package + # site. no individual mirror other than the default mirror is + # guaranteed to exist, so we use a tiered approach. only + # the default mirror supports https. + if freebsd_mirror_works "${FREEBSD_DEFAULT_MIRROR}" "https"; then + echo "https://${FREEBSD_DEFAULT_MIRROR}" + return 0 + fi + + # if we've gotten here, it could be a DNS issue, so using a DNS + # resolver to fetch SRV fallbacks may not work. let's first try + # a few previously tested mirrors and see if any work. + local mirror= + for mirror in "${FREEBSD_BACKUP_MIRRORS[@]}"; do + if freebsd_mirror_works "${mirror}" "http"; then + echo "http://${mirror}" + return 0 + fi + done + + local http_tcp_source= + local response= + local lines= + # shellcheck disable=SC2016 + local regex='/\d+\s+\d+\s+\d+\s+(.*)\./; print $1' + for http_tcp_source in "${FREEBSD_HTTP_TCP_SOURCES[@]}"; do + # the output will have the following format, but we only want the + # target and ignore everything else: + # $priority $port $weight $target. + # + # some output may not match, so we skip those lines, for example: + # 96.47.72.71 + response=$(dig +short srv "${http_tcp_source}") + readarray -t lines <<< "${response}" + for line in "${lines[@]}"; do + mirror=$(echo "${line}" | perl -nle "${regex}") + if [[ -n "${mirror}" ]]; then + if freebsd_mirror_works "${mirror}" "http"; then + echo "http://${mirror}" + return 0 + fi + fi + done + done + + echo -e "\e[31merror:\e[0m could not find a working FreeBSD package mirror." 1>&2 + exit 1 +} + +fetch_best_freebsd_mirror() { + set +e + _fetch_best_freebsd_mirror + code=$? 
+ set -e + + return "${code}" +} + +setup_freebsd_packagesite() { + local url="${FREEBSD_MIRROR:-}" + local pkg_source= + + if [[ -z "${url}" ]]; then + url=$(fetch_best_freebsd_mirror) + fi + pkg_source=$(freebsd_package_source "${url}") + + mkdir -p "${FREEBSD_PACKAGEDIR}" + curl --retry 3 -sSfL "${pkg_source}/packagesite.txz" -O + tar -C "${FREEBSD_PACKAGEDIR}" -xJf packagesite.txz + + rm packagesite.txz +} + +# don't provide the mirror as a positional argument, so it can be optional +install_freebsd_package() { + local url="${FREEBSD_MIRROR:-}" + local pkg_source= + local name + local path + local pkg + local td + local destdir="/usr/local/${FREEBSD_TARGET}" + + if [[ -z "${url}" ]]; then + url=$(fetch_best_freebsd_mirror) + fi + pkg_source=$(freebsd_package_source "${url}") + + td="$(mktemp -d)" + pushd "${td}" + + for name in "${@}"; do + path=$(jq -c '. | select ( .name == "'"${name}"'" ) | .repopath' "${FREEBSD_PACKAGESITE}") + if [[ -z "${path}" ]]; then + echo "Unable to find package ${name}" >&2 + exit 1 + fi + path=${path//'"'/} + pkg=$(basename "${path}") + + mkdir "${td}"/package + curl --retry 3 -sSfL "${pkg_source}/${path}" -O + tar -C "${td}/package" -xJf "${pkg}" + cp -r "${td}/package/usr/local"/* "${destdir}"/ + + rm "${td:?}/${pkg}" + rm -rf "${td:?}/package" + done + + # clean up + popd + rm -rf "${td:?}" +} diff --git a/docker/freebsd-setup-packagesite.sh b/docker/freebsd-setup-packagesite.sh new file mode 100755 index 000000000..d67cce452 --- /dev/null +++ b/docker/freebsd-setup-packagesite.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +# shellcheck disable=SC1091 +. /freebsd-install.sh +setup_freebsd_packagesite "$@" diff --git a/docker/freebsd.sh b/docker/freebsd.sh index e351bd3dc..108d04f3b 100755 --- a/docker/freebsd.sh +++ b/docker/freebsd.sh @@ -3,11 +3,51 @@ set -x set -euo pipefail -export ARCH="${1}" # shellcheck disable=SC1091 -. freebsd-common.sh +. /freebsd-common.sh # shellcheck disable=SC1091 -. lib.sh +. /lib.sh + +# we prefer those closer in geography to the US. they're triaged in +# order of ease of use, reliability, and then geography. the mirror +# list is at https://docs.freebsd.org/en/books/handbook/mirrors/. +# these mirrors were known to work as of 2022-11-27. this does +# not include any mirrors that are known to be rate-limited or +# commercial. everything returns HTML output. +MIRRORS=( + # this is a guaranteed mirror, unlike those below. 
+ "http://ftp.freebsd.org/pub/FreeBSD/releases" + "http://ftp11.freebsd.org/pub/FreeBSD/releases" + "http://ftp3.br.freebsd.org/pub/FreeBSD/releases" + "http://ftp2.uk.freebsd.org/pub/FreeBSD/releases" + "http://ftp2.nl.freebsd.org/pub/FreeBSD/releases" + "http://ftp6.fr.freebsd.org/pub/FreeBSD/releases" + "http://ftp1.de.freebsd.org/pub/FreeBSD/releases" + "http://ftp2.de.freebsd.org/pub/FreeBSD/releases" + "http://ftp5.de.freebsd.org/pub/FreeBSD/releases" + "http://ftp2.ru.freebsd.org/pub/FreeBSD/releases" + "http://ftp2.gr.freebsd.org/pub/FreeBSD/releases" + "http://ftp4.za.freebsd.org/pub/FreeBSD/releases" + "http://ftp2.za.freebsd.org/pub/FreeBSD/releases" + "http://ftp4.tw.freebsd.org/pub/FreeBSD/releases" + "http://ftp3.jp.freebsd.org/pub/FreeBSD/releases" + "http://ftp6.jp.freebsd.org/pub/FreeBSD/releases" + "http://ftp.uk.freebsd.org/pub/FreeBSD/releases" + "http://ftp.nl.freebsd.org/pub/FreeBSD/releases" + "http://ftp.fr.freebsd.org/pub/FreeBSD/releases" + "http://ftp.at.freebsd.org/pub/FreeBSD/releases" + "http://ftp.dk.freebsd.org/FreeBSD/releases" + "http://ftp.cz.freebsd.org/pub/FreeBSD/releases" + "http://ftp.se.freebsd.org/pub/FreeBSD/releases" + "http://ftp.lv.freebsd.org/freebsd/releases" + "http://ftp.pl.freebsd.org/pub/FreeBSD/releases" + "http://ftp.ua.freebsd.org/pub/FreeBSD/releases" + "http://ftp.gr.freebsd.org/pub/FreeBSD/releases" + "http://ftp.ru.freebsd.org/pub/FreeBSD/releases" + "http://ftp.nz.freebsd.org/pub/FreeBSD/releases" + "http://ftp.kr.freebsd.org/pub/FreeBSD/releases" + "http://ftp.jp.freebsd.org/pub/FreeBSD/releases" +) max_freebsd() { local best= @@ -19,44 +59,100 @@ max_freebsd() { version=$(echo "${release}" | cut -d '-' -f 1) release_major=$(echo "${version}"| cut -d '.' -f 1) release_minor=$(echo "${version}"| cut -d '.' -f 2) - if [ "${release_major}" == "${BSD_MAJOR}" ] && [ "${release_minor}" -gt "${minor}" ]; then + if [ "${release_major}" == "${FREEBSD_MAJOR}" ] && [ "${release_minor}" -gt "${minor}" ]; then best="${release}" minor="${release_minor}" fi done if [[ -z "$best" ]]; then - echo "Could not find best release for FreeBSD ${BSD_MAJOR}." 1>&2 + echo -e "\e[31merror:\e[0m could not find best release for FreeBSD ${FREEBSD_MAJOR}." 1>&2 exit 1 fi echo "${best}" } latest_freebsd() { - local dirs - local releases - local max_release + local mirror="${1}" + local response= + local line= + local lines= + local releases= + local max_release= + + response=$(curl --retry 3 -sSfL "${mirror}/${FREEBSD_ARCH}/" | grep RELEASE) + if [[ "${response}" != *RELEASE* ]]; then + echo -e "\e[31merror:\e[0m could not find a candidate release for FreeBSD ${FREEBSD_MAJOR}." 1>&2 + exit 1 + fi + readarray -t lines <<< "${response}" + + # shellcheck disable=SC2016 + local regex='/\s*(\d+\.\d+-RELEASE)\s*\/?\s*<\/a>/; print $1' + # not all lines will match: some return `*-RELEASE/` as a line + if [[ "${response}" == *"/dev/null + code=$? + if [[ "${code}" == 0 ]]; then + echo "${mirror}" + return 0 + elif [[ "${code}" != 124 ]]; then + echo -e "\e[1;33mwarning:\e[0m mirror ${mirror} does not seem to work." 1>&2 + fi + done + + echo -e "\e[31merror:\e[0m could not find a working FreeBSD mirror." 1>&2 + exit 1 +} + +freebsd_mirror() { + set +e + _freebsd_mirror + code=$? 
+ set -e + + return "${code}" +} + +mirror=$(freebsd_mirror) +base_release=$(latest_freebsd "${mirror}") +bsd_url="${mirror}/${FREEBSD_ARCH}/${base_release}-RELEASE" main() { - local binutils=2.32 \ - gcc=6.4.0 \ - target="${ARCH}-unknown-freebsd${BSD_MAJOR}" + local binutils=2.40 \ + gcc=6.4.0 \ + target="${ARCH}-unknown-freebsd${FREEBSD_MAJOR}" install_packages ca-certificates \ curl \ g++ \ make \ wget \ + texinfo \ xz-utils local td @@ -65,10 +161,10 @@ main() { mkdir "${td}"/{binutils,gcc}{,-build} "${td}/freebsd" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/binutils/binutils-${binutils}.tar.gz" -O + download_binutils "${binutils}" "gz" tar -C "${td}/binutils" --strip-components=1 -xf "binutils-${binutils}.tar.gz" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/gcc/gcc-${gcc}/gcc-${gcc}.tar.gz" -O + download_gcc "${gcc}" "gz" tar -C "${td}/gcc" --strip-components=1 -xf "gcc-${gcc}.tar.gz" cd gcc @@ -76,7 +172,7 @@ main() { ./contrib/download_prerequisites cd .. - curl --retry 3 -sSfL "${bsd_http}/base.txz" -O + curl --retry 3 -sSfL "${bsd_url}/base.txz" -O tar -C "${td}/freebsd" -xJf base.txz ./usr/include ./usr/lib ./lib cd binutils-build @@ -88,29 +184,36 @@ main() { local destdir="/usr/local/${target}" cp -r "${td}/freebsd/usr/include" "${destdir}" - cp "${td}/freebsd/lib/libc.so.7" "${destdir}/lib" - cp "${td}/freebsd/lib/libm.so.5" "${destdir}/lib" - cp "${td}/freebsd/lib/libkvm.so.7" "${destdir}/lib" - cp "${td}/freebsd/lib/libthr.so.3" "${destdir}/lib" - cp "${td}/freebsd/lib/libutil.so.9" "${destdir}/lib" - cp "${td}/freebsd/lib/libdevstat.so.7" "${destdir}/lib" + cp -r "${td}/freebsd/lib/"* "${destdir}/lib" cp "${td}/freebsd/usr/lib/libc++.so.1" "${destdir}/lib" cp "${td}/freebsd/usr/lib/libc++.a" "${destdir}/lib" - cp "${td}/freebsd/usr/lib"/lib{c,util,m,ssp_nonshared}.a "${destdir}/lib" + cp "${td}/freebsd/usr/lib/libcxxrt.a" "${destdir}/lib" + cp "${td}/freebsd/usr/lib/libcompiler_rt.a" "${destdir}/lib" + cp "${td}/freebsd/usr/lib"/lib{c,util,m,ssp_nonshared,memstat}.a "${destdir}/lib" cp "${td}/freebsd/usr/lib"/lib{rt,execinfo,procstat}.so.1 "${destdir}/lib" + cp "${td}/freebsd/usr/lib"/libmemstat.so.3 "${destdir}/lib" cp "${td}/freebsd/usr/lib"/{crt1,Scrt1,crti,crtn}.o "${destdir}/lib" cp "${td}/freebsd/usr/lib"/libkvm.a "${destdir}/lib" - ln -s libc.so.7 "${destdir}/lib/libc.so" - ln -s libc++.so.1 "${destdir}/lib/libc++.so" - ln -s libexecinfo.so.1 "${destdir}/lib/libexecinfo.so" - ln -s libprocstat.so.1 "${destdir}/lib/libprocstat.so" - ln -s libm.so.5 "${destdir}/lib/libm.so" - ln -s librt.so.1 "${destdir}/lib/librt.so" - ln -s libutil.so.9 "${destdir}/lib/libutil.so" + local lib= + local base= + local link= + for lib in "${destdir}/lib/"*.so.*; do + base=$(basename "${lib}") + link="${base}" + # not strictly necessary since this will always work, but good fallback + while [[ "${link}" == *.so.* ]]; do + link="${link%.*}" + done + + # just extra insurance that we won't try to overwrite an existing file + local dstlink="${destdir}/lib/${link}" + if [[ -n "${link}" ]] && [[ "${link}" != "${base}" ]] && [[ ! 
-f "${dstlink}" ]]; then + ln -s "${base}" "${dstlink}" + fi + done + ln -s libthr.so.3 "${destdir}/lib/libpthread.so" - ln -s libdevstat.so.7 "${destdir}/lib/libdevstat.so" - ln -s libkvm.so.7 "${destdir}/lib/libkvm.so" cd gcc-build ../gcc/configure \ @@ -125,7 +228,7 @@ main() { --disable-libvtv \ --disable-lto \ --disable-nls \ - --enable-languages=c,c++ \ + --enable-languages=c,c++,fortran \ --target="${target}" make "-j$(nproc)" make install @@ -137,7 +240,7 @@ main() { purge_packages # store the version info for the FreeBSD release - bsd_revision=$(curl --retry 3 -sSfL "${bsd_http}/REVISION") + bsd_revision=$(curl --retry 3 -sSfL "${bsd_url}/REVISION") echo "${base_release} (${bsd_revision})" > /opt/freebsd-version rm -rf "${td}" diff --git a/docker/illumos.sh b/docker/illumos.sh index 94884cc4d..c3662b64d 100755 --- a/docker/illumos.sh +++ b/docker/illumos.sh @@ -31,25 +31,23 @@ main() { mkdir "${td}"/{binutils,gcc}{,-build} "${td}/illumos" - local binutils_file="binutils-${binutils}.tar.xz" local binutils_sum="16328a906e55a3c633854beec8e9e255a639b366436470b4f6245eb0d2fde942" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/binutils/${binutils_file}" -O - real_sum=$(sha256sum "${binutils_file}" | cut -d ' ' -f 1) + download_binutils "${binutils}" "xz" + real_sum=$(sha256sum "binutils-${binutils}.tar.xz" | cut -d ' ' -f 1) if [[ "${binutils_sum}" != "${real_sum}" ]]; then echo "Error: invalid hash for binutils." >&2 exit 1 fi - tar -C "${td}/binutils" --strip-components=1 -xJf "${binutils_file}" + tar -C "${td}/binutils" --strip-components=1 -xJf "binutils-${binutils}.tar.xz" - local gcc_file="gcc-${gcc}.tar.xz" local gcc_sum="e30a6e52d10e1f27ed55104ad233c30bd1e99cfb5ff98ab022dc941edd1b2dd4" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/gcc/gcc-${gcc}/${gcc_file}" -O - real_sum=$(sha256sum "${gcc_file}" | cut -d ' ' -f 1) + download_gcc "${gcc}" "xz" + real_sum=$(sha256sum "gcc-${gcc}.tar.xz" | cut -d ' ' -f 1) if [[ "${gcc_sum}" != "${real_sum}" ]]; then echo "Error: invalid hash for gcc." >&2 exit 1 fi - tar -C "${td}/gcc" --strip-components=1 -xJf "${gcc_file}" + tar -C "${td}/gcc" --strip-components=1 -xJf "gcc-${gcc}.tar.xz" pushd gcc sed -i -e 's/ftp:/https:/g' ./contrib/download_prerequisites @@ -106,7 +104,7 @@ main() { --target="${build_target}" \ --program-prefix="${target}-" \ --with-sysroot="${sysroot_dir}" \ - --enable-languages=c,c++ \ + --enable-languages=c,c++,fortran \ --disable-libada \ --disable-libcilkrts \ --disable-libgomp \ diff --git a/docker/lib.sh b/docker/lib.sh index 0d299b8a6..248374fb1 100644 --- a/docker/lib.sh +++ b/docker/lib.sh @@ -1,5 +1,14 @@ +#!/usr/bin/env bash +# shellcheck disable=SC2294 + purge_list=() +set_centos_ulimit() { + # this is a bug affecting buildkit with yum when ulimit is unlimited + # https://github.com/docker/buildx/issues/379#issuecomment-1196517905 + ulimit -n 1024000 +} + install_packages() { if grep -i ubuntu /etc/os-release; then apt-get update @@ -12,6 +21,7 @@ install_packages() { fi done else + set_centos_ulimit for pkg in "${@}"; do if ! 
yum list installed "${pkg}" >/dev/null 2>/dev/null; then yum install -y "${pkg}" @@ -43,3 +53,124 @@ if_ubuntu() { eval "${@}" fi } + +if_ubuntu_ge() { + if grep -q -i ubuntu /etc/os-release; then + local ver + ver="$(source /etc/os-release; echo $VERSION_ID)" + if dpkg --compare-versions "$ver" "ge" "$1"; then + shift + eval "${@}" + fi + fi +} + + +GNU_MIRRORS=( + "https://ftp.gnu.org/gnu/" + "https://ftpmirror.gnu.org/" +) + +download_mirrors() { + local relpath="${1}" + shift + local filename="${1}" + shift + + for mirror in "${@}"; do + if curl --retry 3 -sSfL "${mirror}/${relpath}/${filename}" -O; then + break + fi + done + if [[ ! -f "${filename}" ]]; then + echo "Unable to download ${filename}" >&2 + exit 1 + fi +} + +download_binutils() { + local mirror + local version="${1}" + local ext="${2}" + local filename="binutils-${version}.tar.${ext}" + + download_mirrors "binutils" "${filename}" "${GNU_MIRRORS[@]}" +} + +download_gcc() { + local mirror + local version="${1}" + local ext="${2}" + local filename="gcc-${version}.tar.${ext}" + + download_mirrors "gcc/gcc-${version}" "${filename}" "${GNU_MIRRORS[@]}" +} + +docker_to_qemu_arch() { + local arch="${1}" + case "${arch}" in + arm64) + echo "aarch64" + ;; + 386) + echo "i386" + ;; + amd64) + echo "x86_64" + ;; + arm|ppc64le|riscv64|s390x) + echo "${arch}" + ;; + *) + echo "Unknown Docker image architecture, got \"${arch}\"." >&2 + exit 1 + ;; + esac +} + +docker_to_linux_arch() { + # variant may not be provided + local oldstate + oldstate="$(set +o)" + set +u + + local arch="${1}" + local variant="${2}" + case "${arch}" in + arm64) + echo "aarch64" + ;; + 386) + echo "i686" + ;; + amd64) + echo "x86_64" + ;; + ppc64le) + echo "powerpc64le" + ;; + arm) + case "${variant}" in + v6) + echo "arm" + ;; + ""|v7) + echo "armv7" + ;; + *) + echo "Unknown Docker image variant, got \"${variant}\"." >&2 + exit 1 + ;; + esac + ;; + riscv64|s390x) + echo "${arch}" + ;; + *) + echo "Unknown Docker image architecture, got \"${arch}\"." >&2 + exit 1 + ;; + esac + + eval "${oldstate}" +} diff --git a/docker/linux-image.sh b/docker/linux-image.sh index dc85ed45c..745ac2fa9 100755 --- a/docker/linux-image.sh +++ b/docker/linux-image.sh @@ -6,95 +6,163 @@ set -euo pipefail # shellcheck disable=SC1091 . lib.sh +max_kernel_version() { + # kernel versions have the following format: + # `5.10.0-10-$arch`, where the `$arch` may be optional. + local IFS=$'\n' + local -a versions + local major=0 + local minor=0 + local patch=0 + local release=0 + local index=0 + local version + local x + local y + local z + local r + local is_larger + + read -r -d '' -a versions <<<"$1" + for i in "${!versions[@]}"; do + version="${versions[$i]}" + x=$(echo "$version" | cut -d '.' -f 1) + y=$(echo "$version" | cut -d '.' -f 2) + z=$(echo "$version" | cut -d '.' 
-f 3 | cut -d '-' -f 1) + r=$(echo "$version" | cut -d '-' -f 2) + is_larger= + + if [ "$x" -gt "$major" ]; then + is_larger=1 + elif [ "$x" -eq "$major" ] && [ "$y" -gt "$minor" ]; then + is_larger=1 + elif [ "$x" -eq "$major" ] && [ "$y" -eq "$minor" ] && [ "$z" -gt "$patch" ]; then + is_larger=1 + elif [ "$x" -eq "$major" ] && [ "$y" -eq "$minor" ] && [ "$z" -eq "$patch" ] && [ "$r" -gt "$release" ]; then + is_larger=1 + fi + + if [ -n "$is_larger" ]; then + index="$i" + major="$x" + minor="$y" + patch="$z" + release="$r" + fi + done + + echo "${versions[index]}" +} + main() { # arch in the rust target local arch="${1}" \ - kversion=4.19.0-20 + kversion=5.10.0-26 - local debsource="deb http://http.debian.net/debian/ buster main" - debsource="${debsource}\ndeb http://security.debian.org/ buster/updates main" + local debsource="deb http://http.debian.net/debian/ bullseye main" + debsource="${debsource}\ndeb http://security.debian.org/ bullseye-security main" local dropbear="dropbear-bin" local -a deps local kernel= - local libgcc="libgcc1" + local libgcc="libgcc-s1" + local ncurses= # select debian arch and kernel version case "${arch}" in - aarch64) - arch=arm64 - kernel="${kversion}-arm64" - ;; - armv7) - arch=armhf - kernel="${kversion}-armmp" - ;; - i686) - arch=i386 - kernel="${kversion}-686" - ;; - mips|mipsel) - kernel="${kversion}-4kc-malta" - ;; - mips64el) - kernel="${kversion}-5kc-malta" - ;; - powerpc) - # there is no buster powerpc port, so we use jessie - # use a more recent kernel from backports - kversion='4.9.0-0.bpo.6' - kernel="${kversion}-powerpc" - debsource="deb http://archive.debian.org/debian jessie main" - debsource="${debsource}\ndeb http://archive.debian.org/debian jessie-backports main" - debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unstable main" - debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unreleased main" - - # archive.debian.org Release files are expired. 
- echo "Acquire::Check-Valid-Until false;" | tee -a /etc/apt/apt.conf.d/10-nocheckvalid - echo "APT::Get::AllowUnauthenticated true;" | tee -a /etc/apt/apt.conf.d/10-nocheckvalid - - dropbear="dropbear" - deps=(libcrypt1:"${arch}") - ;; - powerpc64) - # there is no stable port - arch=ppc64 - # https://packages.debian.org/en/sid/linux-image-powerpc64 - kversion='5.*' - kernel="${kversion}-powerpc64" - libgcc="libgcc-s1" - debsource="deb http://ftp.ports.debian.org/debian-ports unstable main" - debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unreleased main" - # sid version of dropbear requires these dependencies - deps=(libcrypt1:"${arch}") - ;; - powerpc64le) - arch=ppc64el - kernel="${kversion}-powerpc64le" - ;; - s390x) - arch=s390x - kernel="${kversion}-s390x" - ;; - sparc64) - # there is no stable port - # https://packages.debian.org/en/sid/linux-image-sparc64 - kernel='*-sparc64' - libgcc="libgcc-s1" - debsource="deb http://ftp.ports.debian.org/debian-ports unstable main" - debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unreleased main" - # sid version of dropbear requires these dependencies - deps=(libcrypt1:"${arch}") - ;; - x86_64) - arch=amd64 - kernel="${kversion}-amd64" - ;; - *) - echo "Invalid arch: ${arch}" - exit 1 - ;; + aarch64) + arch=arm64 + kernel="${kversion}-arm64" + deps=(libcrypt1:"${arch}") + ;; + armv7) + arch=armhf + kernel='5.*-armmp' + deps=(libcrypt1:"${arch}") + ;; + i686) + arch=i386 + kernel="${kversion}-686" + deps=(libcrypt1:"${arch}") + ;; + mips) + # mips was discontinued in bullseye, so we have to use buster. + libgcc="libgcc1" + debsource="deb http://http.debian.net/debian/ buster main" + debsource="${debsource}\ndeb http://security.debian.org/ buster/updates main" + kernel='4.*-4kc-malta' + ncurses="=6.1*" + ;; + mipsel) + kernel='5.*-4kc-malta' + deps=(libcrypt1:"${arch}") + ;; + mips64el) + kernel='5.*-5kc-malta' + deps=(libcrypt1:"${arch}") + ;; + powerpc) + # there is no buster powerpc port, so we use jessie + # use a more recent kernel from backports + kversion='4.9.0-0.bpo.6' + kernel="${kversion}-powerpc" + debsource="deb http://archive.debian.org/debian jessie main" + debsource="${debsource}\ndeb http://archive.debian.org/debian jessie-backports main" + debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unstable main" + debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unreleased main" + + # archive.debian.org Release files are expired. 
+ echo "Acquire::Check-Valid-Until false;" | tee -a /etc/apt/apt.conf.d/10-nocheckvalid + echo "APT::Get::AllowUnauthenticated true;" | tee -a /etc/apt/apt.conf.d/10-nocheckvalid + echo "Acquire::AllowInsecureRepositories True;" | tee -a /etc/apt/apt.conf.d/10-nocheckvalid + + dropbear="dropbear" + deps=(libcrypt1:"${arch}") + ;; + powerpc64) + # there is no stable port + arch=ppc64 + # https://packages.debian.org/en/sid/linux-image-powerpc64 + kernel='6.*-powerpc64' + debsource="deb http://ftp.ports.debian.org/debian-ports unstable main" + debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unreleased main" + # sid version of dropbear requires these dependencies + deps=(libcrypt1:"${arch}") + ;; + powerpc64le) + arch=ppc64el + kernel='5.*-powerpc64le' + deps=(libcrypt1:"${arch}") + ;; + riscv64) + kernel='6.*-riscv64' + debsource="deb http://deb.debian.org/debian unstable main" + deps=(libcrypt1:"${arch}") + ;; + s390x) + arch=s390x + kernel='5.*-s390x' + deps=(libcrypt1:"${arch}") + ;; + sparc64) + # there is no stable port + # https://packages.debian.org/en/sid/linux-image-sparc64 + kernel='6.*-sparc64' + debsource="deb http://ftp.ports.debian.org/debian-ports unstable main" + debsource="${debsource}\ndeb http://ftp.ports.debian.org/debian-ports unreleased main" + # sid version of dropbear requires these dependencies + deps=(libcrypt1:"${arch}") + ;; + x86_64) + arch=amd64 + kernel="${kversion}-amd64" + deps=(libcrypt1:"${arch}") + ;; + *) + echo "Invalid arch: ${arch}" + exit 1 + ;; esac install_packages ca-certificates \ @@ -103,35 +171,65 @@ main() { sharutils \ gnupg + # conflicting versions of some packages will be installed already for the host platform, + # we need to remove the system installs later. since apt relies + # on these packages, we need to download them and reinstall + # using dpkg later, since we cannot redownload via apt. + local dpkg_arch + dpkg_arch=$(dpkg --print-architecture) + local libgcc_packages=("${libgcc}:${arch}" "libstdc++6:${arch}") + if [[ "${arch}" == "${dpkg_arch}" ]]; then + local libgcc_root=/qemu/libgcc + mkdir -p "${libgcc_root}" + pushd "${libgcc_root}" + apt-get -d --no-install-recommends download "${libgcc_packages[@]}" + popd + fi + # Download packages mv /etc/apt/sources.list /etc/apt/sources.list.bak - echo -e "${debsource}" > /etc/apt/sources.list + mv /etc/apt/sources.list.d /etc/apt/sources.list.d.bak + echo -e "${debsource}" >/etc/apt/sources.list # Old ubuntu does not support --add-architecture, so we directly change multiarch file if [ -f /etc/dpkg/dpkg.cfg.d/multiarch ]; then cp /etc/dpkg/dpkg.cfg.d/multiarch /etc/dpkg/dpkg.cfg.d/multiarch.bak fi - dpkg --add-architecture "${arch}" || echo "foreign-architecture ${arch}" > /etc/dpkg/dpkg.cfg.d/multiarch + dpkg --add-architecture "${arch}" || echo "foreign-architecture ${arch}" >/etc/dpkg/dpkg.cfg.d/multiarch # Add Debian keys. 
- curl --retry 3 -sSfL 'https://ftp-master.debian.org/keys/archive-key-{7.0,8,9,10}.asc' -O - curl --retry 3 -sSfL 'https://ftp-master.debian.org/keys/archive-key-{8,9,10}-security.asc' -O - curl --retry 3 -sSfL 'https://ftp-master.debian.org/keys/release-{7,8,9,10}.asc' -O - curl --retry 3 -sSfL 'https://www.ports.debian.org/archive_{2020,2021,2022}.key' -O + curl --retry 3 -sSfL 'https://ftp-master.debian.org/keys/archive-key-{7.0,8,9,10,11}.asc' -O + curl --retry 3 -sSfL 'https://ftp-master.debian.org/keys/archive-key-{8,9,10,11}-security.asc' -O + curl --retry 3 -sSfL 'https://ftp-master.debian.org/keys/release-{7,8,9,10,11}.asc' -O + curl --retry 3 -sSfL 'https://www.ports.debian.org/archive_{2020,2021,2022,2023,2024}.key' -O for key in *.asc *.key; do - apt-key add "${key}" - rm "${key}" + apt-key add "${key}" + rm "${key}" done # allow apt-get to retry downloads - echo 'APT::Acquire::Retries "3";' > /etc/apt/apt.conf.d/80-retries + echo 'APT::Acquire::Retries "3";' >/etc/apt/apt.conf.d/80-retries apt-get update mkdir -p "/qemu/${arch}" chmod 777 /qemu "/qemu/${arch}" + # Need to limit the kernel version and select the best version + # if we have a wildcard. This is because some matches, such as + # `linux-image-4.*-4kc-malta` can match more than 1 package, + # which will prevent further steps from working. + if [[ "$kernel" == *'*'* ]]; then + # Need an exact match for start and end, to avoid debug kernels. + # Afterwards, need to do a complex sort for the best kernel version, + # since the sort is non-trivial and must extract subcomponents. + packages=$(apt-cache search ^linux-image-"$kernel$" --names-only) + names=$(echo "$packages" | cut -d ' ' -f 1) + kversions="${names//linux-image-/}" + kernel=$(max_kernel_version "$kversions") + fi + cd "/qemu/${arch}" apt-get -d --no-install-recommends download \ ${deps[@]+"${deps[@]}"} \ @@ -141,11 +239,40 @@ main() { "libtomcrypt1:${arch}" \ "libgmp10:${arch}" \ "libc6:${arch}" \ - "${libgcc}:${arch}" \ - "libstdc++6:${arch}" \ "linux-image-${kernel}:${arch}" \ - ncurses-base \ + ncurses-base"${ncurses}" \ "zlib1g:${arch}" + + if [[ "${arch}" != "${dpkg_arch}" ]]; then + apt-get -d --no-install-recommends download "${libgcc_packages[@]}" + else + # host arch has conflicting versions of the packages installed + # this prevents us from downloading them, so we need to + # simply grab the last version from the debian sources. + # we're search for a paragraph with: + # Maintainer: Debian + # but not + # Original-Maintainer: Debian + # + # then, we extract the version record and download **only** + # packages matching that specific version. + local version_info + local version_record + local version + for package in "${libgcc_packages[@]}"; do + version_info=$(apt-cache show "${package}") + version_record=$(echo "${version_info}" | perl -n00e 'print if /^Maintainer: Debian/m') + version=$(echo "${version_record}" | grep 'Version: ' | cut -d ' ' -f 2) + apt-get -d --no-install-recommends download "${package}=${version}" + done + + # now, if we don't remove the system installs, qemu-system won't + # be able to find these libgcc packages after building, since it + # will prefer the system packages, which it can't find later. + # removing these packages needs to occur after download via apt, + # since apt-get relies on libgcc_s1 and libstdc++6. 
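# (two notes on the block above and the removal below. first, the
# `perl -n00e` filter reads the apt-cache output in paragraph mode, one
# blank-line-separated stanza at a time, and `/m` anchors `^` at line
# starts, so stanzas carrying only `Original-Maintainer: Debian` do not
# match. a standalone sanity check:
#
#   printf 'Maintainer: Debian\nVersion: 1\n\nOriginal-Maintainer: Debian\nVersion: 2\n' \
#       | perl -n00e 'print if /^Maintainer: Debian/m'
#
# prints only the first stanza. second, `dpkg -r --force-depends` lets the
# removal proceed even though it leaves apt's own dependencies unsatisfied;
# the .deb files saved earlier are restored with `dpkg -i --force-depends`
# once the initrd has been assembled.)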
+ dpkg -r --force-depends "${libgcc_packages[@]}" + fi cd /qemu # Install packages @@ -159,33 +286,38 @@ main() { # initrd mkdir -p "${root}/modules" + if ls -d "${root}/usr/lib/modules"/*/kernel; then + prefix='/usr' + else + prefix='' + fi cp -v \ - "${root}/lib/modules"/*/kernel/drivers/net/net_failover.ko \ - "${root}/lib/modules"/*/kernel/drivers/net/virtio_net.ko \ - "${root}/lib/modules"/*/kernel/drivers/virtio/* \ - "${root}/lib/modules"/*/kernel/fs/netfs/netfs.ko \ - "${root}/lib/modules"/*/kernel/fs/9p/9p.ko \ - "${root}/lib/modules"/*/kernel/fs/fscache/fscache.ko \ - "${root}/lib/modules"/*/kernel/net/9p/9pnet.ko \ - "${root}/lib/modules"/*/kernel/net/9p/9pnet_virtio.ko \ - "${root}/lib/modules"/*/kernel/net/core/failover.ko \ + "${root}${prefix}/lib/modules"/*/kernel/drivers/net/net_failover.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/drivers/net/virtio_net.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/drivers/virtio/* \ + "${root}${prefix}/lib/modules"/*/kernel/fs/netfs/netfs.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/fs/9p/9p.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/fs/fscache/fscache.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/net/9p/9pnet.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/net/9p/9pnet_virtio.ko* \ + "${root}${prefix}/lib/modules"/*/kernel/net/core/failover.ko* \ "${root}/modules" || true # some file may not exist rm -rf "${root:?}/boot" - rm -rf "${root:?}/lib/modules" + rm -rf "${root:?}${prefix}/lib/modules" - cat << 'EOF' > "${root}/etc/hosts" + cat <<'EOF' >"${root}/etc/hosts" 127.0.0.1 localhost qemu EOF - cat << 'EOF' > $root/etc/hostname + cat <<'EOF' >"$root/etc/hostname" qemu EOF - cat << 'EOF' > $root/etc/passwd + cat <<'EOF' >"$root/etc/passwd" root::0:0:root:/root:/bin/sh EOF -cat << 'EOF' | uudecode -o $root/etc/dropbear/dropbear_rsa_host_key + cat <<'EOF' | uudecode -o "$root/etc/dropbear/dropbear_rsa_host_key" begin 600 dropbear_rsa_host_key M````!W-S:"UR $root/init -#!/bin/busybox sh + if [[ -e "${root}/usr/bin/busybox" ]]; then + busybox='/usr/bin/busybox' + else + busybox='/bin/busybox' + fi + cat <"${root}/init" +#!${busybox} sh set -e -/bin/busybox --install +${busybox} --install mount -t devtmpfs devtmpfs /dev mount -t proc none /proc @@ -226,20 +363,21 @@ mkdir /dev/pts mount -t devpts none /dev/pts/ # some archs does not have virtio modules -insmod /modules/failover.ko || true -insmod /modules/net_failover.ko || true -insmod /modules/virtio.ko || true -insmod /modules/virtio_ring.ko || true -insmod /modules/virtio_mmio.ko || true -insmod /modules/virtio_pci_legacy_dev.ko || true -insmod /modules/virtio_pci_modern_dev.ko || true -insmod /modules/virtio_pci.ko || true -insmod /modules/virtio_net.ko || true -insmod /modules/netfs.ko || true -insmod /modules/fscache.ko -insmod /modules/9pnet.ko -insmod /modules/9pnet_virtio.ko || true -insmod /modules/9p.ko +# fscache is builtin on riscv64 +insmod /modules/failover.ko || insmod /modules/failover.ko.xz || true +insmod /modules/net_failover.ko || insmod /modules/net_failover.ko.xz || true +insmod /modules/virtio.ko || insmod /modules/virtio.ko.xz || true +insmod /modules/virtio_ring.ko || insmod /modules/virtio_ring.ko.xz || true +insmod /modules/virtio_mmio.ko || insmod /modules/virtio_mmio.ko.xz || true +insmod /modules/virtio_pci_legacy_dev.ko || insmod /modules/virtio_pci_legacy_dev.ko.xz || true +insmod /modules/virtio_pci_modern_dev.ko || insmod /modules/virtio_pci_modern_dev.ko.xz || true +insmod /modules/virtio_pci.ko || insmod /modules/virtio_pci.ko.xz || true 
+insmod /modules/virtio_net.ko || insmod /modules/virtio_net.ko.xz || true +insmod /modules/netfs.ko || insmod /modules/netfs.ko.xz || true +insmod /modules/fscache.ko || insmod /modules/fscache.ko.xz || true +insmod /modules/9pnet.ko || insmod /modules/9pnet.ko.xz +insmod /modules/9pnet_virtio.ko || insmod /modules/9pnet_virtio.ko.xz || true +insmod /modules/9p.ko || insmod /modules/9p.ko.xz ifconfig lo 127.0.0.1 ifconfig eth0 10.0.2.15 @@ -251,21 +389,44 @@ mount -t 9p -o trans=virtio target /target -oversion=9p2000.u || true exec dropbear -F -E -B EOF + if [[ "${arch}" == "riscv64" ]]; then + # Symlink dynamic loader to /lib/ld-linux-riscv64-lp64d.so.1 + mkdir -p "${root}/lib" + ln -s /usr/lib/riscv64-linux-gnu/ld-linux-riscv64-lp64d.so.1 "${root}/lib/ld-linux-riscv64-lp64d.so.1" + fi + chmod +x "${root}/init" cd "${root}" - find . | cpio --create --format='newc' --quiet | gzip > ../initrd.gz + find . | cpio --create --format='newc' --quiet | gzip >../initrd.gz cd - + if [[ "${arch}" == "${dpkg_arch}" ]]; then + # need to reinstall these packages, since basic utilities rely on them. + pushd "${libgcc_root}" + dpkg -i --force-depends "${libgcc_root}"/*.deb + popd + rm -rf "${libgcc_root}" + fi + # Clean up rm -rf "/qemu/${root}" "/qemu/${arch}" mv -f /etc/apt/sources.list.bak /etc/apt/sources.list + mv -f /etc/apt/sources.list.d.bak /etc/apt/sources.list.d if [ -f /etc/dpkg/dpkg.cfg.d/multiarch.bak ]; then mv /etc/dpkg/dpkg.cfg.d/multiarch.bak /etc/dpkg/dpkg.cfg.d/multiarch fi - # can fail if arch is used (amd64 and/or i386) + if [ -f /etc/apt/apt.conf.d/10-nocheckvalid ]; then + rm /etc/apt/apt.conf.d/10-nocheckvalid + fi + # can fail if arch is used (image arch, such as amd64 and/or i386) dpkg --remove-architecture "${arch}" || true apt-get update + # need to reinstall the removed libgcc packages, which are required for apt + if [[ "${arch}" == "${dpkg_arch}" ]]; then + apt-get install --no-install-recommends --assume-yes "${packages[@]}" + fi + purge_packages ls -lh /qemu diff --git a/docker/linux-runner b/docker/linux-runner index 2ef0efc67..c0de2ef8c 100755 --- a/docker/linux-runner +++ b/docker/linux-runner @@ -2,6 +2,9 @@ set -e +# shellcheck disable=SC1091 +. /base-runner.sh + LOG=/tmp/qemu.log LOCK=/tmp/qemu.lock @@ -13,38 +16,19 @@ fi arch="${1}" shift -if [ "${CROSS_RUNNER}" = "" ]; then - if [[ "${arch}" == i?86 ]] || [[ "${arch}" == x86_64 ]]; then +if [[ -z "${CROSS_RUNNER}" ]]; then + if is_native_binary "${arch}"; then CROSS_RUNNER=native else CROSS_RUNNER=qemu-user fi fi -# select qemu arch -qarch="${arch}" -case "${arch}" in - armv7) - qarch="arm" - ;; - i686) - qarch="i386" - ;; - powerpc) - qarch="ppc" - ;; - powerpc64) - qarch="ppc64" - ;; - powerpc64le) - if [ "${CROSS_RUNNER}" = "qemu-user" ]; then - qarch="ppc64le" - else - qarch="ppc64" - fi - ;; -esac +# Ensure that the correct prefix is set even if the user has cleared the env. +# `@DEFAULT_QEMU_LD_PREFIX@` is replaced during image build. +export QEMU_LD_PREFIX=${QEMU_LD_PREFIX:-@DEFAULT_QEMU_LD_PREFIX@} +qarch=$(qemu_arch "${arch}") case "${CROSS_RUNNER}" in native) exec "${@}" @@ -74,7 +58,7 @@ case "${arch}" in n=$(( n > 8 ? 
8 : n )) opt="-machine virt -cpu cortex-a57" ;; - armv7) + armv7hf) opt="-machine virt" driver9p="virtio-9p-device" drivernet="virtio-net-device" @@ -98,6 +82,9 @@ case "${arch}" in opt="-append console=ttyPZ0" n=1 ;; + riscv64) + opt="-machine virt" + ;; powerpc64|powerpc64le) opt="-append console=hvc0 --nodefaults -serial stdio" ;; @@ -140,12 +127,16 @@ esac else ${QEMU_CMD} >> "${LOG}" 2>&1 & fi + qemu_pid=$! # wait for dropbear for _ in $(seq 240); do if grep -q "Not backgrounding" "${LOG}"; then READY=1 break + elif ! (ps -p "${qemu_pid}" >/dev/null 2>&1); then + # qemu command failed and exited early + exit 1 fi sleep 0.5s done diff --git a/docker/mingw.sh b/docker/mingw.sh index 7d8815cba..9dfef0eee 100755 --- a/docker/mingw.sh +++ b/docker/mingw.sh @@ -21,7 +21,7 @@ main() { local dependencies=(build-essential) while IFS='' read -r dep; do dependencies+=("${dep}"); done < \ - <(apt-cache showsrc gcc-mingw-w64-i686 | grep Build | cut -d: -f2 | tr , '\n' | cut -d' ' -f2 | sort | uniq) + <(apt-cache showsrc gcc-mingw-w64-i686 | grep Build | cut -d: -f2 | tr , '\n' | cut -d' ' -f2 | sort | uniq) install_packages "${dependencies[@]}" @@ -34,7 +34,7 @@ main() { pushd gcc-mingw-w64-* # We are using dwarf exceptions instead of sjlj - sed -i -e 's/libgcc_s_sjlj-1/libgcc_s_dw2-1/g' debian/gcc-mingw-w64-i686.install + sed -i -e 's/libgcc_s_sjlj-1/libgcc_s_dw2-1/g' debian/gcc-mingw-w64-i686.install.in # Only build i686 packages (disable x86_64) patch -p0 <<'EOF' @@ -96,7 +96,7 @@ EOF threads := posix win32 # Hardening on the host, none on the target -@@ -216,6 +216,10 @@ +@@ -220,6 +220,10 @@ # Enable libatomic CONFFLAGS += \ --enable-libatomic @@ -104,11 +104,16 @@ EOF +CONFFLAGS += \ + --disable-sjlj-exceptions \ + --with-dwarf2 - # Enable experimental::filesystem + # Enable experimental::filesystem and std::filesystem CONFFLAGS += \ --enable-libstdcxx-filesystem-ts=yes EOF + # Need symlinks for specific autoconf versions, since it + # attempts to use autoconf2.69 and autom4te2.69. + ln -s /usr/bin/autoconf /usr/bin/autoconf2.69 + ln -s /usr/bin/autom4te /usr/bin/autom4te2.69 + # Build the modified mingw packages MAKEFLAGS=--silent dpkg-buildpackage -nc -B --jobs=auto @@ -122,6 +127,10 @@ EOF rm -rf "${td}" rm "${0}" + + # Unlink our temporary aliases + unlink /usr/bin/autoconf2.69 + unlink /usr/bin/autom4te2.69 } main "${@}" diff --git a/docker/musl-gcc.sh b/docker/musl-gcc.sh new file mode 100755 index 000000000..531b4ac93 --- /dev/null +++ b/docker/musl-gcc.sh @@ -0,0 +1,74 @@ +#!/bin/bash + +# this linker works around missing builtins in older rust versions. +# we also have custom linker scripts for our static libstdc++ for all versions +# which is found in `musl-symlink.sh`. +# +# for other targets, issues in older versions of compiler-builtins require +# manually linking to libgcc to compensate for missing builtins. 
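# (concretely: when the detected rustc minor version is below the patched
# version baked into the image, the wrapper below appends
# `-lgcc -static-libgcc` to the link line so those intrinsics resolve from
# libgcc; on newer toolchains it invokes gcc unmodified.)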
+# target-specific details include: +# +# aarch64-unknown-linux-musl (fixed in 1.48) +# https://github.com/rust-lang/compiler-builtins/pull/377 +# +# armv5te-unknown-linux-musleabi (fixed in 1.65) +# missing sync `sync_X_and_fetch` +# https://github.com/rust-lang/compiler-builtins/pull/484 +# +# mips64-unknown-linux-muslabi64, mips64el-unknown-linux-muslabi64 (fixed in 1.65) +# missing soft-fp routine `__trunctfsf2` +# https://github.com/rust-lang/compiler-builtins/pull/483 + +set -x +set -euo pipefail + +main() { + local minor + local patched_minor="${CROSS_BUILTINS_PATCHED_MINOR_VERSION:-0}" + minor=$(rustc_minor_version) + + if [[ $# -eq 0 ]] || [[ "${minor}" -ge "${patched_minor}" ]]; then + exec "${CROSS_TOOLCHAIN_PREFIX}"gcc "${@}" + else + exec "${CROSS_TOOLCHAIN_PREFIX}"gcc "${@}" -lgcc -static-libgcc + fi +} + +# FIXME: the rest of the contents of this file can be removed later on, +# especially after 0.3.0 has been released so we can ensure everyone is +# using a cross version at least as recent as images requiring the rust +# versions provided as environment variables. these functions are wrappers +# around these environment variables for backwards compatibility. +# https://github.com/cross-rs/cross/issues/1046 + +# NOTE: this will fail if rustc does not provide version +# info, which may happen with a custom toolchain. +rustc_version() { + rustc -Vv | grep '^release:' | cut -d ':' -f2 +} + +rustc_major_version() { + if [[ -z "${CROSS_RUSTC_MAJOR_VERSION:-}" ]]; then + CROSS_RUSTC_MAJOR_VERSION=$(rustc_version | cut -d '.' -f1) + export CROSS_RUSTC_MAJOR_VERSION + fi + echo "${CROSS_RUSTC_MAJOR_VERSION}" +} + +rustc_minor_version() { + if [[ -z "${CROSS_RUSTC_MINOR_VERSION:-}" ]]; then + CROSS_RUSTC_MINOR_VERSION=$(rustc_version | cut -d '.' -f2) + export CROSS_RUSTC_MINOR_VERSION + fi + echo "${CROSS_RUSTC_MINOR_VERSION}" +} + +rustc_patch_version() { + if [[ -z "${CROSS_RUSTC_PATCH_VERSION:-}" ]]; then + CROSS_RUSTC_PATCH_VERSION=$(rustc_version | cut -d '.' -f3) + export CROSS_RUSTC_PATCH_VERSION + fi + echo "${CROSS_RUSTC_PATCH_VERSION}" +} + +main "${@}" diff --git a/docker/musl-symlink.sh b/docker/musl-symlink.sh index a02dd7a51..6906030e4 100755 --- a/docker/musl-symlink.sh +++ b/docker/musl-symlink.sh @@ -49,6 +49,29 @@ main() { fi done + # ensure we statically link libstdc++, so avoid segfaults with c++ + # https://github.com/cross-rs/cross/issues/902 + rm "${sysroot}"/lib/libstdc++.so* || true + + # now, we create a linker script that adds all the required dependencies + # because we link to a static libstdc++ to avoid runtime issues and + # with the shared libstdc++, we can have missing symbols that are reference + # in libstdc++, such as those from libc like `setlocale` and `__cxa_atexit`, + # as well as those from libgcc, like `__extendsftf2`. 
all musl targets + # can require symbols from libc, however, only the following are known + # to require symbols from libgcc: + # - aarch64-unknown-linux-musl + # - mips64-unknown-linux-muslabi64 + # - mips64el-unknown-linux-muslabi64 + echo '/* cross-rs linker script + * this allows us to statically link libstdc++ to avoid segfaults + * https://github.com/cross-rs/cross/issues/902 + */ +GROUP ( libstdc++.a AS_NEEDED( -lgcc -lc -lm ) ) +' > "${sysroot}"/lib/libstdc++.so.6.0.27 + ln -s libstdc++.so.6.0.27 "${sysroot}"/lib/libstdc++.so.6 + ln -s libstdc++.so.6.0.27 "${sysroot}"/lib/libstdc++.so + echo "${sysroot}/lib" >> "/etc/ld-musl-${arch}.path" rm -rf "${0}" diff --git a/docker/musl.sh b/docker/musl.sh index 54d816303..e0d4fed39 100755 --- a/docker/musl.sh +++ b/docker/musl.sh @@ -9,9 +9,9 @@ set -euo pipefail hide_output() { set +x trap " - echo 'ERROR: An error was encountered with the build.' - cat /tmp/build.log - exit 1 + echo 'ERROR: An error was encountered with the build.' + cat /tmp/build.log + exit 1 " ERR bash -c 'while true; do sleep 30; echo $(date) - building ...; done' & PING_LOOP_PID=$! @@ -22,7 +22,7 @@ hide_output() { } main() { - local version=0.9.9 + local version=fe91582 install_packages ca-certificates curl build-essential @@ -30,19 +30,28 @@ main() { td="$(mktemp -d)" pushd "${td}" - curl --retry 3 -sSfL "https://github.com/richfelker/musl-cross-make/archive/v${version}.tar.gz" -O - tar --strip-components=1 -xzf "v${version}.tar.gz" + curl --retry 3 -sSfL "https://github.com/richfelker/musl-cross-make/archive/${version}.tar.gz" -O + tar --strip-components=1 -xzf "${version}.tar.gz" # Don't depend on the mirrors of sabotage linux that musl-cross-make uses. local linux_headers_site=https://ci-mirrors.rust-lang.org/rustc/sabotage-linux-tarballs + local linux_ver=headers-4.19.88 + # alpine GCC is built with `--enable-default-pie`, so we want to + # ensure we use that. we want support for shared runtimes except for + # libstdc++, however, the only way to do that is to simply remove + # the shared libraries later. on alpine, binaries use static-pie + # linked, so our behavior has maximum portability, and is consistent + # with popular musl distros. hide_output make install "-j$(nproc)" \ GCC_VER=9.2.0 \ - MUSL_VER=1.1.24 \ + MUSL_VER=1.2.3 \ BINUTILS_VER=2.33.1 \ DL_CMD='curl --retry 3 -sSfL -C - -o' \ - LINUX_HEADERS_SITE=$linux_headers_site \ + LINUX_HEADERS_SITE="${linux_headers_site}" \ + LINUX_VER="${linux_ver}" \ OUTPUT=/usr/local/ \ + "GCC_CONFIG += --enable-default-pie --enable-languages=c,c++,fortran" \ "${@}" purge_packages diff --git a/docker/native-linux-image.sh b/docker/native-linux-image.sh new file mode 100755 index 000000000..e8c988e0a --- /dev/null +++ b/docker/native-linux-image.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -x +set -eo pipefail + +# shellcheck disable=SC1091 +. lib.sh + +main() { + local arch + arch=$(docker_to_linux_arch "${TARGETARCH}" "${TARGETVARIANT}") + /linux-image.sh "${arch}" + rm "${0}" +} + +main "${@}" diff --git a/docker/native-linux-runner b/docker/native-linux-runner new file mode 100755 index 000000000..0013f29f6 --- /dev/null +++ b/docker/native-linux-runner @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +set -eo pipefail + +# shellcheck disable=SC1091 +. 
/lib.sh + +main() { + local arch + arch=$(docker_to_linux_arch "${CROSS_TARGETARCH:-@DEFAULT_CROSS_TARGETARCH@}" "${CROSS_TARGETVARIANT:-@DEFAULT_CROSS_TARGETVARIANT@}") + + if [[ -z "${CROSS_RUNNER}" ]]; then + export CROSS_RUNNER=native + fi + + exec /linux-runner "${arch}" "${@}" +} + +main "${@}" diff --git a/docker/native-qemu.sh b/docker/native-qemu.sh new file mode 100755 index 000000000..f975489f3 --- /dev/null +++ b/docker/native-qemu.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -x +set -euo pipefail + +# shellcheck disable=SC1091 +. lib.sh + +main() { + local arch + arch=$(docker_to_qemu_arch "${TARGETARCH}") + /qemu.sh "${arch}" softmmu + rm "${0}" +} + +main "${@}" diff --git a/docker/netbsd.sh b/docker/netbsd.sh index 8b9301885..12d07394a 100755 --- a/docker/netbsd.sh +++ b/docker/netbsd.sh @@ -8,8 +8,8 @@ set -euo pipefail main() { local binutils=2.36.1 \ - gcc=9.4.0 \ - target=x86_64-unknown-netbsd + gcc=9.4.0 \ + target=x86_64-unknown-netbsd install_packages bzip2 \ ca-certificates \ @@ -26,10 +26,10 @@ main() { mkdir "${td}"/{binutils,gcc}{,-build} "${td}/netbsd" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/binutils/binutils-${binutils}.tar.bz2" -O + download_binutils "${binutils}" "bz2" tar -C "${td}/binutils" --strip-components=1 -xjf "binutils-${binutils}.tar.bz2" - curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/gcc/gcc-${gcc}/gcc-${gcc}.tar.xz" -O + download_gcc "${gcc}" "xz" tar -C "${td}/gcc" --strip-components=1 -xJf "gcc-${gcc}.tar.xz" pushd "${td}" @@ -38,8 +38,9 @@ main() { sed -i -e 's/ftp:/https:/g' ./contrib/download_prerequisites ./contrib/download_prerequisites local patches=( - https://ftp.netbsd.org/pub/pkgsrc/current/pkgsrc/lang/gcc9/patches/patch-libstdc++-v3_config_os_bsd_netbsd_ctype__base.h https://ftp.netbsd.org/pub/pkgsrc/current/pkgsrc/lang/gcc9/patches/patch-libstdc++-v3_config_os_bsd_netbsd_ctype__configure__char.cc + https://ftp.netbsd.org/pub/pkgsrc/current/pkgsrc/lang/gcc9/patches/patch-libstdc++-v3_config_os_bsd_netbsd_ctype__base.h + https://ftp.netbsd.org/pub/pkgsrc/current/pkgsrc/lang/gcc8/patches/patch-libgfortran_io_io.h ) local patch @@ -52,10 +53,14 @@ main() { done popd - curl --retry 3 -sSfL ftp://ftp.netbsd.org/pub/NetBSD/NetBSD-9.2/amd64/binary/sets/base.tar.xz -O + local mirrors=( + "ftp://ftp.netbsd.org" + "https://cdn.NetBSD.org" + ) + download_mirrors "pub/NetBSD/NetBSD-9.2/amd64/binary/sets" "base.tar.xz" "${mirrors[@]}" tar -C "${td}/netbsd" -xJf base.tar.xz ./usr/include ./usr/lib ./lib - curl --retry 3 -sSfL ftp://ftp.netbsd.org/pub/NetBSD/NetBSD-9.2/amd64/binary/sets/comp.tar.xz -O + download_mirrors "pub/NetBSD/NetBSD-9.2/amd64/binary/sets" "comp.tar.xz" "${mirrors[@]}" tar -C "${td}/netbsd" -xJf comp.tar.xz ./usr/include ./usr/lib pushd binutils-build @@ -74,6 +79,7 @@ main() { cp "${td}/netbsd/lib/libpthread.so.1.4" "${destdir}/lib" cp "${td}/netbsd/usr/lib/librt.so.1.1" "${destdir}/lib" cp "${td}/netbsd/usr/lib"/lib{c,m,pthread}{,_p}.a "${destdir}/lib" + cp "${td}/netbsd/usr/lib"/libexecinfo.so "${destdir}/lib" cp "${td}/netbsd/usr/lib"/{crt0,crti,crtn,crtbeginS,crtendS,crtbegin,crtend,gcrt0}.o "${destdir}/lib" ln -s libc.so.12.213 "${destdir}/lib/libc.so" @@ -87,7 +93,8 @@ main() { ln -s libutil.so.7.24 "${destdir}/lib/libutil.so.7" pushd gcc-build - ../gcc/configure \ + # remove the environment variables after bumping the gcc version to 11. 
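# (as used below, `target_configargs` is an override that gcc's top-level
# configure forwards to the target-library configure scripts; presetting
# the `ac_cv_func_*` autoconf cache entries makes libstdc++'s configure
# treat newlocale/freelocale/uselocale as absent, sidestepping a gcc 9
# build issue on this target.)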
+ target_configargs="ac_cv_func_newlocale=no ac_cv_func_freelocale=no ac_cv_func_uselocale=no" ../gcc/configure \ --disable-libada \ --disable-libcilkrt \ --disable-libcilkrts \ @@ -100,7 +107,7 @@ main() { --disable-lto \ --disable-multilib \ --disable-nls \ - --enable-languages=c,c++ \ + --enable-languages=c,c++,fortran \ --target="${target}" make "-j$(nproc)" make install diff --git a/docker/qemu-runner b/docker/qemu-runner index dd89705c9..404600167 100755 --- a/docker/qemu-runner +++ b/docker/qemu-runner @@ -5,6 +5,9 @@ set -e +# shellcheck disable=SC1091 +. /base-runner.sh + if [ -n "${CROSS_DEBUG}" ]; then set -x fi @@ -13,38 +16,19 @@ fi arch="${1}" shift -if [ "${CROSS_RUNNER}" = "" ]; then - if [[ "${arch}" == i?86 ]] || [[ "${arch}" == x86_64 ]]; then +if [[ -z "${CROSS_RUNNER}" ]]; then + if is_native_binary "${arch}"; then CROSS_RUNNER=native else CROSS_RUNNER=qemu-user fi fi -# select qemu arch -qarch="${arch}" -case "${arch}" in - armv7) - qarch="arm" - ;; - i?86) - qarch="i386" - ;; - powerpc) - qarch="ppc" - ;; - powerpc64) - qarch="ppc64" - ;; - powerpc64le) - if [ "${CROSS_RUNNER}" = "qemu-user" ]; then - qarch="ppc64le" - else - qarch="ppc64" - fi - ;; -esac +# Ensure that the correct prefix is set even if the user has cleared the env. +# `@DEFAULT_QEMU_LD_PREFIX@` is replaced during image build. +export QEMU_LD_PREFIX=${QEMU_LD_PREFIX:-@DEFAULT_QEMU_LD_PREFIX@} +qarch=$(qemu_arch "${arch}") case "${CROSS_RUNNER}" in native) exec "${@}" diff --git a/docker/qemu.sh b/docker/qemu.sh index 75626c55c..f56dfc83d 100755 --- a/docker/qemu.sh +++ b/docker/qemu.sh @@ -29,6 +29,10 @@ build_static_libffi () { build_static_libmount () { local version_spec=2.23.2 local version=2.23 + + if_ubuntu_ge 22.04 version_spec=2.37.2 + if_ubuntu_ge 22.04 version=2.37 + local td td="$(mktemp -d)" @@ -54,6 +58,7 @@ build_static_libattr() { pushd "${td}" + set_centos_ulimit yum install -y gettext curl --retry 3 -sSfL "https://download.savannah.nongnu.org/releases/attr/attr-${version}.src.tar.gz" -O @@ -108,13 +113,32 @@ build_static_pixman() { rm -rf "${td}" } +build_static_slirp() { + local version=4.1.0 + + local td + td="$(mktemp -d)" + + pushd "${td}" + + curl --retry 3 -sSfL "https://gitlab.freedesktop.org/slirp/libslirp//-/archive/v${version}/libslirp-v${version}.tar.gz" -O + tar -xzf "libslirp-v${version}.tar.gz" + meson setup -Ddefault_library=static libslirp-v${version} build + ninja -C build + install -m 644 ./build/libslirp.a /usr/lib64/ + + popd + + rm -rf "${td}" +} + main() { local version=5.1.0 if_centos version=4.2.1 local arch="${1}" \ - softmmu="${2:-}" + softmmu="${2:-}" install_packages \ autoconf \ @@ -171,14 +195,27 @@ main() { libselinux1-dev \ zlib1g-dev - # if we have python3.6+, we can install qemu 6.1.0, which needs ninja-build + # ubuntu no longer provides statically linked libmount + if_ubuntu_ge 22.04 build_static_libmount + + # if we have python3.6+, we can install qemu 7.0.0, which needs ninja-build # ubuntu 16.04 only provides python3.5, so remove when we have a newer qemu. is_ge_python36=$(python3 -c "import sys; print(int(sys.version_info >= (3, 6)))") if [[ "${is_ge_python36}" == "1" ]]; then - if_ubuntu version=6.1.0 + if_ubuntu version=7.0.0 if_ubuntu install_packages ninja-build fi + # if we have python3.8+, we can install qemu 8.2.2, which needs ninja-build, + # meson, python3-pip and libslirp-dev. + # ubuntu 16.04 only provides python3.5, so remove when we have a newer qemu. 
+    is_ge_python38=$(python3 -c "import sys; print(int(sys.version_info >= (3, 8)))")
+    if [[ "${is_ge_python38}" == "1" ]]; then
+        if_ubuntu version=8.2.2
+        if_ubuntu install_packages ninja-build meson python3-pip libslirp-dev
+        if_ubuntu build_static_slirp
+    fi
+
     local td
     td="$(mktemp -d)"
diff --git a/docker/solaris.sh b/docker/solaris.sh
index ac51a6a7b..a7ba0da98 100755
--- a/docker/solaris.sh
+++ b/docker/solaris.sh
@@ -8,15 +8,18 @@ set -euo pipefail
 
 main() {
     local arch="${1}"
+    local manufacturer="${2}"
 
     local binutils=2.28.1 \
-          gcc=8.4.0 \
-          target="${arch}-sun-solaris2.10"
+        gcc=8.4.0 \
+        target="${arch}-${manufacturer}-solaris2.10"
 
     install_packages bzip2 \
         ca-certificates \
         curl \
+        dirmngr \
         g++ \
+        gpg-agent \
         make \
         patch \
         software-properties-common \
@@ -29,10 +32,10 @@ main() {
 
     mkdir "${td}"/{binutils,gcc}{,-build} "${td}/solaris"
 
-    curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/binutils/binutils-${binutils}.tar.xz" -O
+    download_binutils "${binutils}" "xz"
     tar -C "${td}/binutils" --strip-components=1 -xJf "binutils-${binutils}.tar.xz"
 
-    curl --retry 3 -sSfL "https://ftp.gnu.org/gnu/gcc/gcc-${gcc}/gcc-${gcc}.tar.xz" -O
+    download_gcc "${gcc}" "xz"
    tar -C "${td}/gcc" --strip-components=1 -xJf "gcc-${gcc}.tar.xz"
 
     cd gcc
@@ -54,28 +57,33 @@ main() {
     esac
 
     apt-key adv --batch --yes --keyserver keyserver.ubuntu.com --recv-keys 74DA7924C5513486
-    add-apt-repository -y 'deb http://apt.dilos.org/dilos dilos2-testing main'
+    add-apt-repository -y 'deb http://apt.dilos.org/dilos dilos2 main'
     dpkg --add-architecture "${apt_arch}"
     apt-get update
-    # shellcheck disable=SC2046
-    apt-get download $(apt-cache depends --recurse --no-replaces \
-        "libc:${apt_arch}" \
-        "liblgrp-dev:${apt_arch}" \
-        "liblgrp:${apt_arch}" \
-        "libm-dev:${apt_arch}" \
-        "libpthread:${apt_arch}" \
-        "libresolv:${apt_arch}" \
-        "librt:${apt_arch}" \
-        "libsendfile-dev:${apt_arch}" \
-        "libsendfile:${apt_arch}" \
-        "libsocket:${apt_arch}" \
-        "system-crt:${apt_arch}" \
-        "system-header:${apt_arch}" \
-        | grep "^\w")
-
-    for deb in *"${apt_arch}.deb"; do
-        dpkg -x "${deb}" "${td}/solaris"
+    apt-get install -y --download-only \
+        "libc:${apt_arch}" \
+        "liblgrp:${apt_arch}" \
+        "libm-dev:${apt_arch}" \
+        "libpthread:${apt_arch}" \
+        "libresolv:${apt_arch}" \
+        "librt:${apt_arch}" \
+        "libsendfile:${apt_arch}" \
+        "libsocket:${apt_arch}" \
+        "system-crt:${apt_arch}" \
+        "system-header:${apt_arch}"
+
+    for deb in /var/cache/apt/archives/*"${apt_arch}.deb"; do
+        dpkg -x "${deb}" "${td}/solaris"
     done
+    apt-get clean
+
+    # The -dev packages are not available from the apt repository we're using.
+    # However, those packages are just symlinks from *.so to *.so.<version>.
+    # This creates all of those symlinks.
+    while IFS= read -r -d '' lib; do
+        link_name=${lib%.so.*}.so
+        [ -e "$link_name" ] || ln -sf "${lib##*/}" "$link_name"
+    done < <(find . -name '*.so.*' -print0)
 
     cd binutils-build
     ../binutils/configure \
@@ -119,7 +127,7 @@ EOF
         --disable-lto \
         --disable-multilib \
         --disable-nls \
-        --enable-languages=c,c++ \
+        --enable-languages=c,c++,fortran \
         --with-gnu-as \
         --with-gnu-ld \
         --target="${target}"
diff --git a/docker/toolchain.cmake b/docker/toolchain.cmake
new file mode 100644
index 000000000..0017aac45
--- /dev/null
+++ b/docker/toolchain.cmake
@@ -0,0 +1,57 @@
+# default toolchain file for targets, see #1110
+# required so CMAKE_CROSSCOMPILING_EMULATOR is set,
+# as well as for embedded systems and other targets.
+#
+# all embedded systems without an OS should set the system name to generic
+# https://cmake.org/cmake/help/book/mastering-cmake/chapter/Cross%20Compiling%20With%20CMake.html
+
+set(CMAKE_SYSTEM_NAME "$ENV{CROSS_CMAKE_SYSTEM_NAME}")
+set(CMAKE_SYSTEM_PROCESSOR "$ENV{CROSS_CMAKE_SYSTEM_PROCESSOR}")
+if(DEFINED ENV{CROSS_TARGET_RUNNER})
+    set(runner "$ENV{CROSS_TARGET_RUNNER}")
+    separate_arguments(runner)
+    set(CMAKE_CROSSCOMPILING_EMULATOR ${runner})
+endif()
+
+# not all of these are standard, however, they're common enough
+# that it's good practice to define them.
+set(prefix "$ENV{CROSS_TOOLCHAIN_PREFIX}")
+set(suffix "$ENV{CROSS_TOOLCHAIN_SUFFIX}")
+set(CMAKE_C_COMPILER "${prefix}gcc${suffix}")
+set(CMAKE_ASM_COMPILER "${prefix}gcc${suffix}")
+set(CMAKE_CXX_COMPILER "${prefix}g++${suffix}")
+set(CMAKE_AR "${prefix}ar")
+set(CMAKE_LINKER "${prefix}ld")
+set(CMAKE_NM "${prefix}nm")
+set(CMAKE_OBJCOPY "${prefix}objcopy")
+set(CMAKE_OBJDUMP "${prefix}objdump")
+set(CMAKE_RANLIB "${prefix}ranlib")
+set(CMAKE_STRIP "${prefix}strip")
+
+# these are cached so any build compiled outside of the rust build
+# system, such as a third-party cmake build and install of a shared
+# library, will still work. however, cmake-rs can override these values
+if(DEFINED ENV{CROSS_CMAKE_OBJECT_FLAGS})
+    set(CMAKE_C_FLAGS "$ENV{CROSS_CMAKE_OBJECT_FLAGS}" CACHE STRING "C Compiler options")
+    set(CMAKE_CXX_FLAGS "$ENV{CROSS_CMAKE_OBJECT_FLAGS}" CACHE STRING "C++ Compiler options")
+    set(CMAKE_ASM_FLAGS "$ENV{CROSS_CMAKE_OBJECT_FLAGS}" CACHE STRING "ASM Compiler options")
+endif()
+
+# if cross-compiling, we need to restrict where cmake searches for
+# programs, libraries, and headers, and also provide our own sysroot
+if(DEFINED ENV{CROSS_SYSROOT})
+    set(CMAKE_FIND_ROOT_PATH "$ENV{CROSS_SYSROOT}" "${CMAKE_PREFIX_PATH}")
+    set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
+    set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
+    set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
+    set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
+endif()
+
+set(crt "$ENV{CROSS_CMAKE_CRT}")
+if(crt STREQUAL "newlib")
+    # cmake normally tries to test the C and C++ compilers by building and
+    # running a binary, but this fails for bare-metal targets, since
+    # they are missing start files and potentially other symbols.
+    # choosing to make a static library causes cmake to skip the check.
+    set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+endif()
diff --git a/docker/validate-android-args.sh b/docker/validate-android-args.sh
new file mode 100755
index 000000000..660a39fd7
--- /dev/null
+++ b/docker/validate-android-args.sh
@@ -0,0 +1,217 @@
+#!/usr/bin/env bash
+# Ensure the NDK, SDK, and Android versions are consistent, so we exit
+# before a build fails or, even worse, a runner fails later.
+
+set -x
+set -euo pipefail
+
+main() {
+    local arch="${1}"
+
+    validate_ndk "${arch}"
+    validate_sdk
+    validate_system
+    validate_ndk_sdk "${arch}"
+    validate_sdk_system
+}
+
+validate_ndk() {
+    local arch="${1}"
+    local ndk_version=
+    ndk_version=$(echo "${ANDROID_NDK}" | tr -dc '0-9')
+
+    case "${arch}" in
+        mips|mips64)
+            if [[ "${ndk_version}" -ge 17 ]]; then
+                echo "Android NDKs r17+ removed support for MIPS architectures." 1>&2
+                exit 1
+            fi
+            ;;
+        *)
+            ;;
+    esac
+}
+
+validate_sdk() {
+    local invalid_sdk_versions=(6 7 10 11 20 25)
+    # pad with spaces so we match whole versions, not substrings
+    # shellcheck disable=SC2076
+    if [[ " ${invalid_sdk_versions[*]} " =~ " ${ANDROID_SDK} " ]]; then
+        echo "The Android SDK version ${ANDROID_SDK} is not provided by Android and therefore not supported." 1>&2
+        exit 1
+    fi
+}
+
+validate_system() {
+    local major_version
+    major_version=$(echo "${ANDROID_VERSION}" | cut -d '.' -f 1)
+    if [[ "${major_version}" -lt 5 ]]; then
+        echo "Invalid Android version ${ANDROID_VERSION}, must be Android 5+." 1>&2
+        exit 1
+    fi
+}
+
+validate_ndk_sdk() {
+    local arch="${1}"
+    local ndk_version=
+    ndk_version=$(echo "${ANDROID_NDK}" | tr -dc '0-9')
+
+    # no minimum version for most 32-bit architectures
+    case "${arch}" in
+        arm|x86)
+            ;;
+        mips)
+            check_min_sdk_arch "${arch}" 9
+            ;;
+        arm64|mips64|x86_64)
+            check_min_sdk_arch "${arch}" 21
+            ;;
+        *)
+            echo "Unsupported architecture, got ${arch}." 1>&2
+            exit 1
+            ;;
+    esac
+
+    case "${ndk_version}" in
+        9)
+            check_sdk_range 3 19
+            ;;
+        10)
+            check_sdk_range 3 21
+            ;;
+        11)
+            check_sdk_range 3 24
+            ;;
+        12|13|14)
+            check_sdk_range 9 24
+            ;;
+        15)
+            check_sdk_range 14 26
+            ;;
+        16)
+            check_sdk_range 14 27
+            ;;
+        17)
+            check_sdk_range 14 28
+            ;;
+        18)
+            check_sdk_range 16 28
+            ;;
+        19)
+            check_sdk_range 16 28
+            ;;
+        20)
+            check_sdk_range 16 29
+            ;;
+        21|22)
+            check_sdk_range 21 30
+            ;;
+        23)
+            check_sdk_range 21 31
+            ;;
+        24)
+            check_sdk_range 21 32
+            ;;
+        25)
+            check_sdk_range 21 33
+            ;;
+        *)
+            echo "Currently unsupported NDK version of ${ndk_version}." 1>&2
+            echo "If you would like support, please file an issue." 1>&2
+            exit 1
+            ;;
+    esac
+}
+
+check_min_sdk_arch() {
+    local arch="${1}"
+    local minimum="${2}"
+    if [[ "${ANDROID_SDK}" -lt "${minimum}" ]]; then
+        echo "Invalid SDK version ${ANDROID_SDK} for architecture ${arch}" 1>&2
+        echo "The minimum supported SDK version is ${minimum}." 1>&2
+        exit 1
+    fi
+}
+
+check_sdk_range() {
+    local lower="${1}"
+    local upper="${2}"
+    if [[ "${ANDROID_SDK}" -lt "${lower}" ]] || [[ "${ANDROID_SDK}" -gt "${upper}" ]]; then
+        echo "Invalid SDK version ${ANDROID_SDK} for NDK version ${ANDROID_NDK}" 1>&2
+        echo "Valid SDK versions are ${lower}-${upper}." 1>&2
+        exit 1
+    fi
+}
+
+validate_sdk_system() {
+    local major_version
+    local minor_version
+    major_version=$(echo "${ANDROID_VERSION}" | cut -d '.' -f 1)
+    minor_version=$(echo "${ANDROID_VERSION}" | cut -d '.' -f 2)
+    local system_version="${major_version}.${minor_version}"
+    case "${system_version}" in
+        5.0)
+            check_sdk_system_equal 21
+            ;;
+        5.1)
+            check_sdk_system_equal 22
+            ;;
+        6.0)
+            check_sdk_system_equal 23
+            ;;
+        7.0)
+            check_sdk_system_equal 24
+            ;;
+        7.1)
+            check_sdk_system_equal 25
+            ;;
+        8.0)
+            check_sdk_system_equal 26
+            ;;
+        8.1)
+            check_sdk_system_equal 27
+            ;;
+        9.0)
+            check_sdk_system_equal 28
+            ;;
+        10.0)
+            check_sdk_system_equal 29
+            ;;
+        11.0)
+            check_sdk_system_equal 30
+            ;;
+        12.0)
+            check_sdk_system_equal 31
+            ;;
+        12.1)
+            # NOTE: also known as 12L
+            check_sdk_system_equal 32
+            ;;
+        13.0)
+            check_sdk_system_equal 33
+            ;;
+        *)
+            echo "Currently unsupported Android system version of ${system_version}." 1>&2
+            echo "If you would like support, please file an issue." 1>&2
+            exit 1
+            ;;
+    esac
+}
+
+check_sdk_system_equal() {
+    local expected=("$@")
+    local valid=0
+
+    for version in "${expected[@]}"; do
+        if [[ "${ANDROID_SDK}" == "${version}" ]]; then
+            valid=1
+        fi
+    done
+
+    if [[ "${valid}" -ne 1 ]]; then
+        # shellcheck disable=SC2145
+        echo "Invalid SDK version, got ${ANDROID_SDK} and expected ${expected[@]}."
1>&2 + exit 1 + fi +} + +main "${@}" diff --git a/docker/windows-entry.sh b/docker/windows-entry.sh index d7673f85f..c4b1c1e8e 100755 --- a/docker/windows-entry.sh +++ b/docker/windows-entry.sh @@ -9,7 +9,8 @@ mkdir -p "${HOME}" export WINEPREFIX=/tmp/wine mkdir -p "${WINEPREFIX}" # FIXME: Make the wine prefix initialization faster -wineboot &> /dev/null +# TODO: https://github.com/cross-rs/cross/issues/1372 wine fails on arm64 qemu +wineboot &> /dev/null || true # Put libstdc++ and some other mingw dlls in WINEPATH # This must work for x86_64 and i686 diff --git a/docker/wine.sh b/docker/wine.sh index cda79f38a..b449e82ea 100755 --- a/docker/wine.sh +++ b/docker/wine.sh @@ -7,6 +7,7 @@ set -euo pipefail . lib.sh main() { + local version="9.0.0.0~focal-1" install_packages wget dpkg --add-architecture i386 @@ -14,12 +15,26 @@ main() { # add repository for latest wine version and install from source # hardcode version, since we might want to avoid a version later. wget -nc https://dl.winehq.org/wine-builds/winehq.key - mv winehq.key /usr/share/keyrings/winehq-archive.key - wget -nc https://dl.winehq.org/wine-builds/ubuntu/dists/bionic/winehq-bionic.sources - mv winehq-bionic.sources /etc/apt/sources.list.d/ + + # workaround for wine server synchronization, see #1035 + # we need to ensure the keys are now stored in `/etc/apt/keyrings`, + # which were previously stored in `/usr/share/keyrings`, and ensure + # our sources list searches for the right location. + mkdir -p /etc/apt/keyrings + mv winehq.key /etc/apt/keyrings/winehq-archive.key + + wget -nc https://dl.winehq.org/wine-builds/ubuntu/dists/focal/winehq-focal.sources + mv winehq-focal.sources /etc/apt/sources.list.d/ + sed -i s@/usr/share/keyrings/@/etc/apt/keyrings/@ /etc/apt/sources.list.d/winehq-focal.sources || true + + # winehq requires all the dependencies to be manually specified + # if we're not using the latest version of a given major version. apt-get update apt install --no-install-recommends --assume-yes \ - "winehq-stable=7.0.0.0~bionic-1" + "wine-stable=${version}" \ + "wine-stable-amd64=${version}" \ + "wine-stable-i386=${version}" \ + "winehq-stable=${version}" purge_packages } diff --git a/docker/zig.sh b/docker/zig.sh new file mode 100755 index 000000000..b84a74df2 --- /dev/null +++ b/docker/zig.sh @@ -0,0 +1,197 @@ +#!/usr/bin/env bash + +set -x +set -eo pipefail + +# shellcheck disable=SC1091 +. 
lib.sh
+
+main() {
+    local platform="${1}"
+    install_packages ca-certificates curl xz-utils
+
+    install_zig "${platform}"
+    install_zigbuild "${platform}"
+
+    purge_packages
+    rm "${0}"
+}
+
+install_zig() {
+    local platform="${1}"
+    local version="0.11.0"
+    local dst="/opt/zig"
+    local arch=
+    local os=
+    local triple=
+
+    case "${platform}" in
+        'linux/386')
+            arch="i386"
+            os="linux"
+            ;;
+        'linux/amd64')
+            arch="x86_64"
+            os="linux"
+            ;;
+        'linux/arm64')
+            arch="aarch64"
+            os="linux"
+            ;;
+        'linux/riscv64')
+            arch="riscv64"
+            os="linux"
+            ;;
+        'linux/ppc64le')
+            triple="powerpc64le-linux-gnu"
+            ;;
+        'linux/s390x')
+            triple="s390x-linux-gnu"
+            ;;
+        'darwin/amd64')
+            arch="x86_64"
+            os="macos"
+            ;;
+        'darwin/arm64')
+            arch="aarch64"
+            os="macos"
+            ;;
+        # NOTE: explicitly don't support linux/arm/v6
+        *)
+            echo "Unsupported target platform '${platform}'" 1>&2
+            exit 1
+            ;;
+    esac
+
+    if [[ -n "${arch}" ]]; then
+        install_zig_tarball "${arch}" "${os}" "${version}" "${dst}"
+    else
+        install_zig_source "${triple}" "${version}" "${dst}"
+    fi
+}
+
+install_zig_tarball() {
+    local arch="${1}"
+    local os="${2}"
+    local version="${3}"
+    local dst="${4}"
+    local filename="zig-${os}-${arch}-${version}.tar.xz"
+
+    local td
+    td="$(mktemp -d)"
+
+    pushd "${td}"
+
+    curl --retry 3 -sSfL "https://ziglang.org/download/${version}/${filename}" -O
+    mkdir -p "${dst}"
+    tar --strip-components=1 -xJf "${filename}" --directory "${dst}"
+
+    popd
+
+    rm -rf "${td}"
+}
+
+install_zig_source() {
+    local triple="${1}"
+    local version="${2}"
+    local dst="${3}"
+    local filename="zig-bootstrap-${version}.tar.xz"
+
+    local td
+    td="$(mktemp -d)"
+
+    pushd "${td}"
+
+    curl --retry 3 -sSfL "https://ziglang.org/download/${version}/${filename}" -O
+    mkdir zig
+    tar --strip-components=1 -xJf "${filename}" --directory zig
+
+    pushd zig
+    install_packages python3 make g++
+    ./build -j5 "${triple}" native
+    mv "out/zig-${triple}-native" /opt/zig
+
+    popd
+    popd
+
+    rm -rf "${td}"
+}
+
+install_zigbuild() {
+    local platform="${1}"
+    local version="0.17.5"
+    local dst="/usr/local"
+    local triple=
+
+    # we don't know if `linux/arm/v7` is hard-float, and the
+    # zigbuild `apple-darwin` target doesn't manually specify
+    # the architecture, so both fall back to a source install.
+    case "${platform}" in
+        'linux/386')
+            triple="i686-unknown-linux-musl"
+            ;;
+        'linux/amd64')
+            triple="x86_64-unknown-linux-musl"
+            ;;
+        'linux/arm64')
+            triple="aarch64-unknown-linux-musl"
+            ;;
+        *)
+            ;;
+    esac
+
+    if [[ -n "${triple}" ]]; then
+        install_zigbuild_tarball "${triple}" "${version}" "${dst}"
+    else
+        install_zigbuild_source "${version}" "${dst}"
+    fi
+}
+
+install_zigbuild_tarball() {
+    local triple="${1}"
+    local version="${2}"
+    local dst="${3}"
+    local repo="https://github.com/messense/cargo-zigbuild"
+    local filename="cargo-zigbuild-v${version}.${triple}.tar.gz"
+
+    local td
+    td="$(mktemp -d)"
+
+    pushd "${td}"
+
+    curl --retry 3 -sSfL "${repo}/releases/download/v${version}/${filename}" -O
+    mkdir -p "${dst}/bin"
+    tar -xzf "${filename}" --directory "${dst}/bin"
+
+    popd
+
+    rm -rf "${td}"
+}
+
+install_zigbuild_source() {
+    local version="${1}"
+    local dst="${2}"
+
+    local td
+    td="$(mktemp -d)"
+
+    pushd "${td}"
+
+    export RUSTUP_HOME="${td}/rustup"
+    export CARGO_HOME="${td}/cargo"
+
+    curl --retry 3 -sSfL https://sh.rustup.rs -o rustup-init.sh
+    sh rustup-init.sh -y --no-modify-path --profile minimal
+
+    PATH="${CARGO_HOME}/bin:${PATH}" \
+        cargo install cargo-zigbuild \
+        --version "${version}" \
+        --root "${dst}" \
+        --locked
+
+    popd
+
+    rm -rf "${td}"
+}
+
+main "${@}"
diff --git a/docs/cargo_configuration.md b/docs/cargo_configuration.md
new file mode 100644
index 000000000..d02c09cb9
--- /dev/null
+++ b/docs/cargo_configuration.md
@@ -0,0 +1,64 @@
+
+- [Configuring `cross`](#configuring-cross)
+- [Configuring Cargo through environment variables](#configuring-cargo-through-environment-variables)
+- [Use Xargo instead of Cargo](#use-xargo-instead-of-cargo)
+
+
+# Configuring `cross`
+
+Please refer to the following docs:
+
+- [config file](./config_file.md)
+- [env variables](./environment_variables.md)
+
+
+# Configuring Cargo through environment variables
+
+When cross-compiling, `cargo` does not use environment variables such as
+`RUSTFLAGS`; these options must instead be provided as
+`CARGO_TARGET_${TARGET}_${OPTION}`. Please note that some of these, such as
+runners, may be provided by the images themselves, and should be overridden
+with caution. A list of important flags includes:
+
+- `CARGO_TARGET_${TARGET}_LINKER`: specify a custom linker passed to rustc.
+- `CARGO_TARGET_${TARGET}_RUNNER`: specify the wrapper to run executables.
+- `CARGO_TARGET_${TARGET}_RUSTFLAGS`: add additional flags passed to rustc.
+
+Any of the following [flags][cargo-flags] can be provided; their names are
+converted to uppercase, with hyphens replaced by underscores. For example, the
+option `foo-bar` would be provided as `CARGO_TARGET_${TARGET}_FOO_BAR`.
+
+For example, to run binaries on `i686-unknown-linux-gnu` with Qemu, first
+create a custom image containing Qemu, and run with the following command:
+
+```
+CARGO_TARGET_I686_UNKNOWN_LINUX_GNU_RUNNER=qemu-i386 cross run ...
+```
+
+
+# Use Xargo instead of Cargo
+
+By default, `cross` uses `xargo` to build your Cargo project only for
+non-standard targets (i.e. something not reported by rustc/rustup). However,
+you can use the `build.xargo` or `target.{{TARGET}}.xargo` field in
+`Cross.toml` to force the use of `xargo`:
+
+```toml
+# all the targets will use `xargo`
+[build]
+xargo = true
+```
+
+Or,
+
+```toml
+# only this target will use `xargo`
+[target.aarch64-unknown-linux-gnu]
+xargo = true
+```
+
+`xargo = false` works the opposite way (always pick cargo) and is useful
+when building for custom targets that you know work with cargo.
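+For instance, a minimal sketch (the custom target name here is hypothetical):
+
+```toml
+# always use plain cargo for one custom target known to build without xargo
+[target.my-custom-target]
+xargo = false
+```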
+
+[cargo-flags]: https://doc.rust-lang.org/cargo/reference/config.html#target
diff --git a/docs/config_file.md b/docs/config_file.md
new file mode 100644
index 000000000..be67ee1f2
--- /dev/null
+++ b/docs/config_file.md
@@ -0,0 +1,325 @@
+
+- [`build`](#build)
+- [`build.env`](#buildenv)
+- [`build.dockerfile`](#builddockerfile)
+- [`build.zig`](#buildzig)
+- [`target.TARGET`](#targettarget)
+- [`target.TARGET.pre-build`](#targettargetpre-build)
+- [`target.TARGET.image`](#targettargetimage)
+- [`target.TARGET.env`](#targettargetenv)
+- [`target.TARGET.dockerfile`](#targettargetdockerfile)
+- [`target.TARGET.zig`](#targettargetzig)
+
+
+> **Note**: Additional configuration is available through
+> [environment variables](./environment_variables.md)
+
+You can place a `Cross.toml` file in the root of your Cargo project or use a
+`CROSS_CONFIG` environment variable to tweak cross's behavior. You can also use
+`package.metadata.cross.KEY` in `Cargo.toml`; settings take the following
+priority: environment variables override `Cross.toml` options, which in turn
+override `Cargo.toml` options. Annotated examples of both
+[`Cross.toml`][example-cross-toml] and [`Cargo.toml`][example-cargo-toml] are
+provided.
+
+For example, the `[build]` table in `Cross.toml` is identical to setting
+`[package.metadata.cross.build]` in `Cargo.toml`.
+
+The `cross` configuration in the `Cross.toml` file can contain the following
+elements:
+
+
+# `build`
+
+The `build` key allows you to set global variables, e.g.:
+
+> *NOTE*: `$CROSS_DEB_ARCH` is automatically provided by cross,
+> [see here][custom_images_automatic_arch].
+
+```toml
+[build]
+build-std = false # do not build the std library. has precedence over xargo
+xargo = true # enable the use of xargo by default
+zig = false # do not use zig cc for the builds
+default-target = "x86_64-unknown-linux-gnu" # use this target if none is explicitly provided
+pre-build = [ # additional commands to run prior to building the package
+    "dpkg --add-architecture $CROSS_DEB_ARCH",
+    "apt-get update && apt-get --assume-yes install libssl-dev:$CROSS_DEB_ARCH"
+]
+```
+
+
+# `build.env`
+
+With the `build.env` key you can globally set volumes that should be mounted in
+the Docker container or environment variables that should be passed through.
+For example:
+
+```toml
+[build.env]
+volumes = ["VOL1_ARG", "VOL2_ARG=/path/to/volume"]
+passthrough = ["VAR1_ARG", "VAR2_ARG=VALUE"]
+```
+
+Note how in the environment variable passthrough, we can provide a definition
+for the variable as well. `VAR1_ARG` will be the value of the environment
+variable on the host, while `VAR2_ARG` will be `VALUE`. Likewise, the path to
+the volume for `VOL1_ARG` will be the value of the environment variable on the
+host, while `VOL2_ARG` will be `/path/to/volume`.
+
+
+# `build.dockerfile`
+
+> If the image you want to use is already available from a container registry,
+> check out the `target.TARGET.image` option below.
+
+The `build.dockerfile` key lets you provide a custom Docker image for all
+targets, except those that specify `target.TARGET.dockerfile`. The value can be
+provided as either a table or a string. If `build.dockerfile` is set to a
+string, it's equivalent to setting `build.dockerfile.file` to that value. For
+example, using only a string:
+
+```toml
+[build]
+dockerfile = "./Dockerfile"
+```
+
+Or using a table:
+
+```toml
+[build.dockerfile]
+file = "./Dockerfile" # the dockerfile to use relative to the `Cargo.toml`
+context = "." # the context folder to build the script in. defaults to `.`
+build-args = { ARG1 = "foo" } # https://docs.docker.com/engine/reference/builder/#arg
+```
+
+`cross` will build and use the image that was built instead of the default
+image. It's recommended to base your custom image on the default Docker image
+that `cross` uses: `ghcr.io/cross-rs/{{TARGET}}:{{VERSION}}` (where
+`{{VERSION}}` is `cross`'s version). This way you won't have to figure out how
+to install a cross-C toolchain in your custom image.
+
+> *NOTE*: `$CROSS_DEB_ARCH` is automatically provided by cross, [see
+> here][custom_images_automatic_arch].
+
+```Dockerfile
+FROM ghcr.io/cross-rs/aarch64-unknown-linux-gnu:latest
+
+RUN dpkg --add-architecture $CROSS_DEB_ARCH && \
+    apt-get update && \
+    apt-get install --assume-yes libfoo:$CROSS_DEB_ARCH
+```
+
+`cross` will provide the argument `CROSS_BASE_IMAGE` which points to the
+default image `cross` would use for the target. Instead of the above, you can
+also then do the following:
+
+```Dockerfile
+ARG CROSS_BASE_IMAGE
+FROM $CROSS_BASE_IMAGE
+RUN ...
+```
+
+
+# `build.zig`
+
+The `build.zig` key lets you use `zig cc` as a cross-compiler, enabling
+cross-compilation to numerous architectures and glibc versions using a single
+Docker image. Note that `zig cc` supports only a subset of our Linux GNU
+targets, so it might be better to set these values in `target.TARGET.zig`
+instead. The value can be provided as either a table, a bool, or a string. If
+`build.zig` is set to a string, it's equivalent to setting
+`build.zig.version` to that value and `build.zig.enable` to true:
+
+```toml
+[build]
+zig = "2.17"
+```
+
+If `build.zig` is set to a bool, it's equivalent to setting `build.zig.enable`
+to that value:
+
+```toml
+[build]
+zig = true
+```
+
+Or using a table:
+
+```toml
+[build.zig]
+enable = true # enable or disable the use of zig cc
+version = "2.17" # the glibc version to use
+image = "myimage" # a custom image containing zig to use
+```
+
+
+# `target.TARGET`
+
+The `target` key allows you to specify parameters for specific compilation
+targets:
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+build-std = ["core", "alloc"] # always build the `core` and `alloc` crates from the std library. has precedence over xargo
+xargo = false # disable the use of xargo
+image = "test-image" # use a different image for the target
+runner = "qemu-user" # wrapper to run the binary (must be `qemu-system`, `qemu-user`, or `native`).
+```
+
+
+# `target.TARGET.pre-build`
+
+The `pre-build` field can reference a file to copy and run. This file is
+relative to the container context, which would be the workspace root, or the
+current directory if `--manifest-path` is used. For more involved scripts,
+consider using `target.TARGET.dockerfile` instead to directly control the
+execution.
+
+This script will be invoked as `RUN ./pre-build-script $CROSS_TARGET` where
+`$CROSS_TARGET` is the target triple.
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+pre-build = "./scripts/my-script.sh"
+```
+
+```bash
+$ cat ./scripts/my-script.sh
+#!/usr/bin/env bash
+
+apt-get install libssl-dev -y
+```
+
+`pre-build` can also be a list of commands to directly run inside the image:
+
+> *NOTE*: `$CROSS_DEB_ARCH` is automatically provided by cross, [see
+> here][custom_images_automatic_arch].
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+pre-build = [
+    "dpkg --add-architecture $CROSS_DEB_ARCH",
+    "apt-get update",
+    "apt-get install --assume-yes libfoo:$CROSS_DEB_ARCH"
+]
+```
+
+
+# `target.TARGET.image`
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+image = "my/image:latest"
+```
+
+In the example above, `cross` will use an image named `my/image:latest` instead
+of the default one. Normal Docker behavior applies, so:
+
+- Docker will first look for a local image named `my/image:latest`
+- If it doesn't find a local image, then it will look in Docker Hub.
+- If only `image:latest` is specified, then Docker won't look in Docker Hub.
+- If the tag is omitted, then Docker will use the `latest` tag.
+
+If you specify a tag but no image name, `cross` will use the default image with
+the tag you provided:
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+# Translates to `ghcr.io/cross-rs/aarch64-unknown-linux-gnu:edge`
+image = ":edge"
+
+[target.x86_64-unknown-linux-musl]
+# Translates to `ghcr.io/cross-rs/x86_64-unknown-linux-musl@sha256:77db671d8356a64ae72a3e1415e63f547f26d374fbe3c4762c1cd36c7eac7b99`
+image = "@sha256:77db671d8356a64ae72a3e1415e63f547f26d374fbe3c4762c1cd36c7eac7b99"
+```
+
+You can also specify a subtarget with neither a tag nor an image name:
+
+```toml
+[target.x86_64-unknown-linux-gnu]
+# Translates to `ghcr.io/cross-rs/x86_64-unknown-linux-gnu:0.3.0-centos`
+image = "-centos"
+```
+
+The `image` key can also take the toolchains/platforms supported by the image:
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+image.name = "alpine:edge"
+image.toolchain = ["x86_64-unknown-linux-musl", "linux/arm64=aarch64-unknown-linux-musl"] # Defaults to `x86_64-unknown-linux-gnu`
+```
+
+
+# `target.TARGET.env`
+
+The `env` key allows you to specify environment variables that should be used
+for a specific compilation target. This is similar to `build.env`, but allows
+you to be more specific per target:
+
+```toml
+[target.x86_64-unknown-linux-gnu.env]
+volumes = ["VOL1_ARG", "VOL2_ARG=/path/to/volume"]
+passthrough = ["VAR1_ARG", "VAR2_ARG=VALUE"]
+```
+
+
+# `target.TARGET.dockerfile`
+
+The `dockerfile` key lets you provide a custom Docker image for the
+given target. The value can be provided as either a table or a string. If
+`target.TARGET.dockerfile` is set to a string, it's equivalent to setting
+`target.(...).dockerfile.file` to that value. For example, using only a string:
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+dockerfile = "./Dockerfile"
+```
+
+Or using a table:
+
+```toml
+[target.aarch64-unknown-linux-gnu.dockerfile]
+file = "./Dockerfile" # the dockerfile to use relative to the `Cargo.toml`
+context = "." # the context folder to build the script in. defaults to `.`
+build-args = { ARG1 = "foo" } # https://docs.docker.com/engine/reference/builder/#arg
+```
+
+
+# `target.TARGET.zig`
+
+The `target.TARGET.zig` key lets you use `zig cc` as a cross-compiler, enabling
+cross-compilation to numerous architectures and glibc versions using a single
+Docker image. The value can be provided as either a table, a bool, or a string.
+If `target.TARGET.zig` is set to a string, it's equivalent to setting +`target.TARGET.zig.version` to that value and `target.TARGET.zig.enable` to +true: + +```toml +[target.aarch64-unknown-linux-gnu] +zig = "2.17" +``` + +If `target.TARGET.zig` is set to a bool, it's equivalent to setting +`target.TARGET.zig.enable` to that value: + +```toml +[target.aarch64-unknown-linux-gnu] +zig = true +``` + +Or using a table: + +```toml +[target.aarch64-unknown-linux-gnu.zig] +enable = true # enable or disable the use of zig cc +version = "2.17" # the glibc version to use +image = "myimage" # a custom image containing zig to use +``` + + + +[example-cross-toml]: https://github.com/cross-rs/wiki_assets/blob/main/Configuration/Cross.toml +[example-cargo-toml]: https://github.com/cross-rs/wiki_assets/blob/main/Configuration/Cargo.toml +[custom_images_automatic_arch]: ./custom_images.md#automatic-target-architecture-on-debian diff --git a/docs/cross_toml.md b/docs/cross_toml.md deleted file mode 100644 index 91e3f36f3..000000000 --- a/docs/cross_toml.md +++ /dev/null @@ -1,84 +0,0 @@ -The `cross` configuration in the `Cross.toml` file, can contain the elements described below. - -If the configuration is given in the `Cargo.toml`, these table headers must be of the form `[package.metadata.cross.]`. - -# `build` - -The `build` key allows you to set global variables, e.g.: - -```toml -[build] -xargo = true -build-std = true -default-target = "x86_64-unknown-linux-gnu" -``` - -# `build.env` - -With the `build.env` key you can globally set volumes that should be mounted -in the Docker container or environment variables that should be passed through. -For example: - -```toml -[build.env] -volumes = ["VOL1_ARG", "VOL2_ARG"] -passthrough = ["IMPORTANT_ENV_VARIABLES"] -``` - -# `target.TARGET` - -The `target` key allows you to specify parameters for specific compilation targets. - -```toml -[target.aarch64-unknown-linux-gnu] -xargo = false -build-std = false -image = "test-image" -pre-build = ["apt-get update"] # can also be the path to a file to run -runner = "custom-runner" -``` - -# `target.TARGET.pre-build` - -The `pre-build` field can also reference a file to copy and run. This file is relative to the container context, which would be the workspace root, or the current directory if `--manifest-path` is used. For more involved scripts, consider using `target.TARGET.dockerfile` instead to directly control the execution. - -This script will be invoked as `RUN ./pre-build-script $CROSS_TARGET` where `$CROSS_TARGET` is the target triple. - -```toml -[target.aarch64-unknown-linux-gnu] -pre-build = "./scripts/my-script.sh" -``` - -```sh -$ cat ./scripts/my-script.sh -#!/usr/bin/env bash - -apt-get install libssl-dev -y -``` - -# `target.TARGET.env` - -The `target` key allows you to specify environment variables that should be used for a specific compilation target. -This is similar to `build.env`, but allows you to be more specific per target. - -```toml -[target.x86_64-unknown-linux-gnu.env] -volumes = ["VOL1_ARG", "VOL2_ARG"] -passthrough = ["IMPORTANT_ENV_VARIABLES"] -``` - -# `target.TARGET.dockerfile` - -```toml -[target.x86_64-unknown-linux-gnu.dockerfile] -file = "./Dockerfile" # The dockerfile to use relative to the `Cargo.toml` -context = "." 
# What folder to run the build script in
-build-args = { ARG1 = "foo" } # https://docs.docker.com/engine/reference/builder/#arg
-```
-
-also supports
-
-```toml
-[target.x86_64-unknown-linux-gnu]
-dockerfile = "./Dockerfile"
-```
diff --git a/docs/custom_images.md b/docs/custom_images.md
new file mode 100644
index 000000000..ddaa071f8
--- /dev/null
+++ b/docs/custom_images.md
@@ -0,0 +1,91 @@
+
+- [Automatic Target Architecture on Debian](#automatic-target-architecture-on-debian)
+- [Custom Images](#custom-images)
+  - [Adding Dependencies to Existing Images](#adding-dependencies-to-existing-images)
+  - [Custom Dockerfile](#custom-dockerfile)
+  - [Custom Image](#custom-image)
+
+
+# Automatic Target Architecture on Debian
+
+Custom images generated from config `dockerfile` or `pre-build` keys will
+export `CROSS_DEB_ARCH`, which allows you to install packages from
+Ubuntu/Debian repositories without having to specify the exact architecture.
+You can find an
+[example of this here](#adding-dependencies-to-existing-images).
+
+
+# Custom Images
+
+`cross` provides default Docker images for the targets listed [in the
+README](../README.md#supported-targets). However, it can't cover every single
+use case out there.
+
+## Adding Dependencies to Existing Images
+
+If you simply need to install a dependency available in Ubuntu's package
+manager, see [`target.TARGET.pre-build`][config-target-pre-build]:
+
+```toml
+[target.x86_64-unknown-linux-gnu]
+pre-build = [
+    "dpkg --add-architecture $CROSS_DEB_ARCH",
+    "apt-get update && apt-get install --assume-yes libssl-dev:$CROSS_DEB_ARCH"
+]
+```
+
+For FreeBSD targets, a few helper scripts are available for use in
+[`target.TARGET.pre-build`][config-target-pre-build]:
+
+```toml
+[target.x86_64-unknown-freebsd]
+pre-build = ["""
+export FREEBSD_MIRROR=$(/freebsd-fetch-best-mirror.sh) &&
+/freebsd-setup-packagesite.sh &&
+/freebsd-install-package.sh xen-tools
+"""]
+```
+
+## Custom Dockerfile
+
+For other targets, or when the default image is not enough, you can use the
+[`target.{{TARGET}}.dockerfile`][config_target_dockerfile] field
+in `Cross.toml` to use a custom Docker image for a specific target:
+
+> *NOTE*: Refer to the [`build.dockerfile`][config_build_dockerfile] section of
+> the configuration for tips when writing your own `Dockerfile`.
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+dockerfile = "Dockerfile"
+```
+
+`cross` will build and use the image that was built instead of the default
+image.
+
+
+## Custom Image
+
+If there is a pre-built image for your specific target, you can use the
+[`target.{{TARGET}}.image`][config_target_image] field in `Cross.toml` to use
+that instead:
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+image = "my/image:tag"
+```
+
+In this case, `cross` will use an image named `my/image:tag` instead of the
+default one. Normal Docker behavior applies, so:
+
+- Docker will first look for a local image named `my/image:tag`
+- If it doesn't find a local image, then it will look in Docker Hub.
+- If only `image:tag` is specified, then Docker won't look in Docker Hub.
+- If the tag is omitted, then Docker will use the `latest` tag.
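+The image name is handed to the container engine as-is, so a fully-qualified
+reference should work too. A sketch, with a hypothetical image hosted on
+GitHub's container registry:
+
+```toml
+[target.aarch64-unknown-linux-gnu]
+image = "ghcr.io/myorg/my-image:tag"
+```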
+
+
+
+[config-target-pre-build]: ./config_file.md#targettargetpre-build
+[config_target_dockerfile]: ./config_file.md#targettargetdockerfile
+[config_target_image]: ./config_file.md#targettargetimage
+[config_build_dockerfile]: ./config_file.md#builddockerfile
diff --git a/docs/environment_variables.md b/docs/environment_variables.md
new file mode 100644
index 000000000..bafc72f25
--- /dev/null
+++ b/docs/environment_variables.md
@@ -0,0 +1,120 @@
+
+- [Configuring cross with environment variables](#configuring-cross-with-environment-variables)
+- [Environment-Variable passthrough](#environment-variable-passthrough)
+
+
+# Configuring cross with environment variables
+
+Cross can be further customized by setting certain environment variables.
+In-depth documentation with examples can be found [here][env-examples].
+
+- `CROSS_CONTAINER_ENGINE`: The container engine to run cross in. Defaults to
+  `docker` then `podman`, whichever is found first (example: `docker`, see the
+  [FAQ][faq-container-engines]).
+- `XARGO_HOME`: Home for [`xargo`][xargo-project] (example: `~/.xargo`).
+- `NIX_STORE`: The directory for the [Nix store][nix-store] (example:
+  `/nix/store`).
+- `CROSS_CONTAINER_UID`: Set the user identifier for the cross command
+  (example: `1000`).
+- `CROSS_CONTAINER_GID`: Set the group identifier for the cross command
+  (example: `1000`).
+- `CROSS_CONTAINER_IN_CONTAINER`: Inform `cross` that it is running inside a
+  container (example: `true`, see the FAQ).
+- `CROSS_CONTAINER_OPTS`: Additional arguments to provide to the container
+  engine during `$engine run` (example: `--env MYVAR=1` where `engine=docker`).
+- `CROSS_CONFIG`: Specify the path to the `cross` config file (see [Config
+  File][cross-config-file]).
+- `CROSS_BUILD_OPTS`: Space-separated flags to add when building a custom
+  image, e.g. `--network=host`.
+- `CROSS_DEBUG`: Print debugging information for `cross`.
+- `CROSS_COMPATIBILITY_VERSION`: Use older `cross` behavior (example: `0.2.1`).
+- `CROSS_CUSTOM_TOOLCHAIN`: Specify that `rustup` is using a custom toolchain,
+  and therefore should not try to add targets/install components. Useful with
+  [`cargo-bisect-rustc`][cargo-bisect-rustc].
+- `CROSS_REMOTE`: Inform `cross` it is using a remote container engine, and use
+  data volumes rather than local bind mounts. See [Remote][docs-remote] for
+  more information on using remote container engines.
+- `QEMU_STRACE`: Get a backtrace of system calls from “foreign” (non-x86_64)
+  binaries when using `cross run`.
+- `CARGO_BUILD_TARGET`: Sets the default target, similar to specifying
+  `--target`.
+- `CROSS_ROOTLESS_CONTAINER_ENGINE`: Specify whether the container engine runs
+  as root or is rootless. If set to `auto` or not provided, it assumes `docker`
+  runs as root and all other container engines are rootless.
+- `CROSS_CONTAINER_USER_NAMESPACE`: Customize the [container user
+  namespace][container-user-namespace]. If set to `none`, user namespaces will
+  be disabled. If not provided or set to `auto`, it will use the default
+  namespace.
+- `CROSS_CUSTOM_TOOLCHAIN_COMPAT`: A descriptive name for a custom toolchain so
+  `cross` can convert it to a fully-qualified toolchain name.
+- `CROSS_CONTAINER_ENGINE_NO_BUILDKIT`: The container engine does not have the
+  `buildx` command (or BuildKit support) when building custom images.
+- `CROSS_NO_WARNINGS`: Set to `1` to panic on warnings from `cross`, before
+  building the executables.
+  Use `0` to disable this behaviour.
+  The no warnings behaviour is implicitly enabled in CI pipelines.
+
+All config file options can also be specified using environment variables. For
+example, setting `CROSS_BUILD_XARGO=1` is identical to setting `build.xargo =
+true`, and `CROSS_TARGET_AARCH64_UNKNOWN_LINUX_GNU_XARGO=1` is identical to
+`target.aarch64-unknown-linux-gnu.xargo = true`.
+
+
+# Environment-Variable passthrough
+
+By default, `cross` does not pass most environment variables into the build
+environment from the calling shell. This is a safe default, as most
+use cases will not want the calling environment leaking into the inner
+execution environment. There are, however, some notable exceptions: most
+environment variables `cross` or `cargo` reads are passed through automatically
+to the build environment. The major exceptions are variables that are set by
+`cross` or conflict with our build environment, including:
+
+- `CARGO_HOME`
+- `CARGO_TARGET_DIR`
+- `CARGO_BUILD_TARGET_DIR`
+- `CARGO_BUILD_RUSTC`
+- `CARGO_BUILD_RUSTC_WRAPPER`
+- `CARGO_BUILD_RUSTC_WORKSPACE_WRAPPER`
+- `CARGO_BUILD_RUSTDOC`
+- `CROSS_RUNNER`
+- `CROSS_RUSTC_MAJOR_VERSION`
+- `CROSS_RUSTC_MINOR_VERSION`
+- `CROSS_RUSTC_PATCH_VERSION`
+
+Otherwise, any environment variables that start with `CARGO_` or `CROSS_`, and
+a few others, will be available in the build environment. For example,
+`RUSTFLAGS` and `CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_RUSTFLAGS` will both be
+automatically available in the build environment.
+
+In the instances that you do want to pass through additional environment
+variables, this can be done via `build.env.passthrough` in your `Cross.toml`:
+
+```toml
+[build.env]
+passthrough = [
+    "RUST_BACKTRACE",
+    "RUST_LOG",
+    "TRAVIS",
+]
+```
+
+To pass variables through for one target but not others, you can use
+this syntax instead:
+
+```toml
+[target.aarch64-unknown-linux-gnu.env]
+passthrough = [
+    "RUST_DEBUG",
+]
+```
+
+
+[env-examples]: https://github.com/cross-rs/wiki_assets/blob/main/Configuration/crossrc.bash_aliases
+[faq-container-engines]: https://github.com/cross-rs/cross/wiki/FAQ#explicitly-choose-the-container-engine
+[xargo-project]: https://github.com/japaric/xargo
+[nix-store]: https://nixos.org/manual/nix/stable/introduction.html
+[cross-config-file]: ./config_file.md
+[cargo-bisect-rustc]: https://github.com/rust-lang/cargo-bisect-rustc
+[docs-remote]: ./remote.md
+[container-user-namespace]: https://docs.docker.com/engine/security/userns-remap/
diff --git a/docs/getting-started.md b/docs/getting-started.md
new file mode 100644
index 000000000..d477aa3dd
--- /dev/null
+++ b/docs/getting-started.md
@@ -0,0 +1,123 @@
+
+- [Installing Cross](#installing-cross)
+  - [Installing Rust via Rustup](#installing-rust-via-rustup)
+  - [Installing Cross](#installing-cross)
+- [Installing A Container Engine](#installing-a-container-engine)
+- [Cross-Compiling Your First Package](#cross-compiling-your-first-package)
+
+
+New to cross? Cross-compilation? Container engines? Here's how to get up-and-running.
+
+# Installing Cross
+
+## Installing Rust via Rustup
+
+`cross` requires a `rustup` installation of Rust. The recommended installation
+instructions are documented [here](https://www.rust-lang.org/tools/install),
+but might differ on some platforms.
For UNIX-like systems, run the following
+command in a terminal and follow the instructions to install Rust and add Rust
+to the path:
+
+```bash
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
+```
+
+On Windows, download
+[rustup-init.exe](https://static.rust-lang.org/rustup/dist/i686-pc-windows-gnu/rustup-init.exe)
+or follow the [other installation
+methods](https://forge.rust-lang.org/infra/other-installation-methods.html),
+say, to install from a package manager.
+
+On some platforms, such as NixOS, you might need to use a package manager since
+the default `rustup` install will fail. On NixOS, you should run the following,
+which will install `rustup` and the latest `stable` release of Rust.
+
+```bash
+nix-env -i rustup
+rustup toolchain install stable
+```
+
+Note that you might need additional tools on some platforms to get `rustc` and
+`cargo` working. On UNIX-like systems, this generally means an install of GCC
+or Clang. For example, on NixOS you will likely need to install GCC via
+`nix-env -i gcc` and then go into a GCC and Rust shell (`nix-shell -p gcc
+rustup`). On Alpine, you'll need to run `apk add libgcc gcc musl-dev`. Exact
+instructions will differ by OS and Linux distro; feel free to ask on the
+[discussion](https://github.com/cross-rs/cross/discussions) or our [Matrix
+room](https://matrix.to/#/#cross-rs:matrix.org) if you have any questions.
+
+
+## Installing Cross
+
+Once `cargo` is installed via `rustup`, and the necessary additional tools are
+present, you can now install `cross` via `cargo`:
+
+```bash
+cargo install cross
+# Optionally, if you have cargo-binstall, you can install via pre-built binary
+cargo binstall cross
+```
+
+Once `cross` is installed, you need a container engine and you can start
+cross-compiling.
+
+
+# Installing A Container Engine
+
+On Windows and macOS, we generally recommend you use Docker unless you know
+what you're doing. [Docker
+Desktop](https://www.docker.com/products/docker-desktop/) install instructions
+can be found [here](https://www.docker.com/products/docker-desktop/). On Linux,
+you can either install via [Docker
+Engine](https://docs.docker.com/engine/install/ubuntu/), [Docker
+Desktop](https://docs.docker.com/desktop/install/linux-install/) or
+[Podman](https://podman.io/getting-started/installation). We generally
+recommend Podman, since it runs rootless by default. If you choose to use
+Docker, make sure you add users to the [docker
+group](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user)
+so it can be run without `sudo` (note that this has security implications) or
+use [rootless](https://docs.docker.com/engine/security/rootless/)
+Docker.
+
+If you use Docker Desktop for Windows, ensure you're using the WSL2 backend.
+Follow the [WSL2 installation
+instructions](https://docs.microsoft.com/en-us/windows/wsl/install) to enable
+the [WSL2 backend in docker](https://docs.docker.com/desktop/windows/wsl/).
+
+Once your container engine is installed, you can check that it is running via:
+
+```bash
+# or use podman, if installed
+$ docker ps -a
+```
+
+Using rootless docker also requires setting the environment
+variable `CROSS_ROOTLESS_CONTAINER_ENGINE=1`.
+
+
+# Cross-Compiling Your First Package
+
+Once both `cross` and the container engine are installed, you can build your
+first package; the following is all that's required:
+
+```bash
+$ cargo init --bin hello
+$ cd hello
+$ cross run --target aarch64-unknown-linux-gnu
+   Compiling hello v0.1.0 (/project)
+    Finished dev [unoptimized + debuginfo] target(s) in 0.64s
+     Running `/linux-runner aarch64 /target/aarch64-unknown-linux-gnu/debug/hello`
+Hello, world!
+```
+
+This will automatically install the required Rust target and the Docker image
+containing the toolchain to cross-compile your target.
+
+If you get an error similar to `error: toolchain
+'stable-x86_64-unknown-linux-gnu' does not support components`, try
+reinstalling that toolchain with rustup.
+
+```sh
+$ rustup toolchain uninstall stable-x86_64-unknown-linux-gnu
+$ rustup toolchain install stable-x86_64-unknown-linux-gnu --force-non-host
+```
diff --git a/docs/recipes.md b/docs/recipes.md
new file mode 100644
index 000000000..0cefec824
--- /dev/null
+++ b/docs/recipes.md
@@ -0,0 +1,295 @@
+
+- [OpenSSL](#openssl)
+  - [Vendored](#vendored)
+  - [Pre-build](#pre-build)
+  - [Custom dockerfile](#custom-dockerfile)
+- [sccache](#sccache)
+- [Redoxer](#redoxer)
+- [vcpkg, Meson, and Conan](#vcpkg-meson-and-conan)
+- [Using Clang and Software Collections on CentOS7](#using-clang-and-software-collections-on-centos7)
+
+
+This page contains recipes for common use cases.
+
+
+# OpenSSL
+
+You can use either the vendored or system packages for the
+[openssl](https://crates.io/crates/openssl) crate. See
+[openssl-certs](https://github.com/cross-rs/wiki_assets/tree/main/Recipes/openssl-certs)
+for a working project.
+
+## Vendored
+
+Use the vendored feature of the openssl crate by adding the following to your
+dependencies in `Cargo.toml`:
+
+```toml,cargo
+openssl = { version = "0.10", features = ["vendored"] }
+```
+
+## Pre-build
+
+To install OpenSSL in an image with `apt-get` available, add the following to
+your [Cross
+configuration](./config_file.md):
+
+```toml
+[target.x86_64-unknown-linux-gnu]
+pre-build = [
+    "dpkg --add-architecture $CROSS_DEB_ARCH",
+    "apt-get update && apt-get install --assume-yes libssl-dev:$CROSS_DEB_ARCH"
+]
+```
+
+## Custom dockerfile
+
+A sample Dockerfile for `aarch64` with OpenSSL support is:
+
+```Dockerfile
+FROM ghcr.io/cross-rs/aarch64-unknown-linux-gnu:edge
+RUN dpkg --add-architecture arm64
+RUN apt-get update && apt-get install --assume-yes libssl-dev:arm64
+```
+
+Build this image and use it, as is described extensively in [Custom
+Images](./custom_images.md).
+
+
+# sccache
+
+sccache support can be added either by building `sccache` from source or by using a pre-built binary. See [sccache](https://github.com/cross-rs/wiki_assets/tree/main/Recipes/sccache) for a working project using pre-build hooks.
+
+1. Create a script to [install](#sccache-install-script) sccache in the image, either from a [pre-built binary](#sccache-prebuilt-binary) or [from source](#sccache-from-source).
+2. Extend a [Dockerfile](#sccache-dockerfile) to install sccache in the image.
+3. Pass through the appropriate environment variables in [Cross.toml](#sccache-cross-toml) when using sccache.
+

+<h4 id="sccache-install-script">Install Script</h4>

+ +First, we need a script to copy into our image as `sccache.sh` (make sure the script is executable). + +
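+For example, from the directory containing the script:
+
+```bash
+chmod +x sccache.sh
+```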

+<h4 id="sccache-prebuilt-binary">Pre-Built Binary</h4>

+ +```bash +#!/bin/bash + +set -x +set -euo pipefail + +# shellcheck disable=SC1091 +. lib.sh + +main() { + local triple + local tag + local td + local url="https://github.com/mozilla/sccache" + triple="${1}" + + install_packages unzip tar + + # Download our package, then install our binary. + td="$(mktemp -d)" + pushd "${td}" + tag=$(git ls-remote --tags --refs --exit-code \ + "${url}" \ + | cut -d/ -f3 \ + | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' \ + | sort --version-sort \ + | tail -n1) + curl -LSfs "${url}/releases/download/${tag}/sccache-${tag}-${triple}.tar.gz" \ + -o sccache.tar.gz + tar -xvf sccache.tar.gz + rm sccache.tar.gz + cp "sccache-${tag}-${triple}/sccache" "/usr/bin/sccache" + chmod +x "/usr/bin/sccache" + + # clean up our install + purge_packages + popd + rm -rf "${td}" + rm "${0}" +} + +main "${@}" +``` + +

+<h4 id="sccache-from-source">From Source</h4>

+
+When installing from source, we can toggle various features; however, it is highly recommended to use the vendored OpenSSL.
+
+```bash
+#!/bin/bash
+
+set -x
+set -euo pipefail
+
+# shellcheck disable=SC1091
+. lib.sh
+
+main() {
+    local triple
+    local tag
+    local td
+    local url="https://github.com/mozilla/sccache"
+    triple="${1}"
+
+    install_packages ca-certificates curl unzip
+
+    # install rust and cargo to build sccache
+    export RUSTUP_HOME=/tmp/rustup
+    export CARGO_HOME=/tmp/cargo
+    curl --retry 3 -sSfL https://sh.rustup.rs -o rustup-init.sh
+    sh rustup-init.sh -y --no-modify-path
+    rm rustup-init.sh
+    export PATH="${CARGO_HOME}/bin:${PATH}"
+    rustup target add "${triple}"
+
+    # download the source code from the latest sccache release
+    td="$(mktemp -d)"
+    pushd "${td}"
+    tag=$(git ls-remote --tags --refs --exit-code \
+        "${url}" \
+        | cut -d/ -f3 \
+        | grep -E '^v[0-9]+\.[0-9]+\.[0-9]+$' \
+        | sort --version-sort \
+        | tail -n1)
+    curl -LSfs "${url}/archive/refs/tags/${tag}.zip" \
+        -o sccache.zip
+    unzip sccache.zip
+    mv "sccache-${tag//v/}" sccache
+    rm sccache.zip
+
+    # build from source for the desired architecture
+    # you can also use additional features here
+    cd sccache
+    cargo build --release --target "${triple}" \
+        --features=all,"openssl/vendored"
+    cp "target/${triple}/release/sccache" "/usr/bin/sccache"
+
+    # clean up our install
+    rm -r "${RUSTUP_HOME}" "${CARGO_HOME}"
+    purge_packages
+    popd
+    rm -rf "${td}"
+    rm "${0}"
+}
+
+main "${@}"
+```

+<h4 id="sccache-dockerfile">Dockerfile</h4>

+ +Next, extend our Dockerfile and build our image, saved as `Dockerfile.${target}`, where `${target}` is replaced by our desired target (such as `x86_64-unknown-linux-musl`). + +```Dockerfile +FROM ghcr.io/cross-rs/${target}:main +ARG DEBIAN_FRONTEND=noninteractive + +COPY sccache.sh / +RUN /sccache.sh x86_64-unknown-linux-musl + +ENV RUSTC_WRAPPER="/usr/bin/sccache" +``` + +Build our Docker image with: + +```bash +docker build --tag ${target}:sccache \ + --file Dockerfile.${target} . +``` + +
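+You can confirm the tag exists before moving on; `docker images` accepts a
+`REPOSITORY:TAG` filter:
+
+```bash
+docker images "${target}:sccache"
+```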

+<h4 id="sccache-cross-toml">Cross.toml</h4>

+
+Now, we need to pass through our environment variables and ensure they're exported when running cross. In `Cross.toml`, define:
+
+```toml
+[target.${target}]
+image = "${target}:sccache"
+
+[build.env]
+passthrough = [
+    "SCCACHE_ERROR_LOG",
+    "SCCACHE_LOG",
+    "SCCACHE_AZURE_CONNECTION_STRING",
+    "SCCACHE_AZURE_BLOB_CONTAINER",
+    "SCCACHE_DIR",
+]
+```

+<h4>Building with sccache</h4>

+
+Finally, we can run `cross` with our `sccache` environment variables defined:
+
+```bash
+SCCACHE_LOG=trace SCCACHE_DIR=/path/to/sccache/cache \
+    cross build --target "${target}" --verbose
+```
+
+# Redoxer
+
+Redoxer support can be added by installing the necessary dependencies, Redoxer, and the Redoxer toolchain in a custom image. See [redoxer](https://github.com/cross-rs/wiki_assets/tree/main/Recipes/redoxer) for a working project using a custom Dockerfile.
+
+Please note that this requires a base Ubuntu version of 20.04, and therefore requires building the images with [newer Linux versions](https://github.com/cross-rs/cross/wiki/FAQ#newer-linux-versions).
+
+# vcpkg, Meson, and Conan
+
+C++ projects often have complex build systems, due to a myriad of dependencies, competing build systems, and the lack of a built-in package manager. Some of the most popular build systems include GNU Make, CMake, and [Meson](https://mesonbuild.com/), and the two most popular package managers are [vcpkg](https://vcpkg.io/en/index.html) and [Conan](https://conan.io/). We have an entire [project](https://github.com/cross-rs/wiki_assets/tree/main/Recipes/vcpkg) with builds using CMake + Conan, Meson + Conan, and CMake + vcpkg.
+
+An example of building a project with an external `zlib` dependency using Meson and Conan is as follows. First, we create our Conan dependency file:
+
+**conanfile.py**
+
+```python
+from conans import ConanFile, Meson
+
+class ZlibExec(ConanFile):
+    name = "zlibexec"
+    version = "0.1"
+    settings = "os", "compiler", "build_type", "arch"
+    generators = "cmake", "pkg_config"
+    requires = "zlib/1.2.11"
+
+    def build(self):
+        meson = Meson(self)
+        meson.configure(build_folder="build")
+        meson.build()
+```
+
+Next, we need our Meson build file:
+
+**meson.build**
+
+```meson
+project('zlibexec', 'cpp')
+executable('zlibexec', 'zlib.cc', dependencies: dependency('zlib'))
+```
+
+Now, we need to build our project:
+
+```bash
+mkdir build && cd build
+conan install .. --build
+meson ..
+conan build ..
+```
+
+To make this magic happen, the project contains [Dockerfiles](https://github.com/cross-rs/wiki_assets/blob/main/Recipes/vcpkg/aarch64.Dockerfile) with Meson, Conan, and vcpkg installed, where the CMake toolchains and Meson configurations automatically cross-compile for the desired architecture. These images are [automatically](https://github.com/cross-rs/wiki_assets/blob/main/Recipes/vcpkg/Cross.toml) built when running `cross` via [pre-build hooks](https://github.com/cross-rs/cross/wiki/Configuration#custom-images). In order to integrate these builds with Rust, rather than invoking the `meson` or `cmake` commands directly, you should use [meson-rs](https://docs.rs/meson/1.0.0/meson/) and [cmake-rs](https://docs.rs/cmake/latest/cmake/) to configure and build the projects.
+
+# Using Clang and Software Collections on CentOS7
+
+In order to use Clang on CentOS 7, you must install the SCL repository and the LLVM toolset, and set the necessary paths to clang and LLVM. A sample Dockerfile is as follows:
+
+```Dockerfile
+FROM ghcr.io/cross-rs/x86_64-unknown-linux-gnu:main-centos
+
+RUN yum update -y && \
+    yum install centos-release-scl -y && \
+    yum install llvm-toolset-7 -y
+
+ENV LIBCLANG_PATH=/opt/rh/llvm-toolset-7/root/usr/lib64/ \
+    LIBCLANG_STATIC_PATH=/opt/rh/llvm-toolset-7/root/usr/lib64/ \
+    CLANG_PATH=/opt/rh/llvm-toolset-7/root/usr/bin/clang
+```
+
+Build this image and use it, as is described extensively in [Custom Images](./custom_images.md).
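+For instance, a sketch wiring the image up through `Cross.toml` (the
+Dockerfile name here is hypothetical):
+
+```toml
+[target.x86_64-unknown-linux-gnu]
+dockerfile = "./Dockerfile.centos-clang"
+```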
diff --git a/docs/remote.md b/docs/remote.md new file mode 100644 index 000000000..ecd170679 --- /dev/null +++ b/docs/remote.md @@ -0,0 +1,136 @@ + +- [Getting Started](#getting-started) +- [Data Volumes](#data-volumes) +- [Managing Data](#managing-data) +- [Private Dependencies](#private-dependencies) +- [Environment Variables](#environment-variables) + + + +# Getting Started + +To inform `cross` it is using a remote container engine, set the environment +variable `CROSS_REMOTE=1`. Rather than using bind mounts to mount local volumes +into the filesystem, it copies data from the local filesystem into data volumes +on the remote host, ensuring all mounted volumes are present on the remote +filesystem in the same location as they would be on the host. A sample +use with docker is: + +```bash +CROSS_REMOTE=1 DOCKER_HOST=tcp://docker:2375/ cross build --target arm-unknown-linux-gnueabihf +``` + +If using [docker +contexts](https://docs.docker.com/engine/context/working-with-contexts/), you +do not need to provide `DOCKER_HOST`. This also works with podman and +podman-remote. For podman remote, make sure the connection is +[added](https://docs.podman.io/en/latest/markdown/podman-system-connection-add.1.html) +to podman, and if using a connection that requires authentication, that it is +set to the default identity, and that you do not use an identity (to avoid +being prompted for a password on every command). + +An example with podman is: + +```bash +podman system connection add cross tcp://localhost:8080 --default=true +CROSS_REMOTE=1 CROSS_CONTAINER_ENGINE=podman cross build --target arm-unknown-linux-gnueabihf +``` + +Any command using `podman` can be replaced with `podman-remote`. Cross +automatically detects if `podman` or `podman-remote` is being used, and adds in +the `--remote` flag if needed. + + +# Data Volumes + +Since we cannot use bind mounts directly, we create a data volume mounted at +`/cross` in the container, and copy over all relevant data to that volume. To +ensure paths are identical with those on the host, all data is then symlinked +to the expected paths: for example, `/cross/home/user/project` becomes +`/home/user/project`. The files will have the same metadata as on the host. + +By default, `cross` does not copy the `cargo` registry, nor the target +directory. These can be enabled via the `CROSS_REMOTE_COPY_REGISTRY` and +`CROSS_REMOTE_COPY_CACHE` environment variables. To minimize the number of +calls to docker, each of which has a large overhead, when copying only subsets +of a directory we first copy all files to a temporary directory for faster +performance. + +Since copying the entire toolchain remotely can take a long time, `cross` also +supports persistent data volumes containing all data for the current toolchain. +These can be created via: + +```bash +cross-util volumes create +``` + +`cross` will detect if a persistent data volume is present, and prefer it over +a single-use volume. The persistent data volume will also contain the project +files, and will reflect any changes to the local project by copying/removing +changed files on every build.
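+ +Putting these pieces together, a minimal sketch of a remote workflow with a persistent data volume (assuming a docker context or `DOCKER_HOST` already points at the remote host) is: + +```bash +# create the persistent data volume for the current toolchain +cross-util volumes create +# later builds detect the volume and reuse it instead of copying the toolchain +CROSS_REMOTE=1 cross build --target arm-unknown-linux-gnueabihf +```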
+ + +# Managing Data + +Because cross introduces temporary files/directories, persistent data volumes, +and containers that may not always be cleaned up, we've added utilities to +clean up this data: + +```bash +# VOLUMES +# list all persistent data volumes +$ cross-util volumes list +cross-stable-x86_64-unknown-linux-gnu-16b8c-fe5b13d68 +# create a persistent data volume for the current toolchain +$ cross-util volumes create +# remove the persistent data volume for the current toolchain +$ cross-util volumes remove +# remove all persistent data volumes +$ cross-util volumes remove-all +# prune all data volumes not currently used with a container +# note that this affects more than just cross, and can +# be highly destructive +$ cross-util volumes prune + +# CONTAINERS +# list all hanging containers +$ cross-util containers list +# stop and remove all hanging containers +$ cross-util containers remove-all +``` + + +# Private Dependencies + +Note that for private dependencies, you will need to copy over the cargo +registry, since it uses the host toolchain; this might only support single-use +volumes (it has not been extensively tested): + +```bash +CROSS_REMOTE_COPY_REGISTRY=1 CROSS_REMOTE=1 cross build --target arm-unknown-linux-gnueabihf +``` + +This is because private dependencies are downloaded locally (to the host +registry) and must therefore be copied to the remote host, since the container +will not have access to SSH keys or other credentials. + + +# Environment Variables + +Remote build behavior can be further customized by environment variables +provided to the build command. + +- `CROSS_REMOTE`: Inform cross it is using a remote container engine, and use + data volumes rather than local bind mounts. +- `CROSS_REMOTE_COPY_REGISTRY`: Copy the `cargo` registry and git directories. + This is needed to support private SSH dependencies. +- `CROSS_REMOTE_COPY_CACHE`: Copy all directories, even those containing + `CACHETAG.DIR` (a cache directory [tag](https://bford.info/cachedir/)). +- `CROSS_REMOTE_SKIP_BUILD_ARTIFACTS`: Do not copy any generated build + artifacts back to the host after finishing the build. If using persistent + data volumes, the artifacts will remain in the volume. + +For additional environment variables, refer to the [environment variables +documentation][docs-env-vars]. + +[docs-env-vars]: ./environment_variables.md diff --git a/docs/unstable_features.md b/docs/unstable_features.md new file mode 100644 index 000000000..865be600b --- /dev/null +++ b/docs/unstable_features.md @@ -0,0 +1,9 @@ +Certain unstable features can enable additional functionality useful for +cross-compiling. Note that these are unstable, and may be removed at any time +(particularly if the feature is stabilized or removed), and will only be used +on a nightly channel.
+ +Here is the list of currently available unstable features: + +- `CROSS_UNSTABLE_ENABLE_DOCTESTS`: enable or disable running doctests + (example: `true`) diff --git a/rustfmt.yml b/rustfmt.yml index bddf89599..fdd7eb4b3 100644 --- a/rustfmt.yml +++ b/rustfmt.yml @@ -6,4 +6,4 @@ use_field_init_shorthand = true reorder_impl_items = true edition = "2021" newline_style = "Unix" -format_code_in_doc_comments = true \ No newline at end of file +format_code_in_doc_comments = true diff --git a/src/bin/commands/clean.rs b/src/bin/commands/clean.rs index 0a93abf6b..b7dc8bc01 100644 --- a/src/bin/commands/clean.rs +++ b/src/bin/commands/clean.rs @@ -7,15 +7,6 @@ use cross::shell::MessageInfo; #[derive(Args, Debug)] pub struct Clean { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Force removal of images. #[clap(short, long)] pub force: bool, @@ -32,7 +23,7 @@ pub struct Clean { impl Clean { pub fn run( - self, + &self, engine: cross::docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { @@ -51,9 +42,6 @@ impl Clean { // containers -> images -> volumes -> prune to ensure no conflicts. let remove_containers = RemoveAllContainers { - verbose: self.verbose, - quiet: self.quiet, - color: self.color.clone(), force: self.force, execute: self.execute, engine: None, @@ -62,9 +50,6 @@ impl Clean { let remove_images = RemoveImages { targets: vec![], - verbose: self.verbose, - quiet: self.quiet, - color: self.color.clone(), force: self.force, local: self.local, execute: self.execute, @@ -73,9 +58,6 @@ impl Clean { remove_images.run(engine.clone(), msg_info)?; let remove_volumes = RemoveAllVolumes { - verbose: self.verbose, - quiet: self.quiet, - color: self.color.clone(), force: self.force, execute: self.execute, engine: None, @@ -83,9 +65,6 @@ impl Clean { remove_volumes.run(engine.clone(), msg_info)?; let prune_volumes = PruneVolumes { - verbose: self.verbose, - quiet: self.quiet, - color: self.color.clone(), execute: self.execute, engine: None, }; @@ -97,16 +76,4 @@ impl Clean { pub fn engine(&self) -> Option<&str> { self.engine.as_deref() } - - pub fn verbose(&self) -> bool { - self.verbose - } - - pub fn quiet(&self) -> bool { - self.quiet - } - - pub fn color(&self) -> Option<&str> { - self.color.as_deref() - } } diff --git a/src/bin/commands/containers.rs b/src/bin/commands/containers.rs index a249d9558..530a3a8d9 100644 --- a/src/bin/commands/containers.rs +++ b/src/bin/commands/containers.rs @@ -1,42 +1,26 @@ use std::io; use clap::{Args, Subcommand}; +use cross::docker::ImagePlatform; +use cross::rustc::{QualifiedToolchain, Toolchain}; use cross::shell::{MessageInfo, Stream}; -use cross::{docker, CommandExt}; +use cross::{docker, CommandExt, TargetTriple}; #[derive(Args, Debug)] pub struct ListVolumes { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Container engine (such as docker or podman). 
#[clap(long)] pub engine: Option, } impl ListVolumes { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { list_volumes(&engine, msg_info) } } #[derive(Args, Debug)] pub struct RemoveAllVolumes { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Force removal of volumes. #[clap(short, long)] pub force: bool, @@ -49,22 +33,13 @@ pub struct RemoveAllVolumes { } impl RemoveAllVolumes { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { remove_all_volumes(self, &engine, msg_info) } } #[derive(Args, Debug)] pub struct PruneVolumes { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Remove volumes. Default is a dry run. #[clap(short, long)] pub execute: bool, @@ -74,7 +49,7 @@ pub struct PruneVolumes { } impl PruneVolumes { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { prune_volumes(self, &engine, msg_info) } } @@ -87,25 +62,19 @@ pub struct CreateVolume { /// If we should copy the cargo registry to the volume. #[clap(short, long)] pub copy_registry: bool, - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Container engine (such as docker or podman). #[clap(long)] pub engine: Option, + /// Toolchain to create a volume for + #[clap(long, default_value = TargetTriple::DEFAULT.triple(), )] + pub toolchain: String, } impl CreateVolume { pub fn run( - self, + &self, engine: docker::Engine, - channel: Option<&str>, + channel: Option<&Toolchain>, msg_info: &mut MessageInfo, ) -> cross::Result<()> { create_persistent_volume(self, &engine, channel, msg_info) @@ -120,28 +89,22 @@ pub struct RemoveVolume { /// If cross is running inside a container. #[clap(short, long)] pub docker_in_docker: bool, - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Container engine (such as docker or podman). #[clap(long)] pub engine: Option, + /// Toolchain to remove the volume for + #[clap(long, default_value = TargetTriple::DEFAULT.triple(), )] + pub toolchain: String, } impl RemoveVolume { pub fn run( - self, + &self, engine: docker::Engine, - channel: Option<&str>, + channel: Option<&Toolchain>, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - remove_persistent_volume(&engine, channel, msg_info) + remove_persistent_volume(self, &engine, channel, msg_info) } } @@ -153,42 +116,35 @@ pub enum Volumes { RemoveAll(RemoveAllVolumes), /// Prune volumes not used by any container. 
Prune(PruneVolumes), - /// Create a persistent data volume for the current toolchain. + /// Create a persistent data volume for a given toolchain. Create(CreateVolume), - /// Remove a persistent data volume for the current toolchain. + /// Remove a persistent data volume for a given toolchain. Remove(RemoveVolume), } - -macro_rules! volumes_get_field { - ($self:ident, $field:ident $(.$cb:ident)?) => {{ - match $self { - Volumes::List(l) => l.$field$(.$cb())?, - Volumes::RemoveAll(l) => l.$field$(.$cb())?, - Volumes::Prune(l) => l.$field$(.$cb())?, - Volumes::Create(l) => l.$field$(.$cb())?, - Volumes::Remove(l) => l.$field$(.$cb())?, - } - }}; -} - impl Volumes { pub fn run( - self, + &self, engine: docker::Engine, - toolchain: Option<&str>, + channel: Option<&Toolchain>, msg_info: &mut MessageInfo, ) -> cross::Result<()> { match self { Volumes::List(args) => args.run(engine, msg_info), Volumes::RemoveAll(args) => args.run(engine, msg_info), Volumes::Prune(args) => args.run(engine, msg_info), - Volumes::Create(args) => args.run(engine, toolchain, msg_info), - Volumes::Remove(args) => args.run(engine, toolchain, msg_info), + Volumes::Create(args) => args.run(engine, channel, msg_info), + Volumes::Remove(args) => args.run(engine, channel, msg_info), } } pub fn engine(&self) -> Option<&str> { - volumes_get_field!(self, engine.as_deref) + match self { + Volumes::List(l) => l.engine.as_deref(), + Volumes::RemoveAll(l) => l.engine.as_deref(), + Volumes::Prune(l) => l.engine.as_deref(), + Volumes::Create(l) => l.engine.as_deref(), + Volumes::Remove(l) => l.engine.as_deref(), + } } // FIXME: remove this in v0.3.0. @@ -201,53 +157,23 @@ impl Volumes { Volumes::Remove(l) => l.docker_in_docker, } } - - pub fn verbose(&self) -> bool { - volumes_get_field!(self, verbose) - } - - pub fn quiet(&self) -> bool { - volumes_get_field!(self, quiet) - } - - pub fn color(&self) -> Option<&str> { - volumes_get_field!(self, color.as_deref) - } } #[derive(Args, Debug)] pub struct ListContainers { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Container engine (such as docker or podman). #[clap(long)] pub engine: Option, } impl ListContainers { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { list_containers(&engine, msg_info) } } #[derive(Args, Debug)] pub struct RemoveAllContainers { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Force removal of containers. #[clap(short, long)] pub force: bool, @@ -260,7 +186,7 @@ pub struct RemoveAllContainers { } impl RemoveAllContainers { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { remove_all_containers(self, &engine, msg_info) } } @@ -273,17 +199,8 @@ pub enum Containers { RemoveAll(RemoveAllContainers), } -macro_rules! containers_get_field { - ($self:ident, $field:ident $(.$cb:ident)?) 
=> {{ - match $self { - Containers::List(l) => l.$field$(.$cb())?, - Containers::RemoveAll(l) => l.$field$(.$cb())?, - } - }}; -} - impl Containers { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { match self { Containers::List(args) => args.run(engine, msg_info), Containers::RemoveAll(args) => args.run(engine, msg_info), @@ -291,19 +208,10 @@ impl Containers { } pub fn engine(&self) -> Option<&str> { - containers_get_field!(self, engine.as_deref) - } - - pub fn verbose(&self) -> bool { - containers_get_field!(self, verbose) - } - - pub fn quiet(&self) -> bool { - containers_get_field!(self, quiet) - } - - pub fn color(&self) -> Option<&str> { - containers_get_field!(self, color.as_deref) + match self { + Containers::List(l) => l.engine.as_deref(), + Containers::RemoveAll(l) => l.engine.as_deref(), + } } } @@ -311,11 +219,13 @@ fn get_cross_volumes( engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result> { - let stdout = docker::subcommand(engine, "volume") + use cross::docker::VOLUME_PREFIX; + let stdout = engine + .subcommand("volume") .arg("list") - .args(&["--format", "{{.Name}}"]) + .args(["--format", "{{.Name}}"]) // handles simple regex: ^ for start of line. - .args(&["--filter", "name=^cross-"]) + .args(["--filter", &format!("name=^{VOLUME_PREFIX}")]) .run_and_get_stdout(msg_info)?; let mut volumes: Vec = stdout.lines().map(|s| s.to_string()).collect(); @@ -333,22 +243,22 @@ pub fn list_volumes(engine: &docker::Engine, msg_info: &mut MessageInfo) -> cros } pub fn remove_all_volumes( - RemoveAllVolumes { force, execute, .. }: RemoveAllVolumes, + RemoveAllVolumes { force, execute, .. }: &RemoveAllVolumes, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { let volumes = get_cross_volumes(engine, msg_info)?; - let mut command = docker::subcommand(engine, "volume"); + let mut command = engine.subcommand("volume"); command.arg("rm"); - if force { + if *force { command.arg("--force"); } command.args(&volumes); if volumes.is_empty() { Ok(()) - } else if execute { - command.run(msg_info, false).map_err(Into::into) + } else if *execute { + command.run(msg_info, false) } else { msg_info.note("this is a dry run. to remove the volumes, pass the `--execute` flag.")?; command.print(msg_info)?; @@ -357,14 +267,14 @@ pub fn remove_all_volumes( } pub fn prune_volumes( - PruneVolumes { execute, .. }: PruneVolumes, + PruneVolumes { execute, .. }: &PruneVolumes, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - let mut command = docker::subcommand(engine, "volume"); - command.args(&["prune", "--force"]); - if execute { - command.run(msg_info, false).map_err(Into::into) + let mut command = engine.subcommand("volume"); + command.args(["prune", "--force"]); + if *execute { + command.run(msg_info, false) } else { msg_info.note("this is a dry run. to prune the volumes, pass the `--execute` flag.")?; command.print(msg_info)?; @@ -373,42 +283,49 @@ pub fn prune_volumes( } pub fn create_persistent_volume( - CreateVolume { copy_registry, .. }: CreateVolume, + CreateVolume { + copy_registry, + toolchain, + .. + }: &CreateVolume, engine: &docker::Engine, - channel: Option<&str>, + channel: Option<&Toolchain>, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - // we only need a triple that needs docker: the actual target doesn't matter. 
- let triple = cross::Host::X86_64UnknownLinuxGnu.triple(); - let (target, metadata, dirs) = docker::get_package_info(engine, triple, channel, msg_info)?; - let container = docker::remote::unique_container_identifier(&target, &metadata, &dirs)?; - let volume = docker::remote::unique_toolchain_identifier(&dirs.sysroot)?; - - if docker::remote::volume_exists(engine, &volume, msg_info)? { - eyre::bail!("Error: volume {volume} already exists."); + let mut toolchain = toolchain_or_target(toolchain, msg_info)?; + if let Some(channel) = channel { + toolchain.channel = channel.channel.clone(); + }; + let mount_finder = docker::MountFinder::create(engine, msg_info)?; + let dirs = docker::ToolchainDirectories::assemble(&mount_finder, toolchain.clone())?; + let container_id = dirs.unique_container_identifier(&toolchain.host().target)?; + let volume_id = dirs.unique_toolchain_identifier()?; + let volume = docker::DockerVolume::new(engine, &volume_id); + + if volume.exists(msg_info)? { + eyre::bail!("Error: volume {volume_id} already exists."); } - docker::subcommand(engine, "volume") - .args(&["create", &volume]) - .run_and_get_status(msg_info, false)?; + volume.create(msg_info)?; // stop the container if it's already running - let state = docker::remote::container_state(engine, &container, msg_info)?; + let container = docker::DockerContainer::new(engine, &container_id); + let state = container.state(msg_info)?; if !state.is_stopped() { - msg_info.warn("container {container} was running.")?; - docker::remote::container_stop_default(engine, &container, msg_info)?; + msg_info.warn(format_args!("container {container_id} was running."))?; + container.stop_default(msg_info)?; } if state.exists() { - msg_info.warn("container {container} was exited.")?; - docker::remote::container_rm(engine, &container, msg_info)?; + msg_info.warn(format_args!("container {container_id} was exited."))?; + container.remove(msg_info)?; } // create a dummy running container to copy data over - let mount_prefix = docker::remote::MOUNT_PREFIX; - let mut docker = docker::subcommand(engine, "run"); - docker.args(&["--name", &container]); + let mount_prefix = docker::MOUNT_PREFIX; + let mut docker = engine.subcommand("run"); + docker.args(["--name", &container_id]); docker.arg("--rm"); - docker.args(&["-v", &format!("{}:{}", volume, mount_prefix)]); + docker.args(["-v", &format!("{}:{}", volume_id, mount_prefix)]); docker.arg("-d"); let is_tty = io::Stdin::is_atty() && io::Stdout::is_atty() && io::Stderr::is_atty(); if is_tty { @@ -421,58 +338,42 @@ pub fn create_persistent_volume( // a TTY. this has a few issues though: now, the // container no longer responds to signals, so the // container will need to be sig-killed. 
- docker.args(&["sh", "-c", "sleep infinity"]); + docker.args(["sh", "-c", "sleep infinity"]); } // store first, since failing to non-existing container is fine - docker::remote::create_container_deleter(engine.clone(), container.clone()); - docker.run_and_get_status(msg_info, false)?; - - docker::remote::copy_volume_container_xargo( - engine, - &container, - &dirs.xargo, - &target, - mount_prefix.as_ref(), - msg_info, - )?; - docker::remote::copy_volume_container_cargo( - engine, - &container, - &dirs.cargo, - mount_prefix.as_ref(), - copy_registry, - msg_info, - )?; - docker::remote::copy_volume_container_rust( - engine, - &container, - &dirs.sysroot, - &target, - mount_prefix.as_ref(), - true, - msg_info, - )?; - - docker::remote::drop_container(is_tty, msg_info); + docker::ChildContainer::create(engine.clone(), container_id.clone())?; + docker.run_and_get_status(msg_info, true)?; + + let data_volume = docker::ContainerDataVolume::new(engine, &container_id, &dirs); + data_volume.copy_xargo(mount_prefix, msg_info)?; + data_volume.copy_cargo(mount_prefix, *copy_registry, msg_info)?; + data_volume.copy_rust(None, mount_prefix, msg_info)?; + + docker::ChildContainer::finish_static(is_tty, msg_info); Ok(()) } pub fn remove_persistent_volume( + RemoveVolume { toolchain, .. }: &RemoveVolume, engine: &docker::Engine, - channel: Option<&str>, + channel: Option<&Toolchain>, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - // we only need a triple that needs docker: the actual target doesn't matter. - let triple = cross::Host::X86_64UnknownLinuxGnu.triple(); - let (_, _, dirs) = docker::get_package_info(engine, triple, channel, msg_info)?; - let volume = docker::remote::unique_toolchain_identifier(&dirs.sysroot)?; + let mut toolchain = toolchain_or_target(toolchain, msg_info)?; + if let Some(channel) = channel { + toolchain.channel = channel.channel.clone(); + }; + let mount_finder = docker::MountFinder::create(engine, msg_info)?; + let dirs = docker::ToolchainDirectories::assemble(&mount_finder, toolchain)?; + let volume_id = dirs.unique_toolchain_identifier()?; + let volume = docker::DockerVolume::new(engine, &volume_id); - if !docker::remote::volume_exists(engine, &volume, msg_info)? { - eyre::bail!("Error: volume {volume} does not exist."); + if !volume.exists(msg_info)? { + eyre::bail!("Error: volume {volume_id} does not exist."); } - docker::remote::volume_rm(engine, &volume, msg_info)?; + volume.remove(msg_info)?; Ok(()) } @@ -481,11 +382,13 @@ fn get_cross_containers( engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result> { - let stdout = docker::subcommand(engine, "ps") + use cross::docker::VOLUME_PREFIX; + let stdout = engine + .subcommand("ps") .arg("-a") - .args(&["--format", "{{.Names}}: {{.State}}"]) + .args(["--format", "{{.Names}}: {{.State}}"]) // handles simple regex: ^ for start of line. - .args(&["--filter", "name=^cross-"]) + .args(["--filter", &format!("name=^{VOLUME_PREFIX}")]) .run_and_get_stdout(msg_info)?; let mut containers: Vec = stdout.lines().map(|s| s.to_string()).collect(); @@ -503,7 +406,7 @@ pub fn list_containers(engine: &docker::Engine, msg_info: &mut MessageInfo) -> c } pub fn remove_all_containers( - RemoveAllContainers { force, execute, .. }: RemoveAllContainers, + RemoveAllContainers { force, execute, .. 
}: &RemoveAllContainers, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { @@ -514,7 +417,7 @@ pub fn remove_all_containers( // cannot fail, formatted as {{.Names}}: {{.State}} let (name, state) = container.split_once(':').unwrap(); let name = name.trim(); - let state = docker::remote::ContainerState::new(state.trim())?; + let state = docker::ContainerState::new(state.trim())?; if state.is_stopped() { stopped.push(name); } else { @@ -524,21 +427,21 @@ pub fn remove_all_containers( let mut commands = vec![]; if !running.is_empty() { - let mut stop = docker::subcommand(engine, "stop"); + let mut stop = engine.subcommand("stop"); stop.args(&running); commands.push(stop); } if !(stopped.is_empty() && running.is_empty()) { - let mut rm = docker::subcommand(engine, "rm"); - if force { + let mut rm = engine.subcommand("rm"); + if *force { rm.arg("--force"); } rm.args(&running); rm.args(&stopped); commands.push(rm); } - if execute { + if *execute { for mut command in commands { command.run(msg_info, false)?; } @@ -551,3 +454,20 @@ pub fn remove_all_containers( Ok(()) } + +fn toolchain_or_target( + s: &str, + msg_info: &mut MessageInfo, +) -> Result { + let config = cross::config::Config::new(None); + let mut toolchain = QualifiedToolchain::default(&config, msg_info)?; + let target_list = cross::rustc::target_list(msg_info)?; + if target_list.contains(s) { + toolchain.replace_host(&ImagePlatform::from_target(s.into())?); + } else { + let picked: Toolchain = s.parse()?; + toolchain = toolchain.with_picked(picked)?; + } + + Ok(toolchain) +} diff --git a/src/bin/commands/images.rs b/src/bin/commands/images.rs index 5b70efd25..fc62d743a 100644 --- a/src/bin/commands/images.rs +++ b/src/bin/commands/images.rs @@ -1,5 +1,6 @@ use std::collections::{BTreeMap, BTreeSet}; +use clap::builder::PossibleValue; use clap::{Args, Subcommand}; use cross::docker::{self, CROSS_CUSTOM_DOCKERFILE_IMAGE_PREFIX}; use cross::shell::MessageInfo; @@ -14,41 +15,45 @@ const IMAGE_PREFIXES: &[&str] = &[GHCR_IO, DOCKER_IO, RUST_EMBEDDED]; #[derive(Args, Debug)] pub struct ListImages { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Container engine (such as docker or podman). #[clap(long)] pub engine: Option, + /// Output format + #[clap(long, default_value = "human")] + pub format: OutputFormat, /// Only list images for specific target(s). By default, list all targets. pub targets: Vec, } impl ListImages { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { list_images(self, &engine, msg_info) } } +#[derive(Clone, Debug)] +pub enum OutputFormat { + Human, + Json, +} + +impl clap::ValueEnum for OutputFormat { + fn value_variants<'a>() -> &'a [Self] { + &[Self::Human, Self::Json] + } + + fn to_possible_value(&self) -> Option { + match self { + OutputFormat::Human => Some(PossibleValue::new("human")), + OutputFormat::Json => Some(PossibleValue::new("json")), + } + } +} + #[derive(Args, Debug)] pub struct RemoveImages { /// If not provided, remove all images. pub targets: Vec, - /// Remove images matching provided targets. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. 
- #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Force removal of images. #[clap(short, long)] pub force: bool, @@ -64,7 +69,7 @@ pub struct RemoveImages { } impl RemoveImages { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { if self.targets.is_empty() { remove_all_images(self, &engine, msg_info) } else { @@ -82,7 +87,7 @@ pub enum Images { } impl Images { - pub fn run(self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { + pub fn run(&self, engine: docker::Engine, msg_info: &mut MessageInfo) -> cross::Result<()> { match self { Images::List(args) => args.run(engine, msg_info), Images::Remove(args) => args.run(engine, msg_info), @@ -95,30 +100,9 @@ impl Images { Images::Remove(l) => l.engine.as_deref(), } } - - pub fn verbose(&self) -> bool { - match self { - Images::List(l) => l.verbose, - Images::Remove(l) => l.verbose, - } - } - - pub fn quiet(&self) -> bool { - match self { - Images::List(l) => l.quiet, - Images::Remove(l) => l.quiet, - } - } - - pub fn color(&self) -> Option<&str> { - match self { - Images::List(l) => l.color.as_deref(), - Images::Remove(l) => l.color.as_deref(), - } - } } -#[derive(Debug, PartialOrd, Ord, PartialEq, Eq)] +#[derive(Debug, PartialOrd, Ord, PartialEq, Eq, serde::Serialize)] struct Image { repository: String, tag: String, @@ -166,9 +150,10 @@ fn get_cross_images( msg_info: &mut MessageInfo, local: bool, ) -> cross::Result> { - let mut images: BTreeSet<_> = cross::docker::subcommand(engine, "images") - .args(&["--format", "{{.Repository}}:{{.Tag}} {{.ID}}"]) - .args(&[ + let mut images: BTreeSet<_> = engine + .subcommand("images") + .args(["--format", "{{.Repository}}:{{.Tag}} {{.ID}}"]) + .args([ "--filter", &format!("label={}.for-cross-target", cross::CROSS_LABEL_DOMAIN), ]) @@ -177,8 +162,9 @@ fn get_cross_images( .map(parse_image) .collect(); - let stdout = cross::docker::subcommand(engine, "images") - .args(&["--format", "{{.Repository}}:{{.Tag}} {{.ID}}"]) + let stdout = engine + .subcommand("images") + .args(["--format", "{{.Repository}}:{{.Tag}} {{.ID}}"]) .run_and_get_stdout(msg_info)?; let ids: Vec<_> = images.iter().map(|i| i.id.to_string()).collect(); images.extend( @@ -238,8 +224,8 @@ fn get_image_target( return Ok(target); } } - let mut command = cross::docker::subcommand(engine, "inspect"); - command.args(&[ + let mut command = engine.subcommand("inspect"); + command.args([ "--format", &format!( r#"{{{{index .Config.Labels "{}.for-cross-target"}}}}"#, @@ -256,7 +242,9 @@ fn get_image_target( } pub fn list_images( - ListImages { targets, .. }: ListImages, + ListImages { + targets, format, .. 
+ }: &ListImages, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { @@ -276,48 +264,58 @@ pub fn list_images( map.get_mut(&target).expect("map must have key").push(image); } } - let mut keys: Vec<&str> = map.iter().map(|(k, _)| k.as_ref()).collect(); + let mut keys: Vec<&str> = map.keys().map(|k| k.as_ref()).collect(); keys.sort_unstable(); - let print_string = - |col1: &str, col2: &str, fill: char, info: &mut MessageInfo| -> cross::Result<()> { - let mut row = String::new(); - row.push('|'); - row.push(fill); - row.push_str(col1); - let spaces = max_target_len.max(col1.len()) + 1 - col1.len(); - for _ in 0..spaces { - row.push(fill); - } - row.push('|'); - row.push(fill); - row.push_str(col2); - let spaces = max_image_len.max(col2.len()) + 1 - col2.len(); - for _ in 0..spaces { - row.push(fill); + match format { + OutputFormat::Json => { + msg_info.info(format_args!("{}", serde_json::to_string(&map)?))?; + } + OutputFormat::Human => { + let print_string = + |col1: &str, col2: &str, fill: char, info: &mut MessageInfo| -> cross::Result<()> { + let mut row = String::new(); + row.push('|'); + row.push(fill); + row.push_str(col1); + let spaces = max_target_len.max(col1.len()) + 1 - col1.len(); + for _ in 0..spaces { + row.push(fill); + } + row.push('|'); + row.push(fill); + row.push_str(col2); + let spaces = max_image_len.max(col2.len()) + 1 - col2.len(); + for _ in 0..spaces { + row.push(fill); + } + row.push('|'); + info.print(row) + }; + + if targets.len() != 1 { + print_string("Targets", "Images", ' ', msg_info)?; + print_string("-------", "------", '-', msg_info)?; } - row.push('|'); - info.print(row) - }; - - if targets.len() != 1 { - print_string("Targets", "Images", ' ', msg_info)?; - print_string("-------", "------", '-', msg_info)?; - } - let print_single = - |_: &str, image: &Image, info: &mut MessageInfo| -> cross::Result<()> { info.print(image) }; - let print_table = |target: &str, image: &Image, info: &mut MessageInfo| -> cross::Result<()> { - let name = image.name(); - print_string(target, &name, ' ', info) - }; - - for target in keys { - for image in map.get(target).expect("map must have key").iter() { - if targets.len() == 1 { - print_single(target, image, msg_info)?; - } else { - print_table(target, image, msg_info)?; + let print_single = |_: &str, + image: &Image, + info: &mut MessageInfo| + -> cross::Result<()> { info.print(image) }; + let print_table = + |target: &str, image: &Image, info: &mut MessageInfo| -> cross::Result<()> { + let name = image.name(); + print_string(target, &name, ' ', info) + }; + + for target in keys { + for image in map.get(target).expect("map must have key").iter() { + if targets.len() == 1 { + print_single(target, image, msg_info)?; + } else { + print_table(target, image, msg_info)?; + } + } } } } @@ -332,7 +330,7 @@ fn remove_images( force: bool, execute: bool, ) -> cross::Result<()> { - let mut command = docker::subcommand(engine, "rmi"); + let mut command = engine.subcommand("rmi"); if force { command.arg("--force"); } @@ -340,7 +338,7 @@ fn remove_images( if images.is_empty() { Ok(()) } else if execute { - command.run(msg_info, false).map_err(Into::into) + command.run(msg_info, false) } else { msg_info.note("this is a dry run. to remove the images, pass the `--execute` flag.")?; command.print(msg_info)?; @@ -354,12 +352,12 @@ pub fn remove_all_images( local, execute, .. 
- }: RemoveImages, + }: &RemoveImages, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - let images = get_cross_images(engine, msg_info, local)?; - remove_images(engine, &images, msg_info, force, execute) + let images = get_cross_images(engine, msg_info, *local)?; + remove_images(engine, &images, msg_info, *force, *execute) } pub fn remove_target_images( @@ -369,20 +367,20 @@ pub fn remove_target_images( local, execute, .. - }: RemoveImages, + }: &RemoveImages, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - let cross_images = get_cross_images(engine, msg_info, local)?; + let cross_images = get_cross_images(engine, msg_info, *local)?; let target_list = msg_info.as_quiet(cross::rustc::target_list)?; let mut images = vec![]; for image in cross_images { - let target = dbg!(get_image_target(engine, &image, &target_list, msg_info)?); + let target = get_image_target(engine, &image, &target_list, msg_info)?; if targets.contains(&target) { images.push(image); } } - remove_images(engine, &images, msg_info, force, execute) + remove_images(engine, &images, msg_info, *force, *execute) } #[cfg(test)] diff --git a/src/bin/commands/mod.rs b/src/bin/commands/mod.rs index 30a1c771a..2f63ea608 100644 --- a/src/bin/commands/mod.rs +++ b/src/bin/commands/mod.rs @@ -1,7 +1,9 @@ mod clean; mod containers; mod images; +mod run; pub use self::clean::*; pub use self::containers::*; pub use self::images::*; +pub use self::run::*; diff --git a/src/bin/commands/run.rs b/src/bin/commands/run.rs new file mode 100644 index 000000000..836a90bd0 --- /dev/null +++ b/src/bin/commands/run.rs @@ -0,0 +1,105 @@ +use clap::Args as ClapArgs; +use cross::config::Config; +use cross::shell::{MessageInfo, Verbosity}; +use cross::{ + cargo_metadata_with_args, cli::Args, docker, rustc, setup, toml, CommandVariant, CrossSetup, + Target, +}; +use eyre::Context; + +#[derive(ClapArgs, Debug)] +pub struct Run { + /// Container engine (such as docker or podman). + #[clap(long)] + pub engine: Option, + /// Target + #[clap(short, long)] + pub target: String, + /// Interactive session + #[clap(short, long, default_value = "false")] + pub interactive: bool, + /// Command to run, will be run in a shell + #[clap(last = true)] + pub command: String, +} + +impl Run { + pub fn run( + &self, + cli: &crate::Cli, + engine: docker::Engine, + msg_info: &mut MessageInfo, + ) -> cross::Result<()> { + let target_list = rustc::target_list(&mut Verbosity::Quiet.into())?; + let target = Target::from(&self.target, &target_list); + + let cwd = std::env::current_dir()?; + let host_version_meta = rustc::version_meta()?; + + let args = Args { + cargo_args: vec![], + rest_args: vec![], + subcommand: None, + channel: None, + target: Some(target.clone()), + features: vec![], + target_dir: None, + manifest_path: None, + version: false, + verbose: if cli.verbose { 1 } else { 0 }, + quiet: cli.quiet, + color: cli.color.clone(), + }; + + if let Some(metadata) = cargo_metadata_with_args(None, Some(&args), msg_info)? { + let CrossSetup { toolchain, .. } = + match setup(&host_version_meta, &metadata, &args, target_list, msg_info)? 
{ + Some(setup) => setup, + _ => { + eyre::bail!("Error: cannot setup cross environment"); + } + }; + + let toml = toml(&metadata, msg_info)?; + let config = Config::new(Some(toml)); + + let image = match docker::get_image(&config, &target, false) { + Ok(i) => i, + Err(docker::GetImageError::NoCompatibleImages(..)) + if config.dockerfile(&target).is_some() => + { + "scratch".into() + } + Err(err) => { + msg_info.warn(&err)?; + eyre::bail!("Error: {}", &err); + } + }; + + let image = image.to_definite_with(&engine, msg_info)?; + + let paths = docker::DockerPaths::create(&engine, metadata, cwd, toolchain, msg_info)?; + let options = docker::DockerOptions::new( + engine, + target, + config, + image, + CommandVariant::Shell, + None, + self.interactive, + ); + + let mut args = vec![String::from("-c")]; + args.push(self.command.clone()); + + docker::run(options, paths, &args, None, msg_info) + .wrap_err("could not run container")?; + } + + Ok(()) + } + + pub fn engine(&self) -> Option<&str> { + self.engine.as_deref() + } +} diff --git a/src/bin/cross-util.rs b/src/bin/cross-util.rs index 8468c4c31..a90c6f92a 100644 --- a/src/bin/cross-util.rs +++ b/src/bin/cross-util.rs @@ -1,19 +1,31 @@ #![deny(missing_debug_implementations, rust_2018_idioms)] use clap::{CommandFactory, Parser, Subcommand}; -use cross::docker; use cross::shell::MessageInfo; +use cross::{docker, rustc::Toolchain}; mod commands; +const APP_NAME: &str = "cross-util"; +static VERSION: &str = concat!(env!("CARGO_PKG_VERSION"), cross::commit_info!()); + #[derive(Parser, Debug)] -#[clap(version, about, long_about = None)] -struct Cli { +#[clap(about, long_about = None, name = APP_NAME, version = VERSION)] +pub struct Cli { /// Toolchain name/version to use (such as stable or 1.59.0). #[clap(value_parser = is_toolchain)] - toolchain: Option, + toolchain: Option, #[clap(subcommand)] command: Commands, + /// Provide verbose diagnostic output. + #[clap(short, long, global = true)] + pub verbose: bool, + /// Do not print cross log messages. + #[clap(short, long, global = true)] + pub quiet: bool, + /// Coloring: auto, always, never + #[clap(long, global = true)] + pub color: Option, } // hidden implied parser so we can get matches without recursion. @@ -34,13 +46,15 @@ enum Commands { /// Work with cross containers in local storage. #[clap(subcommand)] Containers(commands::Containers), + /// Run in cross container. + Run(commands::Run), /// Clean all cross data in local storage. Clean(commands::Clean), } -fn is_toolchain(toolchain: &str) -> cross::Result { +fn is_toolchain(toolchain: &str) -> cross::Result { if toolchain.starts_with('+') { - Ok(toolchain.chars().skip(1).collect()) + Ok(toolchain.chars().skip(1).collect::().parse()?) } else { let _ = ::command().get_matches(); unreachable!(); @@ -70,36 +84,31 @@ macro_rules! get_engine { }}; } -macro_rules! 
get_msg_info { - ($args:ident) => {{ - MessageInfo::create($args.verbose(), $args.quiet(), $args.color()) - }}; -} - pub fn main() -> cross::Result<()> { cross::install_panic_hook()?; let cli = Cli::parse(); - match cli.command { + let mut msg_info = MessageInfo::create(cli.verbose, cli.quiet, cli.color.as_deref())?; + match &cli.command { Commands::Images(args) => { - let mut msg_info = get_msg_info!(args)?; let engine = get_engine!(args, false, msg_info)?; args.run(engine, &mut msg_info)?; } Commands::Volumes(args) => { - let mut msg_info = get_msg_info!(args)?; let engine = get_engine!(args, args.docker_in_docker(), msg_info)?; - args.run(engine, cli.toolchain.as_deref(), &mut msg_info)?; + args.run(engine, cli.toolchain.as_ref(), &mut msg_info)?; } Commands::Containers(args) => { - let mut msg_info = get_msg_info!(args)?; let engine = get_engine!(args, false, msg_info)?; args.run(engine, &mut msg_info)?; } Commands::Clean(args) => { - let mut msg_info = get_msg_info!(args)?; let engine = get_engine!(args, false, msg_info)?; args.run(engine, &mut msg_info)?; } + Commands::Run(args) => { + let engine = get_engine!(args, false, msg_info)?; + args.run(&cli, engine, &mut msg_info)?; + } } Ok(()) diff --git a/src/bin/cross.rs b/src/bin/cross.rs index f2f8294ea..ba66146fb 100644 --- a/src/bin/cross.rs +++ b/src/bin/cross.rs @@ -1,10 +1,53 @@ #![deny(missing_debug_implementations, rust_2018_idioms)] +use std::{ + env, + io::{self, Write}, +}; + +use cross::{ + cargo, cli, rustc, + shell::{self, Verbosity}, + OutputExt, Subcommand, +}; + pub fn main() -> cross::Result<()> { cross::install_panic_hook()?; cross::install_termination_hook()?; - let status = cross::run()?; + let target_list = rustc::target_list(&mut Verbosity::Quiet.into())?; + let args = cli::parse(&target_list)?; + let subcommand = args.subcommand.clone(); + let mut msg_info = shell::MessageInfo::create(args.verbose, args.quiet, args.color.as_deref())?; + let status = match cross::run(args, target_list, &mut msg_info)? { + Some(status) => status, + None if !msg_info.should_fail() => { + // if we fallback to the host cargo, use the same invocation that was made to cross + let argv: Vec = env::args().skip(1).collect(); + msg_info.note("Falling back to `cargo` on the host.")?; + match subcommand { + Some(Subcommand::List) => { + // this won't print in order if we have both stdout and stderr. + let out = cargo::run_and_get_output(&argv, &mut msg_info)?; + let stdout = out.stdout()?; + if out.status.success() && cli::is_subcommand_list(&stdout) { + cli::fmt_subcommands(&stdout, &mut msg_info)?; + } else { + // Not a list subcommand, which can happen with weird edge-cases. 
+ print!("{}", stdout); + io::stdout().flush().expect("could not flush"); + } + out.status + } + _ => cargo::run(&argv, &mut msg_info)?, + } + } + None => { + msg_info.error("Errors encountered before cross compilation, aborting.")?; + msg_info.note("Disable this with `CROSS_NO_WARNINGS=0`")?; + std::process::exit(1); + } + }; let code = status .code() .ok_or_else(|| eyre::Report::msg("Cargo process terminated by signal"))?; diff --git a/src/build.rs b/src/build.rs index 84677cb38..11bbcc80c 100644 --- a/src/build.rs +++ b/src/build.rs @@ -1,7 +1,7 @@ use std::env; use std::error::Error; use std::fs::File; -use std::io::{self, Write}; +use std::io::Write; use std::path::PathBuf; use std::process::Command; @@ -24,10 +24,10 @@ fn main() { .write_all(commit_info().as_bytes()) .unwrap(); - File::create(out_dir.join("docker-images.rs")) - .unwrap() - .write_all(docker_images().as_bytes()) - .unwrap(); + if env::var("CROSS_SANDBOXED").is_ok() { + println!("cargo:rustc-cfg=cross_sandboxed"); + } + println!("cargo:rerun-if-env-changed=CROSS_SANDBOXED"); } fn commit_info() -> String { @@ -39,7 +39,7 @@ fn commit_info() -> String { fn commit_hash() -> Result { let output = Command::new("git") - .args(&["rev-parse", "--short", "HEAD"]) + .args(["rev-parse", "--short", "HEAD"]) .output()?; if output.status.success() { @@ -51,7 +51,7 @@ fn commit_hash() -> Result { fn commit_date() -> Result { let output = Command::new("git") - .args(&["log", "-1", "--date=short", "--pretty=format:%cd"]) + .args(["log", "-1", "--date=short", "--pretty=format:%cd"]) .output()?; if output.status.success() { @@ -60,26 +60,3 @@ fn commit_date() -> Result { Err(Some {}) } } - -fn docker_images() -> String { - let mut images = String::from("["); - let mut dir = PathBuf::from(env::var_os("CARGO_MANIFEST_DIR").unwrap()); - dir.push("docker"); - - let dir = dir.read_dir().unwrap(); - let mut paths = dir.collect::>>().unwrap(); - paths.sort_by_key(|e| e.path()); - - for entry in paths { - let path = entry.path(); - let file_name = path.file_name().unwrap().to_str().unwrap(); - if file_name.starts_with("Dockerfile.") { - images.push('"'); - images.push_str(&file_name.replacen("Dockerfile.", "", 1)); - images.push_str("\", "); - } - } - - images.push(']'); - images -} diff --git a/src/cargo.rs b/src/cargo.rs index f80d29fa3..010c655ef 100644 --- a/src/cargo.rs +++ b/src/cargo.rs @@ -7,13 +7,13 @@ use crate::errors::*; use crate::extensions::CommandExt; use crate::shell::{self, MessageInfo}; -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, PartialEq, Eq)] pub enum Subcommand { Build, Check, Doc, - Other, Run, + Rustdoc, Rustc, Test, Bench, @@ -21,13 +21,14 @@ pub enum Subcommand { Metadata, List, Clean, + Other(String), } impl Subcommand { #[must_use] pub fn needs_docker(self, is_remote: bool) -> bool { match self { - Subcommand::Other | Subcommand::List => false, + Subcommand::Other(_) | Subcommand::List => false, Subcommand::Clean if !is_remote => false, _ => true, } @@ -58,12 +59,13 @@ impl<'a> From<&'a str> for Subcommand { "doc" => Subcommand::Doc, "r" | "run" => Subcommand::Run, "rustc" => Subcommand::Rustc, + "rustdoc" => Subcommand::Rustdoc, "t" | "test" => Subcommand::Test, "bench" => Subcommand::Bench, "clippy" => Subcommand::Clippy, "metadata" => Subcommand::Metadata, "--list" => Subcommand::List, - _ => Subcommand::Other, + command => Subcommand::Other(command.to_owned()), } } } @@ -74,6 +76,7 @@ pub struct CargoMetadata { pub target_directory: PathBuf, pub packages: Vec, pub workspace_members: Vec, 
+ pub metadata: Option>, } impl CargoMetadata { @@ -103,6 +106,7 @@ pub struct Package { pub source: Option, pub version: String, pub license: Option, + pub metadata: Option>, } impl Package { @@ -132,7 +136,7 @@ pub fn cargo_metadata_with_args( if let Some(channel) = args.and_then(|x| x.channel.as_deref()) { command.arg(format!("+{channel}")); } - command.arg("metadata").args(&["--format-version", "1"]); + command.arg("metadata").args(["--format-version", "1"]); if let Some(cd) = cd { command.current_dir(cd); } diff --git a/src/cli.rs b/src/cli.rs index c4dae060a..982ebde66 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -1,14 +1,17 @@ -use std::{env, path::PathBuf}; +use std::env; +use std::path::{Path, PathBuf}; use crate::cargo::Subcommand; use crate::errors::Result; +use crate::file::{absolute_path, PathExt}; use crate::rustc::TargetList; use crate::shell::{self, MessageInfo}; use crate::Target; #[derive(Debug)] pub struct Args { - pub all: Vec, + pub cargo_args: Vec, + pub rest_args: Vec, pub subcommand: Option, pub channel: Option, pub target: Option, @@ -16,20 +19,11 @@ pub struct Args { pub target_dir: Option, pub manifest_path: Option, pub version: bool, - pub verbose: bool, + pub verbose: u8, pub quiet: bool, pub color: Option, } -// Fix for issue #581. target_dir must be absolute. -fn absolute_path(path: PathBuf) -> Result { - Ok(if path.is_absolute() { - path - } else { - env::current_dir()?.join(path) - }) -} - pub fn is_subcommand_list(stdout: &str) -> bool { stdout.starts_with("Installed Commands:") } @@ -42,7 +36,7 @@ pub fn group_subcommands(stdout: &str) -> (Vec<&str>, Vec<&str>) { let first = line.split_whitespace().next(); if let Some(command) = first { match Subcommand::from(command) { - Subcommand::Other => host.push(line), + Subcommand::Other(_) => host.push(line), _ => cross.push(line), } } @@ -61,23 +55,29 @@ pub fn fmt_subcommands(stdout: &str, msg_info: &mut MessageInfo) -> Result<()> { } if !host.is_empty() { msg_info.print("Host Commands:")?; - for line in &cross { + for line in &host { msg_info.print(line)?; } } Ok(()) } -fn is_verbose(arg: &str) -> bool { +fn is_verbose(arg: &str) -> u8 { match arg { - "--verbose" => true, + "--verbose" => 1, // cargo can handle any number of "v"s a => { - a.starts_with('-') + if a.starts_with('-') && a.len() >= 2 && a.get(1..) 
.map(|a| a.chars().all(|x| x == 'v')) .unwrap_or_default() + { + // string must be of form `-v[v]*` here + a.len() as u8 - 1 + } else { + 0 + } } } } @@ -104,36 +104,59 @@ fn is_value_arg(arg: &str, field: &str) -> Option { fn parse_next_arg( arg: String, out: &mut Vec, - parse: impl Fn(&str) -> T, + parse: impl Fn(&str) -> Result, + store_cb: impl Fn(String) -> Result, iter: &mut impl Iterator, -) -> Option { +) -> Result> { out.push(arg); match iter.next() { Some(next) => { - let result = parse(&next); - out.push(next); - Some(result) + let result = parse(&next)?; + out.push(store_cb(next)?); + Ok(Some(result)) } - None => None, + None => Ok(None), } } -fn parse_equal_arg(arg: String, out: &mut Vec, parse: impl Fn(&str) -> T) -> T { - let result = parse(arg.split_once('=').expect("argument should contain `=`").1); - out.push(arg); +fn parse_equal_arg( + arg: String, + out: &mut Vec, + parse: impl Fn(&str) -> Result, + store_cb: impl Fn(String) -> Result, +) -> Result { + let (first, second) = arg.split_once('=').expect("argument should contain `=`"); + let result = parse(second)?; + out.push(format!("{first}={}", store_cb(second.to_owned())?)); - result + Ok(result) } -fn parse_manifest_path(path: &str) -> Option { +fn parse_manifest_path(path: &str) -> Result> { let p = PathBuf::from(path); - env::current_dir().ok().map(|cwd| cwd.join(p)) + Ok(absolute_path(p).ok()) } fn parse_target_dir(path: &str) -> Result { absolute_path(PathBuf::from(path)) } +fn identity(arg: String) -> Result { + Ok(arg) +} + +fn str_to_owned(arg: &str) -> Result { + Ok(arg.to_owned()) +} + +fn store_manifest_path(path: String) -> Result { + Path::new(&path).as_posix_relative() +} + +fn store_target_dir(_: String) -> Result { + Ok("/target".to_owned()) +} + pub fn parse(target_list: &TargetList) -> Result { let mut channel = None; let mut target = None; @@ -141,10 +164,11 @@ pub fn parse(target_list: &TargetList) -> Result { let mut manifest_path: Option = None; let mut target_dir = None; let mut sc = None; - let mut all: Vec = Vec::new(); + let mut cargo_args: Vec = Vec::new(); + let mut rest_args: Vec = Vec::new(); let mut version = false; let mut quiet = false; - let mut verbose = false; + let mut verbose = 0; let mut color = None; { @@ -153,68 +177,111 @@ pub fn parse(target_list: &TargetList) -> Result { if arg.is_empty() { continue; } - if is_verbose(arg.as_str()) { - verbose = true; - all.push(arg); + if matches!(arg.as_str(), "--") { + rest_args.push(arg); + rest_args.extend(args.by_ref()); + } else if let v @ 1.. = is_verbose(arg.as_str()) { + verbose += v; + cargo_args.push(arg); } else if matches!(arg.as_str(), "--version" | "-V") { version = true; } else if matches!(arg.as_str(), "--quiet" | "-q") { quiet = true; - all.push(arg); + cargo_args.push(arg); } else if let Some(kind) = is_value_arg(&arg, "--color") { color = match kind { ArgKind::Next => { - match parse_next_arg(arg, &mut all, ToOwned::to_owned, &mut args) { + match parse_next_arg( + arg, + &mut cargo_args, + str_to_owned, + identity, + &mut args, + )? 
{ Some(c) => Some(c), None => shell::invalid_color(None), } } - ArgKind::Equal => Some(parse_equal_arg(arg, &mut all, ToOwned::to_owned)), + ArgKind::Equal => Some(parse_equal_arg( + arg, + &mut cargo_args, + str_to_owned, + identity, + )?), }; } else if let Some(kind) = is_value_arg(&arg, "--manifest-path") { manifest_path = match kind { - ArgKind::Next => { - parse_next_arg(arg, &mut all, parse_manifest_path, &mut args).flatten() - } - ArgKind::Equal => parse_equal_arg(arg, &mut all, parse_manifest_path), + ArgKind::Next => parse_next_arg( + arg, + &mut cargo_args, + parse_manifest_path, + store_manifest_path, + &mut args, + )? + .flatten(), + ArgKind::Equal => parse_equal_arg( + arg, + &mut cargo_args, + parse_manifest_path, + store_manifest_path, + )?, }; } else if let ("+", ch) = arg.split_at(1) { channel = Some(ch.to_owned()); } else if let Some(kind) = is_value_arg(&arg, "--target") { + let parse_target = |t: &str| Ok(Target::from(t, target_list)); target = match kind { ArgKind::Next => { - parse_next_arg(arg, &mut all, |t| Target::from(t, target_list), &mut args) + parse_next_arg(arg, &mut cargo_args, parse_target, identity, &mut args)? } - ArgKind::Equal => Some(parse_equal_arg(arg, &mut all, |t| { - Target::from(t, target_list) - })), + ArgKind::Equal => Some(parse_equal_arg( + arg, + &mut cargo_args, + parse_target, + identity, + )?), }; } else if let Some(kind) = is_value_arg(&arg, "--features") { match kind { ArgKind::Next => { - let next = parse_next_arg(arg, &mut all, ToOwned::to_owned, &mut args); + let next = parse_next_arg( + arg, + &mut cargo_args, + str_to_owned, + identity, + &mut args, + )?; if let Some(feature) = next { features.push(feature); } } ArgKind::Equal => { - features.push(parse_equal_arg(arg, &mut all, ToOwned::to_owned)); + features.push(parse_equal_arg( + arg, + &mut cargo_args, + str_to_owned, + identity, + )?); } } } else if let Some(kind) = is_value_arg(&arg, "--target-dir") { match kind { ArgKind::Next => { - all.push(arg); - if let Some(td) = args.next() { - target_dir = Some(parse_target_dir(&td)?); - all.push("/target".to_owned()); - } + target_dir = parse_next_arg( + arg, + &mut cargo_args, + parse_target_dir, + store_target_dir, + &mut args, + )?; } ArgKind::Equal => { - target_dir = Some(parse_target_dir( - arg.split_once('=').expect("argument should contain `=`").1, + target_dir = Some(parse_equal_arg( + arg, + &mut cargo_args, + parse_target_dir, + store_target_dir, )?); - all.push("--target-dir=/target".into()); } } } else { @@ -222,13 +289,14 @@ pub fn parse(target_list: &TargetList) -> Result { sc = Some(Subcommand::from(arg.as_ref())); } - all.push(arg.clone()); + cargo_args.push(arg.clone()); } } } Ok(Args { - all, + cargo_args, + rest_args, subcommand: sc, channel, target, @@ -248,13 +316,13 @@ mod tests { #[test] fn is_verbose_test() { - assert!(!is_verbose("b")); - assert!(!is_verbose("x")); - assert!(!is_verbose("-")); - assert!(!is_verbose("-V")); - assert!(is_verbose("-v")); - assert!(is_verbose("--verbose")); - assert!(is_verbose("-vvvv")); - assert!(!is_verbose("-version")); + assert!(is_verbose("b") == 0); + assert!(is_verbose("x") == 0); + assert!(is_verbose("-") == 0); + assert!(is_verbose("-V") == 0); + assert!(is_verbose("-v") == 1); + assert!(is_verbose("--verbose") == 1); + assert!(is_verbose("-vvvv") == 4); + assert!(is_verbose("-version") == 0); } } diff --git a/src/config.rs b/src/config.rs index a5d583016..0979ef7dc 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,16 +1,50 @@ +use crate::cross_toml::BuildStd; use 
crate::docker::custom::PreBuild; +use crate::docker::{ImagePlatform, PossibleImage}; use crate::shell::MessageInfo; use crate::{CrossToml, Result, Target, TargetList}; +use std::borrow::Cow; use std::collections::HashMap; use std::env; use std::str::FromStr; #[derive(Debug)] -struct Environment(&'static str, Option>); +pub struct ConfVal { + pub build: Option, + pub target: Option, +} + +impl ConfVal { + pub fn new(build: Option, target: Option) -> Self { + Self { build, target } + } + + pub fn map U>(self, f: F) -> ConfVal { + ConfVal { + build: self.build.map(&f), + target: self.target.map(&f), + } + } +} + +impl Default for ConfVal { + fn default() -> Self { + Self::new(None, None) + } +} + +impl PartialEq<(Option, Option)> for ConfVal { + fn eq(&self, other: &(Option, Option)) -> bool { + self.build == other.0 && self.target == other.1 + } +} + +#[derive(Debug)] +pub(crate) struct Environment(&'static str, Option>); impl Environment { - fn new(map: Option>) -> Self { + pub(crate) fn new(map: Option>) -> Self { Environment("CROSS", map) } @@ -30,12 +64,11 @@ impl Environment { var: &str, target: &Target, convert: impl Fn(&str) -> T, - ) -> (Option, Option) { - let target_values = self.get_target_var(target, var).map(|ref s| convert(s)); - + ) -> ConfVal { let build_values = self.get_build_var(var).map(|ref s| convert(s)); + let target_values = self.get_target_var(target, var).map(|ref s| convert(s)); - (build_values, target_values) + ConfVal::new(build_values, target_values) } fn target_path(target: &Target, key: &str) -> String { @@ -58,27 +91,63 @@ impl Environment { self.get_var(&self.build_var_name(&Self::target_path(target, key))) } - fn xargo(&self, target: &Target) -> (Option, Option) { + fn xargo(&self, target: &Target) -> ConfVal { self.get_values_for("XARGO", target, bool_from_envvar) } - fn build_std(&self, target: &Target) -> (Option, Option) { - self.get_values_for("BUILD_STD", target, bool_from_envvar) + fn build_std(&self, target: &Target) -> ConfVal { + self.get_values_for("BUILD_STD", target, |v| { + if let Some(value) = try_bool_from_envvar(v) { + BuildStd::Bool(value) + } else { + BuildStd::Crates(v.split(',').map(str::to_owned).collect()) + } + }) + } + + fn zig(&self, target: &Target) -> ConfVal { + self.get_values_for("ZIG", target, bool_from_envvar) + } + + fn zig_version(&self, target: &Target) -> ConfVal { + self.get_values_for("ZIG_VERSION", target, ToOwned::to_owned) + } + + fn zig_image(&self, target: &Target) -> Result> { + let get_build = |env: &Environment, var: &str| env.get_build_var(var); + let get_target = |env: &Environment, var: &str| env.get_target_var(target, var); + let env_build = get_possible_image( + self, + "ZIG_IMAGE", + "ZIG_IMAGE_TOOLCHAIN", + get_build, + get_build, + )?; + let env_target = get_possible_image( + self, + "ZIG_IMAGE", + "ZIG_IMAGE_TOOLCHAIN", + get_target, + get_target, + )?; + + Ok(ConfVal::new(env_build, env_target)) } - fn image(&self, target: &Target) -> Option { - self.get_target_var(target, "IMAGE") + fn image(&self, target: &Target) -> Result> { + let get_target = |env: &Environment, var: &str| env.get_target_var(target, var); + get_possible_image(self, "IMAGE", "IMAGE_TOOLCHAIN", get_target, get_target) } - fn dockerfile(&self, target: &Target) -> (Option, Option) { - self.get_values_for("DOCKERFILE", target, |s| s.to_owned()) + fn dockerfile(&self, target: &Target) -> ConfVal { + self.get_values_for("DOCKERFILE", target, ToOwned::to_owned) } - fn dockerfile_context(&self, target: &Target) -> (Option, Option) { - 
self.get_values_for("DOCKERFILE_CONTEXT", target, |s| s.to_owned()) + fn dockerfile_context(&self, target: &Target) -> ConfVal { + self.get_values_for("DOCKERFILE_CONTEXT", target, ToOwned::to_owned) } - fn pre_build(&self, target: &Target) -> (Option, Option) { + fn pre_build(&self, target: &Target) -> ConfVal { self.get_values_for("PRE_BUILD", target, |v| { let v: Vec<_> = v.split('\n').map(String::from).collect(); if v.len() == 1 { @@ -96,11 +165,11 @@ impl Environment { self.get_target_var(target, "RUNNER") } - fn passthrough(&self, target: &Target) -> (Option>, Option>) { + fn passthrough(&self, target: &Target) -> ConfVal> { self.get_values_for("ENV_PASSTHROUGH", target, split_to_cloned_by_ws) } - fn volumes(&self, target: &Target) -> (Option>, Option>) { + fn volumes(&self, target: &Target) -> ConfVal> { self.get_values_for("ENV_VOLUMES", target, split_to_cloned_by_ws) } @@ -110,27 +179,64 @@ impl Environment { } fn doctests(&self) -> Option { - env::var("CROSS_UNSTABLE_ENABLE_DOCTESTS") + self.get_var("CROSS_UNSTABLE_ENABLE_DOCTESTS") .map(|s| bool_from_envvar(&s)) - .ok() } fn custom_toolchain(&self) -> bool { - std::env::var("CROSS_CUSTOM_TOOLCHAIN").is_ok() + self.get_var("CROSS_CUSTOM_TOOLCHAIN") + .map_or(false, |s| bool_from_envvar(&s)) + } + + fn custom_toolchain_compat(&self) -> Option { + self.get_var("CUSTOM_TOOLCHAIN_COMPAT") + } + + fn build_opts(&self) -> Option { + self.get_var("CROSS_BUILD_OPTS") } } +fn get_possible_image( + env: &Environment, + image_var: &str, + toolchain_var: &str, + get_image: impl Fn(&Environment, &str) -> Option, + get_toolchain: impl Fn(&Environment, &str) -> Option, +) -> Result> { + get_image(env, image_var) + .map(Into::into) + .map(|mut i: PossibleImage| { + if let Some(toolchain) = get_toolchain(env, toolchain_var) { + i.toolchain = toolchain + .split(',') + .map(|t| ImagePlatform::from_target(t.trim().into())) + .collect::>>()?; + Ok(i) + } else { + Ok(i) + } + }) + .transpose() +} + fn split_to_cloned_by_ws(string: &str) -> Vec { string.split_whitespace().map(String::from).collect() } +/// this takes the value of the environment variable, +/// so you should call `bool_from_envvar(env::var("FOO"))` pub fn bool_from_envvar(envvar: &str) -> bool { + try_bool_from_envvar(envvar).unwrap_or(!envvar.is_empty()) +} + +pub fn try_bool_from_envvar(envvar: &str) -> Option { if let Ok(value) = bool::from_str(envvar) { - value + Some(value) } else if let Ok(value) = i32::from_str(envvar) { - value != 0 + Some(value != 0) } else { - !envvar.is_empty() + None } } @@ -160,7 +266,7 @@ impl Config { .replace(|c| c == '-' || c == '_', "") .to_lowercase(); if mentioned_target != target && mentioned_target_norm == target_norm { - msg_info.warn("a target named \"{mentioned_target}\" is mentioned in the Cross configuration, but the current specified target is \"{target}\".")?; + msg_info.warn(format_args!("a target named \"{mentioned_target}\" is mentioned in the Cross configuration, but the current specified target is \"{target}\"."))?; msg_info.status(" > Is the target misspelled in the Cross configuration?")?; } } @@ -168,62 +274,50 @@ impl Config { Ok(()) } - fn bool_from_config( + fn get_from_value_inner( &self, target: &Target, - env: impl Fn(&Environment, &Target) -> (Option, Option), - config: impl Fn(&CrossToml, &Target) -> (Option, Option), - ) -> Option { - let (env_build, env_target) = env(&self.env, target); - let (toml_build, toml_target) = if let Some(ref toml) = self.toml { - config(toml, target) - } else { - (None, None) - }; + env: impl 
for<'a> FnOnce(&'a Environment, &Target) -> ConfVal, + config: impl for<'a> FnOnce(&'a CrossToml, &Target) -> ConfVal>, + ) -> Option + where + U: ToOwned + ?Sized, + { + let env = env(&self.env, target); + let toml = self + .toml + .as_ref() + .map(|toml| config(toml, target)) + .unwrap_or_default(); - match (env_target, toml_target) { + match (env.target, toml.target) { (Some(value), _) => return Some(value), - (None, Some(value)) => return Some(value), + (None, Some(value)) => return Some(value.into_owned()), (None, None) => {} - }; + } - match (env_build, toml_build) { + match (env.build, toml.build) { (Some(value), _) => return Some(value), - (None, Some(value)) => return Some(value), + (None, Some(value)) => return Some(value.into_owned()), (None, None) => {} - }; + } None } - fn string_from_config( - &self, - target: &Target, - env: impl Fn(&Environment, &Target) -> Option, - config: impl Fn(&CrossToml, &Target) -> Option, - ) -> Result> { - let env_value = env(&self.env, target); - if let Some(env_value) = env_value { - return Ok(Some(env_value)); - } - self.toml - .as_ref() - .map_or(Ok(None), |t| Ok(config(t, target))) - } - fn vec_from_config( &self, target: &Target, - env: impl for<'a> Fn(&'a Environment, &Target) -> (Option>, Option>), - config: impl for<'a> Fn(&'a CrossToml, &Target) -> (Option<&'a [String]>, Option<&'a [String]>), + env: impl for<'a> FnOnce(&'a Environment, &Target) -> ConfVal>, + config: impl for<'a> FnOnce(&'a CrossToml, &Target) -> ConfVal<&'a [String]>, sum: bool, - ) -> Result>> { + ) -> Option> { if sum { - let (mut env_build, env_target) = env(&self.env, target); - env_build - .as_mut() - .map(|b| env_target.map(|mut t| b.append(&mut t))); - self.sum_of_env_toml_values(env_build, |t| config(t, target)) + let mut env = env(&self.env, target); + if let (Some(b), Some(t)) = (&mut env.build, &mut env.target) { + b.append(t); + } + self.sum_of_env_toml_values(env.build, |t| config(t, target)) } else { self.get_from_ref(target, env, config) } @@ -232,55 +326,72 @@ impl Config { fn get_from_ref( &self, target: &Target, - env: impl for<'a> Fn(&'a Environment, &Target) -> (Option, Option), - config: impl for<'a> Fn(&'a CrossToml, &Target) -> (Option<&'a U>, Option<&'a U>), - ) -> Result> + env: impl for<'a> FnOnce(&'a Environment, &Target) -> ConfVal, + config: impl for<'a> FnOnce(&'a CrossToml, &Target) -> ConfVal<&'a U>, + ) -> Option where U: ToOwned + ?Sized, { - let (env_build, env_target) = env(&self.env, target); - - if let Some(env_target) = env_target { - return Ok(Some(env_target)); - } - - let (build, target) = self - .toml - .as_ref() - .map(|t| config(t, target)) - .unwrap_or_default(); - - // FIXME: let expression - if target.is_none() && env_build.is_some() { - return Ok(env_build); - } + self.get_from_value_inner(target, env, |toml, target| { + config(toml, target).map(|v| Cow::Borrowed(v)) + }) + } - if target.is_none() { - Ok(build.map(ToOwned::to_owned)) - } else { - Ok(target.map(ToOwned::to_owned)) - } + fn get_from_value( + &self, + target: &Target, + env: impl FnOnce(&Environment, &Target) -> ConfVal, + config: impl FnOnce(&CrossToml, &Target) -> ConfVal, + ) -> Option + where + T: ToOwned, + { + self.get_from_value_inner::(target, env, |toml, target| { + config(toml, target).map(|v| Cow::Owned(v)) + }) } #[cfg(test)] - fn new_with(toml: Option, env: Environment) -> Self { + pub(crate) fn new_with(toml: Option, env: Environment) -> Self { Config { toml, env } } pub fn xargo(&self, target: &Target) -> Option { - 
self.bool_from_config(target, Environment::xargo, CrossToml::xargo) + self.get_from_value(target, Environment::xargo, CrossToml::xargo) + } + + pub fn build_std(&self, target: &Target) -> Option { + self.get_from_ref(target, Environment::build_std, CrossToml::build_std) + } + + pub fn zig(&self, target: &Target) -> Option { + self.get_from_value(target, Environment::zig, CrossToml::zig) } - pub fn build_std(&self, target: &Target) -> Option { - self.bool_from_config(target, Environment::build_std, CrossToml::build_std) + pub fn zig_version(&self, target: &Target) -> Option { + self.get_from_value(target, Environment::zig_version, CrossToml::zig_version) } - pub fn image(&self, target: &Target) -> Result> { - self.string_from_config(target, Environment::image, CrossToml::image) + pub fn zig_image(&self, target: &Target) -> Result> { + let env = self.env.zig_image(target)?; + Ok(self.get_from_value(target, |_, _| env, CrossToml::zig_image)) } - pub fn runner(&self, target: &Target) -> Result> { - self.string_from_config(target, Environment::runner, CrossToml::runner) + pub fn image(&self, target: &Target) -> Result> { + let env = self.env.image(target)?; + Ok(self.get_from_ref( + target, + |_, _| ConfVal::new(None, env), + |toml, target| ConfVal::new(None, toml.image(target)), + )) + } + + pub fn runner(&self, target: &Target) -> Option { + self.get_from_ref( + target, + |env, target| ConfVal::new(None, env.runner(target)), + |toml, target| ConfVal::new(None, toml.runner(target)), + ) } pub fn doctests(&self) -> Option { @@ -291,7 +402,15 @@ impl Config { self.env.custom_toolchain() } - pub fn env_passthrough(&self, target: &Target) -> Result>> { + pub fn custom_toolchain_compat(&self) -> Option { + self.env.custom_toolchain_compat() + } + + pub fn build_opts(&self) -> Option { + self.env.build_opts() + } + + pub fn env_passthrough(&self, target: &Target) -> Option> { self.vec_from_config( target, Environment::passthrough, @@ -300,7 +419,7 @@ impl Config { ) } - pub fn env_volumes(&self, target: &Target) -> Result>> { + pub fn env_volumes(&self, target: &Target) -> Option> { self.get_from_ref(target, Environment::volumes, CrossToml::env_volumes) } @@ -313,11 +432,11 @@ impl Config { .and_then(|t| t.default_target(target_list)) } - pub fn dockerfile(&self, target: &Target) -> Result> { + pub fn dockerfile(&self, target: &Target) -> Option { self.get_from_ref(target, Environment::dockerfile, CrossToml::dockerfile) } - pub fn dockerfile_context(&self, target: &Target) -> Result> { + pub fn dockerfile_context(&self, target: &Target) -> Option { self.get_from_ref( target, Environment::dockerfile_context, @@ -325,46 +444,40 @@ impl Config { ) } - pub fn dockerfile_build_args( - &self, - target: &Target, - ) -> Result>> { + pub fn dockerfile_build_args(&self, target: &Target) -> Option> { // This value does not support env variables self.toml .as_ref() - .map_or(Ok(None), |t| Ok(t.dockerfile_build_args(target))) + .and_then(|t| t.dockerfile_build_args(target)) } - pub fn pre_build(&self, target: &Target) -> Result> { + pub fn pre_build(&self, target: &Target) -> Option { self.get_from_ref(target, Environment::pre_build, CrossToml::pre_build) } // FIXME: remove when we disable sums in 0.3.0. 
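A minimal, self-contained sketch of the lookup order these accessors inherit from `get_from_value_inner` (the `resolve` helper and the values in the test are hypothetical, purely illustrative): a target-scoped environment variable wins over the target table in Cross.toml, which wins over the build-scoped environment variable, which in turn wins over the build table.

    // Hypothetical model of the precedence implemented above:
    // env.target > toml.target > env.build > toml.build.
    fn resolve<T>(env: ConfVal<T>, toml: ConfVal<T>) -> Option<T> {
        env.target.or(toml.target).or(env.build).or(toml.build)
    }

    #[test]
    fn target_scope_beats_build_scope() {
        // CROSS_BUILD_XARGO=0 plus `[target.<triple>] xargo = true`:
        // the Cross.toml target value wins, because no target-scoped
        // environment variable is set.
        let env = ConfVal::new(Some(false), None);
        let toml = ConfVal::new(None, Some(true));
        assert_eq!(resolve(env, toml), Some(true));
    }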
fn sum_of_env_toml_values<'a>( &'a self, - env_values: Option>, - toml_getter: impl FnOnce(&'a CrossToml) -> (Option<&'a [String]>, Option<&'a [String]>), - ) -> Result>> { + env_values: Option>, + toml_getter: impl FnOnce(&'a CrossToml) -> ConfVal<&'a [String]>, + ) -> Option> { let mut defined = false; let mut collect = vec![]; - if let Some(vars) = env_values { - collect.extend(vars.as_ref().iter().cloned()); - defined = true; - } else if let Some((build, target)) = self.toml.as_ref().map(toml_getter) { - if let Some(build) = build { + if env_values.is_some() { + return env_values; + } else if let Some(toml) = self.toml.as_ref().map(toml_getter) { + if let Some(build) = toml.build { collect.extend(build.iter().cloned()); defined = true; } - if let Some(target) = target { + + if let Some(target) = toml.target { collect.extend(target.iter().cloned()); defined = true; } } - if !defined { - Ok(None) - } else { - Ok(Some(collect)) - } + + defined.then_some(collect) } } @@ -410,28 +523,42 @@ mod tests { } mod test_environment { - use super::*; #[test] - pub fn parse_error_in_env() { + pub fn parse_error_in_env() -> Result<()> { let mut map = std::collections::HashMap::new(); map.insert("CROSS_BUILD_XARGO", "tru"); map.insert("CROSS_BUILD_STD", "false"); + map.insert("CROSS_BUILD_ZIG_IMAGE", "zig:local"); let env = Environment::new(Some(map)); assert_eq!(env.xargo(&target()), (Some(true), None)); - assert_eq!(env.build_std(&target()), (Some(false), None)); + assert_eq!( + env.build_std(&target()), + (Some(BuildStd::Bool(false)), None) + ); + assert_eq!(env.zig(&target()), (None, None)); + assert_eq!(env.zig_version(&target()), (None, None)); + assert_eq!(env.zig_image(&target())?, (Some("zig:local".into()), None)); + + Ok(()) } #[test] - pub fn build_and_target_set_returns_tuple() { + pub fn build_and_target_set_returns_tuple() -> Result<()> { let mut map = std::collections::HashMap::new(); map.insert("CROSS_BUILD_XARGO", "true"); + map.insert("CROSS_BUILD_ZIG", "true"); + map.insert("CROSS_BUILD_ZIG_VERSION", "2.17"); map.insert("CROSS_TARGET_AARCH64_UNKNOWN_LINUX_GNU_XARGO", "false"); let env = Environment::new(Some(map)); assert_eq!(env.xargo(&target()), (Some(true), Some(false))); + assert_eq!(env.zig(&target()), (Some(true), None)); + assert_eq!(env.zig_version(&target()), (Some("2.17".into()), None)); + + Ok(()) } #[test] @@ -461,7 +588,7 @@ mod tests { let env = Environment::new(Some(map)); - let (build, target) = env.passthrough(&target()); + let ConfVal { build, target } = env.passthrough(&target()); assert!(build.as_ref().unwrap().contains(&"TEST1".to_owned())); assert!(build.as_ref().unwrap().contains(&"TEST2".to_owned())); assert!(target.as_ref().unwrap().contains(&"PASS1".to_owned())); @@ -471,7 +598,6 @@ mod tests { #[cfg(test)] mod test_config { - use super::*; macro_rules! s { @@ -482,7 +608,7 @@ mod tests { fn toml(content: &str) -> Result { Ok( - CrossToml::parse_from_cross(content, &mut MessageInfo::default()) + CrossToml::parse_from_cross_str(content, None, &mut MessageInfo::default()) .wrap_err("couldn't parse toml")? 
.0, ) @@ -502,7 +628,7 @@ mod tests { assert_eq!(config.xargo(&target()), Some(true)); assert_eq!(config.build_std(&target()), None); assert_eq!( - config.pre_build(&target())?, + config.pre_build(&target()), Some(PreBuild::Lines(vec![ s!("apt-get update"), s!("apt-get install zlib-dev") @@ -516,13 +642,16 @@ mod tests { pub fn env_target_and_toml_target_xargo_target_then_use_env() -> Result<()> { let mut map = HashMap::new(); map.insert("CROSS_TARGET_AARCH64_UNKNOWN_LINUX_GNU_XARGO", "true"); - map.insert("CROSS_TARGET_AARCH64_UNKNOWN_LINUX_GNU_BUILD_STD", "true"); + map.insert("CROSS_TARGET_AARCH64_UNKNOWN_LINUX_GNU_BUILD_STD", "core"); let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_TARGET_XARGO_FALSE)?), env); assert_eq!(config.xargo(&target()), Some(true)); - assert_eq!(config.build_std(&target()), Some(true)); - assert_eq!(config.pre_build(&target())?, None); + assert_eq!( + config.build_std(&target()), + Some(BuildStd::Crates(vec!["core".to_owned()])) + ); + assert_eq!(config.pre_build(&target()), None); Ok(()) } @@ -536,7 +665,7 @@ mod tests { let config = Config::new_with(Some(toml(TOML_BUILD_XARGO_FALSE)?), env); assert_eq!(config.xargo(&target()), Some(true)); assert_eq!(config.build_std(&target()), None); - assert_eq!(config.pre_build(&target())?, None); + assert_eq!(config.pre_build(&target()), None); Ok(()) } @@ -552,7 +681,7 @@ mod tests { let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_BUILD_PRE_BUILD)?), env); assert_eq!( - config.pre_build(&target())?, + config.pre_build(&target()), Some(PreBuild::Single { line: s!("dpkg --add-architecture arm64"), env: true @@ -573,14 +702,14 @@ mod tests { let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_BUILD_DOCKERFILE)?), env); - assert_eq!(config.dockerfile(&target())?, Some(s!("Dockerfile4"))); - assert_eq!(config.dockerfile(&target2())?, Some(s!("Dockerfile3"))); + assert_eq!(config.dockerfile(&target()), Some(s!("Dockerfile4"))); + assert_eq!(config.dockerfile(&target2()), Some(s!("Dockerfile3"))); let map = HashMap::new(); let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_BUILD_DOCKERFILE)?), env); - assert_eq!(config.dockerfile(&target())?, Some(s!("Dockerfile2"))); - assert_eq!(config.dockerfile(&target2())?, Some(s!("Dockerfile1"))); + assert_eq!(config.dockerfile(&target()), Some(s!("Dockerfile2"))); + assert_eq!(config.dockerfile(&target2()), Some(s!("Dockerfile1"))); Ok(()) } @@ -591,11 +720,11 @@ mod tests { let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_ARRAYS_BOTH)?), env); assert_eq!( - config.env_passthrough(&target())?, + config.env_passthrough(&target()), Some(vec![s!("VAR1"), s!("VAR2"), s!("VAR3"), s!("VAR4")]) ); assert_eq!( - config.env_volumes(&target())?, + config.env_volumes(&target()), Some(vec![s!("VOLUME3"), s!("VOLUME4")]) ); @@ -608,11 +737,11 @@ mod tests { let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_ARRAYS_BUILD)?), env); assert_eq!( - config.env_passthrough(&target())?, + config.env_passthrough(&target()), Some(vec![s!("VAR1"), s!("VAR2")]) ); assert_eq!( - config.env_volumes(&target())?, + config.env_volumes(&target()), Some(vec![s!("VOLUME1"), s!("VOLUME2")]) ); @@ -625,11 +754,11 @@ mod tests { let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_ARRAYS_TARGET)?), env); assert_eq!( - config.env_passthrough(&target())?, + 
config.env_passthrough(&target()), Some(vec![s!("VAR3"), s!("VAR4")]) ); assert_eq!( - config.env_volumes(&target())?, + config.env_volumes(&target()), Some(vec![s!("VOLUME3"), s!("VOLUME4")]) ); @@ -642,9 +771,9 @@ mod tests { map.insert("CROSS_BUILD_ENV_VOLUMES", "VOLUME1 VOLUME2"); let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_BUILD_VOLUMES)?), env); - let expected = vec![s!("VOLUME1"), s!("VOLUME2")]; + let expected = [s!("VOLUME1"), s!("VOLUME2")]; - let result = config.env_volumes(&target()).unwrap().unwrap_or_default(); + let result = config.env_volumes(&target()).unwrap_or_default(); dbg!(&result); assert!(result.len() == 2); assert!(result.contains(&expected[0])); @@ -658,9 +787,9 @@ mod tests { let map = HashMap::new(); let env = Environment::new(Some(map)); let config = Config::new_with(Some(toml(TOML_BUILD_VOLUMES)?), env); - let expected = vec![s!("VOLUME3"), s!("VOLUME4")]; + let expected = [s!("VOLUME3"), s!("VOLUME4")]; - let result = config.env_volumes(&target()).unwrap().unwrap_or_default(); + let result = config.env_volumes(&target()).unwrap_or_default(); dbg!(&result); assert!(result.len() == 2); assert!(result.contains(&expected[0])); diff --git a/src/cross_toml.rs b/src/cross_toml.rs index 819143f05..40ae74317 100644 --- a/src/cross_toml.rs +++ b/src/cross_toml.rs @@ -1,6 +1,13 @@ -#![doc = include_str!("../docs/cross_toml.md")] - +//! The `Cross.toml` configuration file. +//! +//! For a detailed user documentation of the file and the contents please refer to the [docs in the +//! repo][1]. +//! +//! [1]: https://github.com/cross-rs/cross/blob/main/docs/config_file.md + +use crate::config::ConfVal; use crate::docker::custom::PreBuild; +use crate::docker::PossibleImage; use crate::shell::MessageInfo; use crate::{config, errors::*}; use crate::{Target, TargetList}; @@ -23,7 +30,9 @@ pub struct CrossBuildConfig { #[serde(default)] env: CrossEnvConfig, xargo: Option, - build_std: Option, + build_std: Option, + #[serde(default, deserialize_with = "opt_string_bool_or_struct")] + zig: Option, default_target: Option, #[serde(default, deserialize_with = "opt_string_or_string_vec")] pre_build: Option, @@ -36,8 +45,11 @@ pub struct CrossBuildConfig { #[serde(rename_all = "kebab-case")] pub struct CrossTargetConfig { xargo: Option, - build_std: Option, - image: Option, + build_std: Option, + #[serde(default, deserialize_with = "opt_string_bool_or_struct")] + zig: Option, + #[serde(default, deserialize_with = "opt_string_or_struct")] + image: Option, #[serde(default, deserialize_with = "opt_string_or_struct")] dockerfile: Option, #[serde(default, deserialize_with = "opt_string_or_string_vec")] @@ -47,6 +59,28 @@ pub struct CrossTargetConfig { env: CrossEnvConfig, } +#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Clone)] +#[serde(untagged, rename_all = "kebab-case")] +pub enum BuildStd { + Bool(bool), + Crates(Vec), +} + +impl Default for BuildStd { + fn default() -> Self { + Self::Bool(false) + } +} + +impl BuildStd { + pub fn enabled(&self) -> bool { + match self { + Self::Bool(enabled) => *enabled, + Self::Crates(arr) => !arr.is_empty(), + } + } +} + /// Dockerfile configuration #[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "kebab-case")] @@ -68,6 +102,44 @@ impl FromStr for CrossTargetDockerfileConfig { } } +/// Zig configuration +#[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] +#[serde(rename_all = "kebab-case")] +pub struct CrossZigConfig { + enable: Option, + version: Option, + 
#[serde(default, deserialize_with = "opt_string_or_struct")] + image: Option, +} + +impl From<&str> for CrossZigConfig { + fn from(s: &str) -> CrossZigConfig { + CrossZigConfig { + enable: Some(true), + version: Some(s.to_owned()), + image: None, + } + } +} + +impl From for CrossZigConfig { + fn from(s: bool) -> CrossZigConfig { + CrossZigConfig { + enable: Some(s), + version: None, + image: None, + } + } +} + +impl FromStr for CrossZigConfig { + type Err = std::convert::Infallible; + + fn from_str(s: &str) -> Result { + Ok(s.into()) + } +} + /// Cross configuration #[derive(Debug, Serialize, Deserialize, PartialEq, Eq, Default)] pub struct CrossToml { @@ -78,33 +150,18 @@ pub struct CrossToml { } impl CrossToml { - /// Parses the [`CrossToml`] from all of the config sources - pub fn parse( - cargo_toml: &str, - cross_toml: &str, - msg_info: &mut MessageInfo, - ) -> Result<(Self, BTreeSet)> { - let (cross_toml, mut unused) = Self::parse_from_cross(cross_toml, msg_info)?; - - if let Some((cargo_toml, u_cargo)) = Self::parse_from_cargo(cargo_toml, msg_info)? { - unused.extend(u_cargo.into_iter()); - Ok((cargo_toml.merge(cross_toml)?, unused)) - } else { - Ok((cross_toml, unused)) - } - } - /// Parses the [`CrossToml`] from a string - pub fn parse_from_cross( + pub fn parse_from_cross_str( toml_str: &str, + source: Option<&str>, msg_info: &mut MessageInfo, ) -> Result<(Self, BTreeSet)> { - let mut tomld = toml::Deserializer::new(toml_str); - Self::parse_from_deserializer(&mut tomld, msg_info) + let tomld = toml::Deserializer::new(toml_str); + Self::parse_from_deserializer(tomld, source, msg_info) } /// Parses the [`CrossToml`] from a string containing the Cargo.toml contents - pub fn parse_from_cargo( + pub fn parse_from_cargo_package_str( cargo_toml_str: &str, msg_info: &mut MessageInfo, ) -> Result)>> { @@ -117,6 +174,7 @@ impl CrossToml { if let Some(cross_meta) = cross_metadata_opt { Ok(Some(Self::parse_from_deserializer( cross_meta.clone(), + None, msg_info, )?)) } else { @@ -125,8 +183,9 @@ impl CrossToml { } /// Parses the [`CrossToml`] from a [`Deserializer`] - fn parse_from_deserializer<'de, D>( + pub fn parse_from_deserializer<'de, D>( deserializer: D, + source: Option<&str>, msg_info: &mut MessageInfo, ) -> Result<(Self, BTreeSet)> where @@ -139,8 +198,9 @@ impl CrossToml { })?; if !unused.is_empty() { - msg_info.warn(format!( - "found unused key(s) in Cross configuration:\n > {}", + msg_info.warn(format_args!( + "found unused key(s) in Cross configuration{}:\n > {}", + source.map(|s| format!(" at {s}")).unwrap_or_default(), unused.clone().into_iter().collect::>().join(", ") ))?; } @@ -207,7 +267,7 @@ impl CrossToml { } // Builds maps of objects - let mut self_map = to_map(&self)?; + let mut self_map = to_map(self)?; let other_map = to_map(other)?; merge_objects(&mut self_map, &other_map).ok_or_else(|| eyre::eyre!("could not merge"))?; @@ -215,12 +275,12 @@ impl CrossToml { } /// Returns the `target.{}.image` part of `Cross.toml` - pub fn image(&self, target: &Target) -> Option { - self.get_string(target, |_| None, |t| t.image.as_ref()) + pub fn image(&self, target: &Target) -> Option<&PossibleImage> { + self.get_target(target).and_then(|t| t.image.as_ref()) } /// Returns the `{}.dockerfile` or `{}.dockerfile.file` part of `Cross.toml` - pub fn dockerfile(&self, target: &Target) -> (Option<&String>, Option<&String>) { + pub fn dockerfile(&self, target: &Target) -> ConfVal<&String> { self.get_ref( target, |b| b.dockerfile.as_ref().map(|c| &c.file), @@ -229,7 +289,7 @@ impl CrossToml 
{ } /// Returns the `target.{}.dockerfile.context` part of `Cross.toml` - pub fn dockerfile_context(&self, target: &Target) -> (Option<&String>, Option<&String>) { + pub fn dockerfile_context(&self, target: &Target) -> ConfVal<&String> { self.get_ref( target, |b| b.dockerfile.as_ref().and_then(|c| c.context.as_ref()), @@ -254,27 +314,54 @@ impl CrossToml { } /// Returns the `build.dockerfile.pre-build` and `target.{}.dockerfile.pre-build` part of `Cross.toml` - pub fn pre_build(&self, target: &Target) -> (Option<&PreBuild>, Option<&PreBuild>) { + pub fn pre_build(&self, target: &Target) -> ConfVal<&PreBuild> { self.get_ref(target, |b| b.pre_build.as_ref(), |t| t.pre_build.as_ref()) } /// Returns the `target.{}.runner` part of `Cross.toml` - pub fn runner(&self, target: &Target) -> Option { - self.get_string(target, |_| None, |t| t.runner.as_ref()) + pub fn runner(&self, target: &Target) -> Option<&String> { + self.get_target(target).and_then(|t| t.runner.as_ref()) } /// Returns the `build.xargo` or the `target.{}.xargo` part of `Cross.toml` - pub fn xargo(&self, target: &Target) -> (Option, Option) { + pub fn xargo(&self, target: &Target) -> ConfVal { self.get_value(target, |b| b.xargo, |t| t.xargo) } /// Returns the `build.build-std` or the `target.{}.build-std` part of `Cross.toml` - pub fn build_std(&self, target: &Target) -> (Option, Option) { - self.get_value(target, |b| b.build_std, |t| t.build_std) + pub fn build_std(&self, target: &Target) -> ConfVal<&BuildStd> { + self.get_ref(target, |b| b.build_std.as_ref(), |t| t.build_std.as_ref()) + } + + /// Returns the `{}.zig` or `{}.zig.version` part of `Cross.toml` + pub fn zig(&self, target: &Target) -> ConfVal { + self.get_value( + target, + |b| b.zig.as_ref().and_then(|z| z.enable), + |t| t.zig.as_ref().and_then(|z| z.enable), + ) + } + + /// Returns the `{}.zig` or `{}.zig.version` part of `Cross.toml` + pub fn zig_version(&self, target: &Target) -> ConfVal { + self.get_value( + target, + |b| b.zig.as_ref().and_then(|c| c.version.clone()), + |t| t.zig.as_ref().and_then(|c| c.version.clone()), + ) + } + + /// Returns the `{}.zig.image` part of `Cross.toml` + pub fn zig_image(&self, target: &Target) -> ConfVal { + self.get_value( + target, + |b| b.zig.as_ref().and_then(|c| c.image.clone()), + |t| t.zig.as_ref().and_then(|c| c.image.clone()), + ) } /// Returns the list of environment variables to pass through for `build` and `target` - pub fn env_passthrough(&self, target: &Target) -> (Option<&[String]>, Option<&[String]>) { + pub fn env_passthrough(&self, target: &Target) -> ConfVal<&[String]> { self.get_ref( target, |build| build.env.passthrough.as_deref(), @@ -283,7 +370,7 @@ impl CrossToml { } /// Returns the list of environment variables to pass through for `build` and `target` - pub fn env_volumes(&self, target: &Target) -> (Option<&[String]>, Option<&[String]>) { + pub fn env_volumes(&self, target: &Target) -> ConfVal<&[String]> { self.get_ref( target, |build| build.env.volumes.as_deref(), @@ -304,27 +391,15 @@ impl CrossToml { self.targets.get(target) } - fn get_string<'a>( - &'a self, - target: &Target, - get_build: impl Fn(&'a CrossBuildConfig) -> Option<&'a String>, - get_target: impl Fn(&'a CrossTargetConfig) -> Option<&'a String>, - ) -> Option { - self.get_target(target) - .and_then(get_target) - .or_else(|| get_build(&self.build)) - .map(ToOwned::to_owned) - } - fn get_value( &self, target_triple: &Target, get_build: impl Fn(&CrossBuildConfig) -> Option, get_target: impl Fn(&CrossTargetConfig) -> Option, - ) -> (Option, 
Option) { + ) -> ConfVal { let build = get_build(&self.build); let target = self.get_target(target_triple).and_then(get_target); - (build, target) + ConfVal::new(build, target) } fn get_ref( @@ -332,10 +407,10 @@ impl CrossToml { target_triple: &Target, get_build: impl Fn(&CrossBuildConfig) -> Option<&T>, get_target: impl Fn(&CrossTargetConfig) -> Option<&T>, - ) -> (Option<&T>, Option<&T>) { + ) -> ConfVal<&T> { let build = get_build(&self.build); let target = self.get_target(target_triple).and_then(get_target); - (build, target) + ConfVal::new(build, target) } } @@ -452,8 +527,72 @@ where deserializer.deserialize_any(StringOrStringVec(PhantomData)) } +fn opt_string_bool_or_struct<'de, T, D>(deserializer: D) -> Result, D::Error> +where + T: Deserialize<'de> + From + std::str::FromStr, + D: serde::Deserializer<'de>, +{ + use std::{fmt, marker::PhantomData}; + + use serde::de::{self, MapAccess, Visitor}; + + struct StringBoolOrStruct(PhantomData T>); + + impl<'de, T> Visitor<'de> for StringBoolOrStruct + where + T: Deserialize<'de> + From + std::str::FromStr, + { + type Value = Option; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("string, bool, or map") + } + + fn visit_bool(self, value: bool) -> Result + where + E: de::Error, + { + Ok(Some(From::from(value))) + } + + fn visit_str(self, value: &str) -> Result + where + E: de::Error, + { + Ok(FromStr::from_str(value).ok()) + } + + fn visit_map(self, map: M) -> Result + where + M: MapAccess<'de>, + { + let t: Result = + Deserialize::deserialize(de::value::MapAccessDeserializer::new(map)); + t.map(Some) + } + + fn visit_none(self) -> Result + where + E: de::Error, + { + Ok(None) + } + + fn visit_unit(self) -> Result + where + E: de::Error, + { + Ok(None) + } + } + + deserializer.deserialize_any(StringBoolOrStruct(PhantomData)) +} + #[cfg(test)] mod tests { + use crate::docker::{ImagePlatform, ImageReference}; + use super::*; use crate::shell; @@ -463,9 +602,9 @@ mod tests { }; } - macro_rules! s { + macro_rules! p { ($x:literal) => { - $x.to_owned() + $x.parse()? 
}; } @@ -475,7 +614,7 @@ mod tests { targets: HashMap::new(), build: CrossBuildConfig::default(), }; - let (parsed_cfg, unused) = CrossToml::parse_from_cross("", &mut m!())?; + let (parsed_cfg, unused) = CrossToml::parse_from_cross_str("", None, &mut m!())?; assert_eq!(parsed_cfg, cfg); assert!(unused.is_empty()); @@ -489,13 +628,14 @@ mod tests { targets: HashMap::new(), build: CrossBuildConfig { env: CrossEnvConfig { - volumes: Some(vec![s!("VOL1_ARG"), s!("VOL2_ARG")]), - passthrough: Some(vec![s!("VAR1"), s!("VAR2")]), + volumes: Some(vec![p!("VOL1_ARG"), p!("VOL2_ARG")]), + passthrough: Some(vec![p!("VAR1"), p!("VAR2")]), }, xargo: Some(true), build_std: None, + zig: None, default_target: None, - pre_build: Some(PreBuild::Lines(vec![s!("echo 'Hello World!'")])), + pre_build: Some(PreBuild::Lines(vec![p!("echo 'Hello World!'")])), dockerfile: None, }, }; @@ -509,7 +649,7 @@ mod tests { volumes = ["VOL1_ARG", "VOL2_ARG"] passthrough = ["VAR1", "VAR2"] "#; - let (parsed_cfg, unused) = CrossToml::parse_from_cross(test_str, &mut m!())?; + let (parsed_cfg, unused) = CrossToml::parse_from_cross_str(test_str, None, &mut m!())?; assert_eq!(parsed_cfg, cfg); assert!(unused.is_empty()); @@ -522,21 +662,44 @@ mod tests { let mut target_map = HashMap::new(); target_map.insert( Target::BuiltIn { - triple: s!("aarch64-unknown-linux-gnu"), + triple: "aarch64-unknown-linux-gnu".into(), }, CrossTargetConfig { env: CrossEnvConfig { - passthrough: Some(vec![s!("VAR1"), s!("VAR2")]), - volumes: Some(vec![s!("VOL1_ARG"), s!("VOL2_ARG")]), + passthrough: Some(vec![p!("VAR1"), p!("VAR2")]), + volumes: Some(vec![p!("VOL1_ARG"), p!("VOL2_ARG")]), }, xargo: Some(false), - build_std: Some(true), - image: Some(s!("test-image")), + build_std: Some(BuildStd::Bool(true)), + zig: None, + image: Some("test-image".into()), runner: None, dockerfile: None, pre_build: Some(PreBuild::Lines(vec![])), }, ); + target_map.insert( + Target::BuiltIn { + triple: "aarch64-unknown-linux-musl".into(), + }, + CrossTargetConfig { + env: CrossEnvConfig { + passthrough: None, + volumes: None, + }, + xargo: None, + build_std: None, + zig: Some(CrossZigConfig { + enable: Some(true), + version: Some(p!("2.17")), + image: Some("zig:local".into()), + }), + image: None, + runner: None, + dockerfile: None, + pre_build: None, + }, + ); let cfg = CrossToml { targets: target_map, @@ -552,8 +715,13 @@ mod tests { build-std = true image = "test-image" pre-build = [] + + [target.aarch64-unknown-linux-musl.zig] + enable = true + version = "2.17" + image = "zig:local" "#; - let (parsed_cfg, unused) = CrossToml::parse_from_cross(test_str, &mut m!())?; + let (parsed_cfg, unused) = CrossToml::parse_from_cross_str(test_str, None, &mut m!())?; assert_eq!(parsed_cfg, cfg); assert!(unused.is_empty()); @@ -566,22 +734,28 @@ mod tests { let mut target_map = HashMap::new(); target_map.insert( Target::BuiltIn { - triple: s!("aarch64-unknown-linux-gnu"), + triple: "aarch64-unknown-linux-gnu".into(), }, CrossTargetConfig { xargo: Some(false), build_std: None, - image: None, + zig: None, + image: Some(PossibleImage { + reference: ImageReference::Name("test-image".to_owned()), + toolchain: vec![ImagePlatform::from_target( + "aarch64-unknown-linux-musl".into(), + )?], + }), dockerfile: Some(CrossTargetDockerfileConfig { - file: s!("Dockerfile.test"), + file: p!("Dockerfile.test"), context: None, build_args: None, }), - pre_build: Some(PreBuild::Lines(vec![s!("echo 'Hello'")])), + pre_build: Some(PreBuild::Lines(vec![p!("echo 'Hello'")])), runner: None, env: 
CrossEnvConfig { passthrough: None, - volumes: Some(vec![s!("VOL")]), + volumes: Some(vec![p!("VOL")]), }, }, ); @@ -595,6 +769,16 @@ mod tests { }, xargo: Some(true), build_std: None, + zig: Some(CrossZigConfig { + enable: None, + version: None, + image: Some(PossibleImage { + reference: ImageReference::Name("zig:local".to_owned()), + toolchain: vec![ImagePlatform::from_target( + "aarch64-unknown-linux-gnu".into(), + )?], + }), + }), default_target: None, pre_build: Some(PreBuild::Lines(vec![])), dockerfile: None, @@ -606,6 +790,10 @@ mod tests { xargo = true pre-build = [] + [build.zig.image] + name = "zig:local" + toolchain = ["aarch64-unknown-linux-gnu"] + [build.env] passthrough = [] @@ -613,11 +801,13 @@ mod tests { xargo = false dockerfile = "Dockerfile.test" pre-build = ["echo 'Hello'"] + image.name = "test-image" + image.toolchain = ["aarch64-unknown-linux-musl"] [target.aarch64-unknown-linux-gnu.env] volumes = ["VOL"] "#; - let (parsed_cfg, unused) = CrossToml::parse_from_cross(test_str, &mut m!())?; + let (parsed_cfg, unused) = CrossToml::parse_from_cross_str(test_str, None, &mut m!())?; assert_eq!(parsed_cfg, cfg); assert!(unused.is_empty()); @@ -636,7 +826,7 @@ mod tests { cross = "1.2.3" "#; - let res = CrossToml::parse_from_cargo(test_str, &mut m!())?; + let res = CrossToml::parse_from_cargo_package_str(test_str, &mut m!())?; assert!(res.is_none()); Ok(()) @@ -653,6 +843,7 @@ mod tests { }, build_std: None, xargo: Some(true), + zig: None, default_target: None, pre_build: None, dockerfile: None, @@ -671,7 +862,9 @@ mod tests { xargo = true "#; - if let Some((parsed_cfg, _unused)) = CrossToml::parse_from_cargo(test_str, &mut m!())? { + if let Some((parsed_cfg, _unused)) = + CrossToml::parse_from_cargo_package_str(test_str, &mut m!())? + { assert_eq!(parsed_cfg, cfg); } else { panic!("Parsing result is None"); @@ -680,6 +873,29 @@ mod tests { Ok(()) } + #[test] + pub fn fully_populated_roundtrip() -> Result<()> { + let cfg = r#" + [target.a] + xargo = false + build-std = true + image.name = "local" + image.toolchain = ["x86_64-unknown-linux-gnu"] + dockerfile.file = "Dockerfile" + dockerfile.context = ".." 
+ pre-build = ["sh"] + zig = true + + [target.b] + pre-build = "sh" + zig = "2.17" + "#; + + let (cfg, _) = CrossToml::parse_from_cross_str(cfg, None, &mut m!())?; + serde_json::from_value::(serde_json::to_value(cfg)?)?; + Ok(()) + } + #[test] pub fn merge() -> Result<()> { let cfg1_str = r#" @@ -723,14 +939,14 @@ mod tests { [target.target3] xargo = false build-std = true - image = "test-image3" + image = "@sha256:test-image3" [target.target3.env] volumes = ["VOL3_ARG"] passthrough = ["VAR3"] [build] - build-std = true + build-std = ["core", "alloc"] xargo = false default-target = "aarch64-unknown-linux-gnu" @@ -762,14 +978,14 @@ mod tests { [target.target3] xargo = false build-std = true - image = "test-image3" + image = "@sha256:test-image3" [target.target3.env] volumes = ["VOL3_ARG"] passthrough = ["VAR3"] [build] - build-std = true + build-std = ["core", "alloc"] xargo = false default-target = "aarch64-unknown-linux-gnu" @@ -779,9 +995,9 @@ mod tests { "#; // Parses configs - let (cfg1, _) = CrossToml::parse_from_cross(cfg1_str, &mut m!())?; - let (cfg2, _) = CrossToml::parse_from_cross(cfg2_str, &mut m!())?; - let (cfg_expected, _) = CrossToml::parse_from_cross(cfg_expected_str, &mut m!())?; + let (cfg1, _) = CrossToml::parse_from_cross_str(cfg1_str, None, &mut m!())?; + let (cfg2, _) = CrossToml::parse_from_cross_str(cfg2_str, None, &mut m!())?; + let (cfg_expected, _) = CrossToml::parse_from_cross_str(cfg_expected_str, None, &mut m!())?; // Merges config and compares let cfg_merged = cfg1.merge(cfg2)?; @@ -790,41 +1006,47 @@ mod tests { // need to test individual values. i've broken this down into // tests on values for better error reporting let build = &cfg_expected.build; - assert_eq!(build.build_std, Some(true)); + assert_eq!( + build.build_std, + Some(BuildStd::Crates(vec![ + "core".to_owned(), + "alloc".to_owned() + ])) + ); assert_eq!(build.xargo, Some(false)); - assert_eq!(build.default_target, Some(s!("aarch64-unknown-linux-gnu"))); + assert_eq!(build.default_target, Some(p!("aarch64-unknown-linux-gnu"))); assert_eq!(build.pre_build, None); assert_eq!(build.dockerfile, None); - assert_eq!(build.env.passthrough, Some(vec![s!("VAR3"), s!("VAR4")])); + assert_eq!(build.env.passthrough, Some(vec![p!("VAR3"), p!("VAR4")])); assert_eq!(build.env.volumes, Some(vec![])); let targets = &cfg_expected.targets; let aarch64 = &targets[&Target::new_built_in("aarch64-unknown-linux-gnu")]; - assert_eq!(aarch64.build_std, Some(true)); + assert_eq!(aarch64.build_std, Some(BuildStd::Bool(true))); assert_eq!(aarch64.xargo, Some(false)); - assert_eq!(aarch64.image, Some(s!("test-image1"))); + assert_eq!(aarch64.image, Some(p!("test-image1"))); assert_eq!(aarch64.pre_build, None); assert_eq!(aarch64.dockerfile, None); - assert_eq!(aarch64.env.passthrough, Some(vec![s!("VAR1")])); - assert_eq!(aarch64.env.volumes, Some(vec![s!("VOL1_ARG")])); + assert_eq!(aarch64.env.passthrough, Some(vec![p!("VAR1")])); + assert_eq!(aarch64.env.volumes, Some(vec![p!("VOL1_ARG")])); let target2 = &targets[&Target::new_custom("target2")]; - assert_eq!(target2.build_std, Some(false)); + assert_eq!(target2.build_std, Some(BuildStd::Bool(false))); assert_eq!(target2.xargo, Some(false)); - assert_eq!(target2.image, Some(s!("test-image2-precedence"))); + assert_eq!(target2.image, Some(p!("test-image2-precedence"))); assert_eq!(target2.pre_build, None); assert_eq!(target2.dockerfile, None); - assert_eq!(target2.env.passthrough, Some(vec![s!("VAR2_PRECEDENCE")])); - assert_eq!(target2.env.volumes, 
Some(vec![s!("VOL2_ARG_PRECEDENCE")])); + assert_eq!(target2.env.passthrough, Some(vec![p!("VAR2_PRECEDENCE")])); + assert_eq!(target2.env.volumes, Some(vec![p!("VOL2_ARG_PRECEDENCE")])); let target3 = &targets[&Target::new_custom("target3")]; - assert_eq!(target3.build_std, Some(true)); + assert_eq!(target3.build_std, Some(BuildStd::Bool(true))); assert_eq!(target3.xargo, Some(false)); - assert_eq!(target3.image, Some(s!("test-image3"))); + assert_eq!(target3.image, Some(p!("@sha256:test-image3"))); assert_eq!(target3.pre_build, None); assert_eq!(target3.dockerfile, None); - assert_eq!(target3.env.passthrough, Some(vec![s!("VAR3")])); - assert_eq!(target3.env.volumes, Some(vec![s!("VOL3_ARG")])); + assert_eq!(target3.env.passthrough, Some(vec![p!("VAR3")])); + assert_eq!(target3.env.volumes, Some(vec![p!("VOL3_ARG")])); Ok(()) } @@ -838,11 +1060,14 @@ mod tests { [build] pre-build = ["echo Hello World"] "#; - let (toml, unused) = CrossToml::parse_from_cross(toml_str, &mut m!())?; + let (toml, unused) = CrossToml::parse_from_cross_str(toml_str, None, &mut m!())?; assert!(unused.is_empty()); assert!(matches!( toml.pre_build(&Target::new_built_in("aarch64-unknown-linux-gnu")), - (Some(&PreBuild::Lines(_)), Some(&PreBuild::Single { .. })) + ConfVal { + build: Some(&PreBuild::Lines(_)), + target: Some(&PreBuild::Single { .. }), + }, )); Ok(()) } diff --git a/src/docker/build.rs b/src/docker/build.rs new file mode 100644 index 000000000..9a98c2839 --- /dev/null +++ b/src/docker/build.rs @@ -0,0 +1,121 @@ +use std::env; +use std::process::Command; +use std::str::FromStr; + +use super::engine::Engine; +use crate::errors::*; +use crate::shell::Verbosity; + +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub enum Progress { + Plain, + Auto, + Tty, +} + +impl FromStr for Progress { + type Err = eyre::ErrReport; + + fn from_str(progress: &str) -> Result { + Ok(match progress { + "plain" => Progress::Plain, + "auto" => Progress::Auto, + "tty" => Progress::Tty, + s => eyre::bail!("unexpect progress type: expected plain, auto, or tty and got {s}"), + }) + } +} + +impl From for &str { + fn from(progress: Progress) -> Self { + match progress { + Progress::Plain => "plain", + Progress::Auto => "auto", + Progress::Tty => "tty", + } + } +} + +pub trait BuildCommandExt { + fn invoke_build_command(&mut self) -> &mut Self; + fn progress(&mut self, progress: Option) -> Result<&mut Self>; + fn verbose(&mut self, verbosity: Verbosity) -> &mut Self; + fn disable_scan_suggest(&mut self) -> &mut Self; + fn cross_labels(&mut self, target: &str, platform: &str) -> &mut Self; +} + +impl BuildCommandExt for Command { + fn invoke_build_command(&mut self) -> &mut Self { + match Engine::has_buildkit() { + true => self.args(["buildx", "build"]), + false => self.arg("build"), + } + } + + fn progress(&mut self, progress: Option) -> Result<&mut Self> { + let progress: Progress = match progress { + None => env::var("CROSS_BUILD_PROGRESS") + .as_deref() + .unwrap_or("auto") + .parse()?, + Some(progress) => progress, + }; + Ok(self.args(["--progress", progress.into()])) + } + + fn verbose(&mut self, verbosity: Verbosity) -> &mut Self { + match verbosity { + Verbosity::Verbose(2..) 
=> self.args(["--build-arg", "VERBOSE=1"]), + _ => self, + } + } + + fn disable_scan_suggest(&mut self) -> &mut Self { + self.env("DOCKER_SCAN_SUGGEST", "false") + } + + fn cross_labels(&mut self, target: &str, platform: &str) -> &mut Self { + self.args([ + "--label", + &format!("{}.for-cross-target={target}", crate::CROSS_LABEL_DOMAIN,), + ]); + self.args([ + "--label", + &format!("{}.runs-with={platform}", crate::CROSS_LABEL_DOMAIN,), + ]) + } +} + +pub trait BuildResultExt { + fn engine_warning(self, engine: &Engine) -> Result<()>; + fn buildkit_warning(self) -> Result<()>; +} + +impl BuildResultExt for Result<()> { + fn engine_warning(self, engine: &Engine) -> Result<()> { + self.with_warning(|| { + format!( + "call to {} failed", + engine + .path + .file_name() + .and_then(|s| s.to_str()) + .map_or_else(|| "container engine", |s| s) + ) + }) + } + + fn buildkit_warning(mut self) -> Result<()> { + if Engine::has_buildkit() { + self = self + .suggestion("is `buildx` available for the container engine?") + .with_note(|| { + format!( + "disable the `buildkit` dependency optionally with `{}=1`", + Engine::CROSS_CONTAINER_ENGINE_NO_BUILDKIT_ENV + ) + }); + } + self + } +} diff --git a/src/docker/custom.rs b/src/docker/custom.rs index 31e2df6db..a58b2370c 100644 --- a/src/docker/custom.rs +++ b/src/docker/custom.rs @@ -2,14 +2,17 @@ use std::io::Write; use std::path::PathBuf; use std::str::FromStr; -use crate::docker::{DockerOptions, DockerPaths}; +use crate::docker::{self, DockerOptions, DockerPaths}; use crate::shell::MessageInfo; -use crate::{docker, CargoMetadata, Target}; use crate::{errors::*, file, CommandExt, ToUtf8}; +use crate::{CargoMetadata, TargetTriple}; -use super::{image_name, parse_docker_opts, path_hash}; +use super::{ + create_target_dir, get_image_name, path_hash, BuildCommandExt, BuildResultExt, Engine, + ImagePlatform, +}; -pub const CROSS_CUSTOM_DOCKERFILE_IMAGE_PREFIX: &str = "cross-custom-"; +pub const CROSS_CUSTOM_DOCKERFILE_IMAGE_PREFIX: &str = "localhost/cross-rs/cross-custom-"; #[derive(Debug, PartialEq, Eq)] pub enum Dockerfile<'a> { @@ -17,13 +20,15 @@ pub enum Dockerfile<'a> { path: &'a str, context: Option<&'a str>, name: Option<&'a str>, + runs_with: &'a ImagePlatform, }, Custom { content: String, + runs_with: &'a ImagePlatform, }, } -#[derive(Debug, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)] pub enum PreBuild { /// A path to a file to copy or a single line to `RUN` if line comes from env Single { line: String, env: bool }, @@ -31,6 +36,21 @@ pub enum PreBuild { Lines(Vec), } +impl serde::Serialize for PreBuild { + fn serialize(&self, serializer: S) -> Result { + match self { + PreBuild::Single { line, .. 
} => serializer.serialize_str(line), + PreBuild::Lines(lines) => { + use serde::ser::SerializeSeq; + let mut seq = serializer.serialize_seq(Some(lines.len()))?; + for line in lines { + seq.serialize_element(line)?; + } + seq.end() + } + } + } +} impl FromStr for PreBuild { type Err = std::convert::Infallible; @@ -68,17 +88,16 @@ impl<'a> Dockerfile<'a> { build_args: impl IntoIterator, impl AsRef)>, msg_info: &mut MessageInfo, ) -> Result { - let mut docker_build = docker::subcommand(&options.engine, "build"); - docker_build.current_dir(paths.host_root()); - docker_build.env("DOCKER_SCAN_SUGGEST", "false"); - docker_build.args([ - "--label", - &format!( - "{}.for-cross-target={}", - crate::CROSS_LABEL_DOMAIN, - options.target, - ), - ]); + let uses_zig = options.command_variant.uses_zig(); + let mut docker_build = options.engine.command(); + docker_build.invoke_build_command(); + docker_build.disable_scan_suggest(); + self.runs_with() + .specify_platform(&options.engine, &mut docker_build); + + docker_build.progress(None)?; + docker_build.verbose(msg_info.verbosity); + docker_build.cross_labels(options.target.triple(), self.runs_with().target.triple()); docker_build.args([ "--label", @@ -89,25 +108,26 @@ impl<'a> Dockerfile<'a> { ), ]); - let image_name = self.image_name(&options.target, &paths.metadata)?; + let image_name = self.image_name(options.target.target(), &paths.metadata)?; docker_build.args(["--tag", &image_name]); for (key, arg) in build_args { docker_build.args(["--build-arg", &format!("{}={}", key.as_ref(), arg.as_ref())]); } - if let Some(arch) = options.target.deb_arch() { + if let Some(arch) = options.target.target().deb_arch() { docker_build.args(["--build-arg", &format!("CROSS_DEB_ARCH={arch}")]); } let path = match self { Dockerfile::File { path, .. } => PathBuf::from(path), - Dockerfile::Custom { content } => { - let path = paths + Dockerfile::Custom { content, .. } => { + let target_dir = paths .metadata .target_directory - .join(options.target.to_string()) - .join(format!("Dockerfile.{}-custom", &options.target)); + .join(options.target.to_string()); + create_target_dir(&target_dir)?; + let path = target_dir.join(format!("Dockerfile.{}-custom", &options.target)); { let mut file = file::write_file(&path, true)?; file.write_all(content.as_bytes())?; @@ -117,7 +137,9 @@ impl<'a> Dockerfile<'a> { }; if matches!(self, Dockerfile::File { .. }) { - if let Ok(cross_base_image) = self::image_name(&options.config, &options.target) { + if let Ok(cross_base_image) = + self::get_image_name(&options.config, &options.target, uses_zig) + { docker_build.args([ "--build-arg", &format!("CROSS_BASE_IMAGE={cross_base_image}"), @@ -125,23 +147,44 @@ impl<'a> Dockerfile<'a> { } } + // note that this is always relative to the PWD: if we have + // `$workspace_root/Dockerfile`, then running a build + // `PWD=$workspace_root/src/ cross build` would require + // the Dockerfile path to be specified as `../Dockerfile`. 
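For orientation, a sketch of the invocation this function assembles when BuildKit is available and the engine is docker (illustrative only; the placeholders are not literal values, and the exact flags depend on the engine, the options, and the Dockerfile variant):

    docker buildx build --progress auto \
        --label <CROSS_LABEL_DOMAIN>.for-cross-target=<target> \
        --tag localhost/cross-rs/cross-custom-<package>:<triple>-<hash> \
        --build-arg CROSS_DEB_ARCH=<arch> \
        --file <dockerfile> --output type=docker <context>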
docker_build.args(["--file".into(), path]); - if let Ok(build_opts) = std::env::var("CROSS_BUILD_OPTS") { - // FIXME: Use shellwords - docker_build.args(parse_docker_opts(&build_opts)?); + if let Some(build_opts) = options.config.build_opts() { + docker_build.args(Engine::parse_opts(&build_opts)?); } + + let has_output = options.config.build_opts().map_or(false, |opts| { + opts.contains("--load") || opts.contains("--output") + }); + if options.engine.kind.is_docker() && !has_output { + docker_build.args(["--output", "type=docker"]); + }; + if let Some(context) = self.context() { - docker_build.arg(&context); + docker_build.arg(context); } else { docker_build.arg(paths.host_root()); } - docker_build.run(msg_info, true)?; + // FIXME: Inspect the error message, while still inheriting stdout on verbose mode to + // conditionally apply this suggestion and note. This could then inspect if a help string is emitted, + // if the daemon is not running, etc. + docker_build + .run(msg_info, true) + .engine_warning(&options.engine) + .buildkit_warning()?; Ok(image_name) } - pub fn image_name(&self, target_triple: &Target, metadata: &CargoMetadata) -> Result { + pub fn image_name( + &self, + target_triple: &TargetTriple, + metadata: &CargoMetadata, + ) -> Result { match self { Dockerfile::File { name: Some(name), .. @@ -150,7 +193,7 @@ impl<'a> Dockerfile<'a> { "{}{package_name}:{target_triple}-{path_hash}{custom}", CROSS_CUSTOM_DOCKERFILE_IMAGE_PREFIX, package_name = docker_package_name(metadata), - path_hash = path_hash(&metadata.workspace_root)?, + path_hash = path_hash(&metadata.workspace_root, docker::PATH_HASH_SHORT)?, custom = if matches!(self, Self::File { .. }) { "" } else { @@ -169,6 +212,12 @@ impl<'a> Dockerfile<'a> { _ => None, } } + fn runs_with(&self) -> &ImagePlatform { + match self { + Dockerfile::File { runs_with, .. } => runs_with, + Dockerfile::Custom { runs_with, .. } => runs_with, + } + } } fn docker_package_name(metadata: &CargoMetadata) -> String { @@ -198,7 +247,7 @@ fn docker_tag_name(file_name: &str) -> String { let mut consecutive_underscores = 0; for c in file_name.chars() { match c { - 'a'..='z' | '.' | '-' => { + 'a'..='z' | '0'..='9' | '.' 
| '-' => { consecutive_underscores = 0; result.push(c); } @@ -217,7 +266,12 @@ fn docker_tag_name(file_name: &str) -> String { } } - // in case all characters were invalid, use a non-empty filename + // in case our result ends in an invalid last char `-` or `.` + // we remove + result = result.trim_end_matches(&['.', '-']).to_owned(); + + // in case all characters were invalid or we had all non-ASCII + // characters followed by a `-` or `.`, we use a non-empty filename if result.is_empty() { result = "empty".to_owned(); } @@ -251,5 +305,8 @@ mod tests { docker_tag_name("pAcKaGe---test.name"), s!("package---test.name") ); + + assert_eq!(docker_tag_name("foo-123"), s!("foo-123")); + assert_eq!(docker_tag_name("foo-123-"), s!("foo-123")); } } diff --git a/src/docker/engine.rs b/src/docker/engine.rs index 728f41274..0e42cb3f1 100644 --- a/src/docker/engine.rs +++ b/src/docker/engine.rs @@ -3,9 +3,11 @@ use std::path::{Path, PathBuf}; use std::process::Command; use crate::config::bool_from_envvar; -use crate::errors::*; use crate::extensions::CommandExt; use crate::shell::MessageInfo; +use crate::{errors::*, OutputExt}; + +use super::{Architecture, ContainerOs}; pub const DOCKER: &str = "docker"; pub const PODMAN: &str = "podman"; @@ -15,18 +17,61 @@ pub enum EngineType { Docker, Podman, PodmanRemote, + Nerdctl, Other, } +impl EngineType { + /// Returns `true` if the engine type is [`Podman`](Self::Podman) or [`PodmanRemote`](Self::PodmanRemote). + #[must_use] + pub const fn is_podman(&self) -> bool { + matches!(self, Self::Podman | Self::PodmanRemote) + } + + /// Returns `true` if the engine type is [`Docker`](EngineType::Docker). + #[must_use] + pub const fn is_docker(&self) -> bool { + matches!(self, Self::Docker) + } + + /// Returns `true` if the build command supports the `--output` flag. + #[must_use] + pub const fn supports_output_flag(&self) -> bool { + !matches!(self, Self::Other) + } + + /// Returns `true` if the build command supports the `--pull` flag. + #[must_use] + pub const fn supports_pull_flag(&self) -> bool { + !matches!(self, Self::Nerdctl | Self::Other) + } + + /// Returns `true` if the build command supports the `--cache-from type=` key. + /// + /// Some container engines, especially podman, do not support the `type` + /// key of `--cache-from` during the image build steps. They also do + /// not support any tags for the `--cache-from` steps either. 
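    /// For example (hypothetical invocations): docker with BuildKit
    /// accepts `--cache-from type=local,src=/path/to/cache`, whereas
    /// podman only accepts a bare image reference such as
    /// `--cache-from my/image`.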
See: + /// + #[must_use] + pub const fn supports_cache_from_type(&self) -> bool { + matches!(self, Self::Docker | Self::Nerdctl) + } +} + #[derive(Clone, Debug, PartialEq, Eq)] pub struct Engine { pub kind: EngineType, pub path: PathBuf, pub in_docker: bool, + pub arch: Option, + pub os: Option, pub is_remote: bool, + pub is_rootless: bool, } impl Engine { + pub const CROSS_CONTAINER_ENGINE_NO_BUILDKIT_ENV: &'static str = + "CROSS_CONTAINER_ENGINE_NO_BUILDKIT"; pub fn new( in_docker: Option, is_remote: Option, @@ -45,17 +90,21 @@ impl Engine { is_remote: Option, msg_info: &mut MessageInfo, ) -> Result { - let kind = get_engine_type(&path, msg_info)?; let in_docker = match in_docker { Some(v) => v, None => Self::in_docker(msg_info)?, }; + let (kind, arch, os) = get_engine_info(&path, msg_info)?; + let is_rootless = is_rootless(kind).unwrap_or_else(|| is_docker_rootless(&path, msg_info)); let is_remote = is_remote.unwrap_or_else(Self::is_remote); Ok(Engine { path, kind, in_docker, + arch, + os, is_remote, + is_rootless, }) } @@ -88,27 +137,194 @@ impl Engine { .map(|s| bool_from_envvar(&s)) .unwrap_or_default() } + + #[must_use] + pub fn has_buildkit() -> bool { + !env::var(Self::CROSS_CONTAINER_ENGINE_NO_BUILDKIT_ENV) + .map(|x| bool_from_envvar(&x)) + .unwrap_or_default() + } +} + +fn is_rootless(kind: EngineType) -> Option { + env::var("CROSS_ROOTLESS_CONTAINER_ENGINE") + .ok() + .and_then(|s| match s.as_ref() { + "auto" => None, + b => Some(bool_from_envvar(b)), + }) + .or_else(|| (!kind.is_docker()).then_some(true)) +} + +#[must_use] +fn is_docker_rootless(ce: &Path, msg_info: &mut MessageInfo) -> bool { + let mut cmd = Command::new(ce); + cmd.args(["info", "-f", "{{.SecurityOptions}}"]) + .run_and_get_output(msg_info) + .ok() + .and_then(|cmd| cmd.stdout().ok()) + .map(|out| { + out.to_lowercase() + .replace(['[', ' ', ']'], ",") + .contains(",name=rootless,") + }) + .unwrap_or_default() +} + +#[test] +fn various_is_rootless_configs() { + let var = "CROSS_ROOTLESS_CONTAINER_ENGINE"; + let old = env::var(var); + env::remove_var(var); + + assert!(!is_rootless(EngineType::Docker).unwrap_or(false)); + assert!(is_rootless(EngineType::Docker).unwrap_or(true)); + + assert_eq!(is_rootless(EngineType::Docker), None); + assert_eq!(is_rootless(EngineType::Podman), Some(true)); + assert_eq!(is_rootless(EngineType::PodmanRemote), Some(true)); + assert_eq!(is_rootless(EngineType::Other), Some(true)); + + env::set_var(var, "0"); + assert_eq!(is_rootless(EngineType::Docker), Some(false)); + assert_eq!(is_rootless(EngineType::Podman), Some(false)); + assert_eq!(is_rootless(EngineType::PodmanRemote), Some(false)); + assert_eq!(is_rootless(EngineType::Other), Some(false)); + + env::set_var(var, "1"); + assert_eq!(is_rootless(EngineType::Docker), Some(true)); + assert_eq!(is_rootless(EngineType::Podman), Some(true)); + assert_eq!(is_rootless(EngineType::PodmanRemote), Some(true)); + assert_eq!(is_rootless(EngineType::Other), Some(true)); + + env::set_var(var, "auto"); + assert_eq!(is_rootless(EngineType::Docker), None); + assert_eq!(is_rootless(EngineType::Podman), Some(true)); + assert_eq!(is_rootless(EngineType::PodmanRemote), Some(true)); + assert_eq!(is_rootless(EngineType::Other), Some(true)); + + match old { + Ok(v) => env::set_var(var, v), + Err(_) => env::remove_var(var), + } } // determine if the container engine is docker. this fixes issues with // any aliases (#530), and doesn't fail if an executable suffix exists. 
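For concreteness, the detection below reduces to queries like the following (illustrative; the sample outputs depend on the host and engine):

    # docker: server OS and arch via a Go template, split on ",,,"
    docker version -f '{{ .Server.Os }},,,{{ .Server.Arch }}'   # e.g. linux,,,amd64
    # podman fallback: a single "os/arch" string, split on "/"
    podman info -f '{{ .Version.OsArch }}'                      # e.g. linux/arm64

`engine_info` lowercases and trims the stdout, then splits once on the given separator to recover `(ContainerOs, Architecture)`.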
-fn get_engine_type(ce: &Path, msg_info: &mut MessageInfo) -> Result { - let stdout = Command::new(ce) +fn get_engine_info( + ce: &Path, + msg_info: &mut MessageInfo, +) -> Result<(EngineType, Option, Option)> { + let stdout_help = Command::new(ce) .arg("--help") .run_and_get_stdout(msg_info)? .to_lowercase(); - if stdout.contains("podman-remote") { - Ok(EngineType::PodmanRemote) - } else if stdout.contains("podman") { - Ok(EngineType::Podman) - } else if stdout.contains("docker") && !stdout.contains("emulate") { - Ok(EngineType::Docker) + let kind = if stdout_help.contains("podman-remote") { + EngineType::PodmanRemote + } else if stdout_help.contains("podman") { + EngineType::Podman + } else if stdout_help.contains("nerdctl") { + EngineType::Nerdctl + } else if stdout_help.contains("docker") && !stdout_help.contains("emulate") { + EngineType::Docker } else { - Ok(EngineType::Other) + EngineType::Other + }; + + // this can fail: podman can give partial output + // linux,,,Error: template: version:1:15: executing "version" at <.Arch>: + // can't evaluate field Arch in type *define.Version + let os_arch_server = engine_info( + ce, + &["version", "-f", "{{ .Server.Os }},,,{{ .Server.Arch }}"], + ",,,", + msg_info, + ); + + let (os_arch_other, os_arch_server_result) = match os_arch_server { + Ok(Some(os_arch)) => (Ok(Some(os_arch)), None), + result => { + if kind.is_podman() { + (get_podman_info(ce, msg_info), result.err()) + } else { + (get_custom_info(ce, msg_info), result.err()) + } + } + }; + + let os_arch = match (os_arch_other, os_arch_server_result) { + (Ok(os_arch), _) => os_arch, + (Err(e), Some(server_err)) => return Err(server_err.to_section_report().with_error(|| e)), + (Err(e), None) => return Err(e.to_section_report()), + }; + + let (os, arch) = os_arch.map_or(<_>::default(), |(os, arch)| (Some(os), Some(arch))); + Ok((kind, arch, os)) +} + +#[derive(Debug, thiserror::Error)] +pub enum EngineInfoError { + #[error(transparent)] + Eyre(eyre::Report), + #[error("could not get os and arch")] + CommandError(#[from] CommandError), +} + +impl EngineInfoError { + pub fn to_section_report(self) -> eyre::Report { + match self { + EngineInfoError::Eyre(e) => e, + EngineInfoError::CommandError(e) => { + e.to_section_report().wrap_err("could not get os and arch") + } + } } } +/// Get engine info +fn engine_info( + ce: &Path, + args: &[&str], + sep: &str, + msg_info: &mut MessageInfo, +) -> Result, EngineInfoError> { + let mut cmd = Command::new(ce); + cmd.args(args); + let out = cmd + .run_and_get_output(msg_info) + .map_err(EngineInfoError::Eyre)?; + + cmd.status_result(msg_info, out.status, Some(&out))?; + + out.stdout()? 
+#[derive(Debug, thiserror::Error)] +pub enum EngineInfoError { + #[error(transparent)] + Eyre(eyre::Report), + #[error("could not get os and arch")] + CommandError(#[from] CommandError), +} + +impl EngineInfoError { + pub fn to_section_report(self) -> eyre::Report { + match self { + EngineInfoError::Eyre(e) => e, + EngineInfoError::CommandError(e) => { + e.to_section_report().wrap_err("could not get os and arch") + } + } } } +/// Get the engine os and arch by running `ce` with `args` and splitting +/// the output once on `sep`. +fn engine_info( + ce: &Path, + args: &[&str], + sep: &str, + msg_info: &mut MessageInfo, +) -> Result<Option<(ContainerOs, Architecture)>, EngineInfoError> { + let mut cmd = Command::new(ce); + cmd.args(args); + let out = cmd + .run_and_get_output(msg_info) + .map_err(EngineInfoError::Eyre)?; + + cmd.status_result(msg_info, out.status, Some(&out))?; + + out.stdout()? + .to_lowercase() + .trim() + .split_once(sep) + .map(|(os, arch)| -> Result<_> { Ok((ContainerOs::new(os)?, Architecture::new(arch)?)) }) + .transpose() + .map_err(EngineInfoError::Eyre) +} + +fn get_podman_info( + ce: &Path, + msg_info: &mut MessageInfo, +) -> Result<Option<(ContainerOs, Architecture)>, EngineInfoError> { + engine_info(ce, &["info", "-f", "{{ .Version.OsArch }}"], "/", msg_info) +} + +fn get_custom_info( + ce: &Path, + msg_info: &mut MessageInfo, +) -> Result<Option<(ContainerOs, Architecture)>, EngineInfoError> { + engine_info( + ce, + &["version", "-f", "{{ .Client.Os }},,,{{ .Client.Arch }}"], + ",,,", + msg_info, + ) +} + pub fn get_container_engine() -> Result<PathBuf, which::Error> { if let Ok(ce) = env::var("CROSS_CONTAINER_ENGINE") { which::which(ce) diff --git a/src/docker/image.rs b/src/docker/image.rs new file mode 100644 index 000000000..3d3c15bca --- /dev/null +++ b/src/docker/image.rs @@ -0,0 +1,508 @@ +use std::str::FromStr; + +use serde::{Deserialize, Serialize}; + +use crate::{ + docker::{CROSS_IMAGE, DEFAULT_IMAGE_VERSION}, + errors::*, + shell::MessageInfo, + TargetTriple, +}; + +use super::Engine; + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +pub struct Image { + pub name: String, + // The toolchain triple the image is built for + pub platform: ImagePlatform, +} + +impl std::fmt::Display for Image { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.name) + } +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +pub struct PossibleImage { + #[serde(rename = "name")] + pub reference: ImageReference, + // The toolchain triple the image is built for + pub toolchain: Vec<ImagePlatform>, +} + +impl PossibleImage { + pub fn to_definite_with(&self, engine: &Engine, msg_info: &mut MessageInfo) -> Result<Image> { + let ImageReference::Name(name) = self.reference.clone() else { + eyre::bail!("cannot make definite Image from unqualified PossibleImage"); + }; + + if self.toolchain.is_empty() { + Ok(Image { + name, + platform: ImagePlatform::DEFAULT, + }) + } else { + let platform = if self.toolchain.len() == 1 { + self.toolchain.first().expect("should contain at least one") + } else { + let same_arch = self + .toolchain + .iter() + .filter(|platform| { + &platform.architecture + == engine.arch.as_ref().unwrap_or(&Architecture::Amd64) + }) + .collect::<Vec<_>>(); + + if same_arch.len() == 1 { + // pick the platform with the same architecture + same_arch.first().expect("should contain one element") + } else if let Some(platform) = same_arch + .iter() + .find(|platform| &platform.os == engine.os.as_ref().unwrap_or(&Os::Linux)) + { + *platform + } else if let Some(platform) = + same_arch.iter().find(|platform| platform.os == Os::Linux) + { + // container engine should be fine with linux + platform + } else { + let platform = self + .toolchain + .first() + .expect("should be at least one platform"); + // FIXME: don't throw away the result of `warn` here + msg_info.warn( + format_args!("could not determine what toolchain to use for image, defaulting to `{}`", platform.target), + ).ok(); + platform + } + }; + Ok(Image { + platform: platform.clone(), + name, + }) + } + } +} + +impl<T: AsRef<str>> From<T> for PossibleImage { + fn from(s: T) -> Self { + PossibleImage { + reference: s.as_ref().to_owned().into(), + toolchain: vec![], + } + } +} + +impl FromStr for PossibleImage { + type Err = std::convert::Infallible; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Ok(s.into()) + } +} + +impl std::fmt::Display for PossibleImage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.reference.get()) + } }
+#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)] +#[serde(from = "String", untagged)] +pub enum ImageReference { + /// Partially qualified reference, with or without tag/digest + Name(String), + /// Unqualified reference, only a tag or digest + Identifier(String), + /// Unqualified reference, only a subtarget + Subtarget(String), +} + +impl ImageReference { + pub fn get(&self) -> &str { + match self { + Self::Name(s) => s, + Self::Identifier(s) => s, + Self::Subtarget(s) => s, + } + } + + pub fn ensure_qualified(&mut self, target_name: &str) { + let image_name = match self { + Self::Name(_) => return, + Self::Identifier(id) => { + format!("{CROSS_IMAGE}/{target_name}{id}") + } + Self::Subtarget(sub) => { + format!("{CROSS_IMAGE}/{target_name}:{DEFAULT_IMAGE_VERSION}{sub}") + } + }; + + *self = Self::Name(image_name); + } +} + +impl From<String> for ImageReference { + fn from(s: String) -> Self { + if s.starts_with('-') { + Self::Subtarget(s) + } else if s.starts_with(':') || s.starts_with('@') { + Self::Identifier(s) + } else { + Self::Name(s) + } + } +}
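A quick illustration of how `From<String>` classifies references (example values only):

```rust
#[test]
fn image_reference_classification() {
    // partially/fully qualified name
    let name = ImageReference::from("ghcr.io/cross-rs/zig:0.2.5".to_owned());
    assert_eq!(name, ImageReference::Name("ghcr.io/cross-rs/zig:0.2.5".to_owned()));
    // tag-only (or digest-only, starting with '@') reference
    let tag = ImageReference::from(":main".to_owned());
    assert_eq!(tag, ImageReference::Identifier(":main".to_owned()));
    // subtarget-only reference, qualified later by `ensure_qualified`
    let sub = ImageReference::from("-centos".to_owned());
    assert_eq!(sub, ImageReference::Subtarget("-centos".to_owned()));
}
```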
+/// The architecture/platform to use in the image +/// +/// +#[derive(Debug, Clone, PartialEq, Eq, serde::Deserialize)] +#[serde(try_from = "String")] +pub struct ImagePlatform { + /// CPU architecture, e.g. x86_64, aarch64 + pub architecture: Architecture, + /// The OS, e.g. linux, windows, darwin + pub os: Os, + /// The platform variant, e.g. v8, v7, v6 + pub variant: Option<String>, + pub target: TargetTriple, +} + +impl ImagePlatform { + pub const DEFAULT: Self = ImagePlatform::from_const_target(TargetTriple::DEFAULT); + pub const X86_64_UNKNOWN_LINUX_GNU: Self = + ImagePlatform::from_const_target(TargetTriple::X86_64UnknownLinuxGnu); + pub const AARCH64_UNKNOWN_LINUX_GNU: Self = + ImagePlatform::from_const_target(TargetTriple::Aarch64UnknownLinuxGnu); + + /// Get a representative version of this platform specifier for usage in `--platform` + /// + /// Prefer using [`ImagePlatform::specify_platform`] which will supply the flag if needed + pub fn docker_platform(&self) -> String { + if let Some(variant) = &self.variant { + format!("{}/{}/{variant}", self.os, self.architecture) + } else { + format!("{}/{}", self.os, self.architecture) + } + } +} + +impl Default for ImagePlatform { + fn default() -> ImagePlatform { + ImagePlatform::DEFAULT + } +} + +impl TryFrom<String> for ImagePlatform { + type Error = <Self as std::str::FromStr>::Err; + + fn try_from(value: String) -> Result<Self, Self::Error> { + value.parse() + } +} + +impl Serialize for ImagePlatform { + fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { + serializer.serialize_str(&format!("{}={}", self.docker_platform(), self.target)) + } +} + +impl std::str::FromStr for ImagePlatform { + type Err = eyre::Report; + // [os/arch[/variant]=]toolchain + fn from_str(s: &str) -> Result<Self, Self::Err> { + use serde::de::{ + value::{Error as SerdeError, StrDeserializer}, + IntoDeserializer, + }; + if let Some((platform, toolchain)) = s.split_once('=') { + let image_toolchain = toolchain.into(); + let (os, arch, variant) = if let Some((os, rest)) = platform.split_once('/') { + let os: StrDeserializer<'_, SerdeError> = os.into_deserializer(); + let (arch, variant) = if let Some((arch, variant)) = rest.split_once('/') { + let arch: StrDeserializer<'_, SerdeError> = arch.into_deserializer(); + (arch, Some(variant)) + } else { + let arch: StrDeserializer<'_, SerdeError> = rest.into_deserializer(); + (arch, None) + }; + (os, arch, variant) + } else { + eyre::bail!("invalid platform specified") + }; + Ok(ImagePlatform { + architecture: Architecture::deserialize(arch)?, + os: Os::deserialize(os)?, + variant: variant.map(ToOwned::to_owned), + target: image_toolchain, + }) + } else { + Ok(ImagePlatform::from_target(s.into()) + .wrap_err_with(|| format!("could not map `{s}` to a platform"))?) + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum Architecture { + I386, + #[serde(alias = "x86_64")] + Amd64, + #[serde(alias = "armv7")] + Arm, + #[serde(alias = "aarch64")] + Arm64, + Mips, + Mips64, + Mips64Le, + MipsLe, + #[serde(alias = "powerpc64")] + Ppc64, + Ppc64Le, + #[serde(alias = "riscv64gc")] + Riscv64, + S390x, + Wasm, + #[serde(alias = "loongarch64")] + LoongArch64, +} + +impl Architecture { + pub fn from_target(target: &TargetTriple) -> Result<Self> { + let arch = target + .triple() + .split_once('-') + .ok_or_else(|| eyre::eyre!("malformed target"))? + .0; + Self::new(arch) + } + + pub fn new(s: &str) -> Result<Self> { + use serde::de::IntoDeserializer; + + Self::deserialize(<&str as IntoDeserializer<'_, serde::de::value::Error>>::into_deserializer(s)) + .wrap_err_with(|| format!("architecture {s} is not supported")) + } +} + +impl std::fmt::Display for Architecture { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.serialize(f) + } +} + +// Supported Oses are on +// https://rust-lang.github.io/rustup-components-history/aarch64-unknown-linux-gnu.html +// where rust, rustc and cargo are available (e.g. `rustup toolchain add` works) +#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum Os { + Android, + #[serde(alias = "macos")] + Darwin, + Freebsd, + Illumos, + Linux, + Netbsd, + Solaris, + Windows, + // Aix + // Dragonfly + // Ios + // Js + // Openbsd + // Plan9 +} + +impl Os { + pub fn from_target(target: &TargetTriple) -> Result<Self> { + let mut iter = target.triple().rsplit('-'); + Ok( + match ( + iter.next().ok_or_else(|| eyre::eyre!("malformed target"))?, + iter.next().ok_or_else(|| eyre::eyre!("malformed target"))?, + ) { + ("darwin", _) => Os::Darwin, + ("freebsd", _) => Os::Freebsd, + ("netbsd", _) => Os::Netbsd, + ("illumos", _) => Os::Illumos, + ("solaris", _) => Os::Solaris, + // android targets also set linux, so must occur first + ("android", _) => Os::Android, + (_, "linux") => Os::Linux, + (_, "windows") => Os::Windows, + (abi, system) => { + eyre::bail!("unsupported os in target, abi: {abi:?}, system: {system:?}") + } + }, + ) + } + + pub fn new(s: &str) -> Result<Self> { + use serde::de::IntoDeserializer; + + Self::deserialize(<&str as IntoDeserializer<'_, serde::de::value::Error>>::into_deserializer(s)) + .wrap_err_with(|| format!("os {s} is not supported")) + } +} + +impl std::fmt::Display for Os { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.serialize(f) + } +} + +impl ImagePlatform { + pub fn from_target(target: TargetTriple) -> Result<Self> { + match target { + target @ TargetTriple::Other(_) => { + let os = Os::from_target(&target) + .wrap_err("could not determine os in target triplet")?; + let architecture = Architecture::from_target(&target) + .wrap_err("could not determine architecture in target triplet")?; + let variant = match target.triple() { + // v7 is default for arm architecture, we still specify it for clarity + armv7 if armv7.starts_with("armv7-") => Some("v7".to_owned()), + arm if arm.starts_with("arm-") => Some("v6".to_owned()), + _ => None, + }; + Ok(ImagePlatform { + architecture, + os, + variant, + target, + }) + } + target => Ok(Self::from_const_target(target)), + } + }
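A sketch of the two accepted `[os/arch[/variant]=]toolchain` spellings (inputs are illustrative):

```rust
#[test]
fn image_platform_from_str() -> Result<()> {
    // explicit platform with a variant
    let p: ImagePlatform = "linux/arm64/v8=aarch64-unknown-linux-gnu".parse()?;
    assert_eq!(p.docker_platform(), "linux/arm64/v8");
    // bare toolchain: the platform is derived from the target triple
    let p: ImagePlatform = "aarch64-unknown-linux-gnu".parse()?;
    assert_eq!(p.docker_platform(), "linux/arm64");
    Ok(())
}
```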
+ #[track_caller] + pub const fn from_const_target(target: TargetTriple) -> Self { + match target { + TargetTriple::Other(_) => { + unimplemented!() + } + TargetTriple::X86_64AppleDarwin => ImagePlatform { + architecture: Architecture::Amd64, + os: Os::Darwin, + variant: None, + target, + }, + TargetTriple::Aarch64AppleDarwin => ImagePlatform { + architecture: Architecture::Arm64, + os: Os::Darwin, + variant: None, + target, + }, + TargetTriple::X86_64UnknownLinuxGnu => ImagePlatform { + architecture: Architecture::Amd64, + os: Os::Linux, + variant: None, + target, + }, + TargetTriple::Aarch64UnknownLinuxGnu => ImagePlatform { + architecture: Architecture::Arm64, + os: Os::Linux, + variant: None, + target, + }, + TargetTriple::X86_64UnknownLinuxMusl => ImagePlatform { + architecture: Architecture::Amd64, + os: Os::Linux, + variant: None, + target, + }, + TargetTriple::Aarch64UnknownLinuxMusl => ImagePlatform { + architecture: Architecture::Arm64, + os: Os::Linux, + variant: None, + target, + }, + TargetTriple::X86_64PcWindowsMsvc => ImagePlatform { + architecture: Architecture::Amd64, + os: Os::Windows, + variant: None, + target, + }, + } + } + + pub fn specify_platform(&self, engine: &Engine, cmd: &mut std::process::Command) { + let same_platform = self.variant.is_none() + && Some(&self.architecture) == engine.arch.as_ref() + && Some(&self.os) == engine.os.as_ref(); + if !same_platform { + cmd.args(["--platform", &self.docker_platform()]); + } + } +} + +#[cfg(test)] +pub mod tests { + use super::*; + + macro_rules! t { + ($t:literal) => { + TargetTriple::from($t) + }; + } + + macro_rules! arch { + ($t:literal) => { + Architecture::from_target(&TargetTriple::from($t)) + }; + } + + #[test] + fn architecture_from_target() -> Result<()> { + assert_eq!(arch!("x86_64-apple-darwin")?, Architecture::Amd64); + assert_eq!(arch!("arm-unknown-linux-gnueabihf")?, Architecture::Arm); + assert_eq!(arch!("armv7-unknown-linux-gnueabihf")?, Architecture::Arm); + assert_eq!(arch!("aarch64-unknown-linux-gnu")?, Architecture::Arm64); + assert_eq!(arch!("aarch64-unknown-freebsd")?, Architecture::Arm64); + assert_eq!( + arch!("loongarch64-unknown-linux-gnu")?, + Architecture::LoongArch64 + ); + assert_eq!(arch!("mips-unknown-linux-gnu")?, Architecture::Mips); + assert_eq!( + arch!("mips64-unknown-linux-gnuabi64")?, + Architecture::Mips64 + ); + assert_eq!( + arch!("mips64le-unknown-linux-gnuabi64")?, + Architecture::Mips64Le + ); + assert_eq!(arch!("mipsle-unknown-linux-gnu")?, Architecture::MipsLe); + Ok(()) + } + + #[test] + fn os_from_target() -> Result<()> { + assert_eq!(Os::from_target(&t!("x86_64-apple-darwin"))?, Os::Darwin); + assert_eq!(Os::from_target(&t!("x86_64-unknown-freebsd"))?, Os::Freebsd); + assert_eq!( + Os::from_target(&t!("aarch64-unknown-freebsd"))?, + Os::Freebsd + ); + assert_eq!( + Os::from_target(&t!("loongarch64-unknown-linux-gnu"))?, + Os::Linux + ); + assert_eq!(Os::from_target(&t!("x86_64-unknown-netbsd"))?, Os::Netbsd); + assert_eq!(Os::from_target(&t!("sparcv9-sun-solaris"))?, Os::Solaris); + assert_eq!(Os::from_target(&t!("sparcv9-sun-illumos"))?, Os::Illumos); + assert_eq!(Os::from_target(&t!("aarch64-linux-android"))?, Os::Android); + assert_eq!(Os::from_target(&t!("x86_64-unknown-linux-gnu"))?, Os::Linux); + assert_eq!(Os::from_target(&t!("x86_64-pc-windows-msvc"))?, Os::Windows); + Ok(()) + } +} diff --git a/src/docker/local.rs b/src/docker/local.rs index 2d86d3d07..f7d0507d7 100644 --- a/src/docker/local.rs +++ b/src/docker/local.rs @@ -1,5 +1,7 @@ use std::io; -use std::process::ExitStatus; +use
std::path::Path; +use std::process::{Command, ExitStatus}; +use std::sync::atomic::Ordering; use super::shared::*; use crate::errors::Result; @@ -8,79 +10,165 @@ use crate::file::{PathExt, ToUtf8}; use crate::shell::{MessageInfo, Stream}; use eyre::Context; +// NOTE: host path must be absolute +fn mount( + docker: &mut Command, + host_path: &Path, + absolute_path: &Path, + prefix: &str, + selinux: &str, +) -> Result<()> { + let mount_path = absolute_path.as_posix_absolute()?; + docker.args([ + "-v", + &format!("{}:{prefix}{}{selinux}", host_path.to_utf8()?, mount_path), + ]); + Ok(()) +} + pub(crate) fn run( options: DockerOptions, paths: DockerPaths, args: &[String], msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { +) -> Result<Option<ExitStatus>> { let engine = &options.engine; - let dirs = &paths.directories; + let toolchain_dirs = paths.directories.toolchain_directories(); + let package_dirs = paths.directories.package_directories(); - let mut cmd = cargo_safe_command(options.uses_xargo); + let mut cmd = options.command_variant.safe_command(); cmd.args(args); - let mut docker = subcommand(engine, "run"); - docker_userns(&mut docker); - docker_envvars(&mut docker, &options.config, &options.target, msg_info)?; + let mut docker = engine.subcommand("run"); + docker.add_userns(); + + // Podman on macOS doesn't support selinux labels, see issue #756 + #[cfg(target_os = "macos")] + let (selinux, selinux_ro) = if engine.kind.is_podman() { + ("", ":ro") + } else { + (":z", ":z,ro") + }; + #[cfg(not(target_os = "macos"))] + let (selinux, selinux_ro) = (":z", ":z,ro"); + + options + .image + .platform + .specify_platform(&options.engine, &mut docker); + docker.add_envvars(&options, toolchain_dirs, msg_info)?; - let mount_volumes = docker_mount( - &mut docker, + docker.add_mounts( &options, &paths, - |docker, val| mount(docker, val, ""), + |docker, host, absolute| mount(docker, host, absolute, "", selinux), |_| {}, + msg_info, )?; + let container_id = toolchain_dirs.unique_container_identifier(options.target.target())?; + docker.args(["--name", &container_id]); docker.arg("--rm"); - docker_seccomp(&mut docker, engine.kind, &options.target, &paths.metadata) + docker + .add_seccomp(engine.kind, &options.target, &paths.metadata) .wrap_err("when copying seccomp profile")?; - docker_user_id(&mut docker, engine.kind); + docker.add_user_id(engine.is_rootless); docker - .args(&["-v", &format!("{}:/xargo:Z", dirs.xargo.to_utf8()?)]) - .args(&["-v", &format!("{}:/cargo:Z", dirs.cargo.to_utf8()?)]) + .args([ + "-v", + &format!( + "{}:{}{selinux}", + toolchain_dirs.xargo_host_path()?, + toolchain_dirs.xargo_mount_path() + ), + ]) + .args([ + "-v", + &format!( + "{}:{}{selinux}", + toolchain_dirs.cargo_host_path()?, + toolchain_dirs.cargo_mount_path() + ), + ]) // Prevent `bin` from being mounted inside the Docker container.
- .args(&["-v", "/cargo/bin"]); - if mount_volumes { - docker.args(&[ + .args(["-v", &format!("{}/bin", toolchain_dirs.cargo_mount_path())]); + + let host_root = paths.mount_finder.find_mount_path(package_dirs.host_root()); + docker.args([ + "-v", + &format!( + "{}:{}{selinux}", + host_root.to_utf8()?, + package_dirs.mount_root() + ), + ]); + + let sysroot = paths + .mount_finder + .find_mount_path(toolchain_dirs.get_sysroot()); + docker + .args([ + "-v", + &format!( + "{}:{}{selinux_ro}", + sysroot.to_utf8()?, + toolchain_dirs.sysroot_mount_path() + ), + ]) + .args([ "-v", - &format!("{}:{}:Z", dirs.host_root.to_utf8()?, dirs.mount_root), + &format!("{}:/target{selinux}", package_dirs.target().to_utf8()?), ]); - } else { - docker.args(&["-v", &format!("{}:/project:Z", dirs.host_root.to_utf8()?)]); - } - docker - .args(&["-v", &format!("{}:/rust:Z,ro", dirs.sysroot.to_utf8()?)]) - .args(&["-v", &format!("{}:/target:Z", dirs.target.to_utf8()?)]); - docker_cwd(&mut docker, &paths, mount_volumes)?; + docker.add_cwd(&paths)?; // When running inside NixOS or using Nix packaging we need to add the Nix // Store to the running container so it can load the needed binaries. - if let Some(ref nix_store) = dirs.nix_store { - docker.args(&[ + if let Some(nix_store) = toolchain_dirs.nix_store() { + docker.args([ "-v", - &format!("{}:{}:Z", nix_store.to_utf8()?, nix_store.as_posix()?), + &format!( + "{}:{}{selinux}", + nix_store.to_utf8()?, + nix_store.as_posix_absolute()? + ), ]); } - if io::Stdin::is_atty() { + if io::Stdin::is_atty() && io::Stdout::is_atty() && io::Stderr::is_atty() { + docker.arg("-t"); + } + + if options.interactive { docker.arg("-i"); - if io::Stdout::is_atty() && io::Stderr::is_atty() { - docker.arg("-t"); - } } - let mut image = options.image_name()?; + + let mut image_name = options.image.name.clone(); if options.needs_custom_image() { - image = options + image_name = options .custom_image_build(&paths, msg_info) .wrap_err("when building custom image")?; } - docker - .arg(&image) - .args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]) - .run_and_get_status(msg_info, false) - .map_err(Into::into) + ChildContainer::create(engine.clone(), container_id)?; + if msg_info.should_fail() { + return Ok(None); + } + let status = docker + .arg(&image_name) + .add_build_command(toolchain_dirs, &cmd) + .run_and_get_status(msg_info, false); + + // `cargo` generally returns 0 or 101 on completion, but isn't guaranteed + // to. `ExitStatus::code()` may be None if a signal caused the process to + // terminate or it may be a known interrupt return status (130, 137, 143). + // simpler: just test if the program termination handler was called. + // SAFETY: an atomic load. 
+ let is_terminated = unsafe { crate::errors::TERMINATED.load(Ordering::SeqCst) }; + if !is_terminated { + ChildContainer::exit_static(); + } + + status.map(Some) } diff --git a/src/docker/mod.rs b/src/docker/mod.rs index aececd69b..def816933 100644 --- a/src/docker/mod.rs +++ b/src/docker/mod.rs @@ -1,23 +1,62 @@ -pub mod custom; +mod build; +pub(crate) mod custom; mod engine; +mod image; mod local; +mod provided_images; pub mod remote; mod shared; +pub use self::build::{BuildCommandExt, BuildResultExt, Progress}; pub use self::engine::*; +pub use self::provided_images::PROVIDED_IMAGES; pub use self::shared::*; +pub use image::{ + Architecture, Image, ImagePlatform, ImageReference, Os as ContainerOs, PossibleImage, +}; + use std::process::ExitStatus; use crate::errors::*; use crate::shell::MessageInfo; +#[derive(Debug)] +pub struct ProvidedImage { + /// The `name` of the image, usually the target triplet + pub name: &'static str, + pub platforms: &'static [ImagePlatform], + pub sub: Option<&'static str>, +} + +impl ProvidedImage { + pub fn image_name(&self, repository: &str, tag: &str) -> String { + image_name(self.name, self.sub, repository, tag) + } + + pub fn default_image_name(&self) -> String { + self.image_name(CROSS_IMAGE, DEFAULT_IMAGE_VERSION) + } +} + +pub fn image_name(target: &str, sub: Option<&str>, repository: &str, tag: &str) -> String { + if let Some(sub) = sub { + format!("{repository}/{target}:{tag}-{sub}") + } else { + format!("{repository}/{target}:{tag}") + } +} + +// TODO: The Option here in the result should be removed and Result::Error replaced with an enum to properly signal error + +// Ok(None) means that the command failed, due to a warning or error, when `msg_info.should_fail() == true` pub fn run( options: DockerOptions, paths: DockerPaths, args: &[String], + subcommand: Option<Subcommand>, msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { +) -> Result<Option<ExitStatus>> { if cfg!(target_os = "windows") && options.in_docker() { msg_info.fatal( "running cross inside a container running windows is currently unsupported", @@ -25,7 +64,8 @@ pub fn run( ); } if options.is_remote() { - remote::run(options, paths, args, msg_info).wrap_err("could not complete remote run") + remote::run(options, paths, args, subcommand, msg_info) + .wrap_err("could not complete remote run") } else { local::run(options, paths, args, msg_info) } diff --git a/src/docker/provided_images.rs b/src/docker/provided_images.rs new file mode 100644 index 000000000..be7f087e7 --- /dev/null +++ b/src/docker/provided_images.rs @@ -0,0 +1,291 @@ +#![doc = "*** AUTO-GENERATED, do not touch. 
Run `cargo xtask codegen` to update ***"] +use super::{ImagePlatform, ProvidedImage}; + +#[rustfmt::skip] +pub static PROVIDED_IMAGES: &[ProvidedImage] = &[ + ProvidedImage { + name: "x86_64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-unknown-linux-musl", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: Some("centos") + }, + ProvidedImage { + name: "aarch64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "arm-unknown-linux-gnueabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "arm-unknown-linux-gnueabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "armv7-unknown-linux-gnueabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "armv7-unknown-linux-gnueabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv7neon-unknown-linux-gnueabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i586-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i686-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "loongarch64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "mips-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "mipsel-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "mips64-unknown-linux-gnuabi64", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "mips64el-unknown-linux-gnuabi64", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "powerpc-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "powerpc64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "powerpc64le-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "riscv64gc-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "s390x-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "sparc64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "aarch64-unknown-linux-musl", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "arm-unknown-linux-musleabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "arm-unknown-linux-musleabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "armv5te-unknown-linux-gnueabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + 
}, + ProvidedImage { + name: "armv5te-unknown-linux-musleabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "armv7-unknown-linux-musleabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "armv7-unknown-linux-musleabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i586-unknown-linux-musl", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i686-unknown-linux-musl", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "aarch64-linux-android", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "arm-linux-androideabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "armv7-linux-androideabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv7neon-linux-androideabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i686-linux-android", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-linux-android", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-pc-windows-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i686-pc-windows-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "wasm32-unknown-emscripten", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-unknown-dragonfly", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "i686-unknown-freebsd", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-unknown-freebsd", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "aarch64-unknown-freebsd", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-unknown-netbsd", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "sparcv9-sun-solaris", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-pc-solaris", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "x86_64-unknown-illumos", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv6m-none-eabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv7em-none-eabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv7em-none-eabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv7m-none-eabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv8m.base-none-eabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "thumbv8m.main-none-eabi", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { 
+ name: "thumbv8m.main-none-eabihf", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "zig", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: None + }, + ProvidedImage { + name: "aarch64-unknown-linux-gnu", + platforms: &[ImagePlatform::X86_64_UNKNOWN_LINUX_GNU], + sub: Some("centos") + }, +]; diff --git a/src/docker/remote.rs b/src/docker/remote.rs index b4cc21dc5..b8c9ee803 100644 --- a/src/docker/remote.rs +++ b/src/docker/remote.rs @@ -1,191 +1,455 @@ use std::collections::BTreeMap; use std::io::{self, BufRead, Read, Write}; -use std::path::{Path, PathBuf}; -use std::process::{Command, ExitStatus, Output}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::path::Path; +use std::process::{Command, ExitStatus}; use std::{env, fs, time}; use eyre::Context; use super::engine::Engine; use super::shared::*; -use crate::cargo::CargoMetadata; use crate::config::bool_from_envvar; use crate::errors::Result; use crate::extensions::CommandExt; use crate::file::{self, PathExt, ToUtf8}; -use crate::rustc::{self, VersionMetaExt}; -use crate::rustup; -use crate::shell::{ColorChoice, MessageInfo, Stream, Verbosity}; +use crate::rustc::{self, QualifiedToolchain, VersionMetaExt}; +use crate::shell::{MessageInfo, Stream}; use crate::temp; -use crate::{Host, Target}; - -// the mount directory for the data volume. -pub const MOUNT_PREFIX: &str = "/cross"; -// default timeout to stop a container (in seconds) -pub const DEFAULT_TIMEOUT: u32 = 2; -// instant kill in case of a non-graceful exit -pub const NO_TIMEOUT: u32 = 0; - -// we need to specify drops for the containers, but we -// also need to ensure the drops are called on a -// termination handler. we use an atomic bool to ensure -// that the drop only gets called once, even if we have -// the signal handle invoked multiple times or it fails. -pub(crate) static mut CONTAINER: Option = None; -pub(crate) static mut CONTAINER_EXISTS: AtomicBool = AtomicBool::new(false); - -// it's unlikely that we ever need to erase a line in the destructors, -// and it's better than keep global state everywhere, or keeping a ref -// cell which could have already deleted a line -pub(crate) struct DeleteContainer(Engine, String, u32, ColorChoice, Verbosity); - -impl Drop for DeleteContainer { - fn drop(&mut self) { - // SAFETY: safe, since guarded by a thread-safe atomic swap. - unsafe { - if CONTAINER_EXISTS.swap(false, Ordering::SeqCst) { - let mut msg_info = MessageInfo::new(self.3, self.4); - container_stop(&self.0, &self.1, self.2, &mut msg_info).ok(); - container_rm(&self.0, &self.1, &mut msg_info).ok(); - } +use crate::TargetTriple; + +// prevent further commands from running if we handled +// a signal earlier, and the volume is exited. +// this isn't required, but avoids unnecessary +// commands while the container is cleaning up. +macro_rules! 
diff --git a/src/docker/remote.rs b/src/docker/remote.rs index b4cc21dc5..b8c9ee803 100644 --- a/src/docker/remote.rs +++ b/src/docker/remote.rs @@ -1,191 +1,455 @@ use std::collections::BTreeMap; use std::io::{self, BufRead, Read, Write}; -use std::path::{Path, PathBuf}; -use std::process::{Command, ExitStatus, Output}; -use std::sync::atomic::{AtomicBool, Ordering}; +use std::path::Path; +use std::process::{Command, ExitStatus}; use std::{env, fs, time}; use eyre::Context; use super::engine::Engine; use super::shared::*; -use crate::cargo::CargoMetadata; use crate::config::bool_from_envvar; use crate::errors::Result; use crate::extensions::CommandExt; use crate::file::{self, PathExt, ToUtf8}; -use crate::rustc::{self, VersionMetaExt}; -use crate::rustup; -use crate::shell::{ColorChoice, MessageInfo, Stream, Verbosity}; +use crate::rustc::{self, QualifiedToolchain, VersionMetaExt}; +use crate::shell::{MessageInfo, Stream}; use crate::temp; -use crate::{Host, Target}; - -// the mount directory for the data volume. -pub const MOUNT_PREFIX: &str = "/cross"; -// default timeout to stop a container (in seconds) -pub const DEFAULT_TIMEOUT: u32 = 2; -// instant kill in case of a non-graceful exit -pub const NO_TIMEOUT: u32 = 0; - -// we need to specify drops for the containers, but we -// also need to ensure the drops are called on a -// termination handler. we use an atomic bool to ensure -// that the drop only gets called once, even if we have -// the signal handle invoked multiple times or it fails. -pub(crate) static mut CONTAINER: Option<DeleteContainer> = None; -pub(crate) static mut CONTAINER_EXISTS: AtomicBool = AtomicBool::new(false); - -// it's unlikely that we ever need to erase a line in the destructors, -// and it's better than keep global state everywhere, or keeping a ref -// cell which could have already deleted a line -pub(crate) struct DeleteContainer(Engine, String, u32, ColorChoice, Verbosity); - -impl Drop for DeleteContainer { - fn drop(&mut self) { - // SAFETY: safe, since guarded by a thread-safe atomic swap. - unsafe { - if CONTAINER_EXISTS.swap(false, Ordering::SeqCst) { - let mut msg_info = MessageInfo::new(self.3, self.4); - container_stop(&self.0, &self.1, self.2, &mut msg_info).ok(); - container_rm(&self.0, &self.1, &mut msg_info).ok(); + } - } + } +use crate::TargetTriple; + +// prevent further commands from running if we handled +// a signal earlier, and the volume is exited. +// this isn't required, but avoids unnecessary +// commands while the container is cleaning up. +macro_rules! bail_container_exited { + () => {{ + if !ChildContainer::exists_static() { + eyre::bail!("container already exited due to signal"); } - } + }}; } -#[derive(Debug, PartialEq, Eq)] -pub enum ContainerState { - Created, - Running, - Paused, - Restarting, - Dead, - Exited, - DoesNotExist, -} +#[track_caller] +fn subcommand_or_exit(engine: &Engine, cmd: &str) -> Result<Command> { + bail_container_exited!(); + Ok(engine.subcommand(cmd)) +} + +pub fn posix_parent(path: &str) -> Option<&str> { + Path::new(path).parent()?.to_str() +} + +impl<'a, 'b, 'c> ContainerDataVolume<'a, 'b, 'c> { + // NOTE: `reldir` should be a relative POSIX path to the root directory + // on windows, this should be something like `mnt/c`. that is, all paths + // inside the container should not have the mount prefix. + #[track_caller] + fn create_dir( + &self, + reldir: &str, + mount_prefix: &str, + msg_info: &mut MessageInfo, + ) -> Result<ExitStatus> { + // make our parent directory if needed + subcommand_or_exit(self.engine, "exec")? + .arg(self.container) + .args(["sh", "-c", &format!("mkdir -p '{mount_prefix}/{reldir}'")]) + .run_and_get_status(msg_info, false) + } -impl ContainerState { - pub fn new(state: &str) -> Result<Self> { - match state { - "created" => Ok(ContainerState::Created), - "running" => Ok(ContainerState::Running), - "paused" => Ok(ContainerState::Paused), - "restarting" => Ok(ContainerState::Restarting), - "dead" => Ok(ContainerState::Dead), - "exited" => Ok(ContainerState::Exited), - "" => Ok(ContainerState::DoesNotExist), - _ => eyre::bail!("unknown container state: got {state}"), + /// Copy files for a docker volume + /// + /// `reldst` has the same caveats as `reldir` in [`Self::create_dir`]. + /// + /// ## Note + /// + /// if copying from a src directory to dst directory with docker, to + /// copy the contents from `src` into `dst`, `src` must end with `/.` + #[track_caller] + fn copy_files( + &self, + src: &Path, + reldst: &str, + mount_prefix: &str, + msg_info: &mut MessageInfo, + ) -> Result<ExitStatus> { + if let Some((_, rel)) = reldst.rsplit_once('/') { + if msg_info.cross_debug + && src.is_dir() + && !src.to_string_lossy().ends_with("/.") + && rel + == src + .file_name() + .expect("filename should be defined as we are a directory") + { + msg_info.warn(format_args!( + "source is pointing to a directory instead of its contents: {} -> {}\nThis might be a bug. {}", + src.as_posix_relative()?, + reldst, + std::panic::Location::caller() + ))?; + } + } + subcommand_or_exit(self.engine, "cp")? + .arg("-a") + .arg(src.to_utf8()?) + .arg(format!("{}:{mount_prefix}/{reldst}", self.container)) + .run_and_get_status(msg_info, false) + }
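The `/.` caveat in the doc comment matters because `docker cp src container:dst` nests `src` under `dst`, while `docker cp src/. container:dst` copies the contents of `src` into `dst`; callers therefore append `"."` to the host path. A small sketch with an illustrative path:

```rust
#[test]
fn docker_cp_src_spelling() {
    use std::path::Path;
    // illustrative host directory; POSIX-style paths assumed
    let src = Path::new("/home/user/.xargo");
    // this is the spelling `copy_files` callers pass to copy *contents*
    assert_eq!(src.join(".").to_str(), Some("/home/user/.xargo/."));
}
```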
- #[must_use] - pub fn is_stopped(&self) -> bool { - matches!(self, Self::Exited | Self::DoesNotExist) - } + /// copy files for a docker volume, does not include cache directories + /// + /// ## Note + /// + /// if copying from a src directory to dst directory with docker, to + /// copy the contents from `src` into `dst`, `src` must end with `/.` + #[track_caller] + fn copy_files_nocache( + &self, + src: &Path, + reldst: &str, + mount_prefix: &str, + copy_symlinks: bool, + msg_info: &mut MessageInfo, + ) -> Result<ExitStatus> { + // avoid any cached directories when copying + // see https://bford.info/cachedir/ + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? }; + let temppath = tempdir.path(); + let had_symlinks = copy_dir(src, temppath, copy_symlinks, 0, |e, _| is_cachedir(e))?; + warn_symlinks(had_symlinks, msg_info)?; + self.copy_files(&temppath.join("."), reldst, mount_prefix, msg_info) + } + + // copy files for a docker volume, for remote host support + // provides a list of files relative to src. + #[track_caller] + fn copy_file_list( + &self, + src: &Path, + reldst: &str, + mount_prefix: &str, + files: &[&str], + msg_info: &mut MessageInfo, + ) -> Result<ExitStatus> { + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? }; + let temppath = tempdir.path(); + for file in files { + let src_path = src.join(file); + let dst_path = temppath.join(file); + file::create_dir_all(dst_path.parent().expect("must have parent"))?; + fs::copy(src_path, &dst_path)?; + } - #[must_use] - pub fn exists(&self) -> bool { - !matches!(self, Self::DoesNotExist) - } -} + self.copy_files(&temppath.join("."), reldst, mount_prefix, msg_info) + } + + // removed files from a docker volume, for remote host support + // provides a list of files relative to src. + #[track_caller] + fn remove_file_list( + &self, + reldst: &str, + mount_prefix: &str, + files: &[&str], + msg_info: &mut MessageInfo, + ) -> Result<ExitStatus> { + const PATH: &str = "/tmp/remove_list"; + let mut script = vec![]; + if msg_info.is_verbose() { + script.push("set -x".to_owned()); + } + script.push(format!( + "cat \"{PATH}\" | while read line; do + rm -f \"${{line}}\" + done -#[derive(Debug, Clone)] -enum VolumeId { - Keep(String), - Discard, -} + rm \"{PATH}\" + " + )); + + // SAFETY: safe, single-threaded execution. + let mut tempfile = unsafe { temp::TempFile::new()? }; + for file in files { + writeln!(tempfile.file(), "{mount_prefix}/{reldst}/{file}")?; + } + + // need to avoid having hundreds of files on the command, so + // just provide a single file name. + subcommand_or_exit(self.engine, "cp")? + .arg(tempfile.path()) + .arg(format!("{}:{PATH}", self.container)) + .run_and_get_status(msg_info, true)?; + + subcommand_or_exit(self.engine, "exec")? + .arg(self.container) + .args(["sh", "-c", &script.join("\n")]) + .run_and_get_status(msg_info, true) + } + + #[track_caller] + fn container_path_exists( + &self, + relpath: &str, + mount_prefix: &str, + msg_info: &mut MessageInfo, + ) -> Result<bool> { + Ok(subcommand_or_exit(self.engine, "exec")? + .arg(self.container) + .args([ + "bash", + "-c", + &format!("[[ -d '{mount_prefix}/{relpath}' ]]"), + ]) + .run_and_get_status(msg_info, true)? + .success()) + } + + #[track_caller] + pub fn copy_xargo(&self, mount_prefix: &str, msg_info: &mut MessageInfo) -> Result<()> { + let dirs = &self.toolchain_dirs; + let reldst = dirs.xargo_mount_path_relative()?; + if dirs.xargo().exists() { + self.create_dir( + // this always works, even if we have `/xargo`, since + // this will be an absolute path. passing an empty path + // to `create_dir` isn't an issue. + posix_parent(dirs.xargo_mount_path()) + .expect("destination should have a parent") + .strip_prefix('/') + .expect("parent directory must be absolute"), + mount_prefix, + msg_info, + )?; + self.copy_files(&dirs.xargo().join("."), &reldst, mount_prefix, msg_info)?; + } -impl VolumeId { - fn create(engine: &Engine, toolchain: &str, msg_info: &mut MessageInfo) -> Result<Self> { - if volume_exists(engine, toolchain, msg_info)?
{ - Ok(Self::Keep(toolchain.to_owned())) + Ok(()) + } + + #[track_caller] + pub fn copy_cargo( + &self, + mount_prefix: &str, + copy_registry: bool, + msg_info: &mut MessageInfo, + ) -> Result<()> { + let dirs = &self.toolchain_dirs; + let reldst = dirs.cargo_mount_path_relative()?; + let copy_registry = env::var("CROSS_REMOTE_COPY_REGISTRY") + .map(|s| bool_from_envvar(&s)) + .unwrap_or(copy_registry); + + self.create_dir(&reldst, mount_prefix, msg_info)?; + if copy_registry { + self.copy_files(&dirs.cargo().join("."), &reldst, mount_prefix, msg_info)?; } else { - Ok(Self::Discard) + // can copy a limited subset of files: the rest is already present. + for entry in fs::read_dir(dirs.cargo()) + .wrap_err_with(|| format!("when reading directory {:?}", dirs.cargo()))? + { + let file = entry?; + let basename = file + .file_name() + .to_utf8() + .wrap_err_with(|| format!("when reading file {file:?}"))? + .to_owned(); + if !basename.starts_with('.') && !matches!(basename.as_ref(), "git" | "registry") { + self.copy_files(&file.path(), &reldst, mount_prefix, msg_info)?; + } + } } + + Ok(()) } -} -// prevent further commands from running if we handled -// a signal earlier, and the volume is exited. -// this isn't required, but avoids unnecessary -// commands while the container is cleaning up. -macro_rules! bail_container_exited { - () => {{ - if !container_exists() { - eyre::bail!("container already exited due to signal"); + // copy over files needed for all targets in the toolchain that should never change + #[track_caller] + fn copy_rust_base(&self, mount_prefix: &str, msg_info: &mut MessageInfo) -> Result<()> { + let dirs = &self.toolchain_dirs; + + // the rust toolchain is quite large, but most of it isn't needed + // we need the bin, libexec, and etc directories, and part of the lib directory. + let reldst = dirs.sysroot_mount_path_relative()?; + let rustlib = "lib/rustlib"; + self.create_dir(&format!("{reldst}/{}", rustlib), mount_prefix, msg_info)?; + for basename in ["bin", "libexec", "etc"] { + let file = dirs.get_sysroot().join(basename); + self.copy_files(&file, &reldst, mount_prefix, msg_info)?; } - }}; -} -pub fn create_container_deleter(engine: Engine, container: String) { - // SAFETY: safe, since single-threaded execution. - unsafe { - CONTAINER_EXISTS.store(true, Ordering::Relaxed); - CONTAINER = Some(DeleteContainer( - engine, - container, - NO_TIMEOUT, - ColorChoice::Never, - Verbosity::Quiet, - )); -pub fn drop_container(is_tty: bool, msg_info: &mut MessageInfo) { - // SAFETY: safe, since single-threaded execution. 
- unsafe { - // relax the no-timeout and lack of output - if let Some(container) = &mut CONTAINER { - if is_tty { - container.2 = DEFAULT_TIMEOUT; - } - container.3 = msg_info.color_choice; - container.4 = msg_info.verbosity; + #[track_caller] + fn copy_rust_manifest(&self, mount_prefix: &str, msg_info: &mut MessageInfo) -> Result<()> { + let dirs = &self.toolchain_dirs; + + // copy over all the manifest files in rustlib + // these are small text files containing names/paths to toolchains + let reldst = dirs.sysroot_mount_path_relative()?; + let rustlib = "lib/rustlib"; + + // SAFETY: safe, single-threaded execution. + let tempdir = unsafe { temp::TempDir::new()? }; + let temppath = tempdir.path(); + file::create_dir_all(temppath.join(rustlib))?; + let had_symlinks = copy_dir( + &dirs.get_sysroot().join(rustlib), + &temppath.join(rustlib), + true, + 0, + |e, d| d != 0 || e.file_type().map(|t| !t.is_file()).unwrap_or(true), + )?; + self.copy_files(&temppath.join("lib"), &reldst, mount_prefix, msg_info)?; + + warn_symlinks(had_symlinks, msg_info) + } + + // copy over the toolchain for a specific triple + #[track_caller] + fn copy_rust_triple( + &self, + target_triple: &TargetTriple, + mount_prefix: &str, + skip_exists: bool, + msg_info: &mut MessageInfo, + ) -> Result<()> { + let dirs = &self.toolchain_dirs; + + // copy over the files for a specific triple + let reldst = &dirs.sysroot_mount_path_relative()?; + let rustlib = "lib/rustlib"; + let reldst_rustlib = format!("{reldst}/{rustlib}"); + let src_toolchain = dirs + .get_sysroot() + .join(Path::new(rustlib)) + .join(target_triple.triple()); + let reldst_toolchain = format!("{reldst_rustlib}/{}", target_triple.triple()); + + // skip if the toolchain target component already exists. for the host toolchain + // or the first run of the target toolchain, we know it doesn't exist. + let mut skip = false; + if skip_exists { + skip = self.container_path_exists(&reldst_toolchain, mount_prefix, msg_info)?; } - CONTAINER = None; + if !skip { + self.copy_files(&src_toolchain, &reldst_rustlib, mount_prefix, msg_info)?; + } + if !skip && skip_exists { + // this means we have a persistent data volume and we have a + // new target, meaning we might have new manifests as well. + self.copy_rust_manifest(mount_prefix, msg_info)?; + } + + Ok(()) } -fn container_exists() -> bool { - // SAFETY: safe, not mutating an atomic bool - // this can be more relaxed: just used to ensure - // that we don't make unnecessary calls, which are - // safe even if executed, after we've signaled a - // drop to our container. - unsafe { CONTAINER_EXISTS.load(Ordering::Relaxed) } -} + #[track_caller] + pub fn copy_rust( + &self, + target_triple: Option<&TargetTriple>, + mount_prefix: &str, + msg_info: &mut MessageInfo, + ) -> Result<()> { + let dirs = &self.toolchain_dirs; + + self.copy_rust_base(mount_prefix, msg_info)?; + self.copy_rust_manifest(mount_prefix, msg_info)?; + self.copy_rust_triple(dirs.host_target(), mount_prefix, false, msg_info)?; + if let Some(target_triple) = target_triple { + if target_triple.triple() != dirs.host_target().triple() { + self.copy_rust_triple(target_triple, mount_prefix, false, msg_info)?; + } + } -fn subcommand_or_exit(engine: &Engine, cmd: &str) -> Result<Command> { - bail_container_exited!(); - Ok(subcommand(engine, cmd)) -} + Ok(()) + } -fn create_volume_dir( - engine: &Engine, - container: &str, - dir: &Path, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - // make our parent directory if needed - subcommand_or_exit(engine, "exec")? 
- .arg(container) - .args(&["sh", "-c", &format!("mkdir -p '{}'", dir.as_posix()?)]) - .run_and_get_status(msg_info, false) -} + #[track_caller] + fn copy_mount( + &self, + src: &Path, + reldst: &str, + mount_prefix: &str, + volume: &VolumeId, + copy_cache: bool, + msg_info: &mut MessageInfo, + ) -> Result<()> { + let copy_all = |info: &mut MessageInfo| { + if copy_cache { + self.copy_files(&src.join("."), reldst, mount_prefix, info) + } else { + self.copy_files_nocache(&src.join("."), reldst, mount_prefix, true, info) + } + }; + match volume { + VolumeId::Keep(_) => { + let parent = temp::dir()?; + file::create_dir_all(&parent)?; + + let toolchain = &self.toolchain_dirs.toolchain(); + let filename = toolchain.unique_mount_identifier(src)?; + let fingerprint = parent.join(filename); + let current = Fingerprint::read_dir(src, copy_cache)?; + // need to check if the container path exists, otherwise we might + // have stale data: the persistent volume was deleted & recreated. + if fingerprint.exists() + && self.container_path_exists(reldst, mount_prefix, msg_info)? + { + let previous = Fingerprint::read_file(&fingerprint)?; + let (to_copy, to_remove) = previous.difference(&current); + if !to_copy.is_empty() { + self.copy_file_list(src, reldst, mount_prefix, &to_copy, msg_info)?; + } + if !to_remove.is_empty() { + self.remove_file_list(reldst, mount_prefix, &to_remove, msg_info)?; + } + + // write fingerprint afterwards, in case any failure so we + // ensure any changes will be made on subsequent runs + current.write_file(&fingerprint)?; + } else { + current.write_file(&fingerprint)?; + copy_all(msg_info)?; + } + } + VolumeId::Discard => { + copy_all(msg_info)?; + } + } -// copy files for a docker volume, for remote host support -fn copy_volume_files( - engine: &Engine, - container: &str, - src: &Path, - dst: &Path, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - subcommand_or_exit(engine, "cp")? - .arg("-a") - .arg(src.to_utf8()?) - .arg(format!("{container}:{}", dst.as_posix()?)) - .run_and_get_status(msg_info, false) + Ok(()) + } }
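The `Fingerprint` comparison that drives this incremental path boils down to a map diff keyed on relative path and modification time; a self-contained sketch (times reduced to integers for illustration):

```rust
#[test]
fn fingerprint_difference_sketch() {
    use std::collections::BTreeMap;
    // hypothetical fingerprints: relative path -> mtime in millis
    let previous = BTreeMap::from([("Cargo.toml", 50u64), ("src/main.rs", 100)]);
    let current = BTreeMap::from([("src/lib.rs", 10u64), ("src/main.rs", 200)]);
    // to_copy: added or modified; to_remove: no longer present
    let to_copy: Vec<_> = current.iter()
        .filter(|(k, v)| previous.get(*k) != Some(*v))
        .map(|(k, _)| *k)
        .collect();
    let to_remove: Vec<_> = previous.keys()
        .filter(|k| !current.contains_key(*k))
        .copied()
        .collect();
    assert_eq!(to_copy, ["src/lib.rs", "src/main.rs"]);
    assert_eq!(to_remove, ["Cargo.toml"]);
}
```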
fn is_cachedir_tag(path: &Path) -> Result<bool> { @@ -207,100 +471,6 @@ is_cachedir(entry: &fs::DirEntry) -> bool { } } -fn container_path_exists( - engine: &Engine, - container: &str, - path: &Path, - msg_info: &mut MessageInfo, -) -> Result<bool> { - Ok(subcommand_or_exit(engine, "exec")? - .arg(container) - .args(&["bash", "-c", &format!("[[ -d '{}' ]]", path.as_posix()?)]) - .run_and_get_status(msg_info, true)? - .success()) -} - -// copy files for a docker volume, for remote host support -fn copy_volume_files_nocache( - engine: &Engine, - container: &str, - src: &Path, - dst: &Path, - copy_symlinks: bool, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - // avoid any cached directories when copying - // see https://bford.info/cachedir/ - // SAFETY: safe, single-threaded execution. - let tempdir = unsafe { temp::TempDir::new()? }; - let temppath = tempdir.path(); - let had_symlinks = copy_dir(src, temppath, copy_symlinks, 0, |e, _| is_cachedir(e))?; - warn_symlinks(had_symlinks, msg_info)?; - copy_volume_files(engine, container, temppath, dst, msg_info) -} - -pub fn copy_volume_container_xargo( - engine: &Engine, - container: &str, - xargo_dir: &Path, - target: &Target, - mount_prefix: &Path, - msg_info: &mut MessageInfo, -) -> Result<()> { - // only need to copy the rustlib files for our current target. - let triple = target.triple(); - let relpath = Path::new("lib").join("rustlib").join(&triple); - let src = xargo_dir.join(&relpath); - let dst = mount_prefix.join("xargo").join(&relpath); - if Path::new(&src).exists() { - create_volume_dir( - engine, - container, - dst.parent().expect("destination should have a parent"), - msg_info, - )?; - copy_volume_files(engine, container, &src, &dst, msg_info)?; - } - - Ok(()) -} - -pub fn copy_volume_container_cargo( - engine: &Engine, - container: &str, - cargo_dir: &Path, - mount_prefix: &Path, - copy_registry: bool, - msg_info: &mut MessageInfo, -) -> Result<()> { - let dst = mount_prefix.join("cargo"); - let copy_registry = env::var("CROSS_REMOTE_COPY_REGISTRY") - .map(|s| bool_from_envvar(&s)) - .unwrap_or(copy_registry); - - if copy_registry { - copy_volume_files(engine, container, cargo_dir, &dst, msg_info)?; - } else { - // can copy a limit subset of files: the rest is present. - create_volume_dir(engine, container, &dst, msg_info)?; - for entry in fs::read_dir(cargo_dir) - .wrap_err_with(|| format!("when reading directory {cargo_dir:?}"))? - { - let file = entry?; - let basename = file - .file_name() - .to_utf8() - .wrap_err_with(|| format!("when reading file {file:?}"))? - .to_owned(); - if !basename.starts_with('.') && !matches!(basename.as_ref(), "git" | "registry") { - copy_volume_files(engine, container, &file.path(), &dst, msg_info)?; - } - } - } - - Ok(()) -} - // recursively copy a directory into another fn copy_dir( src: &Path, @@ -368,502 +538,150 @@ fn warn_symlinks(had_symlinks: bool, msg_info: &mut MessageInfo) -> Result<()> { } } -// copy over files needed for all targets in the toolchain that should never change -fn copy_volume_container_rust_base( - engine: &Engine, - container: &str, - sysroot: &Path, - mount_prefix: &Path, - msg_info: &mut MessageInfo, -) -> Result<()> { - // the rust toolchain is quite large, but most of it isn't needed - // we need the bin, libexec, and etc directories, and part of the lib directory. - let dst = mount_prefix.join("rust"); - let rustlib = Path::new("lib").join("rustlib"); - create_volume_dir(engine, container, &dst.join(&rustlib), msg_info)?; - for basename in ["bin", "libexec", "etc"] { - let file = sysroot.join(basename); - copy_volume_files(engine, container, &file, &dst, msg_info)?; - } - - // the lib directories are rather large, so we want only a subset. - // now, we use a temp directory for everything else in the libdir - // we can pretty safely assume we don't have symlinks here. - - // first, copy the shared libraries inside lib, all except rustlib. - // SAFETY: safe, single-threaded execution. - let tempdir = unsafe { temp::TempDir::new()? 
}; - let temppath = tempdir.path(); - fs::create_dir_all(&temppath.join(&rustlib))?; - let mut had_symlinks = copy_dir( - &sysroot.join("lib"), - &temppath.join("lib"), - true, - 0, - |e, d| d == 0 && e.file_name() == "rustlib", - )?; - - // next, copy the src/etc directories inside rustlib - had_symlinks |= copy_dir( - &sysroot.join(&rustlib), - &temppath.join(&rustlib), - true, - 0, - |e, d| d == 0 && !(e.file_name() == "src" || e.file_name() == "etc"), - )?; - copy_volume_files(engine, container, &temppath.join("lib"), &dst, msg_info)?; - - warn_symlinks(had_symlinks, msg_info) -} - -fn copy_volume_container_rust_manifest( - engine: &Engine, - container: &str, - sysroot: &Path, - mount_prefix: &Path, - msg_info: &mut MessageInfo, -) -> Result<()> { - // copy over all the manifest files in rustlib - // these are small text files containing names/paths to toolchains - let dst = mount_prefix.join("rust"); - let rustlib = Path::new("lib").join("rustlib"); - - // SAFETY: safe, single-threaded execution. - let tempdir = unsafe { temp::TempDir::new()? }; - let temppath = tempdir.path(); - fs::create_dir_all(&temppath.join(&rustlib))?; - let had_symlinks = copy_dir( - &sysroot.join(&rustlib), - &temppath.join(&rustlib), - true, - 0, - |e, d| d != 0 || e.file_type().map(|t| !t.is_file()).unwrap_or(true), - )?; - copy_volume_files(engine, container, &temppath.join("lib"), &dst, msg_info)?; - - warn_symlinks(had_symlinks, msg_info) -} - -// copy over the toolchain for a specific triple -pub fn copy_volume_container_rust_triple( - engine: &Engine, - container: &str, - sysroot: &Path, - triple: &str, - mount_prefix: &Path, - skip_exists: bool, - msg_info: &mut MessageInfo, -) -> Result<()> { - // copy over the files for a specific triple - let dst = mount_prefix.join("rust"); - let rustlib = Path::new("lib").join("rustlib"); - let dst_rustlib = dst.join(&rustlib); - let src_toolchain = sysroot.join(&rustlib).join(triple); - let dst_toolchain = dst_rustlib.join(triple); - - // skip if the toolchain already exists. for the host toolchain - // or the first run of the target toolchain, we know it doesn't exist. - let mut skip = false; - if skip_exists { - skip = container_path_exists(engine, container, &dst_toolchain, msg_info)?; - } - if !skip { - copy_volume_files(engine, container, &src_toolchain, &dst_rustlib, msg_info)?; - } - if !skip && skip_exists { - // this means we have a persistent data volume and we have a - // new target, meaning we might have new manifests as well. 
- copy_volume_container_rust_manifest(engine, container, sysroot, mount_prefix, msg_info)?; - } - - Ok(()) +#[derive(Debug)] +struct Fingerprint { + map: BTreeMap<String, time::SystemTime>, } -pub fn copy_volume_container_rust( - engine: &Engine, - container: &str, - sysroot: &Path, - target: &Target, - mount_prefix: &Path, - skip_target: bool, - msg_info: &mut MessageInfo, -) -> Result<()> { - let target_triple = target.triple(); - let image_triple = Host::X86_64UnknownLinuxGnu.triple(); - - copy_volume_container_rust_base(engine, container, sysroot, mount_prefix, msg_info)?; - copy_volume_container_rust_manifest(engine, container, sysroot, mount_prefix, msg_info)?; - copy_volume_container_rust_triple( - engine, - container, - sysroot, - image_triple, - mount_prefix, - false, - msg_info, - )?; - if !skip_target && target_triple != image_triple { - copy_volume_container_rust_triple( - engine, - container, - sysroot, - target_triple, - mount_prefix, - false, - msg_info, - )?; +impl Fingerprint { + fn new() -> Self { + Self { + map: BTreeMap::new(), + } } - Ok(()) -} + fn read_file(path: &Path) -> Result<Self> { + let file = fs::OpenOptions::new().read(true).open(path)?; + let reader = io::BufReader::new(file); + let mut map = BTreeMap::new(); + for line in reader.lines() { + let line = line?; + let (timestamp, relpath) = line + .split_once('\t') + .ok_or_else(|| eyre::eyre!("unable to parse fingerprint line '{line}'"))?; + let modified = time_from_millis(timestamp.parse::<u64>()?); + map.insert(relpath.to_owned(), modified); + } -type FingerprintMap = BTreeMap<String, time::SystemTime>; - -fn parse_project_fingerprint(path: &Path) -> Result<FingerprintMap> { - let epoch = time::SystemTime::UNIX_EPOCH; - let file = fs::OpenOptions::new().read(true).open(path)?; - let reader = io::BufReader::new(file); - let mut result = BTreeMap::new(); - for line in reader.lines() { - let line = line?; - let (timestamp, relpath) = line - .split_once('\t') - .ok_or_else(|| eyre::eyre!("unable to parse fingerprint line '{line}'"))?; - let modified = epoch + time::Duration::from_millis(timestamp.parse::<u64>()?); - result.insert(relpath.to_owned(), modified); + Ok(Self { map }) } - Ok(result) -} + fn write_file(&self, path: &Path) -> Result<()> { + let mut file = fs::OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(path)?; + for (relpath, modified) in &self.map { + let timestamp = time_to_millis(modified)?; + writeln!(file, "{timestamp}\t{relpath}")?; + } -fn write_project_fingerprint(path: &Path, fingerprint: &FingerprintMap) -> Result<()> { - let epoch = time::SystemTime::UNIX_EPOCH; - let mut file = fs::OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(path)?; - for (relpath, modified) in fingerprint { - let timestamp = modified.duration_since(epoch)?.as_millis() as u64; - writeln!(file, "{timestamp}\t{relpath}")?; + Ok(()) } - Ok(()) -} - -fn read_dir_fingerprint( - home: &Path, - path: &Path, - map: &mut FingerprintMap, - copy_cache: bool, -) -> Result<()> { - let epoch = time::SystemTime::UNIX_EPOCH; - for entry in fs::read_dir(path)? { - let file = entry?; - let file_type = file.file_type()?; - // only parse known files types: 0 or 1 of these tests can pass. - if file_type.is_dir() { - if copy_cache || !is_cachedir(&file) { - read_dir_fingerprint(home, &path.join(file.file_name()), map, copy_cache)?; + fn _read_dir(&mut self, home: &Path, path: &Path, copy_cache: bool) -> Result<()> { + for entry in fs::read_dir(path)? 
{ + let file = entry?; + let file_type = file.file_type()?; + // only parse known file types: 0 or 1 of these tests can pass. + if file_type.is_dir() { + if copy_cache || !is_cachedir(&file) { + self._read_dir(home, &path.join(file.file_name()), copy_cache)?; + } + } else if file_type.is_file() || file_type.is_symlink() { + // we're mounting to the same location, so this should be fine. + // we need to round the modified date to millis. + let modified = file.metadata()?.modified()?; + let rounded = time_from_millis(time_to_millis(&modified)?); + let relpath = file.path().strip_prefix(home)?.as_posix_relative()?; + self.map.insert(relpath, rounded); } - } else if file_type.is_file() || file_type.is_symlink() { - // we're mounting to the same location, so this should fine - // we need to round the modified date to millis. - let modified = file.metadata()?.modified()?; - let millis = modified.duration_since(epoch)?.as_millis() as u64; - let rounded = epoch + time::Duration::from_millis(millis); - let relpath = file.path().strip_prefix(home)?.as_posix()?; - map.insert(relpath, rounded); } - } - - Ok(()) -} -fn get_project_fingerprint(home: &Path, copy_cache: bool) -> Result<FingerprintMap> { - let mut result = BTreeMap::new(); - read_dir_fingerprint(home, home, &mut result, copy_cache)?; - Ok(result) -} - -fn get_fingerprint_difference<'a, 'b>( - previous: &'a FingerprintMap, - current: &'b FingerprintMap, -) -> (Vec<&'b str>, Vec<&'a str>) { - // this can be added or updated - let changed: Vec<&str> = current - .iter() - .filter(|(k, v1)| previous.get(*k).map_or(true, |v2| v1 != &v2)) - .map(|(k, _)| k.as_str()) - .collect(); - let removed: Vec<&str> = previous - .iter() - .filter(|(k, _)| !current.contains_key(*k)) - .map(|(k, _)| k.as_str()) - .collect(); - (changed, removed) -} - -// copy files for a docker volume, for remote host support -// provides a list of files relative to src. -fn copy_volume_file_list( - engine: &Engine, - container: &str, - src: &Path, - dst: &Path, - files: &[&str], - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - // SAFETY: safe, single-threaded execution. - let tempdir = unsafe { temp::TempDir::new()? }; - let temppath = tempdir.path(); - for file in files { - let src_path = src.join(file); - let dst_path = temppath.join(file); - fs::create_dir_all(dst_path.parent().expect("must have parent"))?; - fs::copy(&src_path, &dst_path)?; - } - copy_volume_files(engine, container, temppath, dst, msg_info) -} - -// removed files from a docker volume, for remote host support -// provides a list of files relative to src. -fn remove_volume_file_list( - engine: &Engine, - container: &str, - dst: &Path, - files: &[&str], - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - const PATH: &str = "/tmp/remove_list"; - let mut script = vec![]; - if msg_info.is_verbose() { - script.push("set -x".to_owned()); } - script.push(format!( - "cat \"{PATH}\" | while read line; do - rm -f \"${{line}}\" -done -rm \"{PATH}\" -" - )); + fn read_dir(home: &Path, copy_cache: bool) -> Result<Self> { + let mut result = Fingerprint::new(); + result._read_dir(home, home, copy_cache)?; + Ok(result) + }
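For readers skimming this hunk: the fingerprint file that `read_file` and `write_file` exchange is a plain tab-separated format, one `millis<TAB>relative-path` line per tracked file. A minimal standalone sketch of the parsing side (the function name and sample data are illustrative, not part of this diff):

```rust
use std::collections::BTreeMap;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

// Parse one fingerprint line of the form "<millis>\t<relative path>".
fn parse_line(line: &str) -> Option<(String, SystemTime)> {
    let (timestamp, relpath) = line.split_once('\t')?;
    let millis: u64 = timestamp.parse().ok()?;
    Some((relpath.to_owned(), UNIX_EPOCH + Duration::from_millis(millis)))
}

fn main() {
    let mut map = BTreeMap::new();
    for line in "1650000000000\tsrc/main.rs\n1650000001000\tCargo.toml".lines() {
        if let Some((path, modified)) = parse_line(line) {
            map.insert(path, modified);
        }
    }
    assert_eq!(map.len(), 2);
}
```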
+ + // returns to_copy (added + modified) and to_remove (removed). + fn difference<'a, 'b>(&'a self, current: &'b Fingerprint) -> (Vec<&'b str>, Vec<&'a str>) { + let to_copy: Vec<&str> = current + .map + .iter() + .filter(|(k, v1)| self.map.get(*k).map_or(true, |v2| v1 != &v2)) + .map(|(k, _)| k.as_str()) + .collect(); + let to_remove: Vec<&str> = self + .map + .iter() + .filter(|(k, _)| !current.map.contains_key(*k)) + .map(|(k, _)| k.as_str()) + .collect(); + (to_copy, to_remove) + } +} + +impl QualifiedToolchain { + pub fn unique_toolchain_identifier(&self) -> Result<String> { + // try to get the commit hash for the current toolchain, if possible + // if not, get the default rustc and use the path hash for uniqueness + let commit_hash = if let Some(version) = self.rustc_version_string()? { + rustc::hash_from_version_string(&version, 1) + } else { + rustc::version_meta()?.commit_hash() + }; - // SAFETY: safe, single-threaded execution. - let mut tempfile = unsafe { temp::TempFile::new()? }; - for file in files { - writeln!(tempfile.file(), "{}", dst.join(file).as_posix()?)?; + let toolchain_name = self + .get_sysroot() + .file_name() + .expect("should be able to get toolchain name") + .to_utf8()?; + let toolchain_hash = path_hash(self.get_sysroot(), PATH_HASH_SHORT)?; + Ok(format!( + "{VOLUME_PREFIX}{toolchain_name}-{toolchain_hash}-{commit_hash}" + )) } - // need to avoid having hundreds of files on the command, so - // just provide a single file name. - subcommand_or_exit(engine, "cp")? - .arg(tempfile.path()) - .arg(format!("{container}:{PATH}")) - .run_and_get_status(msg_info, true)?; - - subcommand_or_exit(engine, "exec")? - .arg(container) - .args(&["sh", "-c", &script.join("\n")]) - .run_and_get_status(msg_info, true) -} - -fn copy_volume_container_project( - engine: &Engine, - container: &str, - src: &Path, - dst: &Path, - volume: &VolumeId, - copy_cache: bool, - msg_info: &mut MessageInfo, -) -> Result<()> { - let copy_all = |info: &mut MessageInfo| { - if copy_cache { - copy_volume_files(engine, container, src, dst, info) - } else { - copy_volume_files_nocache(engine, container, src, dst, true, info) - } - }; - match volume { - VolumeId::Keep(_) => { - let parent = temp::dir()?; - fs::create_dir_all(&parent)?; - let fingerprint = parent.join(container); - let current = get_project_fingerprint(src, copy_cache)?; - // need to check if the container path exists, otherwise we might - // have stale data: the persistent volume was deleted & recreated. - if fingerprint.exists() && container_path_exists(engine, container, dst, msg_info)? { - let previous = parse_project_fingerprint(&fingerprint)?; - let (changed, removed) = get_fingerprint_difference(&previous, &current); - write_project_fingerprint(&fingerprint, &current)?; - - if !changed.is_empty() { - copy_volume_file_list(engine, container, src, dst, &changed, msg_info)?; - } - if !removed.is_empty() { - remove_volume_file_list(engine, container, dst, &removed, msg_info)?; - } - } else { - write_project_fingerprint(&fingerprint, &current)?; - copy_all(msg_info)?; - } - } - VolumeId::Discard => { - copy_all(msg_info)?; - }
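The add/modify/remove split used by both the old `get_fingerprint_difference` and the new `Fingerprint::difference` is easy to demonstrate in isolation. A self-contained sketch under simplified types (plain `u64` stands in for the rounded `SystemTime` values; names are illustrative):

```rust
use std::collections::BTreeMap;

// Given a previous and current snapshot, files are copied when added or
// modified, and removed when they vanish from the current snapshot.
fn difference<'a, 'b>(
    previous: &'a BTreeMap<String, u64>,
    current: &'b BTreeMap<String, u64>,
) -> (Vec<&'b str>, Vec<&'a str>) {
    let to_copy = current
        .iter()
        .filter(|(k, v)| previous.get(*k).map_or(true, |old| old != *v))
        .map(|(k, _)| k.as_str())
        .collect();
    let to_remove = previous
        .iter()
        .filter(|(k, _)| !current.contains_key(*k))
        .map(|(k, _)| k.as_str())
        .collect();
    (to_copy, to_remove)
}

fn main() {
    let previous = BTreeMap::from([("a.rs".to_owned(), 1), ("b.rs".to_owned(), 1)]);
    let current = BTreeMap::from([("a.rs".to_owned(), 2), ("c.rs".to_owned(), 1)]);
    let (to_copy, to_remove) = difference(&previous, &current);
    assert_eq!(to_copy, ["a.rs", "c.rs"]); // "a.rs" modified, "c.rs" added
    assert_eq!(to_remove, ["b.rs"]);
}
```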
+ // unique identifier for a given container. allows the ID to + // be generated outside a rust package and run multiple times. + pub fn unique_container_identifier(&self, triple: &TargetTriple) -> Result<String> { + let toolchain_id = self.unique_toolchain_identifier()?; + let cwd_path = path_hash(&env::current_dir()?, PATH_HASH_SHORT)?; + let system_time = now_as_millis()?; + Ok(format!("{toolchain_id}-{triple}-{cwd_path}-{system_time}")) } - Ok(()) -} - -fn run_and_get_status( - engine: &Engine, - args: &[&str], - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - command(engine) - .args(args) - .run_and_get_status(msg_info, true) -} - -fn run_and_get_output( - engine: &Engine, - args: &[&str], - msg_info: &mut MessageInfo, -) -> Result<Output> { - command(engine).args(args).run_and_get_output(msg_info) -} - -pub fn volume_create( - engine: &Engine, - volume: &str, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - run_and_get_status(engine, &["volume", "create", volume], msg_info) -} - -pub fn volume_rm(engine: &Engine, volume: &str, msg_info: &mut MessageInfo) -> Result<ExitStatus> { - run_and_get_status(engine, &["volume", "rm", volume], msg_info) -} - -pub fn volume_exists(engine: &Engine, volume: &str, msg_info: &mut MessageInfo) -> Result<bool> { - run_and_get_output(engine, &["volume", "inspect", volume], msg_info) - .map(|output| output.status.success()) -} - -pub fn container_stop( - engine: &Engine, - container: &str, - timeout: u32, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - run_and_get_status( - engine, - &["stop", container, "--time", &timeout.to_string()], - msg_info, - ) -} - -pub fn container_stop_default( - engine: &Engine, - container: &str, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - // we want a faster timeout, since this might happen in signal - // handler. our containers normally clean up pretty fast, it's - // only without a pseudo-tty that they don't. - container_stop(engine, container, DEFAULT_TIMEOUT, msg_info) -} - -// if stop succeeds without a timeout, this can have a spurious error -// that is, if the container no longer exists. just silence this. -pub fn container_rm( - engine: &Engine, - container: &str, - msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { - run_and_get_output(engine, &["rm", container], msg_info).map(|output| output.status) -} - -pub fn container_state( - engine: &Engine, - container: &str, - msg_info: &mut MessageInfo, -) -> Result<ContainerState> { - let stdout = command(engine) - .args(&["ps", "-a"]) - .args(&["--filter", &format!("name={container}")]) - .args(&["--format", "{{.State}}"]) - .run_and_get_stdout(msg_info)?; - ContainerState::new(stdout.trim()) -} - -pub fn unique_toolchain_identifier(sysroot: &Path) -> Result<String> { - // try to get the commit hash for the currently toolchain, if possible - // if not, get the default rustc and use the path hash for uniqueness - let commit_hash = if let Some(version) = rustup::rustc_version_string(sysroot)?
{ - rustc::hash_from_version_string(&version, 1) - } else { - rustc::version_meta()?.commit_hash() - }; - - let toolchain_name = sysroot - .file_name() - .expect("should be able to get toolchain name") - .to_utf8()?; - let toolchain_hash = path_hash(sysroot)?; - Ok(format!( - "cross-{toolchain_name}-{toolchain_hash}-{commit_hash}" - )) -} - -// unique identifier for a given project -pub fn unique_container_identifier( - target: &Target, - metadata: &CargoMetadata, - dirs: &Directories, -) -> Result<String> { - let workspace_root = &metadata.workspace_root; - let package = metadata - .packages - .iter() - .find(|p| { - p.manifest_path - .parent() - .expect("manifest path should have a parent directory") - == workspace_root - }) - .unwrap_or_else(|| { - metadata - .packages - .get(0) - .expect("should have at least 1 package") - }); - - let name = &package.name; - let triple = target.triple(); - let toolchain_id = unique_toolchain_identifier(&dirs.sysroot)?; - let project_hash = path_hash(&package.manifest_path)?; - Ok(format!("{toolchain_id}-{triple}-{name}-{project_hash}")) -} - -fn mount_path(val: &Path) -> Result<String> { - let host_path = file::canonicalize(val)?; - canonicalize_mount_path(&host_path) + // unique identifier for a given mounted volume + pub fn unique_mount_identifier(&self, path: &Path) -> Result<String> { + let toolchain_id = self.unique_toolchain_identifier()?; + let mount_hash = path_hash(path, PATH_HASH_UNIQUE)?; + Ok(format!("{toolchain_id}-{mount_hash}")) + } } pub(crate) fn run( options: DockerOptions, paths: DockerPaths, args: &[String], + subcommand: Option<Subcommand>, msg_info: &mut MessageInfo, -) -> Result<ExitStatus> { +) -> Result<Option<ExitStatus>> { let engine = &options.engine; let target = &options.target; - let dirs = &paths.directories; + let toolchain_dirs = paths.directories.toolchain_directories(); + let package_dirs = paths.directories.package_directories(); let mount_prefix = MOUNT_PREFIX; + if options.in_docker() { + msg_info.warn("remote and docker-in-docker are unlikely to work together when using cross. remote cross uses data volumes, so docker-in-docker should not be required.")?; + } + // the logic is broken into the following steps // 1. get our unique identifiers and cleanup from a previous run. // 2. if not using persistent volumes, create a data volume @@ -886,17 +704,33 @@ pub(crate) fn run( // this can happen if we didn't gracefully exit before // note that since we use `docker run --rm`, it's very // unlikely the container state existed before.
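Step 1 below relies on the identifier scheme introduced above: volumes are named from the toolchain, containers additionally encode the target triple, the working directory, and the launch time. A minimal sketch of how the pieces compose (the `short_hash` helper is a stand-in for the crate's `path_hash`, and all values are illustrative):

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
use std::time::{SystemTime, UNIX_EPOCH};

const VOLUME_PREFIX: &str = "cross-";

// stand-in for the crate's path hashing; only the output shape matters here
fn short_hash(s: &str) -> String {
    let mut hasher = DefaultHasher::new();
    s.hash(&mut hasher);
    format!("{:05x}", hasher.finish() & 0xfffff)
}

fn main() {
    // volume identifier: prefix + toolchain name + sysroot hash + rustc commit
    let toolchain_id = format!(
        "{VOLUME_PREFIX}{}-{}-{}",
        "stable-x86_64-unknown-linux-gnu",
        short_hash("/home/user/.rustup/toolchains/stable-x86_64-unknown-linux-gnu"),
        "a8314ca7" // hypothetical rustc commit hash
    );
    // container identifier: toolchain id + triple + cwd hash + timestamp
    let millis = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_millis();
    let container_id = format!(
        "{toolchain_id}-{}-{}-{millis}",
        "aarch64-unknown-linux-gnu",
        short_hash("/project")
    );
    println!("{container_id}");
}
```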
- let toolchain_id = unique_toolchain_identifier(&dirs.sysroot)?; - let container = unique_container_identifier(target, &paths.metadata, dirs)?; - let volume = VolumeId::create(engine, &toolchain_id, msg_info)?; - let state = container_state(engine, &container, msg_info)?; + let toolchain_id = toolchain_dirs.unique_toolchain_identifier()?; + let container_id = toolchain_dirs.unique_container_identifier(target.target())?; + let volume = { + let existing = DockerVolume::existing(engine, toolchain_dirs.toolchain(), msg_info)?; + if existing.iter().any(|v| v == &toolchain_id) { + VolumeId::Keep(toolchain_id) + } else { + let partial = format!("{VOLUME_PREFIX}{}", toolchain_dirs.toolchain()); + if existing.iter().any(|v| v.starts_with(&partial)) { + msg_info.warn(format_args!( + "a persistent volume does not exist for `{0}`, but there is a volume for a different version.\n > Create a new volume with `cross-util volumes create --toolchain {0}`", + toolchain_dirs.toolchain() + ))?; + } + VolumeId::Discard + } + }; + + let container = DockerContainer::new(engine, &container_id); + let state = container.state(msg_info)?; if !state.is_stopped() { - msg_info.warn(format_args!("container {container} was running."))?; - container_stop_default(engine, &container, msg_info)?; + msg_info.warn(format_args!("container {container_id} was running."))?; + container.stop_default(msg_info)?; } if state.exists() { - msg_info.warn(format_args!("container {container} was exited."))?; - container_rm(engine, &container, msg_info)?; + msg_info.warn(format_args!("container {container_id} was exited."))?; + container.remove(msg_info)?; } // 2. create our volume to copy all our data over to @@ -904,35 +738,37 @@ pub(crate) fn run( // if we're using a discarded volume. // 3. create our start container command here - let mut docker = subcommand(engine, "run"); - docker_userns(&mut docker); - docker.args(&["--name", &container]); + let mut docker = engine.subcommand("run"); + docker.add_userns(); + options + .image + .platform + .specify_platform(&options.engine, &mut docker); + docker.args(["--name", &container_id]); docker.arg("--rm"); - let volume_mount = match volume { - VolumeId::Keep(ref id) => format!("{id}:{mount_prefix}"), - VolumeId::Discard => mount_prefix.to_owned(), - }; - docker.args(&["-v", &volume_mount]); + docker.args(["-v", &volume.mount(mount_prefix)]); let mut volumes = vec![]; - let mount_volumes = docker_mount( - &mut docker, - &options, - &paths, - |_, val| mount_path(val), - |(src, dst)| volumes.push((src, dst)), - ) - .wrap_err("could not determine mount points")?; - - docker_seccomp(&mut docker, engine.kind, target, &paths.metadata) + docker + .add_mounts( + &options, + &paths, + |_, _, _| Ok(()), + |(src, dst)| volumes.push((src, dst)), + msg_info, + ) + .wrap_err("could not determine mount points")?; + + docker + .add_seccomp(engine.kind, target, &paths.metadata) + .wrap_err("when copying seccomp profile")?; // Prevent `bin` from being mounted inside the Docker container. - docker.args(&["-v", &format!("{mount_prefix}/cargo/bin")]); + docker.args(["-v", &format!("{mount_prefix}/cargo/bin")]); // When running inside NixOS or using Nix packaging we need to add the Nix // Store to the running container so it can load the needed binaries.
- if let Some(ref nix_store) = dirs.nix_store { + if let Some(nix_store) = toolchain_dirs.nix_store() { let nix_string = nix_store.to_utf8()?; volumes.push((nix_string.to_owned(), nix_string.to_owned())); } @@ -943,128 +779,96 @@ pub(crate) fn run( docker.arg("-t"); } - docker.arg(&image_name(&options.config, target)?); + let mut image_name = options.image.name.clone(); + + if options.needs_custom_image() { + image_name = options + .custom_image_build(&paths, msg_info) + .wrap_err("when building custom image")?; + } + + docker.arg(&image_name); + if !is_tty { // ensure the process never exits until we stop it // we only need this infinite loop if we don't allocate // a TTY. this has a few issues though: now, the // container no longer responds to signals, so the // container will need to be sig-killed. - docker.args(&["sh", "-c", "sleep infinity"]); + docker.args(["sh", "-c", "sleep infinity"]); } // store first, since failing to non-existing container is fine - create_container_deleter(engine.clone(), container.clone()); + ChildContainer::create(engine.clone(), container_id.clone())?; docker.run_and_get_status(msg_info, true)?; // 4. copy all mounted volumes over + let data_volume = ContainerDataVolume::new(engine, &container_id, toolchain_dirs); let copy_cache = env::var("CROSS_REMOTE_COPY_CACHE") .map(|s| bool_from_envvar(&s)) .unwrap_or_default(); - let copy = |src, dst: &PathBuf, info: &mut MessageInfo| { - if copy_cache { - copy_volume_files(engine, &container, src, dst, info) - } else { - copy_volume_files_nocache(engine, &container, src, dst, true, info) - } + let copy = |src, reldst: &str, info: &mut MessageInfo| { + data_volume.copy_mount(src, reldst, mount_prefix, &volume, copy_cache, info) }; - let mount_prefix_path = mount_prefix.as_ref(); if let VolumeId::Discard = volume { - copy_volume_container_xargo( - engine, - &container, - &dirs.xargo, - target, - mount_prefix_path, - msg_info, - ) - .wrap_err("when copying xargo")?; - copy_volume_container_cargo( - engine, - &container, - &dirs.cargo, - mount_prefix_path, - false, - msg_info, - ) - .wrap_err("when copying cargo")?; - copy_volume_container_rust( - engine, - &container, - &dirs.sysroot, - target, - mount_prefix_path, - false, - msg_info, - ) - .wrap_err("when copying rust")?; + data_volume + .copy_xargo(mount_prefix, msg_info) + .wrap_err("when copying xargo")?; + data_volume + .copy_cargo(mount_prefix, false, msg_info) + .wrap_err("when copying cargo")?; + data_volume + .copy_rust(Some(target.target()), mount_prefix, msg_info) + .wrap_err("when copying rust")?; } else { // need to copy over the target triple if it hasn't been previously copied - copy_volume_container_rust_triple( - engine, - &container, - &dirs.sysroot, - target.triple(), - mount_prefix_path, - true, - msg_info, - ) - .wrap_err("when copying rust target files")?; - } - let mount_root = if mount_volumes { - // cannot panic: absolute unix path, must have root - let rel_mount_root = dirs - .mount_root - .strip_prefix('/') - .expect("mount root should be absolute"); - let mount_root = mount_prefix_path.join(rel_mount_root); - if !rel_mount_root.is_empty() { - create_volume_dir( - engine, - &container, - mount_root - .parent() - .expect("mount root should have a parent directory"), + data_volume + .copy_rust_triple(target.target(), mount_prefix, true, msg_info) + .wrap_err("when copying rust target files")?; + } + // cannot panic: absolute unix path, must have root + let rel_mount_root = package_dirs + .mount_root() + .strip_prefix('/') + .expect("mount 
root should be absolute"); + if !rel_mount_root.is_empty() { + data_volume + .create_dir( + posix_parent(rel_mount_root).expect("mount root should have a parent directory"), + mount_prefix, msg_info, ) .wrap_err("when creating mount root")?; - } - mount_root - } else { - mount_prefix_path.join("project") - }; - copy_volume_container_project( - engine, - &container, - &dirs.host_root, - &mount_root, - &volume, - copy_cache, - msg_info, - ) - .wrap_err("when copying project")?; - + } + copy(package_dirs.host_root(), rel_mount_root, msg_info).wrap_err("when copying project")?; + let sysroot = toolchain_dirs.get_sysroot().to_owned(); let mut copied = vec![ - (&dirs.xargo, mount_prefix_path.join("xargo")), - (&dirs.cargo, mount_prefix_path.join("cargo")), - (&dirs.sysroot, mount_prefix_path.join("rust")), - (&dirs.host_root, mount_root.clone()), + ( + toolchain_dirs.xargo(), + toolchain_dirs.xargo_mount_path_relative()?, + ), + ( + toolchain_dirs.cargo(), + toolchain_dirs.cargo_mount_path_relative()?, + ), + (&sysroot, toolchain_dirs.sysroot_mount_path_relative()?), + (package_dirs.host_root(), rel_mount_root.to_owned()), ]; let mut to_symlink = vec![]; - let target_dir = file::canonicalize(&dirs.target)?; - let target_dir = if let Ok(relpath) = target_dir.strip_prefix(&dirs.host_root) { - mount_root.join(relpath) + let target_dir = file::canonicalize(package_dirs.target())?; + let target_dir = if let Ok(relpath) = target_dir.strip_prefix(package_dirs.host_root()) { + relpath.as_posix_relative()? } else { // outside project, need to copy the target data over // only do if we're copying over cached files. - let target_dir = mount_prefix_path.join("target"); + let target_dir = "target".to_owned(); if copy_cache { - copy(&dirs.target, &target_dir, msg_info)?; + copy(package_dirs.target(), &target_dir, msg_info)?; } else { - create_volume_dir(engine, &container, &target_dir, msg_info)?; + data_volume.create_dir(&target_dir, mount_prefix, msg_info)?; } - copied.push((&dirs.target, target_dir.clone())); + copied.push((package_dirs.target(), target_dir.clone())); target_dir }; for (src, dst) in &volumes { @@ -1073,56 +877,62 @@ pub(crate) fn run( // path has already been copied over let relpath = src .strip_prefix(psrc) - .expect("source should start with prefix"); - to_symlink.push((pdst.join(relpath), dst)); + .expect("source should start with prefix") + .as_posix_relative()?; + to_symlink.push((format!("{pdst}/{relpath}"), dst)); } else { - let rel_dst = dst + let reldst = dst .strip_prefix('/') .expect("destination should be absolute"); - let mount_dst = mount_prefix_path.join(rel_dst); - if !rel_dst.is_empty() { - create_volume_dir( - engine, - &container, - mount_dst - .parent() - .expect("destination should have a parent directory"), + if !reldst.is_empty() { + data_volume.create_dir( + posix_parent(reldst).expect("destination should have a parent directory"), + mount_prefix, msg_info, )?; } - copy(src, &mount_dst, msg_info)?; + copy(src, reldst, msg_info)?; } } - // `clean` doesn't handle symlinks: it will just unlink the target - // directory, so we should just substitute it our target directory - // for it. 
we'll still have the same end behavior - let mut final_args = vec![]; - let mut iter = args.iter().cloned(); - let mut has_target_dir = false; - let target_dir_string = target_dir.as_posix()?; - while let Some(arg) = iter.next() { - if arg == "--target-dir" { - has_target_dir = true; - final_args.push(arg); - if iter.next().is_some() { - final_args.push(target_dir_string.clone()); - } - } else if arg.starts_with("--target-dir=") { - has_target_dir = true; - if arg.split_once('=').is_some() { - final_args.push(format!("--target-dir={target_dir_string}")); + let mut cmd = options.command_variant.safe_command(); + + if msg_info.should_fail() { + return Ok(None); + } + + if !options.command_variant.is_shell() { + // `clean` doesn't handle symlinks: it will just unlink the target + // directory, so we should just substitute our target directory + // for it. we'll still have the same end behavior + let mut final_args = vec![]; + let mut iter = args.iter().cloned(); + let mut has_target_dir = false; + while let Some(arg) = iter.next() { + if arg == "--target-dir" { + has_target_dir = true; + final_args.push(arg); + if iter.next().is_some() { + final_args.push(target_dir.clone()); + } + } else if arg.starts_with("--target-dir=") { + has_target_dir = true; + if arg.split_once('=').is_some() { + final_args.push(format!("--target-dir={target_dir}")); + } + } else { + final_args.push(arg); } - } else { - final_args.push(arg); } + if !has_target_dir && subcommand.map_or(true, |s| s.needs_target_in_command()) { + final_args.push("--target-dir".to_owned()); + final_args.push(target_dir.clone()); + } + + cmd.args(final_args); + } else { + cmd.args(args); } - if !has_target_dir { - final_args.push("--target-dir".to_owned()); - final_args.push(target_dir_string); - } - let mut cmd = cargo_safe_command(options.uses_xargo); - cmd.args(final_args); // 5. create symlinks for copied data let mut symlink = vec!["set -e pipefail".to_owned()]; @@ -1156,25 +966,28 @@ symlink_recurse \"${{prefix}}\" " )); for (src, dst) in to_symlink { - symlink.push(format!("ln -s \"{}\" \"{}\"", src.as_posix()?, dst)); + symlink.push(format!("ln -s \"{src}\" \"{dst}\"",)); } subcommand_or_exit(engine, "exec")? - .arg(&container) - .args(&["sh", "-c", &symlink.join("\n")]) + .arg(&container_id) + .args(["sh", "-c", &symlink.join("\n")]) .run_and_get_status(msg_info, false) .wrap_err("when creating symlinks to provide consistent host/mount paths")?; // 6. execute our cargo command inside the container - let mut docker = subcommand(engine, "exec"); - docker_user_id(&mut docker, engine.kind); - docker_envvars(&mut docker, &options.config, target, msg_info)?; - docker_cwd(&mut docker, &paths, mount_volumes)?; - docker.arg(&container); - docker.args(&["sh", "-c", &format!("PATH=$PATH:/rust/bin {:?}", cmd)]); + let mut docker = engine.subcommand("exec"); + docker.add_user_id(engine.is_rootless); + docker.add_envvars(&options, toolchain_dirs, msg_info)?; + docker.add_cwd(&paths)?; + docker.arg(&container_id); + docker.add_build_command(toolchain_dirs, &cmd); + + if options.interactive { + docker.arg("-i"); + } + bail_container_exited!(); - let status = docker - .run_and_get_status(msg_info, false) - .map_err(Into::into); + let status = docker.run_and_get_status(msg_info, false); // 7. copy data from our target dir back to host // this might not exist if we ran `clean`.
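The `--target-dir` rewriting in the hunk above is self-contained enough to demonstrate on its own. A sketch of the same loop as a free function, with a small usage check (the function name and test values are illustrative, not part of the diff):

```rust
// Rewrite any `--target-dir` argument (flag or `=` form) to a fixed
// container-side directory, appending one if the caller didn't pass any.
fn rewrite_target_dir(args: &[String], target_dir: &str) -> Vec<String> {
    let mut out = vec![];
    let mut iter = args.iter().cloned();
    let mut has_target_dir = false;
    while let Some(arg) = iter.next() {
        if arg == "--target-dir" {
            has_target_dir = true;
            out.push(arg);
            // consume the old value and substitute ours
            if iter.next().is_some() {
                out.push(target_dir.to_owned());
            }
        } else if arg.starts_with("--target-dir=") {
            has_target_dir = true;
            out.push(format!("--target-dir={target_dir}"));
        } else {
            out.push(arg);
        }
    }
    if !has_target_dir {
        out.push("--target-dir".to_owned());
        out.push(target_dir.to_owned());
    }
    out
}

fn main() {
    let args = vec!["build".to_owned(), "--target-dir=/tmp/old".to_owned()];
    assert_eq!(
        rewrite_target_dir(&args, "/target"),
        ["build", "--target-dir=/target"]
    );
}
```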
@@ -1182,13 +995,16 @@ symlink_recurse \"${{prefix}}\" .map(|s| bool_from_envvar(&s)) .unwrap_or_default(); bail_container_exited!(); - if !skip_artifacts && container_path_exists(engine, &container, &target_dir, msg_info)? { + let mount_target_dir = format!("{}/{}", package_dirs.mount_root(), target_dir); + if !skip_artifacts + && data_volume.container_path_exists(&mount_target_dir, mount_prefix, msg_info)? + { subcommand_or_exit(engine, "cp")? .arg("-a") - .arg(&format!("{container}:{}", target_dir.as_posix()?)) + .arg(&format!("{container_id}:{mount_target_dir}",)) .arg( - &dirs - .target + package_dirs + .target() .parent() .expect("target directory should have a parent"), ) @@ -1196,7 +1012,7 @@ symlink_recurse \"${{prefix}}\" .map_err::(Into::into)?; } - drop_container(is_tty, msg_info); + ChildContainer::finish_static(is_tty, msg_info); - status + status.map(Some) } diff --git a/src/docker/shared.rs b/src/docker/shared.rs index be23bb54b..7996e1fc0 100644 --- a/src/docker/shared.rs +++ b/src/docker/shared.rs @@ -1,48 +1,67 @@ use std::io::Write; use std::path::{Path, PathBuf}; -use std::process::Command; -use std::{env, fs}; +use std::process::{Command, ExitStatus, Output}; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::{env, fs, time}; use super::custom::{Dockerfile, PreBuild}; -use super::engine::*; -use crate::cargo::{cargo_metadata_with_args, CargoMetadata}; -use crate::config::{bool_from_envvar, Config}; +use super::image::PossibleImage; +use super::Image; +use super::PROVIDED_IMAGES; +use super::{engine::*, ProvidedImage}; +use crate::cargo::CargoMetadata; +use crate::config::Config; use crate::errors::*; use crate::extensions::{CommandExt, SafeCommand}; use crate::file::{self, write_file, PathExt, ToUtf8}; use crate::id; -use crate::rustc::{self, VersionMetaExt}; -use crate::shell::{MessageInfo, Verbosity}; -use crate::Target; +use crate::rustc::QualifiedToolchain; +use crate::shell::{ColorChoice, MessageInfo, Verbosity}; +use crate::{CommandVariant, OutputExt, Target, TargetTriple}; + +use rustc_version::Version as RustcVersion; pub use super::custom::CROSS_CUSTOM_DOCKERFILE_IMAGE_PREFIX; pub const CROSS_IMAGE: &str = "ghcr.io/cross-rs"; // note: this is the most common base image for our images -pub const UBUNTU_BASE: &str = "ubuntu:16.04"; -const DOCKER_IMAGES: &[&str] = &include!(concat!(env!("OUT_DIR"), "/docker-images.rs")); - -// secured profile based off the docker documentation for denied syscalls: -// https://docs.docker.com/engine/security/seccomp/#significant-syscalls-blocked-by-the-default-profile -// note that we've allow listed `clone` and `clone3`, which is necessary -// to fork the process, and which podman allows by default. 
-pub(crate) const SECCOMP: &str = include_str!("seccomp.json"); +pub const UBUNTU_BASE: &str = "ubuntu:20.04"; +pub const DEFAULT_IMAGE_VERSION: &str = if crate::commit_info().is_empty() { + env!("CARGO_PKG_VERSION") +} else { + "main" +}; #[derive(Debug)] pub struct DockerOptions { pub engine: Engine, pub target: Target, pub config: Config, - pub uses_xargo: bool, + pub image: Image, + pub command_variant: CommandVariant, + // not all toolchains will provide this + pub rustc_version: Option<RustcVersion>, + pub interactive: bool, } impl DockerOptions { - pub fn new(engine: Engine, target: Target, config: Config, uses_xargo: bool) -> DockerOptions { + pub fn new( + engine: Engine, + target: Target, + config: Config, + image: Image, + cargo_variant: CommandVariant, + rustc_version: Option<RustcVersion>, + interactive: bool, + ) -> DockerOptions { DockerOptions { engine, target, config, - uses_xargo, + image, + command_variant: cargo_variant, + rustc_version, + interactive, } } @@ -58,15 +77,8 @@ impl DockerOptions { #[must_use] pub fn needs_custom_image(&self) -> bool { - self.config - .dockerfile(&self.target) - .unwrap_or_default() - .is_some() - || self - .config - .pre_build(&self.target) - .unwrap_or_default() - .is_some() + self.config.dockerfile(&self.target).is_some() + || self.config.pre_build(&self.target).is_some() } pub(crate) fn custom_image_build( @@ -74,30 +86,39 @@ impl DockerOptions { paths: &DockerPaths, msg_info: &mut MessageInfo, ) -> Result<String> { - let mut image = image_name(&self.config, &self.target)?; + let mut image = self.image.clone(); + if self.target.triple() == "arm-unknown-linux-gnueabihf" { + msg_info.note("cannot install armhf system packages via apt for `arm-unknown-linux-gnueabihf`, since they are for ARMv7a targets but this target is ARMv6. installation of all packages for the armhf architecture has been blocked.")?; + } - if let Some(path) = self.config.dockerfile(&self.target)? { - let context = self.config.dockerfile_context(&self.target)?; - let name = self.config.image(&self.target)?; + if let Some(path) = self.config.dockerfile(&self.target) { + let context = self.config.dockerfile_context(&self.target); + + let is_custom_image = self.config.image(&self.target)?.is_some(); let build = Dockerfile::File { path: &path, context: context.as_deref(), - name: name.as_deref(), + name: if is_custom_image { + Some(&image.name) + } else { + None + }, + runs_with: &image.platform, }; - image = build + image.name = build .build( self, paths, self.config - .dockerfile_build_args(&self.target)? + .dockerfile_build_args(&self.target) .unwrap_or_default(), msg_info, ) .wrap_err("when building dockerfile")?; } - let pre_build = self.config.pre_build(&self.target)?; + let pre_build = self.config.pre_build(&self.target); if let Some(pre_build) = pre_build { match pre_build { @@ -119,9 +140,10 @@ impl DockerOptions { RUN chmod +x /pre-build-script RUN ./pre-build-script $CROSS_TARGET"# ), + runs_with: &image.platform, }; - image = custom + image.name = custom .build( self, paths, @@ -149,8 +171,9 @@ impl DockerOptions { ARG CROSS_CMD RUN eval "${{CROSS_CMD}}""# ), + runs_with: &image.platform, }; - image = custom + image.name = custom .build( self, paths, @@ -163,32 +186,7 @@ } } } - Ok(image) - } - - pub(crate) fn image_name(&self) -> Result<String> {
- return Ok(image); - } - - if !DOCKER_IMAGES.contains(&self.target.triple()) { - eyre::bail!( - "`cross` does not provide a Docker image for target {target}, \ - specify a custom image in `Cross.toml`.", - target = self.target - ); - } - - let version = if include_str!(concat!(env!("OUT_DIR"), "/commit-info.txt")).is_empty() { - env!("CARGO_PKG_VERSION") - } else { - "main" - }; - - Ok(format!( - "{CROSS_IMAGE}/{target}:{version}", - target = self.target - )) + Ok(image.name.clone()) } } @@ -197,7 +195,6 @@ pub struct DockerPaths { pub mount_finder: MountFinder, pub metadata: CargoMetadata, pub cwd: PathBuf, - pub sysroot: PathBuf, pub directories: Directories, } @@ -206,19 +203,24 @@ impl DockerPaths { engine: &Engine, metadata: CargoMetadata, cwd: PathBuf, - sysroot: PathBuf, + toolchain: QualifiedToolchain, + msg_info: &mut MessageInfo, ) -> Result<Self> { - let mount_finder = MountFinder::create(engine)?; - let directories = Directories::create(&mount_finder, &metadata, &cwd, &sysroot)?; + let mount_finder = MountFinder::create(engine, msg_info)?; + let (directories, metadata) = + Directories::assemble(&mount_finder, metadata, &cwd, toolchain)?; Ok(Self { mount_finder, metadata, cwd, - sysroot, directories, }) } + pub fn get_sysroot(&self) -> &Path { + self.directories.toolchain_directories().get_sysroot() + } + pub fn workspace_root(&self) -> &Path { &self.metadata.workspace_root } @@ -239,170 +241,682 @@ impl DockerPaths { } pub fn mount_cwd(&self) -> &str { - &self.directories.mount_cwd + self.directories.package_directories().mount_cwd() } pub fn host_root(&self) -> &Path { - &self.directories.host_root + self.directories.package_directories().host_root() } } #[derive(Debug)] -pub struct Directories { - pub cargo: PathBuf, - pub xargo: PathBuf, - pub target: PathBuf, - pub nix_store: Option<PathBuf>, - pub host_root: PathBuf, - // both mount fields are WSL paths on windows: they already are POSIX paths - pub mount_root: String, - pub mount_cwd: String, - pub sysroot: PathBuf, +pub struct ToolchainDirectories { + cargo: PathBuf, + xargo: PathBuf, + nix_store: Option<PathBuf>, + toolchain: QualifiedToolchain, + cargo_mount_path: String, + xargo_mount_path: String, + sysroot_mount_path: String, } -impl Directories { - pub fn create( - mount_finder: &MountFinder, - metadata: &CargoMetadata, - cwd: &Path, - sysroot: &Path, - ) -> Result<Self> { +impl ToolchainDirectories { + pub fn assemble(mount_finder: &MountFinder, toolchain: QualifiedToolchain) -> Result<Self> { let home_dir = home::home_dir().ok_or_else(|| eyre::eyre!("could not find home directory"))?; let cargo = home::cargo_home()?; let xargo = env::var_os("XARGO_HOME").map_or_else(|| home_dir.join(".xargo"), PathBuf::from); - let nix_store = env::var_os("NIX_STORE").map(PathBuf::from); - let target = &metadata.target_directory; + // NIX_STORE_DIR is an override of NIX_STORE, which is the path in derivations. + let nix_store = env::var_os("NIX_STORE_DIR") + .or_else(|| env::var_os("NIX_STORE")) + .map(PathBuf::from);
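The `NIX_STORE_DIR`-over-`NIX_STORE` resolution order introduced above is worth seeing in isolation. A minimal sketch, assuming only the two environment variables named in the diff (the helper name is illustrative):

```rust
use std::env;
use std::path::PathBuf;

// NIX_STORE_DIR (an explicit override) wins over NIX_STORE, which is
// the store path as seen inside derivations.
fn nix_store_dir() -> Option<PathBuf> {
    env::var_os("NIX_STORE_DIR")
        .or_else(|| env::var_os("NIX_STORE"))
        .map(PathBuf::from)
}

fn main() {
    env::set_var("NIX_STORE", "/nix/store");
    env::set_var("NIX_STORE_DIR", "/custom/store");
    assert_eq!(nix_store_dir(), Some(PathBuf::from("/custom/store")));
}
```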
// create the directories we are going to mount before we mount them, // otherwise `docker` will create them but they will be owned by `root` // cargo builds all intermediate directories, but fails // if it has other issues (such as permission errors). - fs::create_dir_all(&cargo)?; - fs::create_dir_all(&xargo)?; - create_target_dir(target)?; + file::create_dir_all(&cargo)?; + file::create_dir_all(&xargo)?; + if let Some(ref nix_store) = nix_store { + file::create_dir_all(nix_store)?; + } + + // get our mount paths prior to canonicalizing them + let cargo_mount_path = cargo.as_posix_absolute()?; + let xargo_mount_path = xargo.as_posix_absolute()?; + + // now that we know the paths exist, canonicalize them. this avoids creating + // directories after failed canonicalization into a shared directory. + let cargo = file::canonicalize(&cargo)?; + let xargo = file::canonicalize(&xargo)?; + + let default_nix_store = PathBuf::from("/nix/store"); + let nix_store = match nix_store { + Some(store) if store.exists() => { + let path = file::canonicalize(store)?; + Some(path) + } + Some(store) => { + eyre::bail!("unable to find provided nix-store directory {store:?}"); + } + None if cfg!(target_os = "linux") && default_nix_store.exists() => { + Some(default_nix_store) + } + None => None, + }; let cargo = mount_finder.find_mount_path(cargo); let xargo = mount_finder.find_mount_path(xargo); - let target = mount_finder.find_mount_path(target); + + // canonicalize these once to avoid syscalls + let sysroot_mount_path = toolchain.get_sysroot().as_posix_absolute()?; + + Ok(ToolchainDirectories { + cargo, + xargo, + nix_store, + toolchain, + cargo_mount_path, + xargo_mount_path, + sysroot_mount_path, + }) + } + + pub fn unique_toolchain_identifier(&self) -> Result<String> { + self.toolchain.unique_toolchain_identifier() + } + + pub fn unique_container_identifier(&self, triple: &TargetTriple) -> Result<String> { + self.toolchain.unique_container_identifier(triple) + } + + pub fn toolchain(&self) -> &QualifiedToolchain { + &self.toolchain + } + + pub fn get_sysroot(&self) -> &Path { + self.toolchain.get_sysroot() + } + + pub fn host_target(&self) -> &TargetTriple { + &self.toolchain.host().target + } + + pub fn cargo(&self) -> &Path { + &self.cargo + } + + pub fn cargo_host_path(&self) -> Result<&str> { + self.cargo.to_utf8() + } + + pub fn cargo_mount_path(&self) -> &str { + &self.cargo_mount_path + } + + pub fn xargo(&self) -> &Path { + &self.xargo + } + + pub fn xargo_host_path(&self) -> Result<&str> { + self.xargo.to_utf8() + } + + pub fn xargo_mount_path(&self) -> &str { + &self.xargo_mount_path + } + + pub fn sysroot_mount_path(&self) -> &str { + &self.sysroot_mount_path + } + + pub fn nix_store(&self) -> Option<&Path> { + self.nix_store.as_deref() + } + + pub fn cargo_mount_path_relative(&self) -> Result<String> { + self.cargo_mount_path() + .strip_prefix('/') + .map(ToOwned::to_owned) + .ok_or_else(|| eyre::eyre!("cargo directory must be relative to root")) + } + + pub fn xargo_mount_path_relative(&self) -> Result<String> { + self.xargo_mount_path() + .strip_prefix('/') + .map(ToOwned::to_owned) + .ok_or_else(|| eyre::eyre!("xargo directory must be relative to root")) + } + + pub fn sysroot_mount_path_relative(&self) -> Result<String> { + self.sysroot_mount_path() + .strip_prefix('/') + .map(ToOwned::to_owned) + .ok_or_else(|| eyre::eyre!("sysroot directory must be relative to root")) + } +} + +#[derive(Debug)] +pub struct PackageDirectories { + target: PathBuf, + host_root: PathBuf, + // both mount fields are WSL paths on windows: they already are POSIX paths + mount_root: String, + mount_cwd: String, +} + +impl PackageDirectories { + pub fn assemble( + mount_finder: &MountFinder, + metadata: CargoMetadata, + cwd: &Path, + ) -> Result<(Self, CargoMetadata)> {
+ let target = &metadata.target_directory; + // see ToolchainDirectories::assemble for creating directories + create_target_dir(target)?; // root is either workspace_root, or, if we're outside the workspace root, the current directory - let host_root = mount_finder.find_mount_path(if metadata.workspace_root.starts_with(cwd) { + let host_root = if metadata.workspace_root.starts_with(cwd) { cwd } else { &metadata.workspace_root - }); + } + .to_path_buf(); + + // on Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. + // NOTE: on unix, host root has already found the mount path + let mount_root = host_root.as_posix_absolute()?; + let mount_cwd = cwd.as_posix_absolute()?; + + Ok(( + PackageDirectories { + target: mount_finder.find_mount_path(target), + host_root, + mount_root, + mount_cwd, + }, + metadata, + )) + } - // root is either workspace_root, or, if we're outside the workspace root, the current directory - let mount_root: String; - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. - mount_root = host_root.as_wslpath()?; + pub fn target(&self) -> &Path { + &self.target + } + + pub fn host_root(&self) -> &Path { + &self.host_root + } + + pub fn mount_root(&self) -> &str { + &self.mount_root + } + + pub fn mount_cwd(&self) -> &str { + &self.mount_cwd + } +} + +#[derive(Debug)] +pub struct Directories { + toolchain: ToolchainDirectories, + package: PackageDirectories, +} + +impl Directories { + pub fn assemble( + mount_finder: &MountFinder, + metadata: CargoMetadata, + cwd: &Path, + toolchain: QualifiedToolchain, + ) -> Result<(Self, CargoMetadata)> { + let (package, metadata) = PackageDirectories::assemble(mount_finder, metadata, cwd)?; + let toolchain = ToolchainDirectories::assemble(mount_finder, toolchain)?; + + Ok((Directories { toolchain, package }, metadata)) + } + + pub fn toolchain_directories(&self) -> &ToolchainDirectories { + &self.toolchain + } + + pub fn package_directories(&self) -> &PackageDirectories { + &self.package + } +} + +#[derive(Debug, PartialEq, Eq)] +pub enum ContainerState { + Created, + Running, + Paused, + Restarting, + Dead, + Exited, + DoesNotExist, +} + +impl ContainerState { + pub fn new(state: &str) -> Result<Self> { + match state { + "created" => Ok(ContainerState::Created), + "running" => Ok(ContainerState::Running), + "paused" => Ok(ContainerState::Paused), + "restarting" => Ok(ContainerState::Restarting), + "dead" => Ok(ContainerState::Dead), + "exited" => Ok(ContainerState::Exited), + "" => Ok(ContainerState::DoesNotExist), + _ => eyre::bail!("unknown container state: got {state}"), } - #[cfg(not(target_os = "windows"))] - { - // NOTE: host root has already found the mount path - mount_root = host_root.to_utf8()?.to_owned(); + } + + #[must_use] + pub fn is_stopped(&self) -> bool { + matches!(self, Self::Exited | Self::DoesNotExist) + } + + #[must_use] + pub fn exists(&self) -> bool { + !matches!(self, Self::DoesNotExist) + } +}
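How `is_stopped` and `exists` drive the cleanup decisions in step 1 of `run` can be shown with a couple of assertions. A minimal sketch reusing the same enum shape (only the two helpers from the diff, plus illustrative checks):

```rust
#[derive(Debug, PartialEq, Eq)]
#[allow(dead_code)]
enum ContainerState {
    Created,
    Running,
    Paused,
    Restarting,
    Dead,
    Exited,
    DoesNotExist,
}

impl ContainerState {
    fn is_stopped(&self) -> bool {
        matches!(self, Self::Exited | Self::DoesNotExist)
    }
    fn exists(&self) -> bool {
        !matches!(self, Self::DoesNotExist)
    }
}

fn main() {
    // stale container from a previous run: still running => stop, then remove
    let state = ContainerState::Running;
    assert!(!state.is_stopped() && state.exists());

    // nothing left behind: neither action is needed
    let state = ContainerState::DoesNotExist;
    assert!(state.is_stopped() && !state.exists());
}
```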
// the mount directory for the data volume. +pub const MOUNT_PREFIX: &str = "/cross"; +// the prefix used when naming volumes +pub const VOLUME_PREFIX: &str = "cross-"; +// default timeout to stop a container (in seconds) +pub const DEFAULT_TIMEOUT: u32 = 2; +// instant kill in case of a non-graceful exit +pub const NO_TIMEOUT: u32 = 0; + +pub(crate) static mut CHILD_CONTAINER: ChildContainer = ChildContainer::new(); + +// we don't store a [MessageInfo] because it'd require a mutable reference; +// since we don't need the functionality behind [MessageInfo], we can just +// store its basic configuration. +pub(crate) struct ChildContainerInfo { + engine: Engine, + name: String, + timeout: u32, + color_choice: ColorChoice, + verbosity: Verbosity, +} + +// we need to specify drops for the containers, but we +// also need to ensure the drops are called on a +// termination handler. we use an atomic bool to ensure +// that the drop only gets called once, even if we have +// the signal handle invoked multiple times or it fails. +#[allow(missing_debug_implementations)] +pub struct ChildContainer { + info: Option<ChildContainerInfo>, + exists: AtomicBool, +} + +impl ChildContainer { + pub const fn new() -> ChildContainer { + ChildContainer { + info: None, + exists: AtomicBool::new(false), } - let mount_cwd = mount_finder.find_path(cwd, false)?; - let sysroot = mount_finder.find_mount_path(sysroot); + } - Ok(Directories { - cargo, - xargo, - target, - nix_store, - host_root, - mount_root, - mount_cwd, - sysroot, - }) + pub fn create(engine: Engine, name: String) -> Result<()> { + // SAFETY: guarded by an atomic swap + unsafe { + if !CHILD_CONTAINER.exists.swap(true, Ordering::SeqCst) { + CHILD_CONTAINER.info = Some(ChildContainerInfo { + engine, + name, + timeout: NO_TIMEOUT, + color_choice: ColorChoice::Never, + verbosity: Verbosity::Quiet, + }); + Ok(()) + } else { + eyre::bail!("attempted to create an already-existing container."); + } + } + } + + // the static functions have been placed beside the internal functions to + // verify the internal functions are wrapped in atomic load/stores. + + pub fn exists(&self) -> bool { + self.exists.load(Ordering::SeqCst) + } + + pub fn exists_static() -> bool { + // SAFETY: an atomic load. + unsafe { CHILD_CONTAINER.exists() } + } + + // when the `docker run` command finished. + // the container has already exited, so no cleanup required. + pub fn exit(&mut self) { + self.exists.store(false, Ordering::SeqCst); + } + + pub fn exit_static() { + // SAFETY: an atomic store. + unsafe { + CHILD_CONTAINER.exit(); + } + } + + // when the `docker exec` command finished. + pub fn finish(&mut self, is_tty: bool, msg_info: &mut MessageInfo) { + // relax the no-timeout and lack of output + // ensure we have atomic ordering + if self.exists() { + let info = self + .info + .as_mut() + .expect("since we're loaded and exist, child should not be terminated"); + if is_tty { + info.timeout = DEFAULT_TIMEOUT; + } + info.color_choice = msg_info.color_choice; + info.verbosity = msg_info.verbosity; + } + + self.terminate(); + } + + pub fn finish_static(is_tty: bool, msg_info: &mut MessageInfo) { + // SAFETY: internally guarded by an atomic load. + unsafe { + CHILD_CONTAINER.finish(is_tty, msg_info); + } + }
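The once-only cleanup idiom underpinning `ChildContainer` is an atomic swap: `swap` returns the previous value, so exactly one caller observes the transition and runs the teardown, whether it is the signal handler or `Drop`. A standalone sketch of the idiom (names and the printed message are illustrative):

```rust
use std::sync::atomic::{AtomicBool, Ordering};

static EXISTS: AtomicBool = AtomicBool::new(false);

// swap returns the previous value, so only one caller ever observes
// `true` here and runs the cleanup, even if invoked from both a
// termination handler and a Drop implementation.
fn terminate() {
    if EXISTS.swap(false, Ordering::SeqCst) {
        // stop and remove the container exactly once here
        println!("cleaning up container");
    }
}

fn main() {
    EXISTS.store(true, Ordering::SeqCst);
    terminate(); // performs the cleanup
    terminate(); // no-op: the flag was already cleared
}
```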
+ // terminate the container early. leaves the struct in a valid + // state, so it's async safe, and the container will not + // be stopped again. + pub fn terminate(&mut self) { + if self.exists.swap(false, Ordering::SeqCst) { + let info = self.info.as_mut().expect( + "since we're loaded and exist, child should not have been terminated already", + ); + let mut msg_info = MessageInfo::new(info.color_choice, info.verbosity); + let container = DockerContainer::new(&info.engine, &info.name); + container.stop(info.timeout, &mut msg_info).ok(); + container.remove(&mut msg_info).ok(); + + self.info = None; + } + } +} + +impl Drop for ChildContainer { + fn drop(&mut self) { + self.terminate(); + } +} + +#[derive(Debug)] +pub struct ContainerDataVolume<'a, 'b, 'c> { + pub(crate) engine: &'a Engine, + pub(crate) container: &'b str, + pub(crate) toolchain_dirs: &'c ToolchainDirectories, +} + +impl<'a, 'b, 'c> ContainerDataVolume<'a, 'b, 'c> { + pub const fn new( + engine: &'a Engine, + container: &'b str, + toolchain_dirs: &'c ToolchainDirectories, + ) -> Self { + Self { + engine, + container, + toolchain_dirs, + } + } +} + +#[derive(Debug, Clone)] +pub enum VolumeId { + Keep(String), + Discard, +} + +impl VolumeId { + pub fn mount(&self, mount_prefix: &str) -> String { + match self { + VolumeId::Keep(ref id) => format!("{id}:{mount_prefix}"), + VolumeId::Discard => mount_prefix.to_owned(), + } + } +} + +#[derive(Debug)] +pub struct DockerVolume<'a, 'b> { + pub(crate) engine: &'a Engine, + pub(crate) name: &'b str, +} + +impl<'a, 'b> DockerVolume<'a, 'b> { + pub const fn new(engine: &'a Engine, name: &'b str) -> Self { + Self { engine, name } + } + + #[track_caller] + pub fn create(&self, msg_info: &mut MessageInfo) -> Result<ExitStatus> { + self.engine + .run_and_get_status(&["volume", "create", self.name], msg_info) + } + + #[track_caller] + pub fn remove(&self, msg_info: &mut MessageInfo) -> Result<ExitStatus> { + self.engine + .run_and_get_status(&["volume", "rm", self.name], msg_info) + } + + #[track_caller] + pub fn exists(&self, msg_info: &mut MessageInfo) -> Result<bool> { + self.engine + .run_and_get_output(&["volume", "inspect", self.name], msg_info) + .map(|output| output.status.success()) + } + + #[track_caller] + pub fn existing( + engine: &Engine, + toolchain: &QualifiedToolchain, + msg_info: &mut MessageInfo, + ) -> Result<Vec<String>> { + let list = engine + .run_and_get_output( + &[ + "volume", + "list", + "--format", + "{{.Name}}", + "--filter", + &format!("name=^{VOLUME_PREFIX}{}", toolchain), + ], + msg_info, + )? + .stdout()?; + + if list.is_empty() { + Ok(vec![]) + } else { + Ok(list.split('\n').map(ToOwned::to_owned).collect()) + } + } +} + +#[derive(Debug)] +pub struct DockerContainer<'a, 'b> { + pub(crate) engine: &'a Engine, + pub(crate) name: &'b str, +} + +impl<'a, 'b> DockerContainer<'a, 'b> { + pub const fn new(engine: &'a Engine, name: &'b str) -> Self { + Self { engine, name } + } + + pub fn stop(&self, timeout: u32, msg_info: &mut MessageInfo) -> Result<ExitStatus> { + self.engine.run_and_get_status( + &["stop", self.name, "--time", &timeout.to_string()], + msg_info, + ) + } + + pub fn stop_default(&self, msg_info: &mut MessageInfo) -> Result<ExitStatus> { + // we want a faster timeout, since this might happen in signal + // handler. our containers normally clean up pretty fast, it's + // only without a pseudo-tty that they don't. + self.stop(DEFAULT_TIMEOUT, msg_info) + }
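The two `VolumeId::mount` forms correspond to the two `-v` argument shapes the engine accepts: `name:path` mounts a named (persistent) volume, while a bare path asks the engine for a throwaway anonymous one. A short usage sketch (the volume name is illustrative):

```rust
enum VolumeId {
    Keep(String),
    Discard,
}

impl VolumeId {
    // a named volume mounts as "name:/cross"; a discarded volume passes
    // the bare mount point so the engine creates an anonymous one.
    fn mount(&self, mount_prefix: &str) -> String {
        match self {
            VolumeId::Keep(id) => format!("{id}:{mount_prefix}"),
            VolumeId::Discard => mount_prefix.to_owned(),
        }
    }
}

fn main() {
    let keep = VolumeId::Keep("cross-stable-abc12-deadbeef".to_owned());
    assert_eq!(keep.mount("/cross"), "cross-stable-abc12-deadbeef:/cross");
    assert_eq!(VolumeId::Discard.mount("/cross"), "/cross");
}
```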
+ /// if stopping a container succeeds without a timeout, this command + /// can fail because the container no longer exists. however, if + /// the container was killed, we need to clean up the exited container. + /// just silence any warnings. + pub fn remove(&self, msg_info: &mut MessageInfo) -> Result<ExitStatus> { + self.engine + .run_and_get_output(&["rm", self.name], msg_info) + .map(|output| output.status) + } + + pub fn state(&self, msg_info: &mut MessageInfo) -> Result<ContainerState> { + let stdout = self + .engine + .command() + .args(["ps", "-a"]) + .args(["--filter", &format!("name={}", self.name)]) + .args(["--format", "{{.State}}"]) + .run_and_get_stdout(msg_info)?; + ContainerState::new(stdout.trim()) } } +pub(crate) fn time_to_millis(timestamp: &time::SystemTime) -> Result<u64> { + Ok(timestamp + .duration_since(time::SystemTime::UNIX_EPOCH)? + .as_millis() as u64) +} + +pub(crate) fn time_from_millis(millis: u64) -> time::SystemTime { + time::SystemTime::UNIX_EPOCH + time::Duration::from_millis(millis) +} + +pub(crate) fn now_as_millis() -> Result<u64> { + time_to_millis(&time::SystemTime::now()) +}
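These helpers exist because fingerprints store modification times at millisecond precision, so a freshly read `SystemTime` must be rounded the same way before it can be compared against a stored value. A self-contained sketch of the round-trip (the helper names mirror the diff; the assertion is illustrative):

```rust
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn time_to_millis(t: &SystemTime) -> u64 {
    t.duration_since(UNIX_EPOCH)
        .expect("time after epoch")
        .as_millis() as u64
}

fn time_from_millis(millis: u64) -> SystemTime {
    UNIX_EPOCH + Duration::from_millis(millis)
}

fn main() {
    // rounding truncates sub-millisecond precision, so the round-tripped
    // value is at most one millisecond behind the original.
    let now = SystemTime::now();
    let rounded = time_from_millis(time_to_millis(&now));
    assert!(now.duration_since(rounded).unwrap() < Duration::from_millis(1));
}
```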
const CACHEDIR_TAG: &str = "Signature: 8a477f597d28d172789f06886806bc55 # This file is a cache directory tag created by cross. # For information about cache directory tags see https://bford.info/cachedir/"; -fn create_target_dir(path: &Path) -> Result<()> { +pub fn create_target_dir(path: &Path) -> Result<()> { // cargo creates all paths to the target directory, and writes // a cache dir tag only if the path doesn't previously exist. if !path.exists() { - fs::create_dir_all(&path)?; + file::create_dir_all(path)?; fs::OpenOptions::new() .write(true) .create_new(true) - .open(&path.join("CACHEDIR.TAG"))? + .open(path.join("CACHEDIR.TAG"))? .write_all(CACHEDIR_TAG.as_bytes())?; } Ok(()) } -pub fn command(engine: &Engine) -> Command { - let mut command = Command::new(&engine.path); - if engine.needs_remote() { - // if we're using podman and not podman-remote, need `--remote`. - command.arg("--remote"); +impl Engine { + pub fn command(&self) -> Command { + let mut command = Command::new(&self.path); + if self.needs_remote() { + // if we're using podman and not podman-remote, need `--remote`. + command.arg("--remote"); + } + command } - command -} -pub fn subcommand(engine: &Engine, cmd: &str) -> Command { - let mut command = command(engine); - command.arg(cmd); - command -} + pub fn subcommand(&self, cmd: &str) -> Command { + let mut command = self.command(); + command.arg(cmd); + command + } -pub fn get_package_info( - engine: &Engine, - target: &str, - channel: Option<&str>, - msg_info: &mut MessageInfo, -) -> Result<(Target, CargoMetadata, Directories)> { - let target_list = msg_info.as_quiet(rustc::target_list)?; - let target = Target::from(target, &target_list); - let metadata = cargo_metadata_with_args(None, None, msg_info)? - .ok_or(eyre::eyre!("unable to get project metadata"))?; - let cwd = std::env::current_dir()?; - let host_meta = rustc::version_meta()?; - let host = host_meta.host(); - - let sysroot = rustc::get_sysroot(&host, &target, channel, msg_info)?.1; - let mount_finder = MountFinder::create(engine)?; - let dirs = Directories::create(&mount_finder, &metadata, &cwd, &sysroot)?; - - Ok((target, metadata, dirs)) -} + #[track_caller] + pub(crate) fn run_and_get_status( + &self, + args: &[&str], + msg_info: &mut MessageInfo, + ) -> Result<ExitStatus> { + self.command().args(args).run_and_get_status(msg_info, true) + } -/// Register binfmt interpreters -pub(crate) fn register(engine: &Engine, target: &Target, msg_info: &mut MessageInfo) -> Result<()> { - let cmd = if target.is_windows() { - // https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html - "mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc && \ - echo ':wine:M::MZ::/usr/bin/run-detectors:' > /proc/sys/fs/binfmt_misc/register" - } else { - "apt-get update && apt-get install --no-install-recommends --assume-yes \ - binfmt-support qemu-user-static" - }; + #[track_caller] + pub(crate) fn run_and_get_output( + &self, + args: &[&str], + msg_info: &mut MessageInfo, + ) -> Result<Output> { + self.command().args(args).run_and_get_output(msg_info) + } + + pub fn parse_opts(value: &str) -> Result<Vec<String>> { + shell_words::split(value) + .wrap_err_with(|| format!("could not parse docker opts of {}", value)) + } - let mut docker = subcommand(engine, "run"); - docker_userns(&mut docker); - docker.arg("--privileged"); - docker.arg("--rm"); - docker.arg(UBUNTU_BASE); - docker.args(&["sh", "-c", cmd]); + /// Register binfmt interpreters + pub(crate) fn register_binfmt( + &self, + target: &Target, + msg_info: &mut MessageInfo, + ) -> Result<()> { + let cmd = if target.is_windows() { + // https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html + "mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc && \ + echo ':wine:M::MZ::/usr/bin/run-detectors:' > /proc/sys/fs/binfmt_misc/register" + } else { + "apt-get update && apt-get install --no-install-recommends --assume-yes \ + binfmt-support qemu-user-static" + }; - docker.run(msg_info, false).map_err(Into::into) + let mut docker = self.subcommand("run"); + docker.add_userns(); + docker.arg("--privileged"); + docker.arg("--rm"); + docker.arg(UBUNTU_BASE); + docker.args(["sh", "-c", cmd]); + + docker.run(msg_info, false) + } } -fn validate_env_var(var: &str) -> Result<(&str, Option<&str>)> { +fn validate_env_var<'a>( + var: &'a str, + warned: &mut bool, + var_type: &'static str, + var_syntax: &'static str, + msg_info: &mut MessageInfo, +) -> Result<(&'a str, Option<&'a str>)> { let (key, value) = match var.split_once('=') { Some((key, value)) => (key, Some(value)), _ => (var, None), }; + if value.is_none() + && !*warned + && !var + .chars() + .all(|c| matches!(c, 'a'..='z' | 'A'..='Z' | '_' | '0'..='9')) + { + msg_info.warn(format_args!( + "got {var_type} of \"{var}\" which is not a valid environment variable name.
the proper syntax is {var_syntax}" + ))?; + *warned = true; + } + if key == "CROSS_RUNNER" { eyre::bail!( "CROSS_RUNNER environment variable name is reserved and cannot be pass through" @@ -412,184 +926,287 @@ fn validate_env_var(var: &str) -> Result<(&str, Option<&str>)> { Ok((key, value)) } -pub fn parse_docker_opts(value: &str) -> Result> { - shell_words::split(value).wrap_err_with(|| format!("could not parse docker opts of {}", value)) +impl CommandVariant { + pub(crate) fn safe_command(&self) -> SafeCommand { + SafeCommand::new(self.to_str()) + } } -pub(crate) fn cargo_safe_command(uses_xargo: bool) -> SafeCommand { - if uses_xargo { - SafeCommand::new("xargo") - } else { - SafeCommand::new("cargo") - } +pub(crate) trait DockerCommandExt { + fn add_configuration_envvars(&mut self); + fn add_envvars( + &mut self, + options: &DockerOptions, + dirs: &ToolchainDirectories, + msg_info: &mut MessageInfo, + ) -> Result<()>; + fn add_cwd(&mut self, paths: &DockerPaths) -> Result<()>; + fn add_build_command(&mut self, dirs: &ToolchainDirectories, cmd: &SafeCommand) -> &mut Self; + fn add_user_id(&mut self, is_rootless: bool); + fn add_userns(&mut self); + fn add_seccomp( + &mut self, + engine_type: EngineType, + target: &Target, + metadata: &CargoMetadata, + ) -> Result<()>; + fn add_mounts( + &mut self, + options: &DockerOptions, + paths: &DockerPaths, + mount_cb: impl Fn(&mut Command, &Path, &Path) -> Result<()>, + store_cb: impl FnMut((String, String)), + msg_info: &mut MessageInfo, + ) -> Result<()>; } -fn add_cargo_configuration_envvars(docker: &mut Command) { - let non_cargo_prefix = &[ - "http_proxy", - "TERM", - "RUSTDOCFLAGS", - "RUSTFLAGS", - "BROWSER", - "HTTPS_PROXY", - "HTTP_TIMEOUT", - "https_proxy", - ]; - let cargo_prefix_skip = &[ - "CARGO_HOME", - "CARGO_TARGET_DIR", - "CARGO_BUILD_TARGET_DIR", - "CARGO_BUILD_RUSTC", - "CARGO_BUILD_RUSTC_WRAPPER", - "CARGO_BUILD_RUSTC_WORKSPACE_WRAPPER", - "CARGO_BUILD_RUSTDOC", - ]; - let is_cargo_passthrough = |key: &str| -> bool { - non_cargo_prefix.contains(&key) - || key.starts_with("CARGO_") && !cargo_prefix_skip.contains(&key) - }; +impl DockerCommandExt for Command { + fn add_configuration_envvars(&mut self) { + let other = &[ + "http_proxy", + "TERM", + "RUSTDOCFLAGS", + "RUSTFLAGS", + "BROWSER", + "HTTPS_PROXY", + "HTTP_TIMEOUT", + "https_proxy", + "QEMU_STRACE", + ]; + let cargo_prefix_skip = &[ + "CARGO_HOME", + "CARGO_TARGET_DIR", + "CARGO_BUILD_TARGET_DIR", + "CARGO_BUILD_RUSTC", + "CARGO_BUILD_RUSTC_WRAPPER", + "CARGO_BUILD_RUSTC_WORKSPACE_WRAPPER", + "CARGO_BUILD_RUSTDOC", + ]; + let cross_prefix_skip = &[ + "CROSS_RUNNER", + "CROSS_RUSTC_MAJOR_VERSION", + "CROSS_RUSTC_MINOR_VERSION", + "CROSS_RUSTC_PATCH_VERSION", + ]; + let is_passthrough = |key: &str| -> bool { + other.contains(&key) + || key.starts_with("CARGO_") && !cargo_prefix_skip.contains(&key) + || key.starts_with("CROSS_") && !cross_prefix_skip.contains(&key) + }; - // also need to accept any additional flags used to configure - // cargo, but only pass what's actually present. - for (key, _) in env::vars() { - if is_cargo_passthrough(&key) { - docker.args(&["-e", &key]); + // also need to accept any additional flags used to configure + // cargo or cross, but only pass what's actually present. 
+ for (key, _) in env::vars() { + if is_passthrough(&key) { + self.args(["-e", &key]); + } } } -} -// NOTE: host path must be canonical -pub(crate) fn mount(docker: &mut Command, host_path: &Path, prefix: &str) -> Result { - let mount_path = canonicalize_mount_path(host_path)?; - docker.args(&[ - "-v", - &format!("{}:{prefix}{}", host_path.to_utf8()?, mount_path), - ]); - Ok(mount_path) -} + fn add_envvars( + &mut self, + options: &DockerOptions, + dirs: &ToolchainDirectories, + msg_info: &mut MessageInfo, + ) -> Result<()> { + let mut warned = false; + for ref var in options + .config + .env_passthrough(&options.target) + .unwrap_or_default() + { + validate_env_var( + var, + &mut warned, + "environment variable", + "`passthrough = [\"ENVVAR=value\"]`", + msg_info, + )?; -pub(crate) fn docker_envvars( - docker: &mut Command, - config: &Config, - target: &Target, - msg_info: &mut MessageInfo, -) -> Result<()> { - for ref var in config.env_passthrough(target)?.unwrap_or_default() { - validate_env_var(var)?; + // Only specifying the environment variable name in the "-e" + // flag forwards the value from the parent shell + self.args(["-e", var]); + } - // Only specifying the environment variable name in the "-e" - // flag forwards the value from the parent shell - docker.args(&["-e", var]); - } + let runner = options.config.runner(&options.target); + let cross_runner = format!("CROSS_RUNNER={}", runner.unwrap_or_default()); + self.args(["-e", &format!("XARGO_HOME={}", dirs.xargo_mount_path())]) + .args(["-e", &format!("CARGO_HOME={}", dirs.cargo_mount_path())]) + .args([ + "-e", + &format!("CROSS_RUST_SYSROOT={}", dirs.sysroot_mount_path()), + ]) + .args(["-e", "CARGO_TARGET_DIR=/target"]) + .args(["-e", &cross_runner]); + if options.command_variant.uses_zig() { + // otherwise, zig has a permission error trying to create the cache + self.args(["-e", "XDG_CACHE_HOME=/target/.zig-cache"]); + } + self.add_configuration_envvars(); - let runner = config.runner(target)?; - let cross_runner = format!("CROSS_RUNNER={}", runner.unwrap_or_default()); - docker - .args(&["-e", "PKG_CONFIG_ALLOW_CROSS=1"]) - .args(&["-e", "XARGO_HOME=/xargo"]) - .args(&["-e", "CARGO_HOME=/cargo"]) - .args(&["-e", "CARGO_TARGET_DIR=/target"]) - .args(&["-e", &cross_runner]); - add_cargo_configuration_envvars(docker); + if let Some(username) = id::username().wrap_err("could not get username")? { + self.args(["-e", &format!("USER={username}")]); + } - if let Some(username) = id::username().wrap_err("could not get username")? { - docker.args(&["-e", &format!("USER={username}")]); - } + if let Ok(value) = env::var("CROSS_CONTAINER_OPTS") { + if env::var("DOCKER_OPTS").is_ok() { + msg_info.warn("using both `CROSS_CONTAINER_OPTS` and `DOCKER_OPTS`.")?; + } + self.args(&Engine::parse_opts(&value)?); + } else if let Ok(value) = env::var("DOCKER_OPTS") { + // FIXME: remove this when we deprecate DOCKER_OPTS. + self.args(&Engine::parse_opts(&value)?); + }; - if let Ok(value) = env::var("QEMU_STRACE") { - docker.args(&["-e", &format!("QEMU_STRACE={value}")]); - } + let (major, minor, patch) = match options.rustc_version.as_ref() { + Some(version) => (version.major, version.minor, version.patch), + // no toolchain version available, always provide the oldest + // compiler available. this isn't a major issue because + // linking with libgcc will not include symbols found in + // the builtins. 
+ None => (1, 0, 0), + }; + self.args(["-e", &format!("CROSS_RUSTC_MAJOR_VERSION={}", major)]); + self.args(["-e", &format!("CROSS_RUSTC_MINOR_VERSION={}", minor)]); + self.args(["-e", &format!("CROSS_RUSTC_PATCH_VERSION={}", patch)]); - if let Ok(value) = env::var("CROSS_DEBUG") { - docker.args(&["-e", &format!("CROSS_DEBUG={value}")]); + Ok(()) } - if let Ok(value) = env::var("CROSS_CONTAINER_OPTS") { - if env::var("DOCKER_OPTS").is_ok() { - msg_info.warn("using both `CROSS_CONTAINER_OPTS` and `DOCKER_OPTS`.")?; - } - docker.args(&parse_docker_opts(&value)?); - } else if let Ok(value) = env::var("DOCKER_OPTS") { - // FIXME: remove this when we deprecate DOCKER_OPTS. - docker.args(&parse_docker_opts(&value)?); - }; + fn add_cwd(&mut self, paths: &DockerPaths) -> Result<()> { + self.args(["-w", paths.mount_cwd()]); - Ok(()) -} + Ok(()) + } -pub(crate) fn docker_cwd( - docker: &mut Command, - paths: &DockerPaths, - mount_volumes: bool, -) -> Result<()> { - if mount_volumes { - docker.args(&["-w", paths.mount_cwd()]); - } else if paths.mount_cwd() == paths.workspace_root().to_utf8()? { - docker.args(&["-w", "/project"]); - } else { - // We do this to avoid clashes with path separators. Windows uses `\` as a path separator on Path::join - let working_dir = Path::new("/project").join(paths.workspace_from_cwd()?); - docker.args(&["-w", &working_dir.as_posix()?]); + fn add_build_command(&mut self, dirs: &ToolchainDirectories, cmd: &SafeCommand) -> &mut Self { + let build_command = format!( + "PATH=\"$PATH\":\"{}/bin\" {:?}", + dirs.sysroot_mount_path(), + cmd + ); + self.args(["sh", "-c", &build_command]) } - Ok(()) -} + fn add_user_id(&mut self, is_rootless: bool) { + // by default, docker runs as root so we need to specify the user + // so the resulting file permissions are for the current user. + // since we can have rootless docker, we provide an override. + if !is_rootless { + self.args(["--user", &format!("{}:{}", user_id(), group_id(),)]); + } + } -pub(crate) fn docker_mount( - docker: &mut Command, - options: &DockerOptions, - paths: &DockerPaths, - mount_cb: impl Fn(&mut Command, &Path) -> Result, - mut store_cb: impl FnMut((String, String)), -) -> Result { - let mut mount_volumes = false; - // FIXME(emilgardis 2022-04-07): This is a fallback so that if it's hard for us to do mounting logic, make it simple(r) - // Preferably we would not have to do this. - if !paths.in_workspace() { - mount_volumes = true; - } - - for ref var in options - .config - .env_volumes(&options.target)? 
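// Illustrative aside (not part of the diff): add_seccomp above has three
// outcomes: no flag at all when the target does not need docker seccomp;
// the literal `--security-opt seccomp=unconfined` for docker on windows
// (profile files trip docker/for-win#12760 there); and otherwise the bundled
// profile written once to target/<triple>/seccomp.json and passed by path,
// converted to a WSL-style path when the engine is podman on windows.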
- .unwrap_or_default() - { - let (var, value) = validate_env_var(var)?; - let value = match value { - Some(v) => Ok(v.to_owned()), - None => env::var(var), + fn add_userns(&mut self) { + let userns = match env::var("CROSS_CONTAINER_USER_NAMESPACE").ok().as_deref() { + Some("none") => None, + None | Some("auto") => Some("host".to_owned()), + Some(ns) => Some(ns.to_owned()), }; - - if let Ok(val) = value { - let canonical_val = file::canonicalize(&val)?; - let host_path = paths.mount_finder.find_path(&canonical_val, true)?; - let mount_path = mount_cb(docker, host_path.as_ref())?; - docker.args(&["-e", &format!("{}={}", host_path, mount_path)]); - store_cb((val, mount_path)); - mount_volumes = true; + if let Some(ns) = userns { + self.args(["--userns", &ns]); } } - for path in paths.workspace_dependencies() { - let canonical_path = file::canonicalize(path)?; - let host_path = paths.mount_finder.find_path(&canonical_path, true)?; - let mount_path = mount_cb(docker, host_path.as_ref())?; - store_cb((path.to_utf8()?.to_owned(), mount_path)); - mount_volumes = true; - } + #[allow(unused_mut, clippy::let_and_return)] + fn add_seccomp( + &mut self, + engine_type: EngineType, + target: &Target, + metadata: &CargoMetadata, + ) -> Result<()> { + // secured profile based off the docker documentation for denied syscalls: + // https://docs.docker.com/engine/security/seccomp/#significant-syscalls-blocked-by-the-default-profile + // note that we've allow listed `clone` and `clone3`, which is necessary + // to fork the process, and which podman allows by default. + const SECCOMP: &str = include_str!("seccomp.json"); + + // docker uses seccomp now on all installations + if target.needs_docker_seccomp() { + let seccomp = if engine_type.is_docker() && cfg!(target_os = "windows") { + // docker on windows fails due to a bug in reading the profile + // https://github.com/docker/for-win/issues/12760 + "unconfined".to_owned() + } else { + #[allow(unused_mut)] // target_os = "windows" + let mut path = metadata + .target_directory + .join(target.triple()) + .join("seccomp.json"); + if !path.exists() { + write_file(&path, false)?.write_all(SECCOMP.as_bytes())?; + } + let mut path_string = path.to_utf8()?.to_owned(); + #[cfg(target_os = "windows")] + if matches!(engine_type, EngineType::Podman | EngineType::PodmanRemote) { + // podman weirdly expects a WSL path here, and fails otherwise + path_string = path.as_posix_absolute()?; + } + path_string + }; - Ok(mount_volumes) -} + self.args(["--security-opt", &format!("seccomp={}", seccomp)]); + } -pub(crate) fn canonicalize_mount_path(path: &Path) -> Result { - #[cfg(target_os = "windows")] - { - // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path. 
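// Illustrative aside (not part of the diff): both the removed docker_mount
// above and the add_mounts impl that replaces it (below) ultimately emit a
// `-v host:container` pair plus an `-e VAR=container` override. A condensed
// sketch of the argument shape, with canonicalization done by the caller:
fn volume_args_sketch(var: &str, value: &str, canonical_host: &str) -> Vec<String> {
    // the host side uses the canonical (symlink-resolved) path, while the
    // container side keeps the path exactly as the user wrote it, so code
    // expecting e.g. the literal "/tmp/..." keeps working in the container
    vec![
        "-v".to_owned(),
        format!("{canonical_host}:{value}"),
        "-e".to_owned(),
        format!("{var}={value}"),
    ]
}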
- path.as_wslpath() + Ok(()) } - #[cfg(not(target_os = "windows"))] - { - path.to_utf8().map(|p| p.to_owned()) + + fn add_mounts( + &mut self, + options: &DockerOptions, + paths: &DockerPaths, + mount_cb: impl Fn(&mut Command, &Path, &Path) -> Result<()>, + mut store_cb: impl FnMut((String, String)), + msg_info: &mut MessageInfo, + ) -> Result<()> { + let mut warned = false; + for ref var in options + .config + .env_volumes(&options.target) + .unwrap_or_default() + { + let (var, value) = validate_env_var( + var, + &mut warned, + "volume", + "`volumes = [\"ENVVAR=/path/to/directory\"]`", + msg_info, + )?; + let value = match value { + Some(v) => Ok(v.to_owned()), + None => env::var(var), + }; + + // NOTE: we use canonical paths on the host, since it's unambiguous. + // however, for the mounted paths, we use the same path as was + // provided. this avoids canonicalizing symlinks which then causes + // the mounted path to differ from the path expected on the host. + // for example, if `/tmp` is a symlink to `/private/tmp`, canonicalizing + // it would lead to us mounting `/tmp/process` to `/private/tmp/process`, + // which would cause any code relying on `/tmp/process` to break. + + if let Ok(val) = value { + let canonical_path = file::canonicalize(&val)?; + let host_path = paths.mount_finder.find_path(&canonical_path, true)?; + let mount_path = Path::new(&val).as_posix_absolute()?; + mount_cb(self, host_path.as_ref(), mount_path.as_ref())?; + self.args(["-e", &format!("{}={}", var, mount_path)]); + store_cb((val, mount_path)); + } + } + + for path in paths.workspace_dependencies() { + // NOTE: we use canonical paths here since cargo metadata + // always canonicalizes paths, so these should be relative + // to the mounted project directory. + let canonical_path = file::canonicalize(path)?; + let host_path = paths.mount_finder.find_path(&canonical_path, true)?; + let mount_path = path.as_posix_absolute()?; + mount_cb(self, host_path.as_ref(), mount_path.as_ref())?; + store_cb((path.to_utf8()?.to_owned(), mount_path)); + } + + Ok(()) } } @@ -601,101 +1218,207 @@ pub(crate) fn group_id() -> String { env::var("CROSS_CONTAINER_GID").unwrap_or_else(|_| id::group().to_string()) } -pub(crate) fn docker_user_id(docker: &mut Command, engine_type: EngineType) { - // by default, docker runs as root so we need to specify the user - // so the resulting file permissions are for the current user. - // since we can have rootless docker, we provide an override. - let is_rootless = env::var("CROSS_ROOTLESS_CONTAINER_ENGINE") - .ok() - .and_then(|s| match s.as_ref() { - "auto" => None, - b => Some(bool_from_envvar(b)), - }) - .unwrap_or_else(|| engine_type != EngineType::Docker); - if !is_rootless { - docker.args(&["--user", &format!("{}:{}", user_id(), group_id(),)]); - } +#[derive(Debug, thiserror::Error)] +pub enum GetImageError { + #[error( + "`cross` does not provide a Docker image for target {0}, \ + specify a custom image in `Cross.toml`." 
+ )] + NoCompatibleImages(String), + #[error("platforms for provided image `{0}` are not specified, this is a bug in cross")] + SpecifiedImageNoPlatform(String), + #[error(transparent)] + MultipleImages(eyre::Report), + #[error(transparent)] + Other(eyre::Report), } -pub(crate) fn docker_userns(docker: &mut Command) { - let userns = match env::var("CROSS_CONTAINER_USER_NAMESPACE").ok().as_deref() { - Some("none") => None, - None | Some("auto") => Some("host".to_owned()), - Some(ns) => Some(ns.to_owned()), - }; - if let Some(ns) = userns { - docker.args(&["--userns", &ns]); +fn get_target_name(target: &Target, uses_zig: bool) -> &str { + if uses_zig { + "zig" + } else { + target.triple() } } -#[allow(unused_mut, clippy::let_and_return)] -pub(crate) fn docker_seccomp( - docker: &mut Command, - engine_type: EngineType, +fn get_user_image( + config: &Config, target: &Target, - metadata: &CargoMetadata, -) -> Result<()> { - // docker uses seccomp now on all installations - if target.needs_docker_seccomp() { - let seccomp = if engine_type == EngineType::Docker && cfg!(target_os = "windows") { - // docker on windows fails due to a bug in reading the profile - // https://github.com/docker/for-win/issues/12760 - "unconfined".to_owned() - } else { - #[allow(unused_mut)] // target_os = "windows" - let mut path = metadata - .target_directory - .join(target.triple()) - .join("seccomp.json"); - if !path.exists() { - write_file(&path, false)?.write_all(SECCOMP.as_bytes())?; - } - let mut path_string = path.to_utf8()?.to_owned(); - #[cfg(target_os = "windows")] - if matches!(engine_type, EngineType::Podman | EngineType::PodmanRemote) { - // podman weirdly expects a WSL path here, and fails otherwise - path_string = path.as_wslpath()?; - } - path_string - }; + uses_zig: bool, +) -> Result, GetImageError> { + let mut image = if uses_zig { + config.zig_image(target) + } else { + config.image(target) + } + .map_err(GetImageError::Other)?; - docker.args(&["--security-opt", &format!("seccomp={}", seccomp)]); + if let Some(image) = &mut image { + let target_name = get_target_name(target, uses_zig); + image.reference.ensure_qualified(target_name); } - Ok(()) + Ok(image) } -pub(crate) fn image_name(config: &Config, target: &Target) -> Result { - if let Some(image) = config.image(target)? { - return Ok(image); +fn get_provided_images_for_target( + target_name: &str, +) -> Result, GetImageError> { + let compatible = PROVIDED_IMAGES + .iter() + .filter(|p| p.name == target_name) + .collect::>(); + + if compatible.is_empty() { + return Err(GetImageError::NoCompatibleImages(target_name.to_owned())); } - if !DOCKER_IMAGES.contains(&target.triple()) { - eyre::bail!( - "`cross` does not provide a Docker image for target {target}, \ - specify a custom image in `Cross.toml`." - ); + Ok(compatible) +} + +/// Simpler version of [get_image] +pub fn get_image_name( + config: &Config, + target: &Target, + uses_zig: bool, +) -> Result { + if let Some(image) = get_user_image(config, target, uses_zig)? { + return Ok(image.reference.get().to_owned()); + } + + let target_name = get_target_name(target, uses_zig); + let compatible = get_provided_images_for_target(target_name)?; + Ok(compatible + .first() + .expect("should not be empty") + .default_image_name()) +} + +pub fn get_image( + config: &Config, + target: &Target, + uses_zig: bool, +) -> Result { + if let Some(image) = get_user_image(config, target, uses_zig)? 
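// Illustrative aside (not part of the diff): resolution order in the helpers
// above is (1) a user-supplied image from the config, qualified against the
// target name (or against "zig" for cargo-zigbuild builds), then (2) the
// static PROVIDED_IMAGES table filtered by target name, with
// NoCompatibleImages raised when the filter comes back empty; get_image_name
// simply takes the first table hit, while get_image (below) has to
// disambiguate between multiple hits.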
{
+        return Ok(image);
     }
 
-    let version = if include_str!(concat!(env!("OUT_DIR"), "/commit-info.txt")).is_empty() {
-        env!("CARGO_PKG_VERSION")
+    let target_name = get_target_name(target, uses_zig);
+    let compatible = get_provided_images_for_target(target_name)?;
+    let pick = if let [first] = compatible[..] {
+        // If only one match, use that
+        first
+    } else if compatible
+        .iter()
+        .filter(|provided| provided.sub.is_none())
+        .count()
+        == 1
+    {
+        // if multiple matches, but only one is not a sub-target, pick that one
+        compatible
+            .iter()
+            .find(|provided| provided.sub.is_none())
+            .expect("should exist at least one non-sub image in list")
     } else {
-        "main"
+        // if there are multiple targets and no option can be chosen, bail
+        return Err(GetImageError::MultipleImages(
+            eyre::eyre!(
+                "`cross` provides multiple images for target {target_name}, \
+                 specify toolchain in `Cross.toml`."
+            )
+            .with_note(|| {
+                format!(
+                    "candidates: {}",
+                    compatible
+                        .iter()
+                        .map(|provided| format!("\"{}\"", provided.default_image_name()))
+                        .collect::<Vec<_>>()
+                        .join(", ")
+                )
+            }),
+        ));
     };
-    Ok(format!("{CROSS_IMAGE}/{target}:{version}"))
+    let image_name = pick.default_image_name();
+    if pick.platforms.is_empty() {
+        return Err(GetImageError::SpecifiedImageNoPlatform(image_name));
+    }
+
+    let mut image: PossibleImage = image_name.into();
+    image.toolchain = pick.platforms.to_vec();
+    Ok(image)
 }
 
-fn docker_read_mount_paths(engine: &Engine) -> Result<Vec<MountDetail>> {
-    let hostname = env::var("HOSTNAME").wrap_err("HOSTNAME environment variable not found")?;
+fn docker_inspect_self_mountinfo(engine: &Engine, msg_info: &mut MessageInfo) -> Result<String> {
+    if cfg!(not(target_os = "linux")) {
+        eyre::bail!("/proc/self/mountinfo is unavailable when target_os != linux");
+    }
 
-    let mut docker: Command = {
-        let mut command = subcommand(engine, "inspect");
+    // The ID for the current Docker container might be in mountinfo,
+    // somewhere in a mount root. Full IDs are 64-char hexadecimal
+    // strings, so the first matching path segment in a mount root
+    // containing /docker/ is likely to be what we're looking for. See:
+    // https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+    // https://community.toradex.com/t/15240/4
+    let mountinfo = file::read("/proc/self/mountinfo")?;
+    let container_id = mountinfo
+        .lines()
+        .filter_map(|s| s.split(' ').nth(3))
+        .filter(|s| s.contains("/docker/"))
+        .flat_map(|s| s.split('/'))
+        .find(|s| s.len() == 64 && s.as_bytes().iter().all(u8::is_ascii_hexdigit))
+        .ok_or_else(|| eyre::eyre!("couldn't find container id in mountinfo"))?;
+
+    engine
+        .subcommand("inspect")
+        .arg(container_id)
+        .run_and_get_stdout(msg_info)
+}
+
+fn docker_inspect_self(engine: &Engine, msg_info: &mut MessageInfo) -> Result<String> {
+    // Try to find the container ID by looking at HOSTNAME, and fallback to
+    // parsing `/proc/self/mountinfo` if HOSTNAME is unset or if there's no
+    // container that matches it (necessary e.g. when the container uses
+    // `--network=host`, which is act's default, see issue #1321).
+    // If `docker inspect` fails with unexpected output, skip the fallback
+    // and fail instantly.
+    if let Ok(hostname) = env::var("HOSTNAME") {
+        let mut command = engine.subcommand("inspect");
         command.arg(hostname);
-        command
-    };
+        let out = command.run_and_get_output(msg_info)?;
 
-    let output = docker.run_and_get_stdout(&mut Verbosity::Quiet.into())?;
+        if out.status.success() {
+            Ok(out.stdout()?)
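// Illustrative aside (not part of the diff): in /proc/self/mountinfo the
// fields are space-separated and field 4 (index 3) is the mount root, so a
// line such as (simplified, with a shortened id standing in for 64 hex chars)
//   "705 704 8:1 /var/lib/docker/containers/0123..cdef/resolv.conf /etc/resolv.conf rw - ext4 /dev/sda1 rw"
// passes the `/docker/` filter above, and splitting the root on '/' yields
// "0123..cdef" as the first 64-character all-hex segment, i.e. the container id.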
+        } else {
+            let val = serde_json::from_slice::<serde_json::Value>(&out.stdout);
+            if let Ok(val) = val {
+                if let Some(array) = val.as_array() {
+                    // `docker inspect` completed but returned an empty array, most
+                    // likely indicating that the hostname isn't a valid container ID.
+                    if array.is_empty() {
+                        msg_info.debug("docker inspect found no containers matching HOSTNAME, retrying using mountinfo")?;
+                        return docker_inspect_self_mountinfo(engine, msg_info);
+                    }
+                }
+            }
+
+            let report = command
+                .status_result(msg_info, out.status, Some(&out))
+                .expect_err("we know the command failed")
+                .to_section_report();
+            Err(report)
+        }
+    } else {
+        msg_info.debug("HOSTNAME environment variable is unset")?;
+        docker_inspect_self_mountinfo(engine, msg_info)
+    }
+}
+
+fn docker_read_mount_paths(
+    engine: &Engine,
+    msg_info: &mut MessageInfo,
+) -> Result<Vec<MountDetail>> {
+    let output = docker_inspect_self(engine, msg_info)?;
     let info = serde_json::from_str(&output).wrap_err("failed to parse docker inspect output")?;
     dockerinfo_parse_mounts(&info)
 }
@@ -713,7 +1436,7 @@ fn dockerinfo_parse_root_mount_path(info: &serde_json::Value) -> Result<MountDetail> {
-    pub fn create(engine: &Engine) -> Result<MountFinder> {
+    pub fn create(engine: &Engine, msg_info: &mut MessageInfo) -> Result<MountFinder> {
         Ok(if engine.in_docker {
-            MountFinder::new(docker_read_mount_paths(engine)?)
+            MountFinder::new(docker_read_mount_paths(engine, msg_info)?)
         } else {
             MountFinder::default()
         })
@@ -791,83 +1514,62 @@ impl MountFinder {
         path.to_path_buf()
     }
 
-    #[allow(unused_variables, clippy::needless_return)]
     fn find_path(&self, path: &Path, host: bool) -> Result<String> {
-        #[cfg(target_os = "windows")]
-        {
-            // On Windows, we can not mount the directory name directly. Instead, we use wslpath to convert the path to a linux compatible path.
-            if host {
-                return Ok(path.to_utf8()?.to_owned());
-            } else {
-                return path.as_wslpath();
-            }
-        }
-        #[cfg(not(target_os = "windows"))]
-        {
-            return Ok(self.find_mount_path(path).to_utf8()?.to_owned());
+        if cfg!(target_os = "windows") && host {
+            // On Windows, we can not mount the directory name directly.
+            // Instead, we convert the path to a linux compatible path.
+            return path.to_utf8().map(ToOwned::to_owned);
+        } else if cfg!(target_os = "windows") {
+            path.as_posix_absolute()
+        } else {
+            self.find_mount_path(path).as_posix_absolute()
         }
     }
 }
 
+/// Short hash for identifiers with minimal risk of collision.
+pub const PATH_HASH_SHORT: usize = 5;
+
+/// Longer hash to minimize risk of random collisions
+/// Collision chance is ~10^-6
+pub const PATH_HASH_UNIQUE: usize = 10;
+
 fn path_digest(path: &Path) -> Result<const_sha1::Digest> {
     let buffer = const_sha1::ConstBuffer::from_slice(path.to_utf8()?.as_bytes());
     Ok(const_sha1::sha1(&buffer))
 }
 
-pub fn path_hash(path: &Path) -> Result<String> {
+pub fn path_hash(path: &Path, count: usize) -> Result<String> {
     Ok(path_digest(path)?
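// Illustrative aside (not part of the diff): a hex digest truncated to
// `count` characters has 16^count possible values, so by the birthday bound
// hashing n distinct paths collides with probability roughly
// n^2 / (2 * 16^count). For PATH_HASH_UNIQUE (count = 10, 16^10 ≈ 1.1e12)
// that stays near the documented ~1e-6 up to n ≈ 1500 paths (our estimate),
// while for PATH_HASH_SHORT (count = 5, 16^5 ≈ 1e6) even two paths already
// collide with about that same ~1e-6 probability.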
.to_string() - .get(..5) - .expect("sha1 is expected to be at least 5 characters long") + .get(..count) + .unwrap_or_else(|| panic!("sha1 is expected to be at least {count} characters long")) .to_owned()) } #[cfg(test)] mod tests { + use std::collections::HashMap; + use super::*; - use crate::id; + use crate::{config::Environment, id}; + + #[cfg(not(target_os = "windows"))] + use crate::file::PathExt; #[test] fn test_docker_user_id() { - let var = "CROSS_ROOTLESS_CONTAINER_ENGINE"; - let old = env::var(var); - env::remove_var(var); - let rootful = format!("\"engine\" \"--user\" \"{}:{}\"", id::user(), id::group()); let rootless = "\"engine\"".to_owned(); - let test = |engine, expected| { + let test = |noroot, expected| { let mut cmd = Command::new("engine"); - docker_user_id(&mut cmd, engine); + cmd.add_user_id(noroot); assert_eq!(expected, &format!("{cmd:?}")); }; - test(EngineType::Docker, &rootful); - test(EngineType::Podman, &rootless); - test(EngineType::PodmanRemote, &rootless); - test(EngineType::Other, &rootless); - - env::set_var(var, "0"); - test(EngineType::Docker, &rootful); - test(EngineType::Podman, &rootful); - test(EngineType::PodmanRemote, &rootful); - test(EngineType::Other, &rootful); - - env::set_var(var, "1"); - test(EngineType::Docker, &rootless); - test(EngineType::Podman, &rootless); - test(EngineType::PodmanRemote, &rootless); - test(EngineType::Other, &rootless); - - env::set_var(var, "auto"); - test(EngineType::Docker, &rootful); - test(EngineType::Podman, &rootless); - test(EngineType::PodmanRemote, &rootless); - test(EngineType::Other, &rootless); - match old { - Ok(v) => env::set_var(var, v), - Err(_) => env::remove_var(var), - } + test(false, &rootful); + test(true, &rootless); } #[test] @@ -882,7 +1584,7 @@ mod tests { let test = |expected| { let mut cmd = Command::new("engine"); - docker_userns(&mut cmd); + cmd.add_userns(); assert_eq!(expected, &format!("{cmd:?}")); }; test(&host); @@ -905,9 +1607,54 @@ mod tests { } } + #[test] + fn test_tag_only_image() -> Result<()> { + let target: Target = TargetTriple::X86_64UnknownLinuxGnu.into(); + let test = |map, expected_ver: &str, expected_ver_zig: &str| -> Result<()> { + let env = Environment::new(Some(map)); + let config = Config::new_with(None, env); + for (uses_zig, expected_ver) in [(false, expected_ver), (true, expected_ver_zig)] { + let expected_image_target = if uses_zig { + "zig" + } else { + "x86_64-unknown-linux-gnu" + }; + let expected = format!("ghcr.io/cross-rs/{expected_image_target}{expected_ver}"); + + let image = get_image(&config, &target, uses_zig)?; + assert_eq!(image.reference.get(), expected); + let image_name = get_image_name(&config, &target, uses_zig)?; + assert_eq!(image_name, expected); + } + Ok(()) + }; + + let default_ver = format!(":{DEFAULT_IMAGE_VERSION}"); + let mut map = HashMap::new(); + test(map.clone(), &default_ver, &default_ver)?; + + map.insert("CROSS_TARGET_X86_64_UNKNOWN_LINUX_GNU_IMAGE", "-centos"); + test(map.clone(), &format!("{default_ver}-centos"), &default_ver)?; + + map.insert("CROSS_TARGET_X86_64_UNKNOWN_LINUX_GNU_IMAGE", ":edge"); + test(map.clone(), ":edge", &default_ver)?; + + map.insert( + "CROSS_TARGET_X86_64_UNKNOWN_LINUX_GNU_ZIG_IMAGE", + "@sha256:foobar", + ); + test(map.clone(), ":edge", "@sha256:foobar")?; + + map.remove("CROSS_TARGET_X86_64_UNKNOWN_LINUX_GNU_IMAGE"); + test(map.clone(), &default_ver, "@sha256:foobar")?; + + Ok(()) + } + mod directories { use super::*; use crate::cargo::cargo_metadata_with_args; + use crate::rustc::{self, 
VersionMetaExt}; use crate::temp; fn unset_env() -> Vec<(&'static str, Option)> { @@ -966,53 +1713,56 @@ mod tests { Ok(path) } - fn get_sysroot() -> Result { - Ok(home()? + fn get_toolchain() -> Result { + let host_version_meta = rustc::version_meta()?; + let host = host_version_meta.host(); + let image_platform = + crate::docker::ImagePlatform::from_const_target(host.triple().into()); + let sysroot = home()? .join(".rustup") .join("toolchains") - .join("stable-x86_64-unknown-linux-gnu")) + .join(host.triple()); + Ok(QualifiedToolchain::new( + "stable", + &None, + &image_platform, + &sysroot, + false, + )) } fn get_directories( - metadata: &CargoMetadata, + metadata: CargoMetadata, mount_finder: &MountFinder, - ) -> Result { + ) -> Result<(Directories, CargoMetadata)> { let cwd = get_cwd()?; - let sysroot = get_sysroot()?; - Directories::create(mount_finder, metadata, &cwd, &sysroot) - } - - fn path_to_posix(path: &Path) -> Result { - #[cfg(target_os = "windows")] - { - path.as_wslpath() - } - #[cfg(not(target_os = "windows"))] - { - path.as_posix() - } + let toolchain = get_toolchain()?; + Directories::assemble(mount_finder, metadata, &cwd, toolchain) } #[track_caller] fn paths_equal(x: &Path, y: &Path) -> Result<()> { - assert_eq!(path_to_posix(x)?, path_to_posix(y)?); + assert_eq!(x.as_posix_absolute()?, y.as_posix_absolute()?); Ok(()) } #[test] + #[cfg_attr(cross_sandboxed, ignore)] fn test_host() -> Result<()> { let vars = unset_env(); let mount_finder = MountFinder::new(vec![]); let metadata = cargo_metadata(false, &mut MessageInfo::default())?; - let directories = get_directories(&metadata, &mount_finder)?; - paths_equal(&directories.cargo, &home()?.join(".cargo"))?; - paths_equal(&directories.xargo, &home()?.join(".xargo"))?; - paths_equal(&directories.host_root, &metadata.workspace_root)?; + let (directories, metadata) = get_directories(metadata, &mount_finder)?; + let toolchain_dirs = directories.toolchain_directories(); + let package_dirs = directories.package_directories(); + paths_equal(toolchain_dirs.cargo(), &home()?.join(".cargo"))?; + paths_equal(toolchain_dirs.xargo(), &home()?.join(".xargo"))?; + paths_equal(package_dirs.host_root(), &metadata.workspace_root)?; assert_eq!( - &directories.mount_root, - &path_to_posix(&metadata.workspace_root)? + package_dirs.mount_root(), + &metadata.workspace_root.as_posix_absolute()? 
); - assert_eq!(&directories.mount_cwd, &path_to_posix(&get_cwd()?)?); + assert_eq!(package_dirs.mount_cwd(), &get_cwd()?.as_posix_absolute()?); reset_env(vars); Ok(()) @@ -1025,9 +1775,8 @@ mod tests { let mut msg_info = MessageInfo::default(); let engine = create_engine(&mut msg_info); - let hostname = env::var("HOSTNAME"); - if engine.is_err() || hostname.is_err() { - eprintln!("could not get container engine or no hostname found"); + if engine.is_err() { + eprintln!("could not get container engine"); reset_env(vars); return Ok(()); } @@ -1037,33 +1786,26 @@ mod tests { reset_env(vars); return Ok(()); } - let hostname = hostname.unwrap(); - let output = subcommand(&engine, "inspect") - .arg(hostname) - .run_and_get_output(&mut msg_info)?; - if !output.status.success() { + let output = docker_inspect_self(&engine, &mut msg_info); + if output.is_err() { eprintln!("inspect failed"); reset_env(vars); return Ok(()); } - let mount_finder = MountFinder::create(&engine)?; + let mount_finder = MountFinder::create(&engine, &mut msg_info)?; let metadata = cargo_metadata(true, &mut msg_info)?; - let directories = get_directories(&metadata, &mount_finder)?; - let mount_finder = MountFinder::new(docker_read_mount_paths(&engine)?); + let (directories, _) = get_directories(metadata, &mount_finder)?; + let toolchain_dirs = directories.toolchain_directories(); + let package_dirs = directories.package_directories(); + let mount_finder = MountFinder::new(docker_read_mount_paths(&engine, &mut msg_info)?); let mount_path = |p| mount_finder.find_mount_path(p); - paths_equal(&directories.cargo, &mount_path(home()?.join(".cargo")))?; - paths_equal(&directories.xargo, &mount_path(home()?.join(".xargo")))?; - paths_equal(&directories.host_root, &mount_path(get_cwd()?))?; - assert_eq!( - &directories.mount_root, - &path_to_posix(&mount_path(get_cwd()?))? - ); - assert_eq!( - &directories.mount_cwd, - &path_to_posix(&mount_path(get_cwd()?))? - ); + paths_equal(toolchain_dirs.cargo(), &mount_path(home()?.join(".cargo")))?; + paths_equal(toolchain_dirs.xargo(), &mount_path(home()?.join(".xargo")))?; + paths_equal(package_dirs.host_root(), &get_cwd()?)?; + assert_eq!(package_dirs.mount_root(), &get_cwd()?.as_posix_absolute()?); + assert_eq!(package_dirs.mount_cwd(), &get_cwd()?.as_posix_absolute()?); reset_env(vars); Ok(()) diff --git a/src/errors.rs b/src/errors.rs index c0c5aaf88..bcb4d3595 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -1,4 +1,4 @@ -use crate::docker::remote; +use crate::docker; use crate::temp; use std::sync::atomic::{AtomicBool, Ordering}; @@ -10,8 +10,10 @@ pub use eyre::Result; pub static mut TERMINATED: AtomicBool = AtomicBool::new(false); pub fn install_panic_hook() -> Result<()> { + let is_dev = !crate::commit_info().is_empty() || std::env::var("CROSS_DEBUG").is_ok(); color_eyre::config::HookBuilder::new() .display_env_section(false) + .display_location_section(is_dev) .install() } @@ -85,15 +87,35 @@ unsafe fn termination_handler() { // // on windows, a non-reentrant static mutex is used, so it is // definitely not thread safe, but this should not matter. - remote::CONTAINER = None; + // + // NOTE: there is one major function that is not async-signal safe here: + // memory allocation and deallocation, which is not async-signal safe. + // this should only be run once without deadlocking since any + // atomics are guaranteed to be lock-free. we cannot easily avoid + // allocation/deallocation, since we would need static global muts + // for basically everything. 
`Command::arg` and `Command::new` will + // internally allocate, and freeing it will deallocate any arguments + // it was provided. even if we used a static global `Command`, the + // `io::Result` requires a `Box` or `io::Error`, which would allocate. + // the alternative would be to use `posix_spawnp` or `CreateProcess` + // directly, which are async-signal safe and thread-safe, respectively, + // however, we'd need to store the engine path and the argument list as + // a global CString and `Vec`, respectively. this atomic guard + // makes this safe regardless. + docker::CHILD_CONTAINER.terminate(); - // EOWNERDEAD, seems to be the same on linux, macos, and bash on windows. + // all termination exit codes are 128 + signal code. the exit code is + // 130 for Ctrl+C or SIGINT (signal code 2) for linux, macos, and windows. std::process::exit(130); } pub fn install_termination_hook() -> Result<()> { // SAFETY: safe since single-threaded execution. - ctrlc::set_handler(|| unsafe { termination_handler() }).map_err(Into::into) + unsafe { + signal_hook::low_level::register(signal_hook::consts::SIGINT, || termination_handler()) + } + .map_err(Into::into) + .map(|_| ()) } #[derive(Debug, thiserror::Error)] diff --git a/src/extensions.rs b/src/extensions.rs index a9c2e8c22..992eb8d46 100644 --- a/src/extensions.rs +++ b/src/extensions.rs @@ -10,16 +10,19 @@ pub const STRIPPED_BINS: &[&str] = &[crate::docker::DOCKER, crate::docker::PODMA pub trait CommandExt { fn fmt_message(&self, msg_info: &mut MessageInfo) -> String; + #[track_caller] fn print(&self, msg_info: &mut MessageInfo) -> Result<()> { let msg = self.fmt_message(msg_info); msg_info.print(&msg) } + #[track_caller] fn info(&self, msg_info: &mut MessageInfo) -> Result<()> { let msg = self.fmt_message(msg_info); msg_info.info(&msg) } + #[track_caller] fn debug(&self, msg_info: &mut MessageInfo) -> Result<()> { let msg = self.fmt_message(msg_info); msg_info.debug(&msg) @@ -31,13 +34,17 @@ pub trait CommandExt { status: ExitStatus, output: Option<&Output>, ) -> Result<(), CommandError>; + #[track_caller] fn run(&mut self, msg_info: &mut MessageInfo, silence_stdout: bool) -> Result<()>; + #[track_caller] fn run_and_get_status( &mut self, msg_info: &mut MessageInfo, silence_stdout: bool, ) -> Result; + #[track_caller] fn run_and_get_stdout(&mut self, msg_info: &mut MessageInfo) -> Result; + #[track_caller] fn run_and_get_output(&mut self, msg_info: &mut MessageInfo) -> Result; fn command_pretty( &self, @@ -84,22 +91,36 @@ impl CommandExt for Command { format!("{}", C(self, msg_info, strip)) } + #[track_caller] fn fmt_message(&self, mut msg_info: &mut MessageInfo) -> String { + use std::fmt::Write; let msg_info = &mut msg_info; + let mut string = String::new(); + if let Some(caller) = msg_info.caller() { + write!(string, "[{}] ->\n+ ", caller).unwrap(); + } else { + write!(string, "+ ").unwrap(); + }; if let Some(cwd) = self.get_current_dir() { - format!( - "+ {:?} {}", + write!( + string, + "{:?} {}", cwd, msg_info.as_verbose(|info| self.command_pretty(info, |_| false)) ) + .unwrap(); } else { - format!( - "+ {}", + write!( + string, + "{}", msg_info.as_verbose(|info| self.command_pretty(info, |_| false)) ) + .unwrap(); } + string } + #[track_caller] fn status_result( &self, msg_info: &mut MessageInfo, @@ -120,13 +141,15 @@ impl CommandExt for Command { } /// Runs the command to completion + #[track_caller] fn run(&mut self, msg_info: &mut MessageInfo, silence_stdout: bool) -> Result<()> { let status = self.run_and_get_status(msg_info, 
silence_stdout)?; - self.status_result(msg_info, status, None) - .map_err(Into::into) + #[warn(clippy::nursery)] + Ok(self.status_result(msg_info, status, None)?) } /// Runs the command to completion + #[track_caller] fn run_and_get_status( &mut self, msg_info: &mut MessageInfo, @@ -136,21 +159,19 @@ impl CommandExt for Command { if silence_stdout && !msg_info.is_verbose() { self.stdout(std::process::Stdio::null()); } - self.status() - .map_err(|e| CommandError::CouldNotExecute { - source: Box::new(e), - command: self - .command_pretty(msg_info, |cmd| STRIPPED_BINS.iter().any(|f| f == &cmd)), - }) - .map_err(Into::into) + Ok(self.status().map_err(|e| CommandError::CouldNotExecute { + source: Box::new(e), + command: self.command_pretty(msg_info, |cmd| STRIPPED_BINS.iter().any(|f| f == &cmd)), + })?) } /// Runs the command to completion and returns its stdout + #[track_caller] fn run_and_get_stdout(&mut self, msg_info: &mut MessageInfo) -> Result { let out = self.run_and_get_output(msg_info)?; self.status_result(msg_info, out.status, Some(&out)) .map_err(CommandError::to_section_report)?; - out.stdout().map_err(Into::into) + Ok(out.stdout()?) } /// Runs the command to completion and returns the status and its [output](std::process::Output). @@ -158,6 +179,7 @@ impl CommandExt for Command { /// # Notes /// /// This command does not check the status. + #[track_caller] fn run_and_get_output(&mut self, msg_info: &mut MessageInfo) -> Result { self.debug(msg_info)?; self.output().map_err(|e| { diff --git a/src/file.rs b/src/file.rs index c2c976cfd..cc39ae9b6 100644 --- a/src/file.rs +++ b/src/file.rs @@ -1,4 +1,5 @@ use std::borrow::Cow; +use std::env; use std::ffi::OsStr; use std::fs::{self, File}; use std::io::Read; @@ -26,15 +27,14 @@ impl ToUtf8 for Path { } pub trait PathExt { - fn as_posix(&self) -> Result; - #[cfg(target_family = "windows")] - fn as_wslpath(&self) -> Result; + fn as_posix_relative(&self) -> Result; + fn as_posix_absolute(&self) -> Result; } #[cfg(target_family = "windows")] fn format_prefix(prefix: &str) -> Result { match prefix { - "" => eyre::bail!("Error: got empty windows prefix"), + "" => Ok("".to_owned()), _ => Ok(format!("/mnt/{}", prefix.to_lowercase())), } } @@ -44,6 +44,23 @@ fn fmt_disk(disk: u8) -> String { (disk as char).to_string() } +#[cfg(target_family = "windows")] +fn fmt_ns_disk(disk: &std::ffi::OsStr) -> Result { + let disk = disk.to_utf8()?; + Ok(match disk.len() { + // ns can be similar to `\\.\COM42`, or also `\\.\C:\` + 2 => { + let c = disk.chars().next().expect("cannot be empty"); + if c.is_ascii_alphabetic() && disk.ends_with(':') { + fmt_disk(c as u8) + } else { + disk.to_owned() + } + } + _ => disk.to_owned(), + }) +} + #[cfg(target_family = "windows")] fn fmt_unc(server: &std::ffi::OsStr, volume: &std::ffi::OsStr) -> Result { let server = server.to_utf8()?; @@ -52,7 +69,7 @@ fn fmt_unc(server: &std::ffi::OsStr, volume: &std::ffi::OsStr) -> Result if server == "localhost" && bytes.len() == 2 && bytes[1] == b'$' - && matches!(bytes[0], b'A'..=b'Z' | b'a'..=b'z') + && bytes[0].is_ascii_alphabetic() { Ok(fmt_disk(bytes[0])) } else { @@ -61,7 +78,7 @@ fn fmt_unc(server: &std::ffi::OsStr, volume: &std::ffi::OsStr) -> Result } impl PathExt for Path { - fn as_posix(&self) -> Result { + fn as_posix_relative(&self) -> Result { if cfg!(target_os = "windows") { let push = |p: &mut String, c: &str| { if !p.is_empty() && p != "/" { @@ -89,11 +106,16 @@ impl PathExt for Path { } } - // this is similar to as_posix, but it handles drive separators - // and 
doesn't assume a relative path. + #[cfg(not(target_family = "windows"))] + fn as_posix_absolute(&self) -> Result { + absolute_path(self)?.to_utf8().map(ToOwned::to_owned) + } + + // this is similar to as_posix_relative, but it handles drive + // separators and will only work with absolute paths. #[cfg(target_family = "windows")] - fn as_wslpath(&self) -> Result { - let path = canonicalize(self)?; + fn as_posix_absolute(&self) -> Result { + let path = absolute_path(self)?; let push = |p: &mut String, c: &str, r: bool| { if !r { @@ -115,7 +137,7 @@ impl PathExt for Path { // a root_prefix since we force absolute paths. Prefix::VerbatimDisk(disk) => fmt_disk(disk), Prefix::UNC(server, volume) => fmt_unc(server, volume)?, - Prefix::DeviceNS(ns) => ns.to_utf8()?.to_owned(), + Prefix::DeviceNS(ns) => fmt_ns_disk(ns)?, Prefix::Disk(disk) => fmt_disk(disk), } } @@ -143,6 +165,11 @@ where read_(path.as_ref()) } +pub fn create_dir_all(path: impl AsRef) -> Result<()> { + fs::create_dir_all(path.as_ref()) + .wrap_err_with(|| format!("couldn't create directory {:?}", path.as_ref())) +} + fn read_(path: &Path) -> Result { let mut s = String::new(); File::open(path) @@ -161,7 +188,7 @@ fn _canonicalize(path: &Path) -> Result { #[cfg(target_os = "windows")] { // Docker does not support UNC paths, this will try to not use UNC paths - dunce::canonicalize(&path).map_err(Into::into) + dunce::canonicalize(path).map_err(Into::into) } #[cfg(not(target_os = "windows"))] { @@ -169,6 +196,30 @@ fn _canonicalize(path: &Path) -> Result { } } +fn is_wsl_absolute(path: &str) -> bool { + if !cfg!(target_os = "windows") { + return false; + } + let path = path.strip_prefix("/mnt/").unwrap_or(path); + let maybe_drive = path.split_once('/').map_or(path, |x| x.0); + + maybe_drive.len() == 1 && matches!(maybe_drive.chars().next(), Some('a'..='z')) +} + +// Fix for issue #581. target_dir must be absolute. +pub fn absolute_path(path: impl AsRef) -> Result { + let as_ref = path.as_ref(); + Ok( + if as_ref.is_absolute() + || cfg!(target_family = "windows") && is_wsl_absolute(as_ref.to_utf8()?) + { + as_ref.to_path_buf() + } else { + env::current_dir()?.join(path) + }, + ) +} + /// Pretty format a file path. 
Also removes the path prefix from a command if wanted pub fn pretty_path(path: impl AsRef, strip: impl for<'a> Fn(&'a str) -> bool) -> String { let path = path.as_ref(); @@ -222,12 +273,10 @@ pub fn maybe_canonicalize(path: &Path) -> Cow<'_, OsStr> { pub fn write_file(path: impl AsRef, overwrite: bool) -> Result { let path = path.as_ref(); - fs::create_dir_all( - &path - .parent() + create_dir_all( + path.parent() .ok_or_else(|| eyre::eyre!("could not find parent directory for `{path:?}`"))?, - ) - .wrap_err_with(|| format!("couldn't create directory `{path:?}`"))?; + )?; let mut open = fs::OpenOptions::new(); open.write(true); @@ -238,7 +287,7 @@ pub fn write_file(path: impl AsRef, overwrite: bool) -> Result { open.create_new(true); } - open.open(&path) + open.open(path) .wrap_err(format!("couldn't write to file `{path:?}`")) } @@ -253,6 +302,7 @@ mod tests { }; } + #[track_caller] fn result_eq(x: Result, y: Result) { match (x, y) { (Ok(x), Ok(y)) => assert_eq!(x, y), @@ -261,34 +311,70 @@ mod tests { } #[test] - fn as_posix() { - result_eq(p!(".").join("..").as_posix(), Ok("./..".to_owned())); - result_eq(p!(".").join("/").as_posix(), Ok("/".to_owned())); - result_eq(p!("foo").join("bar").as_posix(), Ok("foo/bar".to_owned())); - result_eq(p!("/foo").join("bar").as_posix(), Ok("/foo/bar".to_owned())); + fn as_posix_relative() { + result_eq( + p!(".").join("..").as_posix_relative(), + Ok("./..".to_owned()), + ); + result_eq(p!(".").join("/").as_posix_relative(), Ok("/".to_owned())); + result_eq( + p!("foo").join("bar").as_posix_relative(), + Ok("foo/bar".to_owned()), + ); + result_eq( + p!("/foo").join("bar").as_posix_relative(), + Ok("/foo/bar".to_owned()), + ); } #[test] #[cfg(target_family = "windows")] fn as_posix_prefix() { assert_eq!(p!("C:").join(".."), p!("C:..")); - assert!(p!("C:").join("..").as_posix().is_err()); + assert!(p!("C:").join("..").as_posix_relative().is_err()); + } + + #[test] + #[cfg(target_family = "windows")] + fn is_absolute_wslpath() { + assert!(is_wsl_absolute("/mnt/c/Users")); + assert!(is_wsl_absolute("/mnt/c")); + assert!(is_wsl_absolute("/mnt/z/Users")); + assert!(!is_wsl_absolute("/mnt")); + assert!(!is_wsl_absolute("/mnt/cc")); + assert!(!is_wsl_absolute("/mnt/zc")); } #[test] #[cfg(target_family = "windows")] - fn as_wslpath() { - result_eq(p!(r"C:\").as_wslpath(), Ok("/mnt/c".to_owned())); - result_eq(p!(r"C:\Users").as_wslpath(), Ok("/mnt/c/Users".to_owned())); + fn as_posix_with_drive() { + use regex::Regex; + + result_eq(p!(r"C:\").as_posix_absolute(), Ok("/mnt/c".to_owned())); result_eq( - p!(r"\\localhost\c$\Users").as_wslpath(), + p!(r"C:\Users").as_posix_absolute(), Ok("/mnt/c/Users".to_owned()), ); - result_eq(p!(r"\\.\C:\").as_wslpath(), Ok("/mnt/c".to_owned())); result_eq( - p!(r"\\.\C:\Users").as_wslpath(), + p!(r"\\localhost\c$\Users").as_posix_absolute(), Ok("/mnt/c/Users".to_owned()), ); + result_eq(p!(r"\\.\C:\").as_posix_absolute(), Ok("/mnt/c".to_owned())); + result_eq( + p!(r"\\.\C:\Users").as_posix_absolute(), + Ok("/mnt/c/Users".to_owned()), + ); + + result_eq( + p!(r"/mnt/c/Users").as_posix_absolute(), + Ok("/mnt/c/Users".to_owned()), + ); + result_eq(p!(r"/mnt/c").as_posix_absolute(), Ok("/mnt/c".to_owned())); + + let regex = Regex::new("/mnt/[A-Za-z]/mnt").unwrap(); + let result = p!(r"/mnt").as_posix_absolute(); + assert!(result.is_ok()); + assert!(regex.is_match(&result.unwrap())); } #[test] diff --git a/src/lib.rs b/src/lib.rs index e7f25c810..28d13c519 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,7 +8,8 @@ //! 
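// Illustrative aside (not part of the diff): the Windows tests above pin
// down the prefix handling of as_posix_absolute():
//   C:\Users              -> /mnt/c/Users   (Disk prefix, drive lowercased)
//   \\localhost\c$\Users  -> /mnt/c/Users   (UNC localhost admin share)
//   \\.\C:\Users          -> /mnt/c/Users   (DeviceNS carrying a drive)
//   /mnt/c/Users          -> /mnt/c/Users   (already WSL-style, kept as-is)
// while a bare "/mnt" is not WSL-absolute (no single drive letter follows),
// so it falls back to absolute_path() and lands under the current drive,
// which is why the last test only asserts a /mnt/<drive>/mnt shape via regex.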
⚠️ Warning: The cross library is for internal //! use only: only the command-line interface is stable. The library //! may change at any point for any reason. For documentation on the -//! CLI, please see the repository README +//! CLI, please see the repository README, +//! docs folder //! or the wiki. //!

@@ -24,33 +25,37 @@ clippy::semicolon_if_nothing_returned, clippy::str_to_string, clippy::string_to_string, - // needs clippy 1.61 clippy::unwrap_used + clippy::unwrap_used )] #[cfg(test)] mod tests; -mod cargo; -mod cli; -mod config; -mod cross_toml; +pub mod cargo; +pub mod cli; +pub mod config; +pub mod cross_toml; pub mod docker; pub mod errors; mod extensions; -mod file; +pub mod file; mod id; mod interpreter; pub mod rustc; -mod rustup; +pub mod rustup; pub mod shell; pub mod temp; use std::env; -use std::io::{self, Write}; use std::path::PathBuf; use std::process::ExitStatus; +use cli::Args; +use color_eyre::owo_colors::OwoColorize; +use color_eyre::{Help, SectionExt}; use config::Config; +use cross_toml::BuildStd; +use rustc::{QualifiedToolchain, Toolchain}; use rustc_version::Channel; use serde::{Deserialize, Serialize, Serializer}; @@ -60,15 +65,17 @@ use self::errors::Context; use self::shell::{MessageInfo, Verbosity}; pub use self::errors::{install_panic_hook, install_termination_hook, Result}; -pub use self::extensions::{CommandExt, OutputExt}; +pub use self::extensions::{CommandExt, OutputExt, SafeCommand}; pub use self::file::{pretty_path, ToUtf8}; pub use self::rustc::{TargetList, VersionMetaExt}; pub const CROSS_LABEL_DOMAIN: &str = "org.cross-rs"; #[allow(non_camel_case_types)] -#[derive(Debug, Clone, PartialEq, Eq)] -pub enum Host { +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Hash)] +#[serde(from = "&str", into = "String")] +#[serde(rename_all = "snake_case")] +pub enum TargetTriple { Other(String), // OSX @@ -92,7 +99,76 @@ pub enum Host { X86_64PcWindowsMsvc, } -impl Host { +impl TargetTriple { + pub const DEFAULT: Self = Self::X86_64UnknownLinuxGnu; + /// Returns the architecture name according to `dpkg` naming convention + /// + /// # Notes + /// + /// Some of these make no sense to use in our standard images + pub fn deb_arch(&self) -> Option<&'static str> { + match self.triple() { + "aarch64-unknown-linux-gnu" => Some("arm64"), + "aarch64-unknown-linux-musl" => Some("musl-linux-arm64"), + "aarch64-linux-android" => None, + "x86_64-unknown-linux-gnu" => Some("amd64"), + "x86_64-apple-darwin" => Some("darwin-amd64"), + "x86_64-unknown-linux-musl" => Some("musl-linux-amd64"), + + "x86_64-pc-windows-msvc" => None, + "arm-unknown-linux-gnueabi" => Some("armel"), + "arm-unknown-linux-gnueabihf" => Some("armhf"), + "armv7-unknown-linux-gnueabi" => Some("armel"), + "armv7-unknown-linux-gnueabihf" => Some("armhf"), + "thumbv7neon-unknown-linux-gnueabihf" => Some("armhf"), + "i586-unknown-linux-gnu" => Some("i386"), + "i686-unknown-linux-gnu" => Some("i386"), + "mips-unknown-linux-gnu" => Some("mips"), + "mipsel-unknown-linux-gnu" => Some("mipsel"), + "mips64-unknown-linux-gnuabi64" => Some("mips64"), + "mips64el-unknown-linux-gnuabi64" => Some("mips64el"), + "mips64-unknown-linux-muslabi64" => Some("musl-linux-mips64"), + "mips64el-unknown-linux-muslabi64" => Some("musl-linux-mips64el"), + "powerpc-unknown-linux-gnu" => Some("powerpc"), + "powerpc64-unknown-linux-gnu" => Some("ppc64"), + "powerpc64le-unknown-linux-gnu" => Some("ppc64el"), + "riscv64gc-unknown-linux-gnu" => Some("riscv64"), + "s390x-unknown-linux-gnu" => Some("s390x"), + "sparc64-unknown-linux-gnu" => Some("sparc64"), + "arm-unknown-linux-musleabihf" => Some("musl-linux-armhf"), + "arm-unknown-linux-musleabi" => Some("musl-linux-arm"), + "armv5te-unknown-linux-gnueabi" => None, + "armv5te-unknown-linux-musleabi" => None, + "armv7-unknown-linux-musleabi" => Some("musl-linux-arm"), + 
"armv7-unknown-linux-musleabihf" => Some("musl-linux-armhf"), + "i586-unknown-linux-musl" => Some("musl-linux-i386"), + "i686-unknown-linux-musl" => Some("musl-linux-i386"), + "mips-unknown-linux-musl" => Some("musl-linux-mips"), + "mipsel-unknown-linux-musl" => Some("musl-linux-mipsel"), + "arm-linux-androideabi" => None, + "armv7-linux-androideabi" => None, + "thumbv7neon-linux-androideabi" => None, + "i686-linux-android" => None, + "x86_64-linux-android" => None, + "x86_64-pc-windows-gnu" => None, + "i686-pc-windows-gnu" => None, + "asmjs-unknown-emscripten" => None, + "wasm32-unknown-emscripten" => None, + "x86_64-unknown-dragonfly" => Some("dragonflybsd-amd64"), + "i686-unknown-freebsd" => Some("freebsd-i386"), + "x86_64-unknown-freebsd" => Some("freebsd-amd64"), + "aarch64-unknown-freebsd" => Some("freebsd-arm64"), + "x86_64-unknown-netbsd" => Some("netbsd-amd64"), + "sparcv9-sun-solaris" => Some("solaris-sparc"), + "x86_64-pc-solaris" => Some("solaris-amd64"), + "thumbv6m-none-eabi" => Some("arm"), + "thumbv7em-none-eabi" => Some("arm"), + "thumbv7em-none-eabihf" => Some("armhf"), + "thumbv7m-none-eabi" => Some("arm"), + _ => None, + } + } + /// Checks if this `(host, target)` pair is supported by `cross` /// /// `target == None` means `target == host` @@ -104,17 +180,19 @@ impl Host { // Old behavior (up to cross version 0.2.1) can be activated on demand using environment // variable `CROSS_COMPATIBILITY_VERSION`. Ok("0.2.1") => match self { - Host::X86_64AppleDarwin | Host::Aarch64AppleDarwin => { + TargetTriple::X86_64AppleDarwin | TargetTriple::Aarch64AppleDarwin => { target.map_or(false, |t| t.needs_docker()) } - Host::X86_64UnknownLinuxGnu - | Host::Aarch64UnknownLinuxGnu - | Host::X86_64UnknownLinuxMusl - | Host::Aarch64UnknownLinuxMusl => target.map_or(true, |t| t.needs_docker()), - Host::X86_64PcWindowsMsvc => target.map_or(false, |t| { - t.triple() != Host::X86_64PcWindowsMsvc.triple() && t.needs_docker() + TargetTriple::X86_64UnknownLinuxGnu + | TargetTriple::Aarch64UnknownLinuxGnu + | TargetTriple::X86_64UnknownLinuxMusl + | TargetTriple::Aarch64UnknownLinuxMusl => { + target.map_or(true, |t| t.needs_docker()) + } + TargetTriple::X86_64PcWindowsMsvc => target.map_or(false, |t| { + t.triple() != TargetTriple::X86_64PcWindowsMsvc.triple() && t.needs_docker() }), - Host::Other(_) => false, + TargetTriple::Other(_) => false, }, // New behaviour, if a target is provided (--target ...) then always run with docker // image unless the target explicitly opts-out (i.e. unless needs_docker() returns false). @@ -124,7 +202,7 @@ impl Host { // having to change cross every time someone comes up with the need for a new host/target // combination. It's totally fine to call cross with `--target=$host_triple`, for // example to test custom docker images. Cross should not try to recognize if host and - // target are equal, it's a user decision and if user want's to bypass cross he can call + // target are equal, it's a user decision and if user wants to bypass cross he can call // cargo directly or omit the `--target` option. 
_ => target.map_or(false, |t| t.needs_docker()), } @@ -133,54 +211,97 @@ impl Host { /// Returns the [`Target`] as target triple string pub fn triple(&self) -> &str { match self { - Host::X86_64AppleDarwin => "x86_64-apple-darwin", - Host::Aarch64AppleDarwin => "aarch64-apple-darwin", - Host::X86_64UnknownLinuxGnu => "x86_64-unknown-linux-gnu", - Host::Aarch64UnknownLinuxGnu => "aarch64-unknown-linux-gnu", - Host::X86_64UnknownLinuxMusl => "x86_64-unknown-linux-musl", - Host::Aarch64UnknownLinuxMusl => "aarch64-unknown-linux-musl", - Host::X86_64PcWindowsMsvc => "x86_64-pc-windows-msvc", - Host::Other(s) => s.as_str(), + TargetTriple::X86_64AppleDarwin => "x86_64-apple-darwin", + TargetTriple::Aarch64AppleDarwin => "aarch64-apple-darwin", + TargetTriple::X86_64UnknownLinuxGnu => "x86_64-unknown-linux-gnu", + TargetTriple::Aarch64UnknownLinuxGnu => "aarch64-unknown-linux-gnu", + TargetTriple::X86_64UnknownLinuxMusl => "x86_64-unknown-linux-musl", + TargetTriple::Aarch64UnknownLinuxMusl => "aarch64-unknown-linux-musl", + TargetTriple::X86_64PcWindowsMsvc => "x86_64-pc-windows-msvc", + TargetTriple::Other(s) => s.as_str(), } } } -impl<'a> From<&'a str> for Host { - fn from(s: &str) -> Host { +impl<'a> From<&'a str> for TargetTriple { + fn from(s: &str) -> TargetTriple { match s { - "x86_64-apple-darwin" => Host::X86_64AppleDarwin, - "x86_64-unknown-linux-gnu" => Host::X86_64UnknownLinuxGnu, - "x86_64-unknown-linux-musl" => Host::X86_64UnknownLinuxMusl, - "x86_64-pc-windows-msvc" => Host::X86_64PcWindowsMsvc, - "aarch64-apple-darwin" => Host::Aarch64AppleDarwin, - "aarch64-unknown-linux-gnu" => Host::Aarch64UnknownLinuxGnu, - "aarch64-unknown-linux-musl" => Host::Aarch64UnknownLinuxMusl, - s => Host::Other(s.to_owned()), + "x86_64-apple-darwin" => TargetTriple::X86_64AppleDarwin, + "x86_64-unknown-linux-gnu" => TargetTriple::X86_64UnknownLinuxGnu, + "x86_64-unknown-linux-musl" => TargetTriple::X86_64UnknownLinuxMusl, + "x86_64-pc-windows-msvc" => TargetTriple::X86_64PcWindowsMsvc, + "aarch64-apple-darwin" => TargetTriple::Aarch64AppleDarwin, + "aarch64-unknown-linux-gnu" => TargetTriple::Aarch64UnknownLinuxGnu, + "aarch64-unknown-linux-musl" => TargetTriple::Aarch64UnknownLinuxMusl, + s => TargetTriple::Other(s.to_owned()), } } } +impl Default for TargetTriple { + fn default() -> TargetTriple { + TargetTriple::DEFAULT + } +} + +impl std::str::FromStr for TargetTriple { + type Err = std::convert::Infallible; + + fn from_str(s: &str) -> Result { + Ok(s.into()) + } +} + +impl std::fmt::Display for TargetTriple { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.triple()) + } +} + +impl From for TargetTriple { + fn from(s: String) -> TargetTriple { + s.as_str().into() + } +} + +impl Serialize for TargetTriple { + fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(self.triple()) + } +} + #[derive(Debug, Clone, PartialEq, Eq, Hash, Deserialize)] #[serde(from = "String")] pub enum Target { - BuiltIn { triple: String }, - Custom { triple: String }, + BuiltIn { triple: TargetTriple }, + Custom { triple: TargetTriple }, } impl Target { + pub const DEFAULT: Self = Self::BuiltIn { + triple: TargetTriple::DEFAULT, + }; + fn new_built_in(triple: &str) -> Self { Target::BuiltIn { - triple: triple.to_owned(), + triple: triple.into(), } } fn new_custom(triple: &str) -> Self { Target::Custom { - triple: triple.to_owned(), + triple: triple.into(), } } - fn triple(&self) -> &str { + pub fn triple(&self) -> &str { + match *self { + Target::BuiltIn { 
ref triple } => triple.triple(), + Target::Custom { ref triple } => triple.triple(), + } + } + + pub fn target(&self) -> &TargetTriple { match *self { Target::BuiltIn { ref triple } => triple, Target::Custom { ref triple } => triple, @@ -192,7 +313,10 @@ impl Target { } fn is_bare_metal(&self) -> bool { - self.triple().contains("thumb") + self.triple().ends_with("-none") + || self.triple().ends_with("-none-elf") + || self.triple().ends_with("-none-eabi") + || self.triple().ends_with("-none-eabihf") } fn is_builtin(&self) -> bool { @@ -259,72 +383,11 @@ impl Target { arch_32bit && self.is_android() } +} - /// Returns the architecture name according to `dpkg` naming convention - /// - /// # Notes - /// - /// Some of these make no sense to use in our standard images - pub fn deb_arch(&self) -> Option<&'static str> { - match self.triple() { - "aarch64-unknown-linux-gnu" => Some("arm64"), - "aarch64-unknown-linux-musl" => Some("musl-linux-arm64"), - "aarch64-linux-android" => None, - "x86_64-unknown-linux-gnu" => Some("amd64"), - "x86_64-apple-darwin" => Some("darwin-amd64"), - "x86_64-unknown-linux-musl" => Some("musl-linux-amd64"), - - "x86_64-pc-windows-msvc" => None, - "arm-unknown-linux-gnueabi" => Some("armel"), - "arm-unknown-linux-gnueabihf" => Some("armhf"), - "armv7-unknown-linux-gnueabi" => Some("armel"), - "armv7-unknown-linux-gnueabihf" => Some("armhf"), - "thumbv7neon-unknown-linux-gnueabihf" => Some("armhf"), - "i586-unknown-linux-gnu" => Some("i386"), - "i686-unknown-linux-gnu" => Some("i386"), - "mips-unknown-linux-gnu" => Some("mips"), - "mipsel-unknown-linux-gnu" => Some("mipsel"), - "mips64-unknown-linux-gnuabi64" => Some("mips64"), - "mips64el-unknown-linux-gnuabi64" => Some("mips64el"), - "mips64-unknown-linux-muslabi64" => Some("musl-linux-mips64"), - "mips64el-unknown-linux-muslabi64" => Some("musl-linux-mips64el"), - "powerpc-unknown-linux-gnu" => Some("powerpc"), - "powerpc64-unknown-linux-gnu" => Some("ppc64"), - "powerpc64le-unknown-linux-gnu" => Some("ppc64el"), - "riscv64gc-unknown-linux-gnu" => Some("riscv64"), - "s390x-unknown-linux-gnu" => Some("s390x"), - "sparc64-unknown-linux-gnu" => Some("sparc64"), - "arm-unknown-linux-musleabihf" => Some("musl-linux-armhf"), - "arm-unknown-linux-musleabi" => Some("musl-linux-arm"), - "armv5te-unknown-linux-gnueabi" => None, - "armv5te-unknown-linux-musleabi" => None, - "armv7-unknown-linux-musleabi" => Some("musl-linux-arm"), - "armv7-unknown-linux-musleabihf" => Some("musl-linux-armhf"), - "i586-unknown-linux-musl" => Some("musl-linux-i386"), - "i686-unknown-linux-musl" => Some("musl-linux-i386"), - "mips-unknown-linux-musl" => Some("musl-linux-mips"), - "mipsel-unknown-linux-musl" => Some("musl-linux-mipsel"), - "arm-linux-androideabi" => None, - "armv7-linux-androideabi" => None, - "thumbv7neon-linux-androideabi" => None, - "i686-linux-android" => None, - "x86_64-linux-android" => None, - "x86_64-pc-windows-gnu" => None, - "i686-pc-windows-gnu" => None, - "asmjs-unknown-emscripten" => None, - "wasm32-unknown-emscripten" => None, - "x86_64-unknown-dragonfly" => Some("dragonflybsd-amd64"), - "i686-unknown-freebsd" => Some("freebsd-i386"), - "x86_64-unknown-freebsd" => Some("freebsd-amd64"), - "x86_64-unknown-netbsd" => Some("netbsd-amd64"), - "sparcv9-sun-solaris" => Some("solaris-sparc"), - "x86_64-sun-solaris" => Some("solaris-amd64"), - "thumbv6m-none-eabi" => Some("arm"), - "thumbv7em-none-eabi" => Some("arm"), - "thumbv7em-none-eabihf" => Some("armhf"), - "thumbv7m-none-eabi" => Some("arm"), - _ => None, - } +impl 
Default for Target { + fn default() -> Target { + Target::DEFAULT } } @@ -344,17 +407,23 @@ impl Target { } } -impl From for Target { - fn from(host: Host) -> Target { +impl From for Target { + fn from(host: TargetTriple) -> Target { match host { - Host::X86_64UnknownLinuxGnu => Target::new_built_in("x86_64-unknown-linux-gnu"), - Host::X86_64UnknownLinuxMusl => Target::new_built_in("x86_64-unknown-linux-musl"), - Host::X86_64AppleDarwin => Target::new_built_in("x86_64-apple-darwin"), - Host::X86_64PcWindowsMsvc => Target::new_built_in("x86_64-pc-windows-msvc"), - Host::Aarch64AppleDarwin => Target::new_built_in("aarch64-apple-darwin"), - Host::Aarch64UnknownLinuxGnu => Target::new_built_in("aarch64-unknown-linux-gnu"), - Host::Aarch64UnknownLinuxMusl => Target::new_built_in("aarch64-unknown-linux-musl"), - Host::Other(s) => Target::from( + TargetTriple::X86_64UnknownLinuxGnu => Target::new_built_in("x86_64-unknown-linux-gnu"), + TargetTriple::X86_64UnknownLinuxMusl => { + Target::new_built_in("x86_64-unknown-linux-musl") + } + TargetTriple::X86_64AppleDarwin => Target::new_built_in("x86_64-apple-darwin"), + TargetTriple::X86_64PcWindowsMsvc => Target::new_built_in("x86_64-pc-windows-msvc"), + TargetTriple::Aarch64AppleDarwin => Target::new_built_in("aarch64-apple-darwin"), + TargetTriple::Aarch64UnknownLinuxGnu => { + Target::new_built_in("aarch64-unknown-linux-gnu") + } + TargetTriple::Aarch64UnknownLinuxMusl => { + Target::new_built_in("aarch64-unknown-linux-musl") + } + TargetTriple::Other(s) => Target::from( s.as_str(), &rustc::target_list(&mut Verbosity::Quiet.into()) .expect("should be able to query rustc"), @@ -365,26 +434,67 @@ impl From for Target { impl From for Target { fn from(target_str: String) -> Target { - let target_host: Host = target_str.as_str().into(); + let target_host: TargetTriple = target_str.as_str().into(); target_host.into() } } impl Serialize for Target { fn serialize(&self, serializer: S) -> Result { + serializer.serialize_str(self.triple()) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum CommandVariant { + Cargo, + Xargo, + Zig, + Shell, +} + +impl CommandVariant { + pub fn create(uses_zig: bool, uses_xargo: bool) -> Result { + match (uses_zig, uses_xargo) { + (true, true) => eyre::bail!("cannot use both zig and xargo"), + (true, false) => Ok(CommandVariant::Zig), + (false, true) => Ok(CommandVariant::Xargo), + (false, false) => Ok(CommandVariant::Cargo), + } + } + + pub fn to_str(self) -> &'static str { match self { - Target::BuiltIn { triple } => serializer.serialize_str(triple), - Target::Custom { triple } => serializer.serialize_str(triple), + CommandVariant::Cargo => "cargo", + CommandVariant::Xargo => "xargo", + CommandVariant::Zig => "cargo-zigbuild", + CommandVariant::Shell => "sh", } } + + pub fn uses_xargo(self) -> bool { + self == CommandVariant::Xargo + } + + pub fn uses_zig(self) -> bool { + self == CommandVariant::Zig + } + + pub(crate) fn is_shell(self) -> bool { + self == CommandVariant::Shell + } } -fn warn_on_failure(target: &Target, toolchain: &str, msg_info: &mut MessageInfo) -> Result<()> { +fn warn_on_failure( + target: &Target, + toolchain: &QualifiedToolchain, + msg_info: &mut MessageInfo, +) -> Result<()> { let rust_std = format!("rust-std-{target}"); if target.is_builtin() { let component = rustup::check_component(&rust_std, toolchain, msg_info)?; if component.is_not_available() { - msg_info.warn(format!("rust-std is not available for {target}"))?; + msg_info.warn(format_args!("rust-std is not available for 
-fn warn_on_failure(target: &Target, toolchain: &str, msg_info: &mut MessageInfo) -> Result<()> { +fn warn_on_failure( + target: &Target, + toolchain: &QualifiedToolchain, + msg_info: &mut MessageInfo, +) -> Result<()> { let rust_std = format!("rust-std-{target}"); if target.is_builtin() { let component = rustup::check_component(&rust_std, toolchain, msg_info)?; if component.is_not_available() { - msg_info.warn(format!("rust-std is not available for {target}"))?; + msg_info.warn(format_args!("rust-std is not available for {target}"))?; msg_info.note( format_args!( r#"you may need to build components for the target via `-Z build-std=` or in your cross configuration specify `target.{target}.build-std` @@ -396,176 +506,338 @@ fn warn_on_failure(target: &Target, toolchain: &str, msg_info: &mut MessageInfo) Ok(()) } -pub fn run() -> Result<ExitStatus> { - let target_list = rustc::target_list(&mut Verbosity::Quiet.into())?; - let args = cli::parse(&target_list)?; - let mut msg_info = shell::MessageInfo::create(args.verbose, args.quiet, args.color.as_deref())?; +fn add_libc_version(triple: &str, zig_version: Option<&str>) -> String { + match zig_version { + Some(libc) => format!("{triple}.{libc}"), + None => triple.to_owned(), + } +} +pub fn run( + args: Args, + target_list: TargetList, + msg_info: &mut MessageInfo, +) -> Result<Option<ExitStatus>> { if args.version && args.subcommand.is_none() { - let commit_info = include_str!(concat!(env!("OUT_DIR"), "/commit-info.txt")); - msg_info.print(format!( - concat!("cross ", env!("CARGO_PKG_VERSION"), "{}"), - commit_info + msg_info.print(concat!( + "cross ", + env!("CARGO_PKG_VERSION"), + crate::commit_info!() ))?; } + if let Some(Subcommand::Other(command)) = &args.subcommand { + msg_info.warn(format_args!( + "specified cargo subcommand `{command}` is not supported by `cross`." + ))?; + return Ok(None); + } + let host_version_meta = rustc::version_meta()?; + let cwd = std::env::current_dir()?; - if let Some(metadata) = cargo_metadata_with_args(None, Some(&args), &mut msg_info)? { - let host = host_version_meta.host(); - let toml = toml(&metadata, &mut msg_info)?; - let config = Config::new(toml); - let target = args - .target - .or_else(|| config.target(&target_list)) - .unwrap_or_else(|| Target::from(host.triple(), &target_list)); - config.confusable_target(&target, &mut msg_info)?; - - let image_exists = match docker::image_name(&config, &target) { - Ok(_) => true, - Err(err) => { - msg_info.warn(err)?; - false + if let Some(metadata) = cargo_metadata_with_args(None, Some(&args), msg_info)? { + let CrossSetup { + config, + target, + uses_xargo, + uses_zig, + build_std, + zig_version, + toolchain, + is_remote, + engine, + image, + } = match setup(&host_version_meta, &metadata, &args, target_list, msg_info)? { + Some(setup) => setup, + _ => { + return Ok(None); } }; - if image_exists && host.is_supported(Some(&target)) { - let (toolchain, sysroot) = - rustc::get_sysroot(&host, &target, args.channel.as_deref(), &mut msg_info)?; - let mut is_nightly = toolchain.contains("nightly"); + config.confusable_target(&target, msg_info)?; - let installed_toolchains = rustup::installed_toolchains(&mut msg_info)?; + let picked_generic_channel = + matches!(toolchain.channel.as_str(), "stable" | "beta" | "nightly"); - if !installed_toolchains.into_iter().any(|t| t == toolchain) { - rustup::install_toolchain(&toolchain, &mut msg_info)?; + if image.platform.target.is_supported(Some(&target)) { + if image.platform.architecture != toolchain.host().architecture { + msg_info.warn(format_args!( + "toolchain `{toolchain}` may not run on image `{image}`" + ))?; } - // TODO: Provide a way to pick/match the toolchain version as a consumer of `cross`. - if let Some((rustc_version, channel, rustc_commit)) = rustup::rustc_version(&sysroot)?
{ - warn_host_version_mismatch( - &host_version_meta, - &toolchain, - &rustc_version, - &rustc_commit, - &mut msg_info, - )?; - is_nightly = channel == Channel::Nightly; - } - - let uses_build_std = config.build_std(&target).unwrap_or(false); - let uses_xargo = - !uses_build_std && config.xargo(&target).unwrap_or(!target.is_builtin()); - if !config.custom_toolchain() { - // build-std overrides xargo, but only use it if it's a built-in - // tool but not an available target or doesn't have rust-std. - let available_targets = rustup::available_targets(&toolchain, &mut msg_info)?; - - if !is_nightly && uses_build_std { - eyre::bail!( - "no rust-std component available for {}: must use nightly", - target.triple() - ); - } - - if !uses_xargo - && !available_targets.is_installed(&target) - && available_targets.contains(&target) - { - rustup::install(&target, &toolchain, &mut msg_info)?; - } else if !rustup::component_is_installed("rust-src", &toolchain, &mut msg_info)? { - rustup::install_component("rust-src", &toolchain, &mut msg_info)?; - } - if args.subcommand.map_or(false, |sc| sc == Subcommand::Clippy) - && !rustup::component_is_installed("clippy", &toolchain, &mut msg_info)? - { - rustup::install_component("clippy", &toolchain, &mut msg_info)?; + let mut is_nightly = toolchain.channel.contains("nightly"); + let mut rustc_version = None; + if let Some((version, channel, commit)) = toolchain.rustc_version()? { + if picked_generic_channel && toolchain.date.is_none() { + warn_host_version_mismatch( + &host_version_meta, + &toolchain, + &version, + &commit, + msg_info, + )?; } + is_nightly = channel == Channel::Nightly; + rustc_version = Some(version); } - let needs_interpreter = args.subcommand.map_or(false, |sc| sc.needs_interpreter()); - - let mut filtered_args = if args - .subcommand - .map_or(false, |s| !s.needs_target_in_command()) - { - let mut filtered_args = Vec::new(); - let mut args_iter = args.all.clone().into_iter(); - while let Some(arg) = args_iter.next() { - if arg == "--target" { - args_iter.next(); - } else if arg.starts_with("--target=") { - // NOOP - } else { - filtered_args.push(arg); - } - } - filtered_args - // Make sure --target is present - } else if !args.all.iter().any(|a| a.starts_with("--target")) { - let mut args_with_target = args.all.clone(); - args_with_target.push("--target".to_owned()); - args_with_target.push(target.triple().to_owned()); - args_with_target - } else { - args.all.clone() - }; + let available_targets = rustup::setup_rustup(&toolchain, msg_info)?; + + rustup::setup_components( + &target, + uses_xargo, + build_std.enabled(), + &toolchain, + is_nightly, + available_targets, + &args, + msg_info, + )?; - let is_test = args.subcommand.map_or(false, |sc| sc == Subcommand::Test); - if is_test && config.doctests().unwrap_or_default() && is_nightly { - filtered_args.push("-Zdoctest-xcompile".to_owned()); - } - if uses_build_std { - filtered_args.push("-Zbuild-std".to_owned()); - } + let filtered_args = + get_filtered_args(zig_version, &args, &target, &config, is_nightly, &build_std); - let is_remote = docker::Engine::is_remote(); let needs_docker = args .subcommand + .clone() .map_or(false, |sc| sc.needs_docker(is_remote)); if target.needs_docker() && needs_docker { - let engine = docker::Engine::new(None, Some(is_remote), &mut msg_info)?; - if host_version_meta.needs_interpreter() - && needs_interpreter - && target.needs_interpreter() - && !interpreter::is_registered(&target)? 
- { - docker::register(&engine, &target, &mut msg_info)?; + let paths = docker::DockerPaths::create( + &engine, + metadata, + cwd, + toolchain.clone(), + msg_info, + )?; + let options = docker::DockerOptions::new( + engine, + target.clone(), + config, + image, + crate::CommandVariant::create(uses_zig, uses_xargo)?, + rustc_version, + false, + ); + + if msg_info.should_fail() { + return Ok(None); } - let paths = docker::DockerPaths::create(&engine, metadata, cwd, sysroot)?; - let options = - docker::DockerOptions::new(engine, target.clone(), config, uses_xargo); - let status = docker::run(options, paths, &filtered_args, &mut msg_info) - .wrap_err("could not run container")?; + install_interpreter_if_needed( + &args, + host_version_meta, + &target, + &options, + msg_info, + )?; + let status = if let Some(status) = docker::run( + options, + paths, + &filtered_args, + args.subcommand.clone(), + msg_info, + ) + .wrap_err("could not run container")? + { + status + } else { + return Ok(None); + }; + let needs_host = args.subcommand.map_or(false, |sc| sc.needs_host(is_remote)); if !status.success() { - warn_on_failure(&target, &toolchain, &mut msg_info)?; + warn_on_failure(&target, &toolchain, msg_info)?; } if !(status.success() && needs_host) { - return Ok(status); + return Ok(Some(status)); } } } } + Ok(None) +} + +/// Check if an interpreter is needed and then install it. +pub fn install_interpreter_if_needed( + args: &Args, + host_version_meta: rustc_version::VersionMeta, + target: &Target, + options: &docker::DockerOptions, + msg_info: &mut MessageInfo, +) -> Result<(), color_eyre::Report> { + let needs_interpreter = args + .subcommand + .clone() + .map_or(false, |sc| sc.needs_interpreter()); + + if host_version_meta.needs_interpreter() + && needs_interpreter + && target.needs_interpreter() + && !interpreter::is_registered(target)? + { + options.engine.register_binfmt(target, msg_info)?; + } + Ok(()) +} - // if we fallback to the host cargo, use the same invocation that was made to cross - let argv: Vec<String> = env::args().skip(1).collect(); - msg_info.note("Falling back to `cargo` on the host.")?; - match args.subcommand { - Some(Subcommand::List) => { - // this won't print in order if we have both stdout and stderr.
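The `add_libc_version` helper introduced earlier in this diff, together with the `--target` rewriting in `get_filtered_args` (next hunk), is what lets `cargo-zigbuild` pin a glibc version: when a zig version such as `2.17` is configured, every `--target` value becomes `<triple>.<version>`. A standalone sketch of that rewrite under the same suffix convention (`rewrite_targets` is a hypothetical name, not part of the diff):

```rust
// Append a glibc version to a triple the way cargo-zigbuild expects,
// e.g. "x86_64-unknown-linux-gnu" + "2.17"
//   -> "x86_64-unknown-linux-gnu.2.17".
fn add_libc_version(triple: &str, zig_version: Option<&str>) -> String {
    match zig_version {
        Some(libc) => format!("{triple}.{libc}"),
        None => triple.to_owned(),
    }
}

// Rewrite both `--target <triple>` and `--target=<triple>` forms.
fn rewrite_targets(args: &[String], zig_version: Option<&str>) -> Vec<String> {
    let mut out = Vec::new();
    let mut iter = args.iter();
    while let Some(arg) = iter.next() {
        if arg == "--target" {
            out.push(arg.clone());
            if let Some(triple) = iter.next() {
                out.push(add_libc_version(triple, zig_version));
            }
        } else if let Some(stripped) = arg.strip_prefix("--target=") {
            out.push(format!("--target={}", add_libc_version(stripped, zig_version)));
        } else {
            out.push(arg.clone());
        }
    }
    out
}

fn main() {
    let args = vec!["build".to_owned(), "--target=x86_64-unknown-linux-gnu".to_owned()];
    assert_eq!(
        rewrite_targets(&args, Some("2.17"))[1],
        "--target=x86_64-unknown-linux-gnu.2.17"
    );
}
```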
- let out = cargo::run_and_get_output(&argv, &mut msg_info)?; - let stdout = out.stdout()?; - if out.status.success() && cli::is_subcommand_list(&stdout) { - cli::fmt_subcommands(&stdout, &mut msg_info)?; +/// Get filtered args to pass to cargo +pub fn get_filtered_args( + zig_version: Option<String>, + args: &Args, + target: &Target, + config: &Config, + is_nightly: bool, + build_std: &BuildStd, +) -> Vec<String> { + let add_libc = |triple: &str| add_libc_version(triple, zig_version.as_deref()); + let mut filtered_args = if args + .subcommand + .clone() + .map_or(false, |s| !s.needs_target_in_command()) + { + let mut filtered_args = Vec::new(); + let mut args_iter = args.cargo_args.clone().into_iter(); + while let Some(arg) = args_iter.next() { + if arg == "--target" { + args_iter.next(); + } else if arg.starts_with("--target=") { + // NOOP + } else { + filtered_args.push(arg); + } + } + filtered_args + // Make sure --target is present + } else if !args.cargo_args.iter().any(|a| a.starts_with("--target")) { + let mut args_with_target = args.cargo_args.clone(); + args_with_target.push("--target".to_owned()); + args_with_target.push(add_libc(target.triple())); + args_with_target + } else if zig_version.is_some() { + let mut filtered_args = Vec::new(); + let mut args_iter = args.cargo_args.clone().into_iter(); + while let Some(arg) = args_iter.next() { + if arg == "--target" { + filtered_args.push("--target".to_owned()); + if let Some(triple) = args_iter.next() { + filtered_args.push(add_libc(&triple)); + } + } else if let Some(stripped) = arg.strip_prefix("--target=") { + filtered_args.push(format!("--target={}", add_libc(stripped))); } else { - // Not a list subcommand, which can happen with weird edge-cases. - print!("{}", stdout); - io::stdout().flush().expect("could not flush"); + filtered_args.push(arg); } - Ok(out.status) } - _ => cargo::run(&argv, &mut msg_info).map_err(Into::into), + filtered_args + } else { + args.cargo_args.clone() + }; + + let is_test = args + .subcommand + .clone() + .map_or(false, |sc| sc == Subcommand::Test); + if is_test && config.doctests().unwrap_or_default() && is_nightly { + filtered_args.push("-Zdoctest-xcompile".to_owned()); + } + + if build_std.enabled() { + let mut arg = "-Zbuild-std".to_owned(); + if let BuildStd::Crates(crates) = build_std { + arg.push('='); + arg.push_str(&crates.join(",")); + } + filtered_args.push(arg); } + + filtered_args.extend(args.rest_args.iter().cloned()); + filtered_args +} + +/// Setup cross configuration +pub fn setup( + host_version_meta: &rustc_version::VersionMeta, + metadata: &CargoMetadata, + args: &Args, + target_list: TargetList, + msg_info: &mut MessageInfo, +) -> Result<Option<CrossSetup>, color_eyre::Report> { + let host = host_version_meta.host(); + let toml = toml(metadata, msg_info)?; + let config = Config::new(Some(toml)); + let target = args + .target + .clone() + .or_else(|| config.target(&target_list)) + .unwrap_or_else(|| Target::from(host.triple(), &target_list)); + let build_std = config.build_std(&target).unwrap_or_default(); + let uses_xargo = !build_std.enabled() && config.xargo(&target).unwrap_or(!target.is_builtin()); + let uses_zig = config.zig(&target).unwrap_or(false); + let zig_version = config.zig_version(&target); + let image = match docker::get_image(&config, &target, uses_zig) { + Ok(i) => i, + Err(docker::GetImageError::NoCompatibleImages(..)) + if config.dockerfile(&target).is_some() => + { + "scratch".into() + } + Err(err) => { + msg_info.warn(err)?; + + return Ok(None); + } };
let default_toolchain = QualifiedToolchain::default(&config, msg_info)?; + let mut toolchain = if let Some(channel) = &args.channel { + let picked_toolchain: Toolchain = channel.parse()?; + + if let Some(picked_host) = &picked_toolchain.host { + return Err(eyre::eyre!("the specified toolchain `{picked_toolchain}` can't be used")) + .with_suggestion(|| { + format!( + "try `cross +{}` instead", + picked_toolchain.remove_host() + ) + }).with_section(|| format!( + r#"Overriding the toolchain in cross is only possible in CLI by specifying a channel and optional date: `+channel[-YYYY-MM-DD]`. +To override the toolchain mounted in the image, set `target.{target}.image.toolchain = "{picked_host}"`"#).header("Note:".bright_cyan())); + } + + default_toolchain.with_picked(picked_toolchain)? + } else { + default_toolchain + }; + let is_remote = docker::Engine::is_remote(); + let engine = docker::Engine::new(None, Some(is_remote), msg_info)?; + let image = image.to_definite_with(&engine, msg_info)?; + toolchain.replace_host(&image.platform); + Ok(Some(CrossSetup { + config, + target, + uses_xargo, + uses_zig, + build_std, + zig_version, + toolchain, + is_remote, + engine, + image, + })) +} + +#[derive(Debug)] +pub struct CrossSetup { + pub config: Config, + pub target: Target, + pub uses_xargo: bool, + pub uses_zig: bool, + pub build_std: BuildStd, + pub zig_version: Option<String>, + pub toolchain: QualifiedToolchain, + pub is_remote: bool, + pub engine: docker::Engine, + pub image: docker::Image, } #[derive(PartialEq, Eq, Debug)] @@ -578,19 +850,16 @@ pub(crate) enum VersionMatch { pub(crate) fn warn_host_version_mismatch( host_version_meta: &rustc_version::VersionMeta, - toolchain: &str, + toolchain: &QualifiedToolchain, rustc_version: &rustc_version::Version, rustc_commit: &str, msg_info: &mut MessageInfo, ) -> Result<VersionMatch> { - let host_commit = (&host_version_meta.short_version_string) - .splitn(3, ' ') - .nth(2); + let host_commit = host_version_meta.short_version_string.splitn(3, ' ').nth(2); let rustc_commit_date = rustc_commit .split_once(' ') .and_then(|x| x.1.strip_suffix(')')); - // This should only hit on non Host::X86_64UnknownLinuxGnu hosts if rustc_version != &host_version_meta.semver || (Some(rustc_commit) != host_commit) { let versions = rustc_version.cmp(&host_version_meta.semver); let dates = rustc_commit_date.cmp(&host_version_meta.commit_date.as_deref()); @@ -600,59 +869,115 @@ pub(crate) fn warn_host_version_mismatch( host_version_meta.short_version_string ); if versions.is_lt() || (versions.is_eq() && dates.is_lt()) { - msg_info.warn(format!("using older {rustc_warning}.\n > Update with `rustup update --force-non-host {toolchain}`"))?; + if cfg!(not(test)) { + msg_info.info(format_args!("using older {rustc_warning}.\n > Update with `rustup update --force-non-host {toolchain}`"))?; + } return Ok(VersionMatch::OlderTarget); } else if versions.is_gt() || (versions.is_eq() && dates.is_gt()) { - msg_info.warn(format!( - "using newer {rustc_warning}.\n > Update with `rustup update`" - ))?; + if cfg!(not(test)) { + msg_info.info(format_args!( + "using newer {rustc_warning}.\n > Update with `rustup update`" + ))?; + } return Ok(VersionMatch::NewerTarget); } else { - msg_info.warn(format!("using {rustc_warning}."))?; + if cfg!(not(test)) { + msg_info.info(format_args!("using {rustc_warning}."))?; + } return Ok(VersionMatch::Different); } } Ok(VersionMatch::Same) }
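`warn_host_version_mismatch` above decides between `OlderTarget`, `NewerTarget`, and `Different` by comparing semver versions first and falling back to commit dates on a tie. A self-contained sketch of that two-level comparison (the `classify` and `Mismatch` names are illustrative, not from the diff); it relies on the `semver` crate, which this diff already depends on:

```rust
use std::cmp::Ordering;

#[derive(Debug, PartialEq, Eq)]
enum Mismatch { OlderTarget, NewerTarget, Different }

// Dates are ISO-8601 (`YYYY-MM-DD`) strings, so their lexicographic
// order is also their chronological order.
fn classify(versions: Ordering, dates: Ordering) -> Mismatch {
    if versions.is_lt() || (versions.is_eq() && dates.is_lt()) {
        Mismatch::OlderTarget
    } else if versions.is_gt() || (versions.is_eq() && dates.is_gt()) {
        Mismatch::NewerTarget
    } else {
        Mismatch::Different
    }
}

fn main() {
    let host = semver::Version::new(1, 66, 0);
    let target = semver::Version::new(1, 65, 0);
    assert_eq!(
        classify(target.cmp(&host), Ordering::Equal),
        Mismatch::OlderTarget
    );
}
```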
commit_info { + () => { + include_str!(concat!(env!("OUT_DIR"), "/commit-info.txt")) + }; +} + /// Obtains the [`CrossToml`] from one of the possible locations /// /// These locations are checked in the following order: /// 1. If the `CROSS_CONFIG` variable is set, it tries to read the config from its value /// 2. Otherwise, the `Cross.toml` in the project root is used -/// 3. Package metadata in the Cargo.toml +/// 3. Package and workspace metadata in the Cargo.toml /// -/// The values from `CROSS_CONFIG` or `Cross.toml` are concatenated with the package +/// The values from `CROSS_CONFIG` or `Cross.toml` are concatenated with the /// metadata in `Cargo.toml`, with `Cross.toml` having the highest priority. -fn toml(metadata: &CargoMetadata, msg_info: &mut MessageInfo) -> Result> { +pub fn toml(metadata: &CargoMetadata, msg_info: &mut MessageInfo) -> Result { let root = &metadata.workspace_root; let cross_config_path = match env::var("CROSS_CONFIG") { Ok(var) => PathBuf::from(var), Err(_) => root.join("Cross.toml"), }; - // Attempts to read the cross config from the Cargo.toml - let cargo_toml_str = - file::read(root.join("Cargo.toml")).wrap_err("failed to read Cargo.toml")?; - - if cross_config_path.exists() { + let mut config = if cross_config_path.exists() { let cross_toml_str = file::read(&cross_config_path) .wrap_err_with(|| format!("could not read file `{cross_config_path:?}`"))?; - let (config, _) = CrossToml::parse(&cargo_toml_str, &cross_toml_str, msg_info) - .wrap_err_with(|| format!("failed to parse file `{cross_config_path:?}` as TOML",))?; + let (config, _) = CrossToml::parse_from_cross_str( + &cross_toml_str, + Some(cross_config_path.to_utf8()?), + msg_info, + ) + .wrap_err_with(|| format!("failed to parse file `{cross_config_path:?}` as TOML",))?; - Ok(Some(config)) + config } else { // Checks if there is a lowercase version of this file if root.join("cross.toml").exists() { msg_info.warn("There's a file named cross.toml, instead of Cross.toml. You may want to rename it, or it won't be considered.")?; } + CrossToml::default() + }; + let mut found: Option> = None; + + if let Some(workspace_metadata) = &metadata.metadata { + let workspace_metadata = + serde_json::de::from_str::(workspace_metadata.get())?; + if let Some(cross) = dbg!(workspace_metadata.get("cross")) { + found = Some( + metadata + .workspace_root + .join("Cargo.toml") + .to_utf8()? + .to_owned() + .into(), + ); + let (workspace_config, _) = + CrossToml::parse_from_deserializer(cross, found.as_deref(), msg_info)?; + config = config.merge(workspace_config)?; + } + } - if let Some((cfg, _)) = CrossToml::parse_from_cargo(&cargo_toml_str, msg_info)? 
- if let Some((cfg, _)) = CrossToml::parse_from_cargo(&cargo_toml_str, msg_info)? { - Ok(Some(cfg)) - } else { - Ok(None) + for (package, package_metadata) in metadata + .packages + .iter() + .filter_map(|p| Some((p.manifest_path.as_path(), p.metadata.as_deref()?))) + { + let package_metadata = + serde_json::de::from_str::<serde_json::Value>(package_metadata.get())?; + + if let Some(cross) = package_metadata.get("cross") { + if let Some(found) = &found { + msg_info.warn(format_args!("Found conflicting cross configuration in `{}`, use `[workspace.metadata.cross]` in the workspace manifest instead.\nCurrently only using configuration from `{}`", package.to_utf8()?, found))?; + continue; + } + let (workspace_config, _) = CrossToml::parse_from_deserializer( + cross, + Some(metadata.workspace_root.join("Cargo.toml").to_utf8()?), + msg_info, + )?; + config = config.merge(workspace_config)?; + found = Some(package.to_utf8()?.into()); } } + + Ok(config) } diff --git a/src/rustc.rs index 5b9a42404..ef50475d2 100644 --- a/src/rustc.rs +++ b/src/rustc.rs @@ -1,12 +1,14 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use std::process::Command; use rustc_version::{Version, VersionMeta}; +use serde::Deserialize; +use crate::docker::ImagePlatform; use crate::errors::*; use crate::extensions::{env_program, CommandExt}; use crate::shell::MessageInfo; -use crate::{Host, Target}; +use crate::TargetTriple; #[derive(Debug)] pub struct TargetList { @@ -21,14 +23,14 @@ impl TargetList { } pub trait VersionMetaExt { - fn host(&self) -> Host; + fn host(&self) -> TargetTriple; fn needs_interpreter(&self) -> bool; fn commit_hash(&self) -> String; } impl VersionMetaExt for VersionMeta { - fn host(&self) -> Host { - Host::from(&*self.host) + fn host(&self) -> TargetTriple { + TargetTriple::from(&*self.host) } fn needs_interpreter(&self) -> bool { @@ -79,6 +81,275 @@ pub fn hash_from_version_string(version: &str, index: usize) -> String { short_commit_hash(&const_sha1::sha1(&buffer).to_string()) } +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +pub struct QualifiedToolchain { + pub channel: String, + pub date: Option<String>, + pub(self) host: ImagePlatform, + pub is_custom: bool, + pub full: String, + pub(self) sysroot: PathBuf, +} + +impl QualifiedToolchain { + pub fn new( + channel: &str, + date: &Option<String>, + host: &ImagePlatform, + sysroot: &Path, + is_custom: bool, + ) -> Self { + let mut this = Self { + channel: channel.to_owned(), + date: date.clone(), + host: host.clone(), + is_custom, + full: if let Some(date) = date { + format!("{}-{}-{}", channel, date, host.target) + } else { + format!("{}-{}", channel, host.target) + }, + sysroot: sysroot.to_owned(), + }; + if !is_custom { + this.sysroot.set_file_name(&this.full); + } + this + } + + /// Replace the host, does nothing if run on a custom toolchain + pub fn replace_host(&mut self, host: &ImagePlatform) -> &mut Self { + if !self.is_custom { + *self = Self::new(&self.channel, &self.date, host, &self.sysroot, false); + self.sysroot.set_file_name(&self.full); + } + self + }
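`QualifiedToolchain::new` above derives the `full` rustup name from channel, optional date, and host, then points the sysroot's final path component at that name. A sketch of just the naming rule, following the format visible in the code (`channel[-YYYY-MM-DD]-host`); the helper name is illustrative:

```rust
// "nightly" + Some("2022-04-26") + "x86_64-unknown-linux-gnu"
// -> "nightly-2022-04-26-x86_64-unknown-linux-gnu"
fn full_toolchain_name(channel: &str, date: Option<&str>, host_target: &str) -> String {
    match date {
        Some(date) => format!("{channel}-{date}-{host_target}"),
        None => format!("{channel}-{host_target}"),
    }
}

fn main() {
    assert_eq!(
        full_toolchain_name("stable", None, "aarch64-apple-darwin"),
        "stable-aarch64-apple-darwin"
    );
    assert_eq!(
        full_toolchain_name("nightly", Some("2022-04-26"), "x86_64-unknown-linux-gnu"),
        "nightly-2022-04-26-x86_64-unknown-linux-gnu"
    );
}
```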
+ /// Makes a good guess as to what the toolchain is compiled to run on. + pub(crate) fn custom( + name: &str, + sysroot: &Path, + config: &crate::config::Config, + msg_info: &mut MessageInfo, + ) -> Result<Self> { + if let Some(compat) = config.custom_toolchain_compat() { + let mut toolchain: QualifiedToolchain = QualifiedToolchain::parse( + sysroot.to_owned(), + &compat, + config, + msg_info, + ) + .wrap_err( + "could not parse CROSS_CUSTOM_TOOLCHAIN_COMPAT as a fully qualified toolchain name", + )?; + toolchain.is_custom = true; + toolchain.full = name.to_owned(); + return Ok(toolchain); + } + // a toolchain installed by https://github.com/rust-lang/cargo-bisect-rustc + if name.starts_with("bisector-nightly") { + let (_, toolchain) = name.split_once('-').expect("should include -"); + let mut toolchain = + QualifiedToolchain::parse(sysroot.to_owned(), toolchain, config, msg_info) + .wrap_err("could not parse bisector toolchain")?; + toolchain.is_custom = true; + toolchain.full = name.to_owned(); + return Ok(toolchain); + } else if let Ok(stdout) = Command::new(sysroot.join("bin/rustc")) + .arg("-Vv") + .run_and_get_stdout(msg_info) + { + let rustc_version::VersionMeta { + build_date, + channel, + host, + .. + } = rustc_version::version_meta_for(&stdout)?; + let mut toolchain = QualifiedToolchain::new( + match channel { + rustc_version::Channel::Dev => "dev", + rustc_version::Channel::Nightly => "nightly", + rustc_version::Channel::Beta => "beta", + rustc_version::Channel::Stable => "stable", + }, + &build_date, + &ImagePlatform::from_target(host.into())?, + sysroot, + true, + ); + toolchain.full = name.to_owned(); + return Ok(toolchain); + } + Err(eyre::eyre!( + "cross can not figure out what your custom toolchain is" + )) + .suggestion("set `CROSS_CUSTOM_TOOLCHAIN_COMPAT` to a fully qualified toolchain name: i.e `nightly-aarch64-unknown-linux-musl`") + } + + pub fn host(&self) -> &ImagePlatform { + &self.host + } + + pub fn get_sysroot(&self) -> &Path { + &self.sysroot + } + + /// Grab the current default toolchain + pub fn default(config: &crate::config::Config, msg_info: &mut MessageInfo) -> Result<Self> { + let sysroot = sysroot(msg_info)?; + + let default_toolchain_name = sysroot + .file_name() + .ok_or_else(|| eyre::eyre!("couldn't get name of active toolchain"))? + .to_str() + .ok_or_else(|| eyre::eyre!("toolchain was not utf-8"))?; + + if !config.custom_toolchain() { + QualifiedToolchain::parse(sysroot.clone(), default_toolchain_name, config, msg_info) + } else { + QualifiedToolchain::custom(default_toolchain_name, &sysroot, config, msg_info) + } + }
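When nothing else identifies a custom toolchain, `custom` above falls back to running the toolchain's own `rustc -Vv` and feeding the output to `rustc_version::version_meta_for`, which is a real API of the `rustc_version` crate. A hedged, standalone sketch of that probe, with error handling reduced to `Option`:

```rust
use std::process::Command;

// Ask a specific rustc binary what it is; version_meta_for parses the
// verbose output into channel, host triple, build date, and so on.
fn probe_rustc(rustc: &std::path::Path) -> Option<rustc_version::VersionMeta> {
    let out = Command::new(rustc).arg("-Vv").output().ok()?;
    let stdout = String::from_utf8(out.stdout).ok()?;
    rustc_version::version_meta_for(&stdout).ok()
}

fn main() {
    // Probing the rustc on PATH for demonstration purposes.
    if let Some(meta) = probe_rustc("rustc".as_ref()) {
        println!("channel: {:?}, host: {}", meta.channel, meta.host);
    }
}
```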
+ /// Merge a "picked" toolchain, overriding set fields. + pub fn with_picked(self, picked: Toolchain) -> Result<Self> { + let date = picked.date.or(self.date); + let host = picked + .host + .map_or(Ok(self.host), ImagePlatform::from_target)?; + let channel = picked.channel; + + Ok(QualifiedToolchain::new( + &channel, + &date, + &host, + &self.sysroot, + false, + )) + } + + pub fn set_sysroot(&mut self, convert: impl Fn(&Path) -> PathBuf) { + self.sysroot = convert(&self.sysroot); + } +} + +impl std::fmt::Display for QualifiedToolchain { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.full) + } +} + +impl QualifiedToolchain { + fn parse( + sysroot: PathBuf, + toolchain: &str, + config: &crate::config::Config, + msg_info: &mut MessageInfo, + ) -> Result<Self> { + match toolchain.parse::<Toolchain>() { + Ok(Toolchain { + channel, + date, + host: Some(host), + is_custom, + full, + }) => Ok(QualifiedToolchain { + channel, + date, + host: ImagePlatform::from_target(host)?, + is_custom, + full, + sysroot, + }), + Ok(_) | Err(_) if config.custom_toolchain() => { + QualifiedToolchain::custom(toolchain, &sysroot, config, msg_info) + } + Ok(_) => Err(eyre::eyre!("toolchain is not fully qualified") + .with_note(|| "cross expects the toolchain to be a rustup installed toolchain") + .with_suggestion(|| { + "if you're using a custom toolchain try setting `CROSS_CUSTOM_TOOLCHAIN=1` or install rust via rustup" + })), + Err(e) => Err(e), + } + } +} + +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +pub struct Toolchain { + pub channel: String, + pub date: Option<String>, + pub host: Option<TargetTriple>, + pub is_custom: bool, + pub full: String, +} + +impl Toolchain { + pub fn remove_host(&self) -> Self { + let mut new = Self { + host: None, + ..self.clone() + }; + if let Some(host) = &self.host { + new.full = new.full.replace(&format!("-{host}"), ""); + } + new + } +} + +impl std::fmt::Display for Toolchain { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(&self.full) + } +} + +impl std::str::FromStr for Toolchain { + type Err = eyre::Report; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + fn dig(s: &str) -> bool { + s.chars().all(|c: char| c.is_ascii_digit()) + } + if let Some((channel, parts)) = s.split_once('-') { + if parts.starts_with(|c: char| c.is_ascii_digit()) { + // a date, YYYY-MM-DD + let mut split = parts.splitn(4, '-'); + let ymd = [split.next(), split.next(), split.next()]; + let ymd = match ymd { + [Some(y), Some(m), Some(d)] if dig(y) && dig(m) && dig(d) => { + format!("{y}-{m}-{d}") + } + _ => eyre::bail!("invalid toolchain `{s}`"), + }; + Ok(Toolchain { + channel: channel.to_owned(), + date: Some(ymd), + host: split.next().map(|s| s.into()), + is_custom: false, + full: s.to_owned(), + }) + } else { + // channel-host + Ok(Toolchain { + channel: channel.to_owned(), + date: None, + host: Some(parts.into()), + is_custom: false, + full: s.to_owned(), + }) + } + } else { + Ok(Toolchain { + channel: s.to_owned(), + date: None, + host: None, + is_custom: false, + full: s.to_owned(), + }) + } + } +}
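The `FromStr` impl above splits `channel[-YYYY-MM-DD][-host]` by treating a leading digit after the first `-` as the start of a date. Usage, assuming the `Toolchain` type from this diff is in scope:

```rust
fn main() -> eyre::Result<()> {
    // channel only
    let t: Toolchain = "nightly".parse()?;
    assert!(t.date.is_none() && t.host.is_none());

    // channel + date + host
    let t: Toolchain = "nightly-2022-04-26-x86_64-unknown-linux-gnu".parse()?;
    assert_eq!(t.channel, "nightly");
    assert_eq!(t.date.as_deref(), Some("2022-04-26"));
    assert!(t.host.is_some());

    // channel + host, no date: the part after `-` does not start
    // with a digit, so it is parsed as a host triple.
    let t: Toolchain = "stable-aarch64-apple-darwin".parse()?;
    assert!(t.date.is_none() && t.host.is_some());
    Ok(())
}
```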
#[must_use] pub fn rustc_command() -> Command { Command::new(env_program("RUSTC", "rustc")) } pub fn target_list(msg_info: &mut MessageInfo) -> Result<TargetList> { rustc_command() - .args(&["--print", "target-list"]) + .args(["--print", "target-list"]) .run_and_get_stdout(msg_info) .map(|s| TargetList { triples: s.lines().map(|l| l.to_owned()).collect(), }) } -pub fn sysroot(host: &Host, target: &Target, msg_info: &mut MessageInfo) -> Result<PathBuf> { - let mut stdout = rustc_command() - .args(&["--print", "sysroot"]) +pub fn sysroot(msg_info: &mut MessageInfo) -> Result<PathBuf> { + let stdout = rustc_command() + .args(["--print", "sysroot"]) + .run_and_get_stdout(msg_info)? + .trim() + .to_owned(); - - // On hosts other than Linux, specify the correct toolchain path. - if host != &Host::X86_64UnknownLinuxGnu && target.needs_docker() { - stdout = stdout.replacen(host.triple(), Host::X86_64UnknownLinuxGnu.triple(), 1); - } - Ok(PathBuf::from(stdout)) } -pub fn get_sysroot( - host: &Host, - target: &Target, - channel: Option<&str>, - msg_info: &mut MessageInfo, -) -> Result<(String, PathBuf)> { - let mut sysroot = sysroot(host, target, msg_info)?; - let default_toolchain = sysroot - .file_name() - .and_then(|file_name| file_name.to_str()) - .ok_or_else(|| eyre::eyre!("couldn't get toolchain name"))?; - let toolchain = if let Some(channel) = channel { - [channel] - .iter() - .cloned() - .chain(default_toolchain.splitn(2, '-').skip(1)) - .collect::<Vec<_>>() - .join("-") - } else { - default_toolchain.to_owned() - }; - sysroot.set_file_name(&toolchain); - - Ok((toolchain, sysroot)) -} - pub fn version_meta() -> Result<VersionMeta> { rustc_version::version_meta().wrap_err("couldn't fetch the `rustc` version") } @@ -142,6 +381,17 @@ pub fn version_meta() -> Result<VersionMeta> { mod tests { use super::*; + #[test] + fn bisect() { + QualifiedToolchain::custom( + "bisector-nightly-2022-04-26-x86_64-unknown-linux-gnu", + "/tmp/cross/sysroot".as_ref(), + &crate::config::Config::new(None), + &mut MessageInfo::create(2, false, None).unwrap(), + ) + .unwrap(); + } + #[test] fn hash_from_rustc() { assert_eq!( diff --git a/src/rustup.rs index a13b1dad5..cbd0ea4e2 100644 --- a/src/rustup.rs +++ b/src/rustup.rs @@ -1,18 +1,19 @@ -use std::path::{Path, PathBuf}; +use std::path::PathBuf; use std::process::Command; use rustc_version::{Channel, Version}; use crate::errors::*; pub use crate::extensions::{CommandExt, OutputExt}; +use crate::rustc::QualifiedToolchain; use crate::shell::{MessageInfo, Verbosity}; use crate::Target; #[derive(Debug)] pub struct AvailableTargets { - default: String, - installed: Vec<String>, - not_installed: Vec<String>, + pub default: String, + pub installed: Vec<String>, + pub not_installed: Vec<String>, } impl AvailableTargets { @@ -27,6 +28,31 @@ impl AvailableTargets { } } +pub fn setup_rustup( + toolchain: &QualifiedToolchain, + msg_info: &mut MessageInfo, +) -> Result<AvailableTargets> { + if !toolchain.is_custom + && !installed_toolchains(msg_info)? + .into_iter() + .any(|t| t == toolchain.to_string()) + { + install_toolchain(toolchain, msg_info)?; + } + let available_targets = if !toolchain.is_custom { + available_targets(&toolchain.full, msg_info).with_note(|| { + format!("cross would use the toolchain '{toolchain}' for mounting rust") + })? + } else { + AvailableTargets { + default: String::new(), + installed: vec![], + not_installed: vec![], + } + }; + Ok(available_targets) +} + fn rustup_command(msg_info: &mut MessageInfo, no_flags: bool) -> Command { let mut cmd = Command::new("rustup"); if no_flags { @@ -36,7 +62,7 @@ fn rustup_command(msg_info: &mut MessageInfo, no_flags: bool) -> Command { Verbosity::Quiet => { cmd.arg("--quiet"); } - Verbosity::Verbose => { + Verbosity::Verbose(2..) => { cmd.arg("--verbose"); } _ => (), } cmd }
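`active_toolchain` and `installed_toolchains` (next hunks) both scrape rustup's human-readable output. Assuming rustup's usual formatting, where the active toolchain line reads like `stable-x86_64-unknown-linux-gnu (default)` and `rustup toolchain list` marks the default with a ` (default)` suffix, a sketch of that parsing on captured strings (function names are hypothetical):

```rust
// The toolchain name is everything before the first space.
fn parse_active_toolchain(stdout: &str) -> Option<&str> {
    Some(stdout.trim().split_once(' ')?.0)
}

// Strip the " (default)" marker to get bare toolchain names.
fn parse_toolchain_list(stdout: &str) -> Vec<String> {
    stdout
        .lines()
        .map(|l| l.trim_end_matches(" (default)").trim().to_owned())
        .collect()
}

fn main() {
    assert_eq!(
        parse_active_toolchain("stable-x86_64-unknown-linux-gnu (default)"),
        Some("stable-x86_64-unknown-linux-gnu")
    );
    let names = parse_toolchain_list(
        "stable-x86_64-unknown-linux-gnu (default)\nnightly-x86_64-unknown-linux-gnu\n",
    );
    assert_eq!(names[0], "stable-x86_64-unknown-linux-gnu");
}
```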
+pub fn active_toolchain(msg_info: &mut MessageInfo) -> Result<String> { + let out = rustup_command(msg_info, true) + .args(["show", "active-toolchain"]) + .run_and_get_output(msg_info)?; + + Ok(out + .stdout()? + .split_once(' ') + .ok_or_else(|| eyre::eyre!("rustup returned invalid data"))? + .0 + .to_owned()) +} + pub fn installed_toolchains(msg_info: &mut MessageInfo) -> Result<Vec<String>> { let out = rustup_command(msg_info, true) - .args(&["toolchain", "list"]) + .args(["toolchain", "list"]) .run_and_get_stdout(msg_info)?; Ok(out @@ -60,21 +99,39 @@ pub fn installed_toolchains(msg_info: &mut MessageInfo) -> Result<Vec<String>> { .collect()) } -pub fn available_targets(toolchain: &str, msg_info: &mut MessageInfo) -> Result<AvailableTargets> { +pub fn available_targets( + // this is explicitly a string and not `QualifiedToolchain`, + // this is because we use this as a way to ensure that + // the toolchain is an official toolchain, if this errors on + // `is a custom toolchain`, we tell the user to set CROSS_CUSTOM_TOOLCHAIN + // to handle the logic needed. + toolchain: &str, + msg_info: &mut MessageInfo, +) -> Result<AvailableTargets> { let mut cmd = rustup_command(msg_info, true); - cmd.args(&["target", "list", "--toolchain", toolchain]); + + cmd.args(["target", "list", "--toolchain", toolchain]); let output = cmd .run_and_get_output(msg_info) .suggestion("is rustup installed?")?; if !output.status.success() { + let mut err = cmd + .status_result(msg_info, output.status, Some(&output)) + .expect_err("we know the command failed") + .to_section_report(); if String::from_utf8_lossy(&output.stderr).contains("is a custom toolchain") { - eyre::bail!("{toolchain} is a custom toolchain. To use it, you'll need to set the environment variable `CROSS_CUSTOM_TOOLCHAIN=1`") + err = err.wrap_err(format!("'{toolchain}' is a custom toolchain.")) + .suggestion(r#"To use this toolchain with cross, you'll need to set the environment variable `CROSS_CUSTOM_TOOLCHAIN=1` +cross will not attempt to configure the toolchain further so that it can run your binary."#); + } else if String::from_utf8_lossy(&output.stderr).contains("does not support components") { + err = err.suggestion(format!( + "try reinstalling the '{toolchain}' toolchain +$ rustup toolchain uninstall {toolchain} +$ rustup toolchain install {toolchain} --force-non-host" + )); } - return Err(cmd - .status_result(msg_info, output.status, Some(&output)) - .unwrap_err() - .to_section_report()); + return Err(err); } let out = output.stdout()?; let mut default = String::new(); @@ -104,33 +161,61 @@ pub fn available_targets(toolchain: &str, msg_info: &mut MessageInfo) -> Result< }) } -pub fn install_toolchain(toolchain: &str, msg_info: &mut MessageInfo) -> Result<()> { - rustup_command(msg_info, false) - .args(&["toolchain", "add", toolchain, "--profile", "minimal"]) +fn version(msg_info: &mut MessageInfo) -> Result<semver::Version> { + let out = rustup_command(msg_info, false) + .arg("--version") + .run_and_get_stdout(msg_info)?; + + match out + .lines() + .next() + .and_then(|line| line.split_whitespace().nth(1)) + { + Some(version) => { + semver::Version::parse(version).wrap_err_with(|| "failed to parse rustup version") + } + None => eyre::bail!("failed to get rustup version"), + } +}
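`install_toolchain` (next hunk) only passes `--force-non-host` when rustup is at least 1.25.0, the release that added the flag, and `version` above recovers the rustup version by parsing the second word of `rustup --version`. A sketch of the gate; the example output string is illustrative only:

```rust
// The first line of `rustup --version` looks like
// "rustup 1.25.2 (<hash> <date>)": the version is the second word.
fn parse_rustup_version(first_line: &str) -> Option<semver::Version> {
    let word = first_line.split_whitespace().nth(1)?;
    semver::Version::parse(word).ok()
}

fn main() {
    let v = parse_rustup_version("rustup 1.25.2 (hash 2023-01-01)").unwrap();
    // `--force-non-host` is only understood from rustup 1.25.0 onwards.
    assert!(v >= semver::Version::new(1, 25, 0));
}
```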
+pub fn install_toolchain(toolchain: &QualifiedToolchain, msg_info: &mut MessageInfo) -> Result<()> { + let mut command = rustup_command(msg_info, false); + let toolchain = toolchain.to_string(); + command.args(["toolchain", "add", &toolchain, "--profile", "minimal"]); + if version(msg_info)? >= semver::Version::new(1, 25, 0) { + command.arg("--force-non-host"); + } + command .run(msg_info, false) .wrap_err_with(|| format!("couldn't install toolchain `{toolchain}`")) } -pub fn install(target: &Target, toolchain: &str, msg_info: &mut MessageInfo) -> Result<()> { +pub fn install( + target: &Target, + toolchain: &QualifiedToolchain, + msg_info: &mut MessageInfo, +) -> Result<()> { let target = target.triple(); - + let toolchain = toolchain.to_string(); rustup_command(msg_info, false) - .args(&["target", "add", target, "--toolchain", toolchain]) + .args(["target", "add", target, "--toolchain", &toolchain]) .run(msg_info, false) .wrap_err_with(|| format!("couldn't install `std` for {target}")) } pub fn install_component( component: &str, - toolchain: &str, + toolchain: &QualifiedToolchain, msg_info: &mut MessageInfo, ) -> Result<()> { + let toolchain = toolchain.to_string(); rustup_command(msg_info, false) - .args(&["component", "add", component, "--toolchain", toolchain]) + .args(["component", "add", component, "--toolchain", &toolchain]) .run(msg_info, false) .wrap_err_with(|| format!("couldn't install the `{component}` component")) } +#[derive(Debug)] pub enum Component<'a> { Installed(&'a str), Available(&'a str), @@ -149,11 +234,11 @@ impl<'a> Component<'a> { pub fn check_component<'a>( component: &'a str, - toolchain: &str, + toolchain: &QualifiedToolchain, msg_info: &mut MessageInfo, ) -> Result<Component<'a>> { Ok(Command::new("rustup") - .args(&["component", "list", "--toolchain", toolchain]) + .args(["component", "list", "--toolchain", &toolchain.to_string()]) .run_and_get_stdout(msg_info)? .lines() .find_map(|line| { @@ -175,12 +260,55 @@ pub fn check_component<'a>( pub fn component_is_installed( component: &str, - toolchain: &str, + toolchain: &QualifiedToolchain, msg_info: &mut MessageInfo, ) -> Result<bool> { Ok(check_component(component, toolchain, msg_info)?.is_installed()) }
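`check_component` above greps `rustup component list --toolchain <name>` for the relevant entry. Assuming rustup's usual formatting, where installed entries carry an `(installed)` or `(default)` suffix and merely available entries are listed bare, a hedged sketch of classifying one line of that output:

```rust
#[derive(Debug, PartialEq, Eq)]
enum Component<'a> {
    Installed(&'a str),
    Available(&'a str),
}

// One line of `rustup component list` is e.g.
// "rust-std-aarch64-unknown-linux-gnu (installed)"; a bare entry is
// available but not installed.
fn classify(line: &str) -> Component<'_> {
    match line
        .strip_suffix(" (installed)")
        .or_else(|| line.strip_suffix(" (default)"))
    {
        Some(name) => Component::Installed(name),
        None => Component::Available(line),
    }
}

fn main() {
    assert_eq!(
        classify("rust-std-aarch64-unknown-linux-gnu (installed)"),
        Component::Installed("rust-std-aarch64-unknown-linux-gnu")
    );
    assert_eq!(classify("clippy"), Component::Available("clippy"));
}
```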
+#[allow(clippy::too_many_arguments)] +pub fn setup_components( + target: &Target, + uses_xargo: bool, + uses_build_std: bool, + toolchain: &QualifiedToolchain, + is_nightly: bool, + available_targets: AvailableTargets, + args: &crate::cli::Args, + msg_info: &mut MessageInfo, +) -> Result<(), color_eyre::Report> { + if !toolchain.is_custom { + // build-std overrides xargo, but only use it if it's a built-in + // tool but not an available target or doesn't have rust-std. + + if !is_nightly && uses_build_std { + eyre::bail!( + "no rust-std component available for {}: must use nightly", + target.triple() + ); + } + + if !uses_xargo + && !uses_build_std + && !available_targets.is_installed(target) + && available_targets.contains(target) + { + install(target, toolchain, msg_info)?; + } else if !component_is_installed("rust-src", toolchain, msg_info)? { + install_component("rust-src", toolchain, msg_info)?; + } + if args + .subcommand + .clone() + .map_or(false, |sc| sc == crate::Subcommand::Clippy) + && !component_is_installed("clippy", toolchain, msg_info)? + { + install_component("clippy", toolchain, msg_info)?; + } + } + Ok(()) +} + fn rustc_channel(version: &Version) -> Result<Channel> { match version .pre @@ -196,36 +324,39 @@ fn rustc_channel(version: &Version) -> Result<Channel> { } } -fn multirust_channel_manifest_path(toolchain_path: &Path) -> PathBuf { - toolchain_path.join("lib/rustlib/multirust-channel-manifest.toml") -} +impl QualifiedToolchain { + fn multirust_channel_manifest_path(&self) -> PathBuf { + self.get_sysroot() + .join("lib/rustlib/multirust-channel-manifest.toml") + } -pub fn rustc_version_string(toolchain_path: &Path) -> Result<Option<String>> { - let path = multirust_channel_manifest_path(toolchain_path); - if path.exists() { - let contents = - std::fs::read(&path).wrap_err_with(|| format!("couldn't open file `{path:?}`"))?; - let manifest: toml::value::Table = toml::from_slice(&contents)?; - return Ok(manifest - .get("pkg") - .and_then(|pkg| pkg.get("rust")) - .and_then(|rust| rust.get("version")) - .and_then(|version| version.as_str()) - .map(|version| version.to_owned())); + pub fn rustc_version_string(&self) -> Result<Option<String>> { + let path = self.multirust_channel_manifest_path(); + if path.exists() { + let contents = + std::fs::read(&path).wrap_err_with(|| format!("couldn't open file `{path:?}`"))?; + let manifest: toml::value::Table = toml::from_str(std::str::from_utf8(&contents)?)?; + return Ok(manifest + .get("pkg") + .and_then(|pkg| pkg.get("rust")) + .and_then(|rust| rust.get("version")) + .and_then(|version| version.as_str()) + .map(|version| version.to_owned())); + } + Ok(None) } - Ok(None) -} -pub fn rustc_version(toolchain_path: &Path) -> Result<Option<(Version, Channel, String)>> { - let path = multirust_channel_manifest_path(toolchain_path); - if let Some(rust_version) = rustc_version_string(toolchain_path)? { - // Field is `"{version} ({commit} {date})"` - if let Some((version, meta)) = rust_version.split_once(' ') { - let version = Version::parse(version) - .wrap_err_with(|| format!("invalid rust version found in {path:?}"))?; - let channel = rustc_channel(&version)?; - return Ok(Some((version, channel, meta.to_owned()))); + pub fn rustc_version(&self) -> Result<Option<(Version, Channel, String)>> { + let path = self.multirust_channel_manifest_path(); + if let Some(rust_version) = self.rustc_version_string()? { + // Field is `"{version} ({commit} {date})"` + if let Some((version, meta)) = rust_version.split_once(' ') { + let version = Version::parse(version) + .wrap_err_with(|| format!("invalid rust version found in {path:?}"))?; + let channel = rustc_channel(&version)?; + return Ok(Some((version, channel, meta.to_owned()))); + } } + Ok(None) } - Ok(None) } diff --git a/src/shell.rs index 90171fed5..8c88430c6 100644 --- a/src/shell.rs +++ b/src/shell.rs @@ -6,7 +6,9 @@ use std::fmt; use std::io::{self, Write}; use std::str::FromStr; +use crate::config::bool_from_envvar; use crate::errors::Result; +use is_terminal::IsTerminal; use owo_colors::{self, OwoColorize}; // get the prefix for stderr messages @@ -39,8 +41,11 @@ macro_rules! message { (@status $stream:ident, $status:expr, $message:expr, $color:ident, $msg_info:expr $(,)?) => {{ write_style!($stream, $msg_info, $status, bold, $color); write_style!($stream, $msg_info, ":", bold); + if let Some(caller) = $msg_info.caller() { + write!($stream, " [{}]", caller)?; + } match $message { - Some(message) => writeln!($stream, " {}", message)?, + Some(message) => writeln!($stream, " {}", message, )?, None => write!($stream, " ")?, } @@ -65,36 +70,39 @@ macro_rules! status { } /// the requested verbosity of output.
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum Verbosity { Quiet, + #[default] Normal, - Verbose, + Verbose(u8), } impl Verbosity { pub fn verbose(self) -> bool { match self { - Self::Verbose => true, + Self::Verbose(..) => true, Self::Normal | Self::Quiet => false, } } - fn create(color_choice: ColorChoice, verbose: bool, quiet: bool) -> Option<Verbosity> { - match (verbose, quiet) { - (true, true) => { - MessageInfo::from(color_choice).fatal("cannot set both --verbose and --quiet", 101) - } - (true, false) => Some(Verbosity::Verbose), - (false, true) => Some(Verbosity::Quiet), - (false, false) => None, + #[must_use] + pub fn level(&self) -> u8 { + match &self { + Verbosity::Verbose(v) => *v, + _ => 0, } } -} -impl Default for Verbosity { - fn default() -> Verbosity { - Verbosity::Normal + fn create(color_choice: ColorChoice, verbose: impl Into<u8>, quiet: bool) -> Option<Verbosity> { + match (verbose.into(), quiet) { + (1.., true) => { + MessageInfo::from(color_choice).fatal("cannot set both --verbose and --quiet", 101) + } + (v @ 1.., false) => Some(Verbosity::Verbose(v)), + (0, true) => Some(Verbosity::Quiet), + (0, false) => None, + } } } @@ -131,28 +139,40 @@ pub struct MessageInfo { pub verbosity: Verbosity, pub stdout_needs_erase: bool, pub stderr_needs_erase: bool, + pub cross_debug: bool, + pub has_warned: bool, } impl MessageInfo { - pub const fn new(color_choice: ColorChoice, verbosity: Verbosity) -> MessageInfo { + pub fn new(color_choice: ColorChoice, verbosity: Verbosity) -> MessageInfo { MessageInfo { color_choice, verbosity, stdout_needs_erase: false, stderr_needs_erase: false, + cross_debug: std::env::var("CROSS_DEBUG") + .as_deref() + .map(bool_from_envvar) + .unwrap_or_default(), + has_warned: false, } } - pub fn create(verbose: bool, quiet: bool, color: Option<&str>) -> Result<MessageInfo> { + pub fn create(verbose: impl Into<u8>, quiet: bool, color: Option<&str>) -> Result<MessageInfo> { let color_choice = get_color_choice(color)?; let verbosity = get_verbosity(color_choice, verbose, quiet)?; - Ok(MessageInfo { - color_choice, - verbosity, - stdout_needs_erase: false, - stderr_needs_erase: false, - }) + Ok(Self::new(color_choice, verbosity)) + } + + #[track_caller] + pub fn caller(&mut self) -> Option<String> { + if self.cross_debug { + let loc = std::panic::Location::caller(); + Some(format!("{}:{}", loc.file(), loc.line())) + } else { + None + } } #[must_use] @@ -178,7 +198,7 @@ impl MessageInfo { } pub fn as_verbose<T, C: Fn(&mut MessageInfo) -> T>(&mut self, call: C) -> T { - self.as_verbosity(call, Verbosity::Verbose) + self.as_verbosity(call, Verbosity::Verbose(2)) } fn erase_line<S: Write>(&mut self, stream: &mut S) -> Result<()> { @@ -203,6 +223,7 @@ impl MessageInfo { } /// prints a red 'error' message and terminates. + #[track_caller] pub fn fatal<T: fmt::Display>(&mut self, message: T, code: i32) -> ! { self.error(message) .expect("could not display fatal message"); @@ -210,13 +231,17 @@ impl MessageInfo { } /// prints a red 'error' message. + #[track_caller] pub fn error<T: fmt::Display>(&mut self, message: T) -> Result<()> { + self.has_warned = true; self.stderr_check_erase()?; status!(@stderr cross_prefix!("error"), Some(&message), red, self) } /// prints an amber 'warning' message. + #[track_caller] pub fn warn<T: fmt::Display>(&mut self, message: T) -> Result<()> { + self.has_warned = true; match self.verbosity { Verbosity::Quiet => Ok(()), _ => status!(@stderr @@ -229,6 +254,7 @@ impl MessageInfo { }
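The new `caller` method combined with `#[track_caller]` on the logging methods is what lets `CROSS_DEBUG=1` annotate every warning with the file and line that emitted it: `std::panic::Location::caller()` inside a `#[track_caller]` function resolves to the call site, not the function body. A minimal demonstration of that mechanism:

```rust
// With #[track_caller], Location::caller() reports where caller_info()
// was invoked from, which is exactly how MessageInfo::caller tags
// messages when CROSS_DEBUG is enabled.
#[track_caller]
fn caller_info() -> String {
    let loc = std::panic::Location::caller();
    format!("{}:{}", loc.file(), loc.line())
}

fn main() {
    // Prints this file and the line number of this call.
    println!("[{}] warning: something happened", caller_info());
}
```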
/// prints a cyan 'note' message. + #[track_caller] pub fn note<T: fmt::Display>(&mut self, message: T) -> Result<()> { match self.verbosity { Verbosity::Quiet => Ok(()), @@ -247,6 +273,7 @@ impl MessageInfo { } /// prints a high-priority message to stdout. + #[track_caller] pub fn print<T: fmt::Display>(&mut self, message: T) -> Result<()> { self.stdout_check_erase()?; println!("{}", message); @@ -254,6 +281,7 @@ impl MessageInfo { } /// prints a normal message to stdout. + #[track_caller] pub fn info<T: fmt::Display>(&mut self, message: T) -> Result<()> { match self.verbosity { Verbosity::Quiet => Ok(()), @@ -265,6 +293,7 @@ impl MessageInfo { } /// prints a debugging message to stdout. + #[track_caller] pub fn debug<T: fmt::Display>(&mut self, message: T) -> Result<()> { match self.verbosity { Verbosity::Quiet | Verbosity::Normal => Ok(()), @@ -346,6 +375,15 @@ impl MessageInfo { Ok(()) } + + /// Returns true if we've previously warned or errored, and we're in CI or `CROSS_NO_WARNINGS` has been set. + /// + /// This is used so that unexpected warnings and errors cause ci to fail. + pub fn should_fail(&self) -> bool { + // FIXME: store env var + env::var("CROSS_NO_WARNINGS").map_or_else(|_| is_ci::cached(), |env| bool_from_envvar(&env)) + && self.has_warned + } } impl Default for MessageInfo { @@ -397,7 +435,11 @@ fn get_color_choice(color: Option<&str>) -> Result<ColorChoice> { }) } -fn get_verbosity(color_choice: ColorChoice, verbose: bool, quiet: bool) -> Result<Verbosity> { +fn get_verbosity( + color_choice: ColorChoice, + verbose: impl Into<u8>, + quiet: bool, +) -> Result<Verbosity> { // cargo always checks the value of these variables. let env_verbose = cargo_envvar_bool("CARGO_TERM_VERBOSE")?; let env_quiet = cargo_envvar_bool("CARGO_TERM_QUIET")?; @@ -408,13 +450,11 @@ fn get_verbosity(color_choice: ColorChoice, verbose: bool, quiet: bool) -> Resul } pub trait Stream { - const TTY: atty::Stream; + type TTY: IsTerminal; const OWO: owo_colors::Stream; #[must_use] - fn is_atty() -> bool { - atty::is(Self::TTY) - } + fn is_atty() -> bool; fn owo(&self) -> owo_colors::Stream { Self::OWO @@ -422,18 +462,30 @@ pub trait Stream { } } impl Stream for io::Stdin { - const TTY: atty::Stream = atty::Stream::Stdin; + type TTY = io::Stdin; const OWO: owo_colors::Stream = owo_colors::Stream::Stdin; + + fn is_atty() -> bool { + io::stdin().is_terminal() + } } impl Stream for io::Stdout { - const TTY: atty::Stream = atty::Stream::Stdout; + type TTY = io::Stdout; const OWO: owo_colors::Stream = owo_colors::Stream::Stdout; + + fn is_atty() -> bool { + io::stdout().is_terminal() + } } impl Stream for io::Stderr { - const TTY: atty::Stream = atty::Stream::Stderr; + type TTY = io::Stderr; const OWO: owo_colors::Stream = owo_colors::Stream::Stderr; + + fn is_atty() -> bool { + io::stderr().is_terminal() + } } pub fn default_ident() -> usize { @@ -442,8 +494,9 @@ } #[must_use] pub fn indent(message: &str, spaces: usize) -> String { - message - .lines() - .map(|s| format!("{:spaces$}{s}", "")) - .collect() + use std::fmt::Write as _; + message.lines().fold(String::new(), |mut string, line| { + let _ = write!(string, "{:spaces$}{line}", ""); + string + }) }
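The `Stream` trait changes above replace the unmaintained `atty` crate with the `is-terminal` crate: each stream now answers `is_atty` itself via the `IsTerminal` trait, which later became `std::io::IsTerminal` in Rust 1.70. A sketch of the same check, assuming the `is-terminal` dependency:

```rust
use is_terminal::IsTerminal;

fn main() {
    // Mirrors Stream::is_atty: colored or interactive output should only
    // be emitted when the stream is a real terminal, not a pipe.
    if std::io::stdout().is_terminal() {
        println!("stdout is a TTY: safe to use colors");
    } else {
        println!("stdout is redirected: plain output");
    }
}
```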
diff --git a/src/tests.rs index 16f243629..7a22907d4 100644 --- a/src/tests.rs +++ b/src/tests.rs @@ -8,12 +8,14 @@ use std::{ use once_cell::sync::OnceCell; use rustc_version::VersionMeta; +use crate::{docker::ImagePlatform, rustc::QualifiedToolchain, TargetTriple, ToUtf8}; + static WORKSPACE: OnceCell<PathBuf> = OnceCell::new(); /// Returns the cargo workspace for the manifest pub fn get_cargo_workspace() -> &'static Path { let manifest_dir = env!("CARGO_MANIFEST_DIR"); - let mut msg_info = crate::shell::Verbosity::Verbose.into(); + let mut msg_info = crate::shell::Verbosity::Verbose(2).into(); #[allow(clippy::unwrap_used)] WORKSPACE.get_or_init(|| { crate::cargo_metadata_with_args(Some(manifest_dir.as_ref()), None, &mut msg_info) @@ -25,20 +27,23 @@ pub fn get_cargo_workspace() -> &'static Path { pub fn walk_dir<'a>( root: &'_ Path, - skip: &'a [impl AsRef<std::ffi::OsStr>], -) -> impl Iterator<Item = Result<walkdir::DirEntry, walkdir::Error>> + 'a { - walkdir::WalkDir::new(root).into_iter().filter_entry(|e| { - if skip - .iter() - .map(|s| -> &std::ffi::OsStr { s.as_ref() }) - .any(|dir| e.file_name() == dir) - { - return false; - } else if e.file_type().is_dir() { - return true; - } - e.path().extension() == Some("md".as_ref()) - }) + skip: &'static [impl AsRef<std::ffi::OsStr> + Send + Sync + 'a], + ext: impl for<'s> Fn(Option<&'s std::ffi::OsStr>) -> bool + Sync + Send + 'static, +) -> impl Iterator<Item = Result<ignore::DirEntry, ignore::Error>> { + ignore::WalkBuilder::new(root) + .filter_entry(move |e| { + if skip + .iter() + .map(|s| -> &std::ffi::OsStr { s.as_ref() }) + .any(|dir| e.file_name() == dir) + { + return false; + } else if e.file_type().map_or(false, |f| f.is_dir()) { + return true; + } + ext(e.path().extension()) + }) + .build() } #[test] @@ -76,7 +81,13 @@ release: {version} expected, warn_host_version_mismatch( &host_meta, - "xxxx", + &QualifiedToolchain::new( + "xxxx", + &None, + &ImagePlatform::from_const_target(TargetTriple::X86_64UnknownLinuxGnu), + Path::new("/toolchains/xxxx-x86_64-unknown-linux-gnu"), + false, + ), &target_meta.0, &target_meta.1, &mut msg_info, @@ -124,3 +135,22 @@ release: {version} "1.0.0-nightly (22222222 2022-02-02)", ); } + +#[test] +fn check_newlines() -> crate::Result<()> { + for file in walk_dir(get_cargo_workspace(), &[".git", "target"], |_| true) { + let file = file?; + if !file.file_type().map_or(true, |f| f.is_file()) { + continue; + } + eprintln!("File: {:?}", file.path()); + assert!( + crate::file::read(file.path()) + .unwrap_or_else(|_| String::from("\n")) + .ends_with('\n'), + "file {:?} does not end with a newline", + file.path().to_utf8()? + ); + } + Ok(()) +} diff --git a/src/tests/toml.rs index d917ea973..161b1e7dc 100644 --- a/src/tests/toml.rs +++ b/src/tests/toml.rs @@ -25,11 +25,12 @@ fn toml_check() -> Result<(), Box<dyn std::error::Error>> { "CODE_OF_CONDUCT.md", "CHANGELOG.md", ], + |p| p == Some("md".as_ref()), ); for dir_entry in walk { let dir_entry = dir_entry?; - if dir_entry.file_type().is_dir() { + if dir_entry.file_type().map_or(true, |f| f.is_dir()) { continue; } eprintln!("File: {:?}", dir_entry.path()); @@ -60,14 +61,24 @@ fn toml_check() -> Result<(), Box<dyn std::error::Error>> { text_line_no(&contents, fence.range().start), ); let mut msg_info = crate::shell::MessageInfo::default(); - assert!(if !cargo { - crate::cross_toml::CrossToml::parse_from_cross(&fence_content, &mut msg_info)? + let toml = if !cargo { + crate::cross_toml::CrossToml::parse_from_cross_str( + &fence_content, + None, + &mut msg_info, + )? } else { - crate::cross_toml::CrossToml::parse_from_cargo(&fence_content, &mut msg_info)? - .unwrap_or_default() - } - .1 - .is_empty()); + crate::cross_toml::CrossToml::parse_from_cargo_package_str( + &fence_content, + &mut msg_info, + )?
+ .unwrap_or_default() + }; + assert!(toml.1.is_empty()); + + // TODO: Add serde_path_to_error + // Check if roundtrip works, needed for merging Cross.toml and Cargo.toml + serde_json::from_value::<crate::cross_toml::CrossToml>(serde_json::to_value(toml.0)?)?; } } Ok(()) } diff --git a/targets.toml b/targets.toml new file mode 100644 index 000000000..9d46f0a07 --- /dev/null +++ b/targets.toml @@ -0,0 +1,530 @@ +# This file contains all the "targets" cross can be used with by default and is used for generating `src/docker/provided_images.rs` and our images +# the only required value for a `target` entry is its name in `target.target` and the `os` to use in CI +# +# spec is available in `xtask/src/util.rs` on `CiTarget` + +[[target]] +target = "x86_64-apple-darwin" +os = "macos-12" +special = true +deploy = true + +[[target]] +target = "x86_64-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "native qemu-user qemu-system" +deploy = true + +[[target]] +target = "x86_64-unknown-linux-musl" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "native qemu-user" +deploy = true + +[[target]] +target = "x86_64-pc-windows-msvc" +os = "windows-2019" +special = true +deploy = true + +[[target]] +target = "x86_64-unknown-linux-gnu" +sub = "centos" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "native qemu-user qemu-system" + +[[target]] +target = "aarch64-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "arm-unknown-linux-gnueabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "arm-unknown-linux-gnueabihf" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "armv7-unknown-linux-gnueabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user" + +[[target]] +target = "armv7-unknown-linux-gnueabihf" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "thumbv7neon-unknown-linux-gnueabihf" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "i586-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "i686-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "native qemu-user qemu-system" + +[[target]] +target = "loongarch64-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user" + +[[target]] +target = "mips-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user" +build-std = true + +[[target]] +target = "mipsel-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" +build-std = true + +[[target]] +target = "mips64-unknown-linux-gnuabi64" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +build-std = true + +[[target]] +target = "mips64el-unknown-linux-gnuabi64" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" +build-std = true + +[[target]] +disabled = true # https://github.com/cross-rs/cross/issues/1422 +target = "mips64-unknown-linux-muslabi64" +os =
"ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +build-std = true + +[[target]] +disabled = true # https://github.com/cross-rs/cross/issues/1422 +target = "mips64el-unknown-linux-muslabi64" +os = "ubuntu-latest" +# FIXME: Lacking partial C++ support due to missing compiler builtins. +cpp = true +std = true +run = true +build-std = true + +[[target]] +target = "powerpc-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "powerpc64-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "powerpc64le-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "riscv64gc-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "s390x-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "sparc64-unknown-linux-gnu" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" + +[[target]] +target = "aarch64-unknown-linux-musl" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "arm-unknown-linux-musleabihf" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "arm-unknown-linux-musleabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "armv5te-unknown-linux-gnueabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "armv5te-unknown-linux-musleabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "armv7-unknown-linux-musleabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "armv7-unknown-linux-musleabihf" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "i586-unknown-linux-musl" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +# FIXME: These could run without qemu in our tests (`native`), but it fails today +runners = "qemu-user" + +[[target]] +target = "i686-unknown-linux-musl" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +# FIXME: These could run without qemu in our tests (`native`), but it fails today +runners = "qemu-user" + +[[target]] +disabled = true # https://github.com/cross-rs/cross/issues/1422 +target = "mips-unknown-linux-musl" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +build-std = true + +[[target]] +disabled = true # https://github.com/cross-rs/cross/issues/1422 +target = "mipsel-unknown-linux-musl" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +build-std = true + +[[target]] +target = "aarch64-linux-android" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "arm-linux-androideabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "armv7-linux-androideabi" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "thumbv7neon-linux-androideabi" +os = "ubuntu-latest" +cpp = true +std = true +run = true + 
+[[target]] +target = "i686-linux-android" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "x86_64-linux-android" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true + +[[target]] +target = "x86_64-pc-windows-gnu" +os = "ubuntu-latest" +cpp = true +std = true +run = true + +[[target]] +target = "i686-pc-windows-gnu" +os = "ubuntu-latest" +cpp = true +std = true +run = true + +[[target]] +# Disabled for now, see https://github.com/rust-lang/rust/issues/98216 & https://github.com/cross-rs/cross/issues/634 +disabled = true +target = "asmjs-unknown-emscripten" +os = "ubuntu-latest" +cpp = true +std = true +run = true + +[[target]] +target = "wasm32-unknown-emscripten" +os = "ubuntu-latest" +cpp = true +std = true +run = true + +[[target]] +target = "x86_64-unknown-dragonfly" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +build-std = true + +[[target]] +target = "i686-unknown-freebsd" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true + +[[target]] +target = "x86_64-unknown-freebsd" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true + +[[target]] +target = "aarch64-unknown-freebsd" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +build-std = true + +[[target]] +target = "x86_64-unknown-netbsd" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true + +[[target]] +target = "sparcv9-sun-solaris" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true + +[[target]] +target = "x86_64-pc-solaris" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true + +[[target]] +target = "x86_64-unknown-illumos" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true + +[[target]] +target = "thumbv6m-none-eabi" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "thumbv7em-none-eabi" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "thumbv7em-none-eabihf" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "thumbv7m-none-eabi" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "thumbv8m.base-none-eabi" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "thumbv8m.main-none-eabi" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "thumbv8m.main-none-eabihf" +os = "ubuntu-latest" +cpp = true +std = false + +[[target]] +target = "cross" +os = "ubuntu-latest" +special = true + +[[target]] +target = "zig" +os = "ubuntu-latest" +special = true + +[[target]] +target = "aarch64-unknown-linux-gnu" +sub = "centos" +os = "ubuntu-latest" +cpp = true +dylib = true +std = true +run = true +runners = "qemu-user qemu-system" diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 8cafcedf8..e7bd6b234 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -10,13 +10,16 @@ publish = false [dependencies] cross = { path = "..", features = ["dev"] } -walkdir = "2.3.2" -color-eyre = "0.6.1" +walkdir = "2.3.3" +color-eyre = "0.6.2" eyre = "0.6.8" -clap = { version = "3.2.2", features = ["derive", "env", "unstable-v4"] } +clap = { version = "4.1", features = ["derive", "env"] } which = { version = "4.2", default_features = false } serde = { version = "1", features = ["derive"] } -serde_yaml = "0.8" serde_json = "1.0" -once_cell = "1.12" +shell-words = "1.1.0" +toml = "0.7" +once_cell = "1.17" semver = "1" +chrono = "0.4" +wildmatch = "2.1.1" diff --git a/xtask/src/build_docker_image.rs b/xtask/src/build_docker_image.rs index fc00310ba..c747aba50 100644 --- a/xtask/src/build_docker_image.rs +++ 
diff --git a/xtask/Cargo.toml b/xtask/Cargo.toml index 8cafcedf8..e7bd6b234 100644 --- a/xtask/Cargo.toml +++ b/xtask/Cargo.toml @@ -10,13 +10,16 @@ publish = false [dependencies] cross = { path = "..", features = ["dev"] } -walkdir = "2.3.2" -color-eyre = "0.6.1" +walkdir = "2.3.3" +color-eyre = "0.6.2" eyre = "0.6.8" -clap = { version = "3.2.2", features = ["derive", "env", "unstable-v4"] } +clap = { version = "4.1", features = ["derive", "env"] } which = { version = "4.2", default_features = false } serde = { version = "1", features = ["derive"] } -serde_yaml = "0.8" serde_json = "1.0" -once_cell = "1.12" +shell-words = "1.1.0" +toml = "0.7" +once_cell = "1.17" semver = "1" +chrono = "0.4" +wildmatch = "2.1.1" diff --git a/xtask/src/build_docker_image.rs b/xtask/src/build_docker_image.rs index fc00310ba..c747aba50 100644 --- a/xtask/src/build_docker_image.rs +++ b/xtask/src/build_docker_image.rs @@ -1,10 +1,14 @@ use std::fmt::Write; use std::path::Path; -use crate::util::{cargo_metadata, gha_error, gha_output, gha_print}; +use crate::util::{ + cargo_metadata, get_matrix, gha_error, gha_output, gha_print, DEFAULT_PLATFORMS, +}; +use crate::ImageTarget; use clap::Args; +use cross::docker::{self, BuildCommandExt, BuildResultExt, ImagePlatform, Progress}; use cross::shell::MessageInfo; -use cross::{docker, CommandExt, ToUtf8}; +use cross::{CommandExt, ToUtf8}; #[derive(Args, Debug)] pub struct BuildDockerImage { @@ -12,6 +16,9 @@ pub struct BuildDockerImage { pub ref_type: Option<String>, #[clap(long, hide = true, env = "GITHUB_REF_NAME")] ref_name: Option<String>, + /// Pass extra flags to the build + #[clap(long, env = "CROSS_BUILD_OPTS")] + build_opts: Option<String>, #[clap(action, long = "latest", hide = true, env = "LATEST")] is_latest: bool, /// Specify a tag to use instead of the derived one, e.g. `local` @@ -22,17 +29,8 @@ pub struct BuildDockerImage { pub repository: String, /// Newline separated labels #[clap(long, env = "LABELS")] - pub labels: Option<String>, - /// Provide verbose diagnostic output. - #[clap(short, long, action = clap::ArgAction::Count)] - pub verbose: u8, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option<String>, /// Print but do not execute the build commands. + pub labels: Option<String>, #[clap(long)] pub dry_run: bool, /// Force a push when `--push` is set, but not `--tag` @@ -48,12 +46,22 @@ pub struct BuildDockerImage { #[clap( long, value_parser = clap::builder::PossibleValuesParser::new(["auto", "plain", "tty"]), - default_value = "auto" )] - pub progress: String, + pub progress: Option<String>, /// Do not load from cache when building the image. #[clap(long)] pub no_cache: bool, + /// Docker `--cache-to` option; only used when `--push` is not set. + /// + /// Additionally, `{base_name}` is replaced with the base name of the image. + /// If not specified, nothing is passed to docker unless `--push` is used. + #[clap(long)] + pub cache_to: Option<String>, + /// Docker `--cache-from` option; only used when the engine supports the registry cache type and `--no-cache` is not set. + /// + /// Additionally, `{base_name}` is replaced with the base name of the image (illustrated after this struct). + #[clap(long, default_value = "type=registry,ref={base_name}:main")] + pub cache_from: String, /// Continue building images even if an image fails to build. #[clap(long)] pub no_fastfail: bool, @@ -66,16 +74,19 @@ pub struct BuildDockerImage { /// Additional build arguments to pass to Docker. 
#[clap(long)] pub build_arg: Vec<String>, + // [os/arch[/variant]=]toolchain + #[clap(long, short = 'a', action = clap::builder::ArgAction::Append)] + pub platform: Vec<ImagePlatform>, /// Targets to build for #[clap()] - pub targets: Vec<crate::ImageTarget>, + pub targets: Vec<ImageTarget>, }
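The `{base_name}` placeholder in `--cache-from`/`--cache-to` above is a plain string substitution against `{repository}/{target-name}`, exactly as the build loop later does with `str::replace`. A small illustration; the repository and target values here are made up:

```rust
// Expand a cache template the way the build loop does below.
fn expand(template: &str, repository: &str, target_name: &str) -> String {
    let base_name = format!("{repository}/{target_name}");
    template.replace("{base_name}", &base_name)
}

fn main() {
    // illustrative values only
    let cache_from = expand(
        "type=registry,ref={base_name}:main",
        "ghcr.io/cross-rs",
        "aarch64-unknown-linux-gnu",
    );
    assert_eq!(
        cache_from,
        "type=registry,ref=ghcr.io/cross-rs/aarch64-unknown-linux-gnu:main"
    );
}
```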
fn locate_dockerfile( - target: crate::ImageTarget, + target: ImageTarget, docker_root: &Path, cross_toolchain_root: &Path, -) -> cross::Result<(crate::ImageTarget, String)> { +) -> cross::Result<(ImageTarget, String)> { let dockerfile_name = format!("Dockerfile.{target}"); let dockerfile_root = if cross_toolchain_root.join(&dockerfile_name).exists() { &cross_toolchain_root @@ -92,11 +103,11 @@ pub fn build_docker_image( BuildDockerImage { ref_type, ref_name, + build_opts, is_latest, tag: tag_override, repository, labels, - verbose, dry_run, force, push, @@ -106,16 +117,15 @@ no_fastfail, from_ci, build_arg, + platform, + cache_from, + cache_to, mut targets, .. }: BuildDockerImage, engine: &docker::Engine, msg_info: &mut MessageInfo, ) -> cross::Result<()> { - let verbose = match verbose { - 0 => msg_info.is_verbose() as u8, - v => v, - }; let metadata = cargo_metadata(msg_info)?; let version = metadata .get_package("cross") @@ -124,9 +134,10 @@ .clone(); if targets.is_empty() { if from_ci { - targets = crate::util::get_matrix() + targets = get_matrix() .iter() .filter(|m| m.os.starts_with("ubuntu")) + .filter(|m| !m.disabled) .map(|m| m.to_image_target()) .collect(); } else { @@ -146,6 +157,10 @@ } } let gha = std::env::var("GITHUB_ACTIONS").is_ok(); + let mut progress = progress.map(|x| x.parse().unwrap()); + if gha { + progress = Some(Progress::Plain); + } let root = metadata.workspace_root; let docker_root = root.join("docker"); let cross_toolchains_root = docker_root.join("cross-toolchains").join("docker"); @@ -154,20 +169,57 @@ .map(|t| locate_dockerfile(t, &docker_root, &cross_toolchains_root)) .collect::<cross::Result<Vec<_>>>()?; + let platforms = if platform.is_empty() { + DEFAULT_PLATFORMS.to_vec() + } else { + platform + }; + let mut results = vec![]; - for (target, dockerfile) in &targets { + for (platform, (target, dockerfile)) in targets + .iter() + .flat_map(|t| platforms.iter().map(move |p| (p, t))) + { if gha && targets.len() > 1 { gha_print("::group::Build {target}"); + } else { + msg_info.note(format_args!("Build {target} for {}", platform.target))?; } - let mut docker_build = docker::command(engine); - docker_build.args(&["buildx", "build"]); + let mut docker_build = engine.command(); + docker_build.invoke_build_command(); + let has_buildkit = docker::Engine::has_buildkit(); docker_build.current_dir(&docker_root); + let docker_platform = platform.docker_platform(); + let mut dockerfile = dockerfile.clone(); + docker_build.args(["--platform", &docker_platform]); + let uppercase_triple = target.name.to_ascii_uppercase().replace('-', "_"); + docker_build.args([ "--build-arg", + &format!("CROSS_TARGET_TRIPLE={}", uppercase_triple), + ]); + // add our platform, and determine if we need to use a native docker image + if has_native_image(docker_platform.as_str(), target, msg_info)? { + let dockerfile_name = match target.sub.as_deref() { + Some(sub) => format!("Dockerfile.native.{sub}"), + None => "Dockerfile.native".to_owned(), + }; + let dockerfile_path = docker_root.join(&dockerfile_name); + if !dockerfile_path.exists() { + eyre::bail!( "unable to find native dockerfile named {dockerfile_name} for target {target}." + ); + } + dockerfile = dockerfile_path.to_utf8()?.to_string(); + } + if push { docker_build.arg("--push"); + } else if engine.kind.supports_output_flag() && no_output { + docker_build.args(["--output", "type=tar,dest=/dev/null"]); } else if no_output { - docker_build.args(&["--output", "type=tar,dest=/dev/null"]); - } else { + msg_info.fatal("cannot specify `--no-output` with an engine that does not support the `--output` flag", 1); + } else if has_buildkit { docker_build.arg("--load"); } @@ -194,25 +246,36 @@ tags = vec![target.image_name(&repository, tag)]; } - docker_build.arg("--pull"); + if engine.kind.supports_pull_flag() { + docker_build.arg("--pull"); + } + let base_name = format!("{repository}/{}", target.name); if no_cache { docker_build.arg("--no-cache"); - } else { - docker_build.args(&[ "--cache-from", - &format!( "type=registry,ref={}", target.image_name(&repository, "main") ), ]); + } else if engine.kind.supports_cache_from_type() { + docker_build.args([ "--cache-from", + &cache_from.replace("{base_name}", &base_name), ]); + } else { + // we can't use `image_name` here since podman doesn't support tags + // with `--cache-from`: it only accepts an image in the registry/repo + // format, although it does support tags when pulling images. this + // affects building from cache for target+sub images, since we can't + // use caches from the registry. it's only an issue when building + // with podman without a local cache, which never happens in practice. + docker_build.args(["--cache-from", &base_name]); } if push { - docker_build.args(&["--cache-to", "type=inline"]); + docker_build.args(["--cache-to", "type=inline"]); + } else if let Some(ref cache_to) = cache_to { + docker_build.args(["--cache-to", &cache_to.replace("{base_name}", &base_name)]); } for tag in &tags { - docker_build.args(&["--tag", tag]); + docker_build.args(["--tag", tag]); } for label in labels @@ -221,36 +284,32 @@ .as_deref() .unwrap_or("") .split('\n') .filter(|s| !s.is_empty()) { - docker_build.args(&["--label", label]); + docker_build.args(["--label", label]); } - docker_build.args([ "--label", - &format!("{}.for-cross-target={target}", cross::CROSS_LABEL_DOMAIN), - ]); - - docker_build.args(&["-f", dockerfile]); + docker_build.cross_labels(&target.name, platform.target.triple()); + docker_build.args(["--file", &dockerfile]); - if gha || progress == "plain" { - docker_build.args(&["--progress", "plain"]); - } else { - docker_build.args(&["--progress", &progress]); - } + docker_build.progress(progress)?; + docker_build.verbose(msg_info.verbosity); for arg in &build_arg { - docker_build.args(&["--build-arg", arg]); - } - if verbose > 1 { - docker_build.args(&["--build-arg", "VERBOSE=1"]); + docker_build.args(["--build-arg", arg]); } - if target.needs_workspace_root_context() { - docker_build.arg(&root); - } else { - docker_build.arg("."); + if let Some(opts) = &build_opts { + docker_build.args(docker::Engine::parse_opts(opts)?); } + docker_build.arg(match target.needs_workspace_root_context() { true => root.as_path(), false => Path::new("."), }); + if !dry_run && (force || !push || gha) { - let result = docker_build.run(msg_info, false); + let result = docker_build + .run(msg_info, false) + .engine_warning(engine) + .buildkit_warning(); if gha && targets.len() > 1 { if let Err(e) = &result { // TODO: Determine which instruction errored, and place a warning on that line @@ -272,8 +331,8 @@ } } if gha { - gha_output("image", &tags[0]); - gha_output("images", 
&format!("'{}'", serde_json::to_string(&tags)?)); + gha_output("image", &tags[0])?; + gha_output("images", &format!("'{}'", serde_json::to_string(&tags)?))?; if targets.len() > 1 { gha_print("::endgroup::"); } @@ -283,16 +342,51 @@ pub fn build_docker_image( std::env::set_var("GITHUB_STEP_SUMMARY", job_summary(&results)?); } if results.iter().any(|r| r.is_err()) { - results - .into_iter() - .filter_map(Result::err) - .fold(Err(eyre::eyre!("encountered error(s)")), |_, e| Err(e.1))?; + #[allow(unknown_lints, clippy::manual_try_fold)] + return Err(crate::util::with_section_reports( + eyre::eyre!("some error(s) encountered"), + results.into_iter().filter_map(Result::err).map(|e| e.1), + )); } Ok(()) } +fn has_native_image( + platform: &str, + target: &ImageTarget, + msg_info: &mut MessageInfo, +) -> cross::Result { + let note_host_target_detection = |msg_info: &mut MessageInfo| -> cross::Result<()> { + msg_info.note("using the rust target triple to determine the host triple to determine if the docker platform is native. this may fail if cross-compiling xtask.") + }; + + Ok(match target.sub.as_deref() { + // FIXME: add additional subs for new Linux distros, such as alpine. + None | Some("centos") => match (platform, target.name.as_str()) { + ("linux/386", "i686-unknown-linux-gnu") + | ("linux/amd64", "x86_64-unknown-linux-gnu") + | ("linux/arm64" | "linux/arm64/v8", "aarch64-unknown-linux-gnu") + | ("linux/ppc64le", "powerpc64le-unknown-linux-gnu") + | ("linux/riscv64", "riscv64gc-unknown-linux-gnu") + | ("linux/s390x", "s390x-unknown-linux-gnu") => true, + ("linux/arm/v6", "arm-unknown-linux-gnueabi") if target.is_armv6() => { + note_host_target_detection(msg_info)?; + true + } + ("linux/arm" | "linux/arm/v7", "armv7-unknown-linux-gnueabihf") + if target.is_armv7() => + { + note_host_target_detection(msg_info)?; + true + } + _ => false, + }, + Some(_) => false, + }) +} + pub fn determine_image_name( - target: &crate::ImageTarget, + target: &ImageTarget, repository: &str, ref_type: &str, ref_name: &str, @@ -301,7 +395,7 @@ pub fn determine_image_name( ) -> cross::Result> { let mut tags = vec![]; match (ref_type, ref_name) { - (ref_type, ref_name) if ref_type == "tag" && ref_name.starts_with('v') => { + ("tag", ref_name) if ref_name.starts_with('v') => { let tag_version = ref_name .strip_prefix('v') .expect("tag name should start with v"); @@ -314,8 +408,15 @@ pub fn determine_image_name( tags.push(target.image_name(repository, "latest")) } } - (ref_type, ref_name) if ref_type == "branch" => { - tags.push(target.image_name(repository, ref_name)); + ("branch", ref_name) => { + if let Some(gh_queue) = ref_name.strip_prefix("gh-readonly-queue/") { + let (_, source) = gh_queue + .split_once('/') + .ok_or_else(|| eyre::eyre!("invalid gh-readonly-queue branch name"))?; + tags.push(target.image_name(repository, source)); + } else { + tags.push(target.image_name(repository, ref_name)); + } if ["staging", "trying"] .iter() @@ -330,7 +431,7 @@ pub fn determine_image_name( } pub fn job_summary( - results: &[Result], + results: &[Result], ) -> cross::Result { let mut summary = "# SUMMARY\n\n".to_string(); let success: Vec<_> = results.iter().filter_map(|r| r.as_ref().ok()).collect(); diff --git a/xtask/src/changelog.rs b/xtask/src/changelog.rs new file mode 100644 index 000000000..8a83a6acf --- /dev/null +++ b/xtask/src/changelog.rs @@ -0,0 +1,945 @@ +use std::cmp; +use std::collections::BTreeSet; +use std::fmt; +use std::fs; +use std::path::Path; + +use crate::util::{project_dir, write_to_string}; +use 
cross::shell::MessageInfo; +use cross::ToUtf8; + +use chrono::{Datelike, Utc}; +use clap::{Args, Subcommand}; +use eyre::Context; +use serde::Deserialize; + +pub fn changelog(args: Changelog, msg_info: &mut MessageInfo) -> cross::Result<()> { + match args { + Changelog::Build(args) => build_changelog(args, msg_info), + Changelog::Validate(args) => validate_changelog(args, msg_info), + } +} + +#[derive(Subcommand, Debug)] +pub enum Changelog { + /// Build the changelog. + Build(BuildChangelog), + /// Validate changelog entries. + Validate(ValidateChangelog), +} + +#[derive(Args, Debug)] +pub struct BuildChangelog { + /// Build a release changelog. + #[clap(long, env = "NEW_VERSION", required = true)] + release: Option<String>, + /// Whether we're doing a dry run or not. + #[clap(long, env = "DRY_RUN")] + dry_run: bool, +} + +#[derive(Args, Debug)] +pub struct ValidateChangelog { + /// List of changelog entries to validate. + files: Vec<String>, +} + +// the type for the identifier: entries compare by their highest +// PR/issue number, and an identifier with no numbers sorts as 0 +// (see the sketch after the `Ord` impl below). the numbers +// themselves are kept sorted. +#[derive(Debug, Clone, PartialEq, Eq)] +enum IdType { + PullRequest(Vec<u64>), + Issue(Vec<u64>), +} + +impl IdType { + fn numbers(&self) -> &[u64] { + match self { + IdType::PullRequest(v) => v, + IdType::Issue(v) => v, + } + } + + fn max_number(&self) -> u64 { + self.numbers().iter().max().map_or_else(|| 0, |v| *v) + } + + fn parse_stem(file_stem: &str) -> cross::Result<Self> { + let (is_issue, rest) = match file_stem.strip_prefix("issue") { + Some(n) => (true, n), + None => (false, file_stem), + }; + let mut numbers = rest + .split('-') + .map(|x| x.parse::<u64>()) + .collect::<Result<Vec<_>, _>>()?; + numbers.sort_unstable(); + + Ok(match is_issue { + false => IdType::PullRequest(numbers), + true => IdType::Issue(numbers), + }) + } + + fn parse_changelog(prs: &str) -> cross::Result<Self> { + let mut numbers = prs + .split(',') + .map(|x| x.trim().parse::<u64>()) + .collect::<Result<Vec<_>, _>>()?; + numbers.sort_unstable(); + + Ok(IdType::PullRequest(numbers)) + } +} + +impl cmp::PartialOrd for IdType { + fn partial_cmp(&self, other: &IdType) -> Option<cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for IdType { + fn cmp(&self, other: &IdType) -> cmp::Ordering { + self.max_number().cmp(&other.max_number()) + } +} +
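A test-style sketch of the ordering just described, which could sit alongside this module's own tests; the stems are illustrative:

```rust
#[test]
fn id_ordering_uses_max_number() -> cross::Result<()> {
    // "issue640-645" has max 645, "978" has max 978, so the PR sorts higher.
    let issue = IdType::parse_stem("issue640-645")?;
    let pr = IdType::parse_stem("978")?;
    assert!(pr > issue);
    // an identifier without numbers has max 0 and sorts below everything else.
    assert!(IdType::Issue(vec![]) < issue);
    Ok(())
}
```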
+#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Eq)] +#[serde(rename_all = "lowercase")] +enum ChangelogType { + Added, + Changed, + Fixed, + Removed, + Internal, +} + +impl ChangelogType { + fn from_header(s: &str) -> cross::Result<Self> { + Ok(match s { + "Added" => Self::Added, + "Changed" => Self::Changed, + "Fixed" => Self::Fixed, + "Removed" => Self::Removed, + "Internal" => Self::Internal, + _ => eyre::bail!("invalid header section, got {s}"), + }) + } + + fn sort_by(&self) -> u32 { + match self { + ChangelogType::Added => 4, + ChangelogType::Changed => 3, + ChangelogType::Fixed => 2, + ChangelogType::Removed => 1, + ChangelogType::Internal => 0, + } + } +} + +impl cmp::PartialOrd for ChangelogType { + fn partial_cmp(&self, other: &ChangelogType) -> Option<cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for ChangelogType { + fn cmp(&self, other: &ChangelogType) -> cmp::Ordering { + self.sort_by().cmp(&other.sort_by()) + } +} + +// internal type for a changelog, just containing the contents +#[derive(Debug, Clone, Deserialize, PartialEq, Eq)] +struct ChangelogContents { + description: String, + #[serde(default)] + issues: Vec<u64>, + #[serde(default)] + breaking: bool, + #[serde(rename = "type")] + kind: ChangelogType, +} + +impl ChangelogContents { + fn sort_by(&self) -> (&ChangelogType, &str, &bool) { + (&self.kind, &self.description, &self.breaking) + } +} + +impl cmp::PartialOrd for ChangelogContents { + fn partial_cmp(&self, other: &ChangelogContents) -> Option<cmp::Ordering> { + Some(self.cmp(other)) + } +} + +impl cmp::Ord for ChangelogContents { + fn cmp(&self, other: &ChangelogContents) -> cmp::Ordering { + self.sort_by().cmp(&other.sort_by()) + } +} + +impl fmt::Display for ChangelogContents { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.breaking { + f.write_str("BREAKING: ")?; + } + f.write_str(&self.description) + } +} + +#[derive(Debug, Clone, PartialOrd, Ord, PartialEq, Eq)] +struct ChangelogEntry { + id: IdType, + contents: ChangelogContents, +} + +impl ChangelogEntry { + fn new(id: IdType, contents: ChangelogContents) -> Self { + Self { id, contents } + } + + fn parse(s: &str, kind: ChangelogType) -> cross::Result<Self> { + let (id, rest) = match s.split_once('-') { + Some((prefix, rest)) => match prefix.trim().strip_prefix('#') { + Some(prs) => (IdType::parse_changelog(prs)?, rest), + None => (IdType::Issue(vec![]), s), + }, + None => (IdType::Issue(vec![]), s), + }; + + let trimmed = rest.trim(); + let (breaking, description) = match trimmed.strip_prefix("BREAKING: ") { + Some(d) => (true, d.trim().to_owned()), + None => (false, trimmed.to_owned()), + }; + + Ok(ChangelogEntry { + id, + contents: ChangelogContents { + kind, + breaking, + description, + issues: vec![], + }, + }) + } + + fn from_object(id: IdType, value: serde_json::Value) -> cross::Result<Self> { + Ok(Self::new(id, serde_json::value::from_value(value)?)) + } + + fn from_value(id: IdType, mut value: serde_json::Value) -> cross::Result<Vec<Self>> { + let mut result = vec![]; + if value.is_array() { + for item in value.as_array_mut().expect("must be array") { + result.push(Self::from_object(id.clone(), item.take())?); + } + } else { + result.push(Self::from_object(id, value)?); + } + + Ok(result) + } +} + +impl fmt::Display for ChangelogEntry { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("-")?; + match &self.id { + IdType::PullRequest(prs) => f.write_fmt(format_args!( + " #{} -", + prs.iter() + .map(|x| x.to_string()) + .collect::<Vec<_>>() + .join(",#") + ))?, + IdType::Issue(_) => (), + } + f.write_fmt(format_args!(" {}", self.contents))?; + f.write_str("\n") + } +} + +// de-duplicate in place +fn deduplicate_entries(original: &mut Vec<ChangelogEntry>) { + let mut result = Vec::with_capacity(original.len()); + let mut memo = BTreeSet::new(); + for item in original.iter() { + if memo.insert(item.to_string()) { + result.push(item.clone()); + } + } + + *original = result; +} + +#[derive(Debug, Clone, Default, PartialEq, Eq)] +struct Changes { + added: Vec<ChangelogEntry>, + changed: Vec<ChangelogEntry>, + fixed: Vec<ChangelogEntry>, + removed: Vec<ChangelogEntry>, + internal: Vec<ChangelogEntry>, +} + +impl Changes { + fn sort_descending(&mut self) { + self.added.sort_by(|x, y| y.cmp(x)); + self.changed.sort_by(|x, y| y.cmp(x)); + self.fixed.sort_by(|x, y| y.cmp(x)); + self.removed.sort_by(|x, y| y.cmp(x)); + self.internal.sort_by(|x, y| y.cmp(x)); + } + + fn deduplicate(&mut self) { + deduplicate_entries(&mut self.added); + deduplicate_entries(&mut self.changed); + deduplicate_entries(&mut self.fixed); + deduplicate_entries(&mut self.removed); + deduplicate_entries(&mut self.internal); + } + + fn merge(&mut self, other: &mut Self) { + self.added.append(&mut other.added); + self.changed.append(&mut other.changed); + self.fixed.append(&mut other.fixed); + self.removed.append(&mut other.removed); + 
self.internal.append(&mut other.internal); + } + + fn push(&mut self, entry: ChangelogEntry) { + match entry.contents.kind { + ChangelogType::Added => self.added.push(entry), + ChangelogType::Changed => self.changed.push(entry), + ChangelogType::Fixed => self.fixed.push(entry), + ChangelogType::Removed => self.removed.push(entry), + ChangelogType::Internal => self.internal.push(entry), + } + } +} + +macro_rules! fmt_changelog_vec { + ($self:ident, $fmt:ident, $field:ident, $header:literal) => {{ + if !$self.$field.is_empty() { + $fmt.write_str(concat!("\n### ", $header, "\n\n"))?; + for entry in &$self.$field { + $fmt.write_fmt(format_args!("{}", entry))?; + } + } + }}; +} + +impl fmt::Display for Changes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt_changelog_vec!(self, f, added, "Added"); + fmt_changelog_vec!(self, f, changed, "Changed"); + fmt_changelog_vec!(self, f, fixed, "Fixed"); + fmt_changelog_vec!(self, f, removed, "Removed"); + fmt_changelog_vec!(self, f, internal, "Internal"); + + Ok(()) + } +}
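The `Display` implementation above is what produces the `### <Header>` sections of the final changelog. A test-style sketch of the rendered output, with made-up entry text:

```rust
#[test]
fn changes_render_in_section_order() -> cross::Result<()> {
    let mut changes = Changes::default();
    changes.push(ChangelogEntry::parse(
        "#940 - this is one added entry.",
        ChangelogType::Added,
    )?);
    changes.push(ChangelogEntry::parse("this is a fixed entry.", ChangelogType::Fixed)?);
    // sections appear in Added/Changed/Fixed/Removed/Internal order,
    // and empty sections are skipped entirely.
    assert_eq!(
        changes.to_string(),
        "\n### Added\n\n- #940 - this is one added entry.\n\
         \n### Fixed\n\n- this is a fixed entry.\n"
    );
    Ok(())
}
```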
 + +fn file_stem(path: &Path) -> cross::Result<&str> { + path.file_stem() + .ok_or(eyre::eyre!("unable to get file stem {path:?}"))? + .to_utf8() +} + +fn read_changes(changes_dir: &Path) -> cross::Result<Changes> { + let mut changes = Changes::default(); + for entry in fs::read_dir(changes_dir)? { + let entry = entry?; + let file_type = entry.file_type()?; + let file_name = entry.file_name(); + let path = entry.path(); + let ext = path.extension(); + if file_type.is_file() && ext.map_or(false, |v| v == "json") { + let stem = file_stem(&path)?; + let id = IdType::parse_stem(stem)?; + let contents = fs::read_to_string(path)?; + let value = serde_json::from_str(&contents) + .wrap_err_with(|| format!("unable to parse JSON for {file_name:?}"))?; + let new_entries = ChangelogEntry::from_value(id, value) + .wrap_err_with(|| format!("unable to extract changelog from {file_name:?}"))?; + for change in new_entries { + match change.contents.kind { + ChangelogType::Added => changes.added.push(change), + ChangelogType::Changed => changes.changed.push(change), + ChangelogType::Fixed => changes.fixed.push(change), + ChangelogType::Removed => changes.removed.push(change), + ChangelogType::Internal => changes.internal.push(change), + } + } + } + } + + Ok(changes) +} + +fn read_changelog(root: &Path) -> cross::Result<(String, Changes, String)> { + let lines: Vec<String> = fs::read_to_string(root.join("CHANGELOG.md"))? + .lines() + .map(ToOwned::to_owned) + .collect(); + + let next_index = lines + .iter() + .position(|x| x.trim().starts_with("## [Unreleased]")) + .ok_or(eyre::eyre!("could not find unreleased section"))?; + let (header, rest) = lines.split_at(next_index); + + // skip the first line of `rest` since it already matched the + // unreleased section, then offset the split by 1. + let last_index = 1 + rest[1..] + .iter() + .position(|x| x.trim().starts_with("## ")) + .ok_or(eyre::eyre!("could not find the next release section"))?; + let (section, footer) = rest.split_at(last_index); + + // the unreleased section should have the format: + // ## [Unreleased] - ReleaseDate + // + // ### Added + // + // - #905 - ... + let mut kind = None; + let mut changes = Changes::default(); + for line in section { + let line = line.trim(); + if let Some(header) = line.strip_prefix("### ") { + kind = Some(ChangelogType::from_header(header)?); + } else if let Some(entry) = line.strip_prefix("- ") { + match kind { + Some(kind) => changes.push(ChangelogEntry::parse(entry, kind)?), + None => eyre::bail!("changelog entry \"{line}\" without header"), + } + } else if !(line.is_empty() || line == "## [Unreleased] - ReleaseDate") { + eyre::bail!("invalid changelog entry, got \"{line}\""); + } + } + + Ok((header.join("\n"), changes, footer.join("\n"))) +} + +fn delete_changes(root: &Path) -> cross::Result<()> { + // remove all the entry files that were rolled into the denoted version release + for entry in fs::read_dir(root.join(".changes"))? { + let entry = entry?; + let file_type = entry.file_type()?; + let srcpath = entry.path(); + let ext = srcpath.extension(); + if file_type.is_file() && ext.map_or(false, |v| v == "json") { + fs::remove_file(srcpath)?; + } + } + + Ok(()) +} + +/// Get the current date as a `YYYY-MM-DD` string. +pub fn get_current_date() -> String { + let utc = Utc::now(); + let date = utc.date_naive(); + + format!("{}-{:0>2}-{:0>2}", date.year(), date.month(), date.day()) +} + +// used for internal testing +fn build_changelog_from_dir( + root: &Path, + changes_dir: &Path, + release: Option<&str>, +) -> cross::Result<String> { + use std::fmt::Write; + + let mut new = read_changes(changes_dir)?; + let (header, mut existing, footer) = read_changelog(root)?; + new.merge(&mut existing); + new.deduplicate(); + new.sort_descending(); + + let mut output = header; + output.push_str("\n## [Unreleased] - ReleaseDate\n"); + if let Some(release) = release { + let version = semver::Version::parse(release)?; + if version.pre.is_empty() { + let date = get_current_date(); + writeln!(&mut output, "\n## [v{release}] - {date}")?; + } + } + output.push_str(&new.to_string()); + output.push('\n'); + output.push_str(&footer); + + Ok(output) +} + +pub fn build_changelog( + BuildChangelog { + dry_run, release, .. + }: BuildChangelog, + msg_info: &mut MessageInfo, +) -> cross::Result<()> { + msg_info.info("Building the changelog.")?; + msg_info.debug(format_args!( + "Running with dry-run set to {dry_run} and with release {release:?}" + ))?; + + let root = project_dir(msg_info)?; + let changes_dir = root.join(".changes"); + let output = build_changelog_from_dir(&root, &changes_dir, release.as_deref())?; + + let filename = match !dry_run && release.is_some() { + true => { + delete_changes(&root)?; + "CHANGELOG.md" + } + false => "CHANGELOG.md.draft", + }; + let path = root.join(filename); + write_to_string(&path, &output)?; + #[allow(clippy::disallowed_methods)] + msg_info.info(format_args!("Changelog written to `{}`", path.display()))?; + + Ok(()) +} + +#[allow(clippy::disallowed_methods)] +pub fn validate_changelog( + ValidateChangelog { mut files, .. }: ValidateChangelog, + msg_info: &mut MessageInfo, +) -> cross::Result<()> { + let root = project_dir(msg_info)?; + let changes_dir = root.join(".changes"); + if files.is_empty() { + files = fs::read_dir(&changes_dir)? 
+ .filter_map(|x| x.ok()) + .filter(|x| x.file_type().map_or(false, |v| v.is_file())) + .filter_map(|x| { + if x.path() + .extension() + .and_then(|s: &std::ffi::OsStr| s.to_str()) + .unwrap_or_default() + == "json" + { + Some(x.file_name().to_utf8().unwrap().to_owned()) + } else { + None + } + }) + .collect(); + } + let mut errors = vec![]; + for file in files { + let file_name = Path::new(&file); + let path = changes_dir.join(file_name); + let stem = file_stem(&path)?; + let contents = fs::read_to_string(&path) + .wrap_err_with(|| eyre::eyre!("cannot find file {}", path.display()))?; + + let id = match IdType::parse_stem(stem) + .wrap_err_with(|| format!("unable to parse file stem for \"{}\"", path.display())) + { + Ok(id) => id, + Err(e) => { + errors.push(e); + continue; + } + }; + + let value = match serde_json::from_str(&contents) + .wrap_err_with(|| format!("unable to parse JSON for \"{}\"", path.display())) + { + Ok(value) => value, + Err(e) => { + errors.push(e); + continue; + } + }; + + let res = ChangelogEntry::from_value(id, value) + .wrap_err_with(|| format!("unable to extract changelog from \"{}\"", path.display())) + .map(|_| ()); + errors.extend(res.err()); + } + + if !errors.is_empty() { + return Err(crate::util::with_section_reports( + eyre::eyre!("some files were not validated"), + errors, + )); + } + // also need to validate the existing changelog + let _ = read_changelog(&root)?; + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + macro_rules! s { + ($x:literal) => { + $x.to_owned() + }; + } + + #[test] + fn test_id_type_parse_stem() -> cross::Result<()> { + assert_eq!(IdType::parse_stem("645")?, IdType::PullRequest(vec![645])); + assert_eq!( + IdType::parse_stem("640-645")?, + IdType::PullRequest(vec![640, 645]) + ); + assert_eq!( + IdType::parse_stem("issue640-645")?, + IdType::Issue(vec![640, 645]) + ); + + Ok(()) + } + + #[test] + fn test_id_type_parse_changelog() -> cross::Result<()> { + assert_eq!( + IdType::parse_changelog("645")?, + IdType::PullRequest(vec![645]) + ); + assert_eq!( + IdType::parse_changelog("640,645")?, + IdType::PullRequest(vec![640, 645]) + ); + + Ok(()) + } + + #[test] + fn changelog_type_sort() { + assert!(ChangelogType::Added > ChangelogType::Changed); + assert!(ChangelogType::Changed > ChangelogType::Fixed); + } + + #[test] + fn change_log_type_from_header() -> cross::Result<()> { + assert_eq!(ChangelogType::from_header("Added")?, ChangelogType::Added); + + Ok(()) + } + + #[test] + fn changelog_contents_deserialize() -> cross::Result<()> { + let actual: ChangelogContents = serde_json::from_str(CHANGES_OBJECT)?; + let expected = ChangelogContents { + description: s!("sample description for a PR adding one CHANGELOG entry."), + issues: vec![437], + breaking: false, + kind: ChangelogType::Fixed, + }; + assert_eq!(actual, expected); + + let actual: Vec<ChangelogContents> = serde_json::from_str(CHANGES_ARRAY)?; + let expected = vec![ + ChangelogContents { + description: s!("this is one added entry."), + issues: vec![630], + breaking: false, + kind: ChangelogType::Added, + }, + ChangelogContents { + description: s!("this is another added entry."), + issues: vec![642], + breaking: false, + kind: ChangelogType::Added, + }, + ChangelogContents { + description: s!("this is a fixed entry that has no attached issue."), + issues: vec![], + breaking: false, + kind: ChangelogType::Fixed, + }, + ChangelogContents { + description: s!("this is a breaking change."), + issues: vec![679], + breaking: true, + kind: ChangelogType::Changed, + }, + ]; + 
assert_eq!(actual, expected); + + Ok(()) + } + + #[test] + fn changelog_entry_display() { + let mut entry = ChangelogEntry::new( + IdType::PullRequest(vec![637]), + ChangelogContents { + description: s!("this is one added entry."), + issues: vec![630], + breaking: false, + kind: ChangelogType::Added, + }, + ); + assert_eq!(entry.to_string(), s!("- #637 - this is one added entry.\n")); + + entry.contents.breaking = true; + assert_eq!( + entry.to_string(), + s!("- #637 - BREAKING: this is one added entry.\n") + ); + + entry.id = IdType::Issue(vec![640]); + assert_eq!( + entry.to_string(), + s!("- BREAKING: this is one added entry.\n") + ); + + entry.contents.breaking = false; + assert_eq!(entry.to_string(), s!("- this is one added entry.\n")); + } + + #[test] + fn read_template_changes() -> cross::Result<()> { + let mut msg_info = MessageInfo::default(); + let root = project_dir(&mut msg_info)?; + + let mut actual = read_changes(&root.join(".changes").join("template"))?; + actual.sort_descending(); + let expected = Changes { + added: vec![ + ChangelogEntry::new( + IdType::PullRequest(vec![979, 981]), + ChangelogContents { + description: s!("this has 2 PRs associated."), + issues: vec![441], + breaking: false, + kind: ChangelogType::Added, + }, + ), + ChangelogEntry::new( + IdType::PullRequest(vec![940]), + ChangelogContents { + description: s!("this is one added entry."), + issues: vec![630], + breaking: false, + kind: ChangelogType::Added, + }, + ), + ChangelogEntry::new( + IdType::PullRequest(vec![940]), + ChangelogContents { + description: s!("this is another added entry."), + issues: vec![642], + breaking: false, + kind: ChangelogType::Added, + }, + ), + ], + changed: vec![ChangelogEntry::new( + IdType::PullRequest(vec![940]), + ChangelogContents { + description: s!("this is a breaking change."), + issues: vec![679], + breaking: true, + kind: ChangelogType::Changed, + }, + )], + fixed: vec![ + ChangelogEntry::new( + IdType::PullRequest(vec![978]), + ChangelogContents { + description: s!("sample description for a PR adding one CHANGELOG entry."), + issues: vec![437], + breaking: false, + kind: ChangelogType::Fixed, + }, + ), + ChangelogEntry::new( + IdType::PullRequest(vec![940]), + ChangelogContents { + description: s!("this is a fixed entry that has no attached issue."), + issues: vec![], + breaking: false, + kind: ChangelogType::Fixed, + }, + ), + ChangelogEntry::new( + IdType::Issue(vec![440]), + ChangelogContents { + description: s!("no associated PR."), + issues: vec![440], + breaking: false, + kind: ChangelogType::Fixed, + }, + ), + ], + removed: vec![], + internal: vec![], + }; + assert_eq!(actual, expected); + + Ok(()) + } + + #[test] + fn read_template_changelog() -> cross::Result<()> { + let mut msg_info = MessageInfo::default(); + let root = project_dir(&mut msg_info)?; + + let (_, mut actual, _) = read_changelog(&root.join(".changes").join("template"))?; + actual.sort_descending(); + let expected = ChangelogEntry::new( + IdType::PullRequest(vec![905]), + ChangelogContents { + description: s!("added qemu emulation to `i586-unknown-linux-gnu`, `i686-unknown-linux-musl`, and `i586-unknown-linux-gnu`, so they can run on an `x86` CPU, rather than an `x86_64` CPU."), + issues: vec![], + breaking: false, + kind: ChangelogType::Added, + }, + ); + assert_eq!(actual.added[0], expected); + + let expected = ChangelogEntry::new( + IdType::PullRequest(vec![869]), + ChangelogContents { + description: s!("ensure cargo configuration environment variable flags are passed to the docker 
container."), + issues: vec![], + breaking: false, + kind: ChangelogType::Changed, + }, + ); + assert_eq!(actual.changed[0], expected); + + let expected = ChangelogEntry::new( + IdType::PullRequest(vec![905]), + ChangelogContents { + description: s!("fixed running dynamically-linked libraries for all musl targets except `x86_64-unknown-linux-musl`."), + issues: vec![], + breaking: false, + kind: ChangelogType::Fixed, + }, + ); + assert_eq!(actual.fixed[0], expected); + assert_eq!(actual.removed.len(), 0); + assert_eq!(actual.internal.len(), 0); + + Ok(()) + } + + fn build_changelog_test(release: Option<&str>) -> cross::Result { + let mut msg_info = MessageInfo::default(); + let root = project_dir(&mut msg_info)?; + let changes_dir = root.join(".changes").join("template"); + + build_changelog_from_dir(&changes_dir, &changes_dir, release) + } + + #[test] + fn test_build_changelog_no_release() -> cross::Result<()> { + let output = build_changelog_test(None)?; + let lines: Vec<&str> = output.lines().collect(); + + assert_eq!(lines[10], "- #979,#981 - this has 2 PRs associated."); + assert_eq!(lines[11], "- #940 - this is one added entry."); + assert_eq!( + lines[36], + "- #885 - handle symlinks when using remote docker." + ); + assert_eq!(lines[39], "- no associated PR."); + assert_eq!( + &lines[6..12], + &[ + "## [Unreleased] - ReleaseDate", + "", + "### Added", + "", + "- #979,#981 - this has 2 PRs associated.", + "- #940 - this is one added entry.", + ] + ); + + Ok(()) + } + + #[test] + fn test_build_changelog_dev_release() -> cross::Result<()> { + let output = build_changelog_test(Some("0.2.4-alpha"))?; + let lines: Vec<&str> = output.lines().collect(); + + assert_eq!( + &lines[6..12], + &[ + "## [Unreleased] - ReleaseDate", + "", + "### Added", + "", + "- #979,#981 - this has 2 PRs associated.", + "- #940 - this is one added entry.", + ] + ); + + Ok(()) + } + + #[test] + fn test_build_changelog_release() -> cross::Result<()> { + let output = build_changelog_test(Some("0.2.4"))?; + let lines: Vec<&str> = output.lines().collect(); + let date = get_current_date(); + + assert_eq!( + &lines[6..14], + &[ + "## [Unreleased] - ReleaseDate", + "", + &format!("## [v0.2.4] - {date}"), + "", + "### Added", + "", + "- #979,#981 - this has 2 PRs associated.", + "- #940 - this is one added entry.", + ] + ); + + Ok(()) + } + + static CHANGES_OBJECT: &str = r#" + { + "description": "sample description for a PR adding one CHANGELOG entry.", + "issues": [437], + "type": "fixed" + } + "#; + + static CHANGES_ARRAY: &str = r#" + [ + { + "description": "this is one added entry.", + "issues": [630], + "type": "added" + }, + { + "description": "this is another added entry.", + "issues": [642], + "type": "added" + }, + { + "description": "this is a fixed entry that has no attached issue.", + "type": "fixed" + }, + { + "description": "this is a breaking change.", + "issues": [679], + "breaking": true, + "type": "changed" + } + ] + "#; +} diff --git a/xtask/src/ci.rs b/xtask/src/ci.rs index 8582dd5b1..54ae759f9 100644 --- a/xtask/src/ci.rs +++ b/xtask/src/ci.rs @@ -1,3 +1,5 @@ +mod target_matrix; + use crate::util::gha_output; use clap::Subcommand; use cross::shell::Verbosity; @@ -17,13 +19,14 @@ pub enum CiJob { }, /// Check workspace metadata. 
Check { - // tag, branch + /// tag, branch #[clap(long, env = "GITHUB_REF_TYPE")] ref_type: String, - // main, v0.1.0 + /// main, v0.1.0 #[clap(long, env = "GITHUB_REF_NAME")] ref_name: String, }, + TargetMatrix(target_matrix::TargetMatrix), } pub fn ci(args: CiJob, metadata: CargoMetadata) -> cross::Result<()> { @@ -40,17 +43,22 @@ pub fn ci(args: CiJob, metadata: CargoMetadata) -> cross::Result<()> { // Set labels let mut labels = vec![]; - let image_title = match target.triplet.as_ref() { - "cross" => target.triplet.to_string(), - _ => format!("cross (for {})", target.triplet), + let image_title = match target.name.as_ref() { + "cross" => target.name.to_string(), + // TODO: Mention platform? + _ => format!("cross (for {})", target.name), }; labels.push(format!("org.opencontainers.image.title={image_title}")); labels.push(format!( "org.opencontainers.image.licenses={}", cross_meta.license.as_deref().unwrap_or_default() )); + labels.push(format!( + "org.opencontainers.image.created={}", + chrono::Utc::now().to_rfc3339_opts(chrono::SecondsFormat::Millis, true) + )); - gha_output("labels", &serde_json::to_string(&labels.join("\n"))?); + gha_output("labels", &serde_json::to_string(&labels.join("\n"))?)?; let version = cross_meta.version.clone(); @@ -65,15 +73,15 @@ pub fn ci(args: CiJob, metadata: CargoMetadata) -> cross::Result<()> { false, &version, )?[0], - ); + )?; if target.has_ci_image() { - gha_output("has-image", "true") + gha_output("has-image", "true")? } - if target.is_default_test_image() { - gha_output("test-variant", "default") + if target.is_standard_target_image() { + gha_output("test-variant", "default")? } else { - gha_output("test-variant", &target.triplet) + gha_output("test-variant", &target.name)? } } CiJob::Check { ref_type, ref_name } => { @@ -83,9 +91,9 @@ pub fn ci(args: CiJob, metadata: CargoMetadata) -> cross::Result<()> { eyre::bail!("a version tag was published, but the tag does not match the current version in Cargo.toml"); } let search = cargo_command() - .args(&["search", "--limit", "1"]) + .args(["search", "--limit", "1"]) .arg("cross") - .run_and_get_stdout(&mut Verbosity::Verbose.into())?; + .run_and_get_stdout(&mut Verbosity::Verbose(2).into())?; let (cross, rest) = search .split_once(" = ") .ok_or_else(|| eyre::eyre!("cargo search failed"))?; @@ -97,10 +105,13 @@ pub fn ci(args: CiJob, metadata: CargoMetadata) -> cross::Result<()> { .ok_or_else(|| eyre::eyre!("cargo search returned unexpected data"))?, )?; if version >= latest_version && version.pre.is_empty() { - gha_output("is-latest", "true") + gha_output("is-latest", "true")? } } } + CiJob::TargetMatrix(target_matrix) => { + target_matrix.run()?; + } } Ok(()) } diff --git a/xtask/src/ci/target_matrix.rs b/xtask/src/ci/target_matrix.rs new file mode 100644 index 000000000..4b8a518c3 --- /dev/null +++ b/xtask/src/ci/target_matrix.rs @@ -0,0 +1,491 @@ +use std::process::Command; + +use clap::builder::{BoolishValueParser, PossibleValuesParser}; +use clap::Parser; +use cross::{shell::Verbosity, CommandExt}; +use serde::{Deserialize, Serialize}; + +use crate::util::{get_matrix, gha_output, gha_print, CiTarget, ImageTarget}; + +#[derive(Parser, Debug)] +pub struct TargetMatrix { + /// check is being run as part of a weekly check + #[clap(long)] + pub weekly: bool, + /// merge group that is being checked. 
+ #[clap(long)] + pub merge_group: Option<String>, + #[clap(subcommand)] + pub subcommand: Option<TargetMatrixSub>, +} + +#[derive(Parser, Debug)] +pub enum TargetMatrixSub { + Try { + /// pr to check + #[clap(long)] + pr: String, + /// comment to check + #[clap(long)] + comment: String, + }, +} + +impl TargetMatrix { + pub(crate) fn run(&self) -> Result<(), color_eyre::Report> { + let mut matrix: Vec<CiTarget> = get_matrix().clone(); + matrix.retain(|t| !t.disabled); + let mut is_default_try = false; + let pr: Option<String>; + let (prs, mut app) = match self { + TargetMatrix { + merge_group: Some(ref_), + .. + } => ( + vec![process_merge_group(ref_)?], + TargetMatrixArgs::default(), + ), + TargetMatrix { weekly: true, .. } => ( + vec![], + TargetMatrixArgs { + target: std::env::var("TARGETS") + .unwrap_or_default() + .split(' ') + .flat_map(|s| s.split(',')) + .filter(|s| !s.is_empty()) + .map(|s| s.to_owned()) + .collect(), + std: None, + cpp: None, + dylib: None, + run: None, + runners: vec![], + none: false, + has_image: true, + verbose: false, + tests: vec!["all".to_owned()], + }, + ), + TargetMatrix { + subcommand: Some(TargetMatrixSub::Try { pr, comment }), + .. + } => { + let process_try_comment = process_try_comment(comment)?; + is_default_try = process_try_comment.0; + (vec![pr.as_ref()], process_try_comment.1) + } + _ => { + pr = current_pr(); + ( + pr.iter().map(|s| s.as_str()).collect(), + TargetMatrixArgs::default(), + ) + } + }; + + // only apply ci labels on prs and `/ci try`; if the try command + // is not the default, we don't want to apply ci labels + if matches!( + self, + Self { + weekly: false, + merge_group: Some(_) | None, + subcommand: None, + } + ) || is_default_try + { + apply_ci_labels(&prs, &mut app)? + } + + app.filter(&mut matrix); + + let matrix = matrix + .iter() + .map(|target| TargetMatrixElement { + pretty: target.to_image_target().alt(), + platforms: target.platforms(), + target: &target.target, + sub: target.sub.as_deref(), + os: &target.os, + run: target.run.map(|b| b as u8), + deploy: target.deploy.map(|b| b as u8), + build_std: target.build_std.map(|b| b as u8), + cpp: target.cpp.map(|b| b as u8), + dylib: target.dylib.map(|b| b as u8), + runners: target.runners.as_deref(), + std: target.std.map(|b| b as u8), + verbose: app.verbose, + }) + .collect::<Vec<_>>(); + + let json = serde_json::to_string(&matrix)?; + gha_output("matrix", &json)?; + let tests = serde_json::to_string(&app.tests()?)?; + gha_output("tests", &tests)?; + Ok(()) + } +} + +fn apply_ci_labels(prs: &[&str], app: &mut TargetMatrixArgs) -> Result<(), eyre::Error> { + apply_has_no_ci_tests(prs, app)?; + apply_has_no_ci_target(prs, app)?; + + let mut to_add = vec![]; + 'pr_loop: for pr in prs { + let labels = parse_gh_labels(pr)?; + let targets = labels + .iter() + .filter_map(|label| label.strip_prefix("CI-")) + .collect::<Vec<_>>(); + if targets.is_empty() { + // if a pr doesn't specify a target, assume it affects all targets + to_add.clear(); + break 'pr_loop; + } + to_add.extend(targets.iter().map(|label| label.to_string())); + } + app.target.extend(to_add); + Ok(()) +} + +fn apply_has_no_ci_tests(prs: &[&str], app: &mut TargetMatrixArgs) -> Result<(), eyre::Error> { + if !prs.is_empty() + && prs.iter().try_fold(true, |b, pr| { + Ok::<_, eyre::Report>(b && has_no_ci_tests_label(pr)?) + })? 
+ { + app.none = true; + app.tests = vec!["none".to_owned()]; + } + Ok(()) +} + +fn apply_has_no_ci_target(prs: &[&str], app: &mut TargetMatrixArgs) -> Result<(), eyre::Error> { + if !prs.is_empty() + && prs.iter().try_fold(true, |b, pr| { + Ok::<_, eyre::Report>(b && has_no_ci_target_label(pr)?) + })? + { + app.none = true; + } + Ok(()) +} + +fn parse_gh_labels(pr: &str) -> cross::Result<Vec<String>> { + #[derive(Deserialize)] + struct PullRequest { + labels: Vec<PullRequestLabels>, + } + + #[derive(Deserialize)] + struct PullRequestLabels { + name: String, + } + eyre::ensure!( + pr.chars().all(|c| c.is_ascii_digit()), + "pr should be a number, got {:?}", + pr + ); + let stdout = Command::new("gh") + .args(["pr", "view", pr, "--json", "labels"]) + .run_and_get_stdout(&mut Verbosity::Quiet.into())?; + let pr_info: PullRequest = serde_json::from_str(&stdout)?; + Ok(pr_info.labels.into_iter().map(|l| l.name).collect()) +} + +fn has_no_ci_target_label(pr: &str) -> cross::Result<bool> { + Ok(parse_gh_labels(pr)?.contains(&"no-ci-targets".to_owned())) +} + +fn has_no_ci_tests_label(pr: &str) -> cross::Result<bool> { + Ok(parse_gh_labels(pr)?.contains(&"no-ci-tests".to_owned())) +} + +/// Convert a `GITHUB_REF` into its merge-group PR +fn process_merge_group(ref_: &str) -> cross::Result<&str> { + ref_.split('/') + .last() + .unwrap_or_default() + .strip_prefix("pr-") + .ok_or_else(|| eyre::eyre!("the last `/` segment of a merge group ref must start with \"pr-\""))? + .split('-') + .next() + .ok_or_else(|| eyre::eyre!("merge group ref must include \"pr-<number>-\"")) +} + +fn current_pr() -> Option<String> { + // gh pr view --json number --template "{{.number}}" + let stdout = Command::new("gh") + .args(["pr", "view", "--json", "number"]) + .run_and_get_stdout(&mut Verbosity::Quiet.into()) + .ok()?; + let pr_info: serde_json::Value = serde_json::from_str(&stdout).ok()?; + pr_info.get("number").map(|n| n.to_string()) +} + +/// Returns the app to use for the matrix on a try comment; the boolean indicates whether it's a try without arguments +fn process_try_comment(message: &str) -> cross::Result<(bool, TargetMatrixArgs)> { + for line in message.lines() { + let command = if let Some(command) = line.strip_prefix("/ci try") { + command.trim() + } else { + continue; + }; + if command.is_empty() { + return Ok((true, TargetMatrixArgs::default())); + } else { + return Ok((false, TargetMatrixArgs::parse_from(command.split(' ')))); + } + } + eyre::bail!("no /ci try command found in comment") +}
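A test-style sketch of how `/ci try` comments parse under `process_try_comment` above; the comment text and flags are illustrative, assuming clap's boolish value parsing for `--std`:

```rust
#[test]
fn try_comment_parsing() -> cross::Result<()> {
    // a bare `/ci try` is the default: run everything.
    let (is_default, args) = process_try_comment("/ci try")?;
    assert!(is_default);
    assert_eq!(args, TargetMatrixArgs::default());

    // arguments after `/ci try` are parsed as `TargetMatrixArgs`.
    let (is_default, args) =
        process_try_comment("some context\n/ci try --std 1 --target aarch64*")?;
    assert!(!is_default);
    assert_eq!(args.std, Some(true));
    assert_eq!(args.target, vec!["aarch64*".to_owned()]);
    Ok(())
}
```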
 +#[derive(Serialize)] +#[serde(rename_all = "kebab-case")] +struct TargetMatrixElement<'a> { + pretty: String, + platforms: &'a [String], + target: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + sub: Option<&'a str>, + os: &'a str, + #[serde(skip_serializing_if = "Option::is_none")] + run: Option<u8>, + #[serde(skip_serializing_if = "Option::is_none")] + deploy: Option<u8>, + #[serde(skip_serializing_if = "Option::is_none")] + build_std: Option<u8>, + #[serde(skip_serializing_if = "Option::is_none")] + cpp: Option<u8>, + #[serde(skip_serializing_if = "Option::is_none")] + dylib: Option<u8>, + #[serde(skip_serializing_if = "Option::is_none")] + runners: Option<&'a str>, + #[serde(skip_serializing_if = "Option::is_none")] + std: Option<u8>, + verbose: bool, +} + +#[derive(Parser, Debug, PartialEq, Eq)] +#[clap(no_binary_name = true)] +struct TargetMatrixArgs { + #[clap(long, short, num_args = 0..)] + target: Vec<String>, + #[clap(long, value_parser = BoolishValueParser::new())] + std: Option<bool>, + #[clap(long, value_parser = BoolishValueParser::new())] + cpp: Option<bool>, + #[clap(long, value_parser = BoolishValueParser::new())] + dylib: Option<bool>, + #[clap(long, value_parser = BoolishValueParser::new())] + run: Option<bool>, + #[clap(long, short, num_args = 0..)] + runners: Vec<String>, + #[clap(long)] + none: bool, + #[clap(long)] + has_image: bool, + #[clap(long, short)] + verbose: bool, + #[clap(long, value_parser = PossibleValuesParser::new(&[ + "remote", + "bisect", + "foreign", + "docker-in-docker", + "podman", + "none", + "all" + ]), + num_args = 0.., + value_delimiter = ',', + default_value = "all" + )] + tests: Vec<String>, +} + +impl Default for TargetMatrixArgs { + fn default() -> Self { + Self { + target: Vec::new(), + std: None, + cpp: None, + dylib: None, + run: None, + runners: Vec::new(), + none: false, + has_image: false, + verbose: false, + tests: vec!["all".to_owned()], + } + } +} + +impl TargetMatrixArgs { + pub fn filter(&self, matrix: &mut Vec<CiTarget>) { + if self == &TargetMatrixArgs::default() { + gha_print("Running all targets."); + } + if self.none { + gha_print("Running no targets."); + std::mem::take(matrix); + return; + } + if self.has_image { + matrix.retain(|t| t.to_image_target().has_ci_image()); + } + if !self.target.is_empty() { + matrix.retain(|m| { + let matrix_target = m.to_image_target(); + let matrix_string = matrix_target.to_string(); + + return self + .target + .iter() + .any(|t| t.parse::<ImageTarget>().unwrap() == matrix_target) + || self + .target + .iter() + .any(|t| wildmatch::WildMatch::new(t).matches(&matrix_string)); + }) + }; + if let Some(std) = self.std { + matrix.retain(|m| m.std.unwrap_or_default() == std) + } + if let Some(cpp) = self.cpp { + matrix.retain(|m| m.cpp.unwrap_or_default() == cpp) + } + if let Some(dylib) = self.dylib { + matrix.retain(|m| m.dylib.unwrap_or_default() == dylib) + } + if let Some(run) = self.run { + matrix.retain(|m| m.run.unwrap_or_default() == run) + } + if !self.runners.is_empty() { + matrix.retain(|m| { + self.runners + .iter() + .any(|runner| m.runners.as_deref().unwrap_or_default().contains(runner)) + }); + } + } + + fn tests(&self) -> Result<serde_json::Value, serde_json::Error> { + use clap::CommandFactory; + use serde::ser::SerializeMap; + struct Ser(Vec<String>); + impl serde::Serialize for Ser { + fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { + let mut map = serializer.serialize_map(Some(self.0.len()))?; + for e in &self.0 { + map.serialize_entry(&e, &true)?; + } + map.end() + } + } + let mut tests = match ( + self.tests.iter().any(|t| t == "all"), + self.tests.iter().any(|t| t == "none"), + ) { + (_, true) => vec![], + (true, false) => { + let possible: Vec<String> = Self::command() + .get_arguments() + .find(|arg| arg.get_id() == "tests") + .expect("a `tests` argument should exist") + .get_possible_values() + .into_iter() + .map(|p| p.get_name().to_owned()) + .collect(); + + possible + } + _ => self.tests.clone(), + }; + tests.retain(|p| p != "all"); + serde_json::to_value(Ser(tests)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[track_caller] + fn run<'a>(args: impl IntoIterator<Item = &'a str>) -> Vec<CiTarget> { + let mut matrix = get_matrix().clone(); + matrix.retain_mut(|t| !t.disabled); + TargetMatrixArgs::try_parse_from(args) + .unwrap() + .filter(&mut matrix); + matrix + } + + #[test] + fn it_works() { + run([ + "--target", + "*", + "--std", + "1", + "--cpp", + "1", + "--dylib", + "1", + "--run", + "1", + "--runners", + "native", + ]); + } + + #[test] + fn exact() { + let matrix = run(["--target", "arm-unknown-linux-gnueabi"]); + assert_eq!(matrix.len(), 1); + assert_eq!(matrix[0].target, "arm-unknown-linux-gnueabi"); + } + + #[test] + fn glob() { + let matrix = run(["--target", 
"arm-unknown-linux-gnueabi*"]); + assert_eq!(matrix.len(), 2); + assert_eq!(matrix[0].target, "arm-unknown-linux-gnueabi"); + assert_eq!(matrix[1].target, "arm-unknown-linux-gnueabihf"); + } + + #[test] + fn ensure_filter_works() { + let matrix = run(["--dylib", "1"]); + assert!(matrix + .iter() + .any(|t| t.target == "aarch64-unknown-linux-gnu")); + assert!(matrix.iter().all(|t| t.target != "thumbv6m-none-eabi")); + + let matrix = run(["--dylib", "0"]); + assert!(matrix + .iter() + .all(|t| t.target != "aarch64-unknown-linux-gnu")); + assert!(matrix.iter().any(|t| t.target == "thumbv6m-none-eabi")); + } + + #[test] + fn all() { + let matrix = run([]); + let mut all = get_matrix().clone(); + all.retain(|t| !t.disabled); + assert_eq!(&all, &matrix); + } + + #[test] + fn none() { + let matrix = run(["--none"]); + assert_eq!(&Vec::::new(), &matrix); + } + + #[test] + fn merge_group() { + assert_eq!( + process_merge_group("refs/heads/gh-readonly-queue/main/pr-1375-44011c8854cb2eaac83b173cc323220ccdff18ea").unwrap(), + "1375" + ); + } +} diff --git a/xtask/src/codegen.rs b/xtask/src/codegen.rs new file mode 100644 index 000000000..790d8aecb --- /dev/null +++ b/xtask/src/codegen.rs @@ -0,0 +1,69 @@ +use clap::Args; +use eyre::Context; +use std::fmt::Write; + +use crate::util::{get_cargo_workspace, get_matrix}; + +#[derive(Args, Debug)] +pub struct Codegen {} + +pub fn codegen(Codegen { .. }: Codegen) -> cross::Result<()> { + let path = get_cargo_workspace().join("src/docker/provided_images.rs"); + std::fs::write(path, docker_images()).wrap_err("when writing src/docker/provided_images.rs")?; + Ok(()) +} + +pub fn docker_images() -> String { + let mut images = String::from( + r#"#![doc = "*** AUTO-GENERATED, do not touch. Run `cargo xtask codegen` to update ***"] +use super::{ImagePlatform, ProvidedImage}; + +#[rustfmt::skip] +pub static PROVIDED_IMAGES: &[ProvidedImage] = &["#, + ); + + for image_target in get_matrix() + .iter() + .filter(|i| i.builds_image() && i.to_image_target().is_toolchain_image() && !i.disabled) + { + write!( + &mut images, + r#" + ProvidedImage {{ + name: "{name}", + platforms: &[{platform}], + sub: {sub} + }},"#, + name = image_target.target.clone(), + platform = &image_target + .platforms() + .iter() + .map(|p| { + format!( + "ImagePlatform::{}", + p.replace('-', "_").to_ascii_uppercase() + ) + }) + .collect::>() + .join(", "), + sub = if let Some(sub) = &image_target.sub { + format!(r#"Some("{}")"#, sub) + } else { + "None".to_string() + } + ) + .expect("writing to string should not fail") + } + + images.push_str("\n];\n"); + images +} + +#[cfg(test)] +#[test] +pub fn ensure_correct_codegen() -> cross::Result<()> { + let provided_images = crate::util::get_cargo_workspace().join("src/docker/provided_images.rs"); + let content = cross::file::read(provided_images)?; + assert_eq!(content.replace("\r\n", "\n"), docker_images()); + Ok(()) +} diff --git a/xtask/src/crosstool.rs b/xtask/src/crosstool.rs index 6d12e4295..655fd1162 100644 --- a/xtask/src/crosstool.rs +++ b/xtask/src/crosstool.rs @@ -1,3 +1,4 @@ +use std::cmp::Ordering; use std::fmt::Write; use std::fs; use std::path::{Path, PathBuf}; @@ -9,6 +10,9 @@ use cross::ToUtf8; const DEFAULT_GCC_VERSION: &str = "8.3.0"; const DEFAULT_GLIBC_VERSION: &str = "2.17.0"; +const DEFAULT_UCLIBC_VERSION: &str = "1.0.31"; +const DEFAULT_MUSL_VERSION: &str = "1.1.24"; +const DEFAULT_NEWLIB_VERSION: &str = "3.1.0.20181231"; const DEFAULT_LINUX_VERSION: &str = "4.19.21"; const DOCKER: &str = "docker"; const CT_NG: &str = 
"crosstool-ng"; @@ -17,21 +21,21 @@ const CT_CONFIG: &str = "crosstool-config"; #[derive(Args, Debug)] pub struct ConfigureCrosstool { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// The gcc version to configure for. #[clap(long, env = "GCC_VERSION")] pub gcc_version: Option, /// The glibc version to configure for. #[clap(long, env = "GLIBC_VERSION")] pub glibc_version: Option, + /// The uclibc version to configure for. + #[clap(long, env = "UCLIBC_VERSION")] + pub uclibc_version: Option, + /// The musl version to configure for. + #[clap(long, env = "MUSL_VERSION")] + pub musl_version: Option, + /// The newlib version to configure for. + #[clap(long, env = "NEWLIB_VERSION")] + pub newlib_version: Option, /// The linux version to configure for. #[clap(long, env = "LINUX_VERSION")] pub linux_version: Option, @@ -75,33 +79,238 @@ fn read_config_dir(dir: &Path) -> cross::Result> { Ok(targets) } +fn configure_glibc( + glibc_version: &str, +) -> cross::Result<(&'static str, String, String, Option)> { + // configure the `CT_GLIBC` values + let glibc_versions: Vec<&str> = glibc_version.split('.').collect(); + if !matches!(glibc_versions.len(), 2 | 3) { + eyre::bail!("invalid glibc version, got {glibc_version}"); + } + + let glibc_major = glibc_versions[0].parse::()?; + let glibc_minor = glibc_versions[1].parse::()?; + let _glibc_patch = glibc_versions.get(2).unwrap_or(&"0").parse::()?; + if glibc_major != 2 { + eyre::bail!("glibc major versions other than 2 currently unsupported, got {glibc_major}"); + } + let ct_glibc_v = format!( + r#"CT_GLIBC_V_{glibc_major}_{glibc_minor}=y +# CT_GLIBC_NO_VERSIONS is not set +CT_GLIBC_VERSION="{glibc_major}.{glibc_minor}""# + ); + let mut ct_glibc = String::new(); + let glibc_older = [29, 27, 26, 25, 24, 23, 20]; + for minor in glibc_older { + if glibc_minor <= minor { + write!(ct_glibc, "\nCT_GLIBC_2_{minor}_or_older=y")?; + } + if glibc_minor < minor { + write!(ct_glibc, "\nCT_GLIBC_older_than_2_{minor}=y")?; + } + } + if glibc_minor >= 17 { + ct_glibc.push_str("\nCT_GLIBC_2_17_or_later=y"); + } + if glibc_minor <= 17 { + ct_glibc.push_str("\nCT_GLIBC_2_17_or_older=y"); + } + if glibc_minor > 14 { + ct_glibc.push_str("\nCT_GLIBC_later_than_2_14=y"); + } + if glibc_minor >= 14 { + ct_glibc.push_str("\nCT_GLIBC_2_14_or_later=y"); + } + + Ok(("GLIBC", ct_glibc_v, ct_glibc, None)) +} + +fn configure_uclibc( + uclibc_version: &str, +) -> cross::Result<(&'static str, String, String, Option)> { + // configure the `CT_UCLIBC` values + let uclibc_versions: Vec<&str> = uclibc_version.split('.').collect(); + if !matches!(uclibc_versions.len(), 3 | 4) { + eyre::bail!("invalid uclibc version, got {uclibc_version}"); + } + + let uclibc_major = uclibc_versions[0].parse::()?; + let uclibc_minor = uclibc_versions[1].parse::()?; + let uclibc_patch = uclibc_versions[2].parse::()?; + let uclibc_dev = uclibc_versions.get(3).unwrap_or(&"0").parse::()?; + let (key, version) = match uclibc_dev { + 0 => ( + format!("{uclibc_major}_{uclibc_minor}_{uclibc_patch}"), + format!("{uclibc_major}.{uclibc_minor}.{uclibc_patch}"), + ), + _ => ( + format!("{uclibc_major}_{uclibc_minor}_{uclibc_patch}_{uclibc_dev}"), + format!("{uclibc_major}.{uclibc_minor}.{uclibc_patch}.{uclibc_dev}"), + ), + }; + let (ct_uclibc_v, ct_extras) = match uclibc_major { + 0 => ( + format!( + r#"CT_UCLIBC_V_{key}=y +# 
CT_UCLIBC_NO_VERSIONS is not set +CT_UCLIBC_VERSION="{version}""# + ), + r#"CT_UCLIBC_USE_UCLIBC_ORG=y +CT_UCLIBC_USE="UCLIBC" +CT_UCLIBC_PKG_NAME="uClibc" +CT_UCLIBC_SRC_RELEASE=y +CT_UCLIBC_PATCH_ORDER="global" +CT_UCLIBC_MIRRORS="https://uclibc.org/downloads/" +CT_UCLIBC_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_UCLIBC_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_UCLIBC_ARCHIVE_FORMATS=".tar.xz .tar.bz2" +CT_UCLIBC_SIGNATURE_FORMAT="packed/.asc""#, + ), + _ => ( + format!( + r#"CT_UCLIBC_NG_V_{key}=y +# CT_UCLIBC_NG_NO_VERSIONS is not set +CT_UCLIBC_NG_VERSION="{version}""# + ), + r#"CT_UCLIBC_USE_UCLIBC_NG_ORG=y +CT_UCLIBC_USE="UCLIBC_NG" +CT_UCLIBC_NG_PKG_NAME="uClibc-ng" +CT_UCLIBC_NG_SRC_RELEASE=y +CT_UCLIBC_NG_PATCH_ORDER="global" +CT_UCLIBC_NG_MIRRORS="http://downloads.uclibc-ng.org/releases/${CT_UCLIBC_NG_VERSION}" +CT_UCLIBC_NG_ARCHIVE_FILENAME="@{pkg_name}-@{version}" +CT_UCLIBC_NG_ARCHIVE_DIRNAME="@{pkg_name}-@{version}" +CT_UCLIBC_NG_ARCHIVE_FORMATS=".tar.xz .tar.lz .tar.bz2 .tar.gz" +CT_UCLIBC_NG_SIGNATURE_FORMAT="packed/.asc""#, + ), + }; + + let mut ct_uclibc = String::new(); + let version = (uclibc_major, uclibc_minor, uclibc_patch, uclibc_dev); + let uclibc_older = [ + (1, 0, 23, 0), + (1, 0, 21, 0), + (1, 0, 15, 0), + (1, 0, 0, 0), + (0, 9, 33, 2), + ]; + for older in uclibc_older { + let cmp = older.cmp(&version); + let (major, minor, patch, dev) = older; + let key = match dev { + 0 => format!("{major}_{minor}_{patch}"), + _ => format!("{major}_{minor}_{patch}_{dev}"), + }; + match cmp { + Ordering::Greater => { + write!(ct_uclibc, "\nCT_UCLIBC_{key}_or_older=y")?; + write!(ct_uclibc, "\nCT_UCLIBC_older_than_{key}=y")?; + } + Ordering::Equal => { + write!(ct_uclibc, "\nCT_UCLIBC_{key}_or_later=y")?; + write!(ct_uclibc, "\nCT_UCLIBC_{key}_or_older=y")?; + } + Ordering::Less => { + write!(ct_uclibc, "\nCT_UCLIBC_{key}_or_later=y")?; + write!(ct_uclibc, "\nCT_UCLIBC_later_than_{key}=y")?; + } + } + } + + Ok(("UCLIBC", ct_uclibc_v, ct_uclibc, Some(ct_extras.to_owned()))) +} + +fn configure_musl( + musl_version: &str, +) -> cross::Result<(&'static str, String, String, Option)> { + let musl_versions: Vec<&str> = musl_version.split('.').collect(); + if !matches!(musl_versions.len(), 3) { + eyre::bail!("invalid musl version, got {musl_version}"); + } + + let musl_major = musl_versions[0].parse::()?; + let musl_minor = musl_versions[1].parse::()?; + let musl_patch = musl_versions[2].parse::()?; + let ct_musl_v = format!( + r#"CT_MUSL_V_{musl_major}_{musl_minor}_{musl_patch}=y +# CT_MUSL_NO_VERSIONS is not set +CT_MUSL_VERSION="{musl_major}.{musl_minor}.{musl_patch}""# + ); + + Ok(("MUSL", ct_musl_v, "".to_owned(), None)) +} + +fn configure_newlib( + newlib_version: &str, +) -> cross::Result<(&'static str, String, String, Option)> { + let newlib_versions: Vec<&str> = newlib_version.split('.').collect(); + if !matches!(newlib_versions.len(), 3 | 4) { + eyre::bail!("invalid newlib version, got {newlib_version}"); + } + + let newlib_major = newlib_versions[0].parse::()?; + let newlib_minor = newlib_versions[1].parse::()?; + let newlib_patch = newlib_versions[2].parse::()?; + let newlib_dev = newlib_versions.get(3).unwrap_or(&"0").parse::()?; + let ct_newlib_v = format!( + r#"CT_NEWLIB_V_{newlib_major}_{newlib_minor}=y +# CT_NEWLIB_NO_VERSIONS is not set +CT_NEWLIB_VERSION="{newlib_major}.{newlib_minor}.{newlib_patch}.{newlib_dev}""# + ); + + let mut ct_newlib = String::new(); + let version = (newlib_major, newlib_minor); + let newlib_older = [(2, 2), (2, 1), (2, 0)]; + for 
+    for older in newlib_older {
+        let cmp = older.cmp(&version);
+        let (major, minor) = older;
+        let key = format!("{major}_{minor}");
+        match cmp {
+            Ordering::Greater => {
+                write!(ct_newlib, "\nCT_NEWLIB_{key}_or_older=y")?;
+                write!(ct_newlib, "\nCT_NEWLIB_older_than_{key}=y")?;
+            }
+            Ordering::Equal => {
+                write!(ct_newlib, "\nCT_NEWLIB_{key}_or_later=y")?;
+                write!(ct_newlib, "\nCT_NEWLIB_{key}_or_older=y")?;
+            }
+            Ordering::Less => {
+                write!(ct_newlib, "\nCT_NEWLIB_{key}_or_later=y")?;
+                write!(ct_newlib, "\nCT_NEWLIB_later_than_{key}=y")?;
+            }
+        }
+    }
+
+    Ok(("NEWLIB", ct_newlib_v, ct_newlib, None))
+}
+
 fn configure_target(
     src_file: &Path,
     gcc_version: &str,
     glibc_version: &str,
+    uclibc_version: &str,
+    musl_version: &str,
+    newlib_version: &str,
     linux_version: &str,
 ) -> cross::Result<String> {
+    let file_name = src_file
+        .file_name()
+        .ok_or(eyre::eyre!("unable to get filename for {src_file:?}"))?
+        .to_utf8()?;
+    let mut contents = fs::read_to_string(src_file)?;
+
+    // configure the `CT_GCC` values
     let gcc_versions: Vec<&str> = gcc_version.split('.').collect();
-    let glibc_versions: Vec<&str> = glibc_version.split('.').collect();
-    let linux_versions: Vec<&str> = linux_version.split('.').collect();
     if !matches!(gcc_versions.len(), 2 | 3) {
         eyre::bail!("invalid GCC version, got {gcc_version}");
     }
-    if !matches!(glibc_versions.len(), 2 | 3) {
-        eyre::bail!("invalid glibc version, got {glibc_version}");
-    }
-    if !matches!(linux_versions.len(), 2 | 3) {
-        eyre::bail!("invalid linux version, got {linux_version}");
-    }
-
-    // configure the `CT_GCC` values
     let gcc_major = gcc_versions[0].parse::<u32>()?;
     let gcc_minor = gcc_versions[1].parse::<u32>()?;
     let gcc_patch = gcc_versions.get(2).unwrap_or(&"0").parse::<u32>()?;
     let ct_gcc_v = format!(
-        "CT_GCC_V_{gcc_major}=y
+        r#"CT_GCC_V_{gcc_major}=y
 # CT_GCC_NO_VERSIONS is not set
-CT_GCC_VERSION=\"{gcc_major}.{gcc_minor}.{gcc_patch}\""
+CT_GCC_VERSION="{gcc_major}.{gcc_minor}.{gcc_patch}""#
     );
     let mut ct_gcc = String::new();
     for major in (5..=7).rev() {
@@ -124,84 +333,88 @@ CT_GCC_VERSION=\"{gcc_major}.{gcc_minor}.{gcc_patch}\""
     if gcc_major > 4 || (gcc_major == 4 && gcc_major >= 8) {
         ct_gcc.push_str("\nCT_GCC_4_8_or_later=y");
     }
-
-    // configure the `CT_GLIBC` values
-    let glibc_major = glibc_versions[0].parse::<u32>()?;
-    let glibc_minor = glibc_versions[1].parse::<u32>()?;
-    let _glibc_patch = glibc_versions.get(2).unwrap_or(&"0").parse::<u32>()?;
-    if glibc_major != 2 {
-        eyre::bail!("glibc major versions other than 2 currently unsupported, got {glibc_major}");
+    if ct_gcc.starts_with('\n') {
+        ct_gcc.remove(0);
     }
-    let ct_glibc_v = format!(
-        "CT_GLIBC_V_{glibc_major}_{glibc_minor}=y
-# CT_GLIBC_NO_VERSIONS is not set
-CT_GLIBC_VERSION=\"{glibc_major}.{glibc_minor}\""
-    );
-    let mut ct_glibc = String::new();
-    let glibc_older = [29, 27, 26, 25, 24, 23, 20];
-    for minor in glibc_older {
-        if glibc_minor <= minor {
-            write!(ct_glibc, "\nCT_GLIBC_2_{minor}_or_older=y")?;
-        }
-        if glibc_minor < minor {
-            write!(ct_glibc, "\nCT_GLIBC_older_than_2_{minor}=y")?;
-        }
-    }
-    if glibc_minor >= 17 {
-        ct_glibc.push_str("\nCT_GLIBC_2_17_or_later=y");
-    }
-    if glibc_minor <= 17 {
-        ct_glibc.push_str("\nCT_GLIBC_2_17_or_older=y");
-    }
-    if glibc_minor > 14 {
-        ct_glibc.push_str("\nCT_GLIBC_later_than_2_14=y");
+    contents = contents
+        .replacen("%CT_GCC_V%", &ct_gcc_v, 1)
+        .replacen("%CT_GCC%", &ct_gcc, 1);
+
+    // configure the libc versions
+    let (key, libc_v, mut libc, extras) = if file_name.contains("gnu") {
+        configure_glibc(glibc_version)?
+    } else if file_name.contains("uclibc") {
+        configure_uclibc(uclibc_version)?
+    } else if file_name.contains("musl") {
+        configure_musl(musl_version)?
+    } else if file_name.contains("none") {
+        configure_newlib(newlib_version)?
+    } else {
+        eyre::bail!("unsupported rust target for file {file_name}: unknown libc version");
+    };
+    if libc.starts_with('\n') {
+        libc.remove(0);
     }
-    if glibc_minor >= 14 {
+    contents = contents
-        ct_glibc.push_str("\nCT_GLIBC_2_14_or_later=y");
+        .replacen(&format!("%CT_{key}_V%"), &libc_v, 1)
+        .replacen(&format!("%CT_{key}%"), &libc, 1);
+    if let Some(extras) = extras {
+        contents = contents.replacen(&format!("%CT_{key}_EXTRAS%"), &extras, 1);
     }
 
     // configure the `CT_LINUX` values
-    let linux_major = linux_versions[0].parse::<u32>()?;
-    let linux_minor = linux_versions[1].parse::<u32>()?;
-    let linux_patch = linux_versions.get(2).unwrap_or(&"0").parse::<u32>()?;
-    let ct_linux_v = format!(
-        "CT_LINUX_V_{linux_major}_{linux_minor}=y
+    if file_name.contains("linux") {
+        let linux_versions: Vec<&str> = linux_version.split('.').collect();
+        if !matches!(linux_versions.len(), 2 | 3) {
+            eyre::bail!("invalid linux version, got {linux_version}");
+        }
+        let linux_major = linux_versions[0].parse::<u32>()?;
+        let linux_minor = linux_versions[1].parse::<u32>()?;
+        let linux_patch = linux_versions.get(2).unwrap_or(&"0").parse::<u32>()?;
+        let ct_linux_v = format!(
+            r#"CT_LINUX_V_{linux_major}_{linux_minor}=y
 # CT_LINUX_NO_VERSIONS is not set
-CT_LINUX_VERSION=\"{linux_major}.{linux_minor}.{linux_patch}\""
-    );
-    let mut ct_linux = String::new();
-    if linux_major < 4 || (linux_major == 4 && linux_minor < 8) {
-        ct_linux.push_str("\nCT_LINUX_older_than_4_8=y");
-        ct_linux.push_str("\nCT_LINUX_4_8_or_older=y");
-    } else {
-        ct_linux.push_str("\nCT_LINUX_later_than_4_8=y");
-        ct_linux.push_str("\nCT_LINUX_4_8_or_later=y");
-    }
-    if linux_major < 3 || (linux_major == 3 && linux_minor < 7) {
-        ct_linux.push_str("\nCT_LINUX_older_than_3_7=y");
-        ct_linux.push_str("\nCT_LINUX_3_7_or_older=y");
-        ct_linux.push_str("\nCT_LINUX_older_than_3_2=y");
-        ct_linux.push_str("\nCT_LINUX_3_2_or_older=y");
-    } else {
-        ct_linux.push_str("\nCT_LINUX_later_than_3_7=y");
-        ct_linux.push_str("\nCT_LINUX_3_7_or_later=y");
-        ct_linux.push_str("\nCT_LINUX_later_than_3_2=y");
-        ct_linux.push_str("\nCT_LINUX_3_2_or_later=y");
+CT_LINUX_VERSION="{linux_major}.{linux_minor}.{linux_patch}""#
+        );
+        let mut ct_linux = String::new();
+        if linux_major < 4 || (linux_major == 4 && linux_minor < 8) {
+            ct_linux.push_str("\nCT_LINUX_older_than_4_8=y");
+            ct_linux.push_str("\nCT_LINUX_4_8_or_older=y");
+        } else {
+            ct_linux.push_str("\nCT_LINUX_later_than_4_8=y");
+            ct_linux.push_str("\nCT_LINUX_4_8_or_later=y");
+        }
+        if linux_major < 3 || (linux_major == 3 && linux_minor < 7) {
+            ct_linux.push_str("\nCT_LINUX_older_than_3_7=y");
+            ct_linux.push_str("\nCT_LINUX_3_7_or_older=y");
+            ct_linux.push_str("\nCT_LINUX_older_than_3_2=y");
+            ct_linux.push_str("\nCT_LINUX_3_2_or_older=y");
+        } else {
+            ct_linux.push_str("\nCT_LINUX_later_than_3_7=y");
+            ct_linux.push_str("\nCT_LINUX_3_7_or_later=y");
+            ct_linux.push_str("\nCT_LINUX_later_than_3_2=y");
+            ct_linux.push_str("\nCT_LINUX_3_2_or_later=y");
+        }
+        if ct_linux.starts_with('\n') {
+            ct_linux.remove(0);
+        }
+
+        contents =
+            contents
+                .replacen("%CT_LINUX_V%", &ct_linux_v, 1)
+                .replacen("%CT_LINUX%", &ct_linux, 1);
     }
 
-    Ok(fs::read_to_string(src_file)?
- .replacen("%CT_GCC_V%", &ct_gcc_v, 1) - .replacen("%CT_GCC%", &ct_gcc, 1) - .replacen("%CT_GLIBC_V%", &ct_glibc_v, 1) - .replacen("%CT_GLIBC%", &ct_glibc, 1) - .replacen("%CT_LINUX_V%", &ct_linux_v, 1) - .replacen("%CT_LINUX%", &ct_linux, 1)) + Ok(contents) } pub fn configure_crosstool( ConfigureCrosstool { gcc_version, glibc_version, + uclibc_version, + musl_version, + newlib_version, linux_version, mut targets, .. @@ -210,6 +423,9 @@ pub fn configure_crosstool( ) -> cross::Result<()> { let gcc_version = gcc_version.as_deref().unwrap_or(DEFAULT_GCC_VERSION); let glibc_version = glibc_version.as_deref().unwrap_or(DEFAULT_GLIBC_VERSION); + let uclibc_version = uclibc_version.as_deref().unwrap_or(DEFAULT_UCLIBC_VERSION); + let musl_version = musl_version.as_deref().unwrap_or(DEFAULT_MUSL_VERSION); + let newlib_version = newlib_version.as_deref().unwrap_or(DEFAULT_NEWLIB_VERSION); let linux_version = linux_version.as_deref().unwrap_or(DEFAULT_LINUX_VERSION); let root = project_dir(msg_info)?; @@ -226,7 +442,15 @@ pub fn configure_crosstool( .map(|t| locate_ctng_config(&t, &root, &cross_toolchains_root)) .collect::>>()?; for (src_file, dst_file) in config_files { - let configured = configure_target(&src_file, gcc_version, glibc_version, linux_version)?; + let configured = configure_target( + &src_file, + gcc_version, + glibc_version, + uclibc_version, + musl_version, + newlib_version, + linux_version, + )?; write_to_string(&dst_file, &configured)?; } diff --git a/xtask/src/hooks.rs b/xtask/src/hooks.rs index 739b4f75c..57dcbbe46 100644 --- a/xtask/src/hooks.rs +++ b/xtask/src/hooks.rs @@ -1,66 +1,62 @@ use std::fs::File; use std::io::{BufRead, BufReader, ErrorKind}; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::process::Command; -use crate::util::{cargo, get_channel_prefer_nightly}; +use crate::util::{cargo, cargo_metadata, get_channel_prefer_nightly}; +use clap::builder::BoolishValueParser; use clap::Args; use cross::shell::MessageInfo; use cross::CommandExt; +use eyre::Context; const CARGO_FLAGS: &[&str] = &["--all-features", "--all-targets", "--workspace"]; #[derive(Args, Debug)] pub struct Check { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, /// Run shellcheck on all files, not just staged files. #[clap(short, long)] all: bool, + /// Run Python linter checks. + #[clap(short, long, env = "PYTHON", value_parser = BoolishValueParser::new())] + python: bool, + /// Flake8 command (either an executable or list of arguments) + #[clap(short, long, env = "FLAKE8")] + flake8: Option, } #[derive(Args, Debug)] pub struct Test { - /// Provide verbose diagnostic output. - #[clap(short, long)] - pub verbose: bool, - /// Do not print cross log messages. - #[clap(short, long)] - pub quiet: bool, - /// Coloring: auto, always, never - #[clap(long)] - pub color: Option, + /// Run Python test suite. 
+ #[clap(short, long, env = "PYTHON", value_parser = BoolishValueParser::new())] + python: bool, + /// Tox command (either an executable or list of arguments) + #[clap(short, long, env = "TOX")] + tox: Option, } +#[track_caller] fn cargo_fmt(msg_info: &mut MessageInfo, channel: Option<&str>) -> cross::Result<()> { cargo(channel) - .args(&["fmt", "--", "--check"]) + .args(["fmt", "--", "--check"]) .run(msg_info, false) - .map_err(Into::into) } +#[track_caller] fn cargo_clippy(msg_info: &mut MessageInfo, channel: Option<&str>) -> cross::Result<()> { cargo(channel) .arg("clippy") .args(CARGO_FLAGS) - .args(&["--", "--deny", "warnings"]) + .args(["--", "--deny", "warnings"]) .run(msg_info, false) - .map_err(Into::into) } +#[track_caller] fn cargo_test(msg_info: &mut MessageInfo, channel: Option<&str>) -> cross::Result<()> { cargo(channel) .arg("test") .args(CARGO_FLAGS) .run(msg_info, false) - .map_err(Into::into) } fn splitlines(string: String) -> Vec { @@ -69,7 +65,7 @@ fn splitlines(string: String) -> Vec { fn staged_files(msg_info: &mut MessageInfo) -> cross::Result> { Command::new("git") - .args(&["diff", "--cached", "--name-only", "--diff-filter=ACM"]) + .args(["diff", "--cached", "--name-only", "--diff-filter=ACM"]) .run_and_get_stdout(msg_info) .map(splitlines) } @@ -122,27 +118,109 @@ fn shellcheck(all: bool, msg_info: &mut MessageInfo) -> cross::Result<()> { Ok(()) } +fn parse_command(value: &str) -> cross::Result> { + shell_words::split(value).wrap_err_with(|| format!("could not parse command of {}", value)) +} + +fn python_dir(metadata: &cross::CargoMetadata) -> PathBuf { + metadata.workspace_root.join("docker").join("android") +} + +fn python_env(cmd: &mut Command, metadata: &cross::CargoMetadata) { + cmd.env("PYTHONDONTWRITEBYTECODE", "1"); + cmd.env( + "PYTHONPYCACHEPREFIX", + metadata.target_directory.join("__pycache__"), + ); +} + +fn python_lint(flake8: Option<&str>, msg_info: &mut MessageInfo) -> cross::Result<()> { + let metadata = cargo_metadata(msg_info)?; + let args = flake8 + .map(parse_command) + .unwrap_or_else(|| Ok(vec!["flake8".to_owned()]))?; + let mut cmd = Command::new( + args.first() + .ok_or_else(|| eyre::eyre!("empty string provided for flake8 command"))?, + ); + cmd.args(&args[1..]); + python_env(&mut cmd, &metadata); + if msg_info.is_verbose() { + cmd.arg("--verbose"); + } + cmd.current_dir(python_dir(&metadata)); + cmd.run(msg_info, false)?; + + Ok(()) +} + +fn python_test(tox: Option<&str>, msg_info: &mut MessageInfo) -> cross::Result<()> { + let metadata = cargo_metadata(msg_info)?; + let args = tox + .map(parse_command) + .unwrap_or_else(|| Ok(vec!["tox".to_owned()]))?; + let mut cmd = Command::new( + args.first() + .ok_or_else(|| eyre::eyre!("empty string provided for tox command"))?, + ); + cmd.args(&args[1..]); + cmd.args(["-e", "py3"]); + python_env(&mut cmd, &metadata); + cmd.arg("--workdir"); + cmd.arg(&metadata.target_directory); + if msg_info.is_verbose() { + cmd.arg("--verbose"); + } + cmd.current_dir(python_dir(&metadata)); + cmd.run(msg_info, false)?; + + Ok(()) +} + pub fn check( - Check { all, .. }: Check, + Check { + all, + python, + flake8, + .. 
+    }: Check,
     toolchain: Option<&str>,
     msg_info: &mut MessageInfo,
 ) -> cross::Result<()> {
-    msg_info.info("Running rustfmt, clippy, and shellcheck checks.")?;
+    let mut checks = vec!["rustfmt", "clippy", "shellcheck"];
+    if python {
+        checks.push("python");
+    }
+    msg_info.info(format_args!("Running {} checks.", checks.join(", ")))?;
     let channel = get_channel_prefer_nightly(msg_info, toolchain)?;
-    cargo_fmt(msg_info, channel)?;
-    cargo_clippy(msg_info, channel)?;
-    shellcheck(all, msg_info)?;
+    cargo_fmt(msg_info, channel).wrap_err("fmt failed")?;
+    cargo_clippy(msg_info, channel).wrap_err("clippy failed")?;
+    shellcheck(all, msg_info).wrap_err("shellcheck failed")?;
+    if python {
+        python_lint(flake8.as_deref(), msg_info)?;
+    }
 
     Ok(())
 }
 
-pub fn test(toolchain: Option<&str>, msg_info: &mut MessageInfo) -> cross::Result<()> {
-    msg_info.info("Running cargo fmt and tests")?;
+pub fn test(
+    Test { python, tox, .. }: Test,
+    toolchain: Option<&str>,
+    msg_info: &mut MessageInfo,
+) -> cross::Result<()> {
+    let mut tests = vec!["rustfmt", "unit"];
+    if python {
+        tests.push("python");
+    }
+    msg_info.info(format_args!("Running {} tests.", tests.join(", ")))?;
     let channel = get_channel_prefer_nightly(msg_info, toolchain)?;
     cargo_fmt(msg_info, channel)?;
     cargo_test(msg_info, channel)?;
+    if python {
+        python_test(tox.as_deref(), msg_info)?;
+    }
 
     Ok(())
 }
diff --git a/xtask/src/install_git_hooks.rs b/xtask/src/install_git_hooks.rs
index e4d34fb3b..63d8d973e 100644
--- a/xtask/src/install_git_hooks.rs
+++ b/xtask/src/install_git_hooks.rs
@@ -3,17 +3,7 @@ use clap::Args;
 use cross::shell::MessageInfo;
 
 #[derive(Args, Debug)]
-pub struct InstallGitHooks {
-    /// Provide verbose diagnostic output.
-    #[clap(short, long)]
-    pub verbose: bool,
-    /// Do not print cross log messages.
-    #[clap(short, long)]
-    pub quiet: bool,
-    /// Coloring: auto, always, never
-    #[clap(long)]
-    pub color: Option<String>,
-}
+pub struct InstallGitHooks {}
 
 pub fn install_git_hooks(msg_info: &mut MessageInfo) -> cross::Result<()> {
     let root = project_dir(msg_info)?;
diff --git a/xtask/src/main.rs b/xtask/src/main.rs
index d2c68e211..d8bbf4f9f 100644
--- a/xtask/src/main.rs
+++ b/xtask/src/main.rs
@@ -1,7 +1,9 @@
 #![deny(missing_debug_implementations, rust_2018_idioms)]
 
 pub mod build_docker_image;
+pub mod changelog;
 pub mod ci;
+pub mod codegen;
 pub mod crosstool;
 pub mod hooks;
 pub mod install_git_hooks;
@@ -10,11 +12,13 @@ pub mod util;
 
 use ci::CiJob;
 use clap::{CommandFactory, Parser, Subcommand};
+use codegen::Codegen;
 use cross::docker;
 use cross::shell::{MessageInfo, Verbosity};
 use util::{cargo_metadata, ImageTarget};
 
 use self::build_docker_image::BuildDockerImage;
+use self::changelog::Changelog;
 use self::crosstool::ConfigureCrosstool;
 use self::hooks::{Check, Test};
 use self::install_git_hooks::InstallGitHooks;
@@ -28,6 +32,15 @@ struct Cli {
     toolchain: Option<String>,
     #[clap(subcommand)]
     command: Commands,
+    /// Provide verbose diagnostic output.
+    #[clap(short, long, global = true, action = clap::ArgAction::Count)]
+    pub verbose: u8,
+    /// Do not print cross log messages.
+    #[clap(short, long, global = true)]
+    pub quiet: bool,
+    /// Coloring: auto, always, never
+    #[clap(long, global = true)]
+    pub color: Option<String>,
 }
 
 // hidden implied parser so we can get matches without recursion.
@@ -54,6 +67,11 @@ enum Commands {
     CiJob(CiJob),
     /// Configure crosstool config files.
     ConfigureCrosstool(ConfigureCrosstool),
+    /// Changelog related commands
+    #[clap(subcommand)]
+    Changelog(Changelog),
+    /// Code generation
+    Codegen(Codegen),
 }
 
 fn is_toolchain(toolchain: &str) -> cross::Result<String> {
@@ -71,46 +89,39 @@ macro_rules! get_engine {
     }};
 }
 
-macro_rules! get_msg_info {
-    ($args:ident, $verbose:expr) => {{
-        MessageInfo::create($verbose, $args.quiet, $args.color.as_deref())
-    }};
-}
-
 pub fn main() -> cross::Result<()> {
     cross::install_panic_hook()?;
     let cli = Cli::parse();
+    let mut msg_info = MessageInfo::create(cli.verbose, cli.quiet, cli.color.as_deref())?;
     match cli.command {
         Commands::TargetInfo(args) => {
-            let mut msg_info = get_msg_info!(args, args.verbose)?;
             let engine = get_engine!(args, msg_info)?;
             target_info::target_info(args, &engine, &mut msg_info)?;
         }
         Commands::BuildDockerImage(args) => {
-            let mut msg_info = get_msg_info!(args, args.verbose != 0)?;
             let engine = get_engine!(args, msg_info)?;
             build_docker_image::build_docker_image(args, &engine, &mut msg_info)?;
         }
-        Commands::InstallGitHooks(args) => {
-            let mut msg_info = get_msg_info!(args, args.verbose)?;
+        Commands::InstallGitHooks(_) => {
             install_git_hooks::install_git_hooks(&mut msg_info)?;
         }
         Commands::Check(args) => {
-            let mut msg_info = get_msg_info!(args, args.verbose)?;
             hooks::check(args, cli.toolchain.as_deref(), &mut msg_info)?;
         }
         Commands::Test(args) => {
-            let mut msg_info = get_msg_info!(args, args.verbose)?;
-            hooks::test(cli.toolchain.as_deref(), &mut msg_info)?;
+            hooks::test(args, cli.toolchain.as_deref(), &mut msg_info)?;
         }
         Commands::CiJob(args) => {
-            let metadata = cargo_metadata(&mut Verbosity::Verbose.into())?;
+            let metadata = cargo_metadata(&mut Verbosity::Verbose(2).into())?;
             ci::ci(args, metadata)?;
        }
         Commands::ConfigureCrosstool(args) => {
-            let mut msg_info = get_msg_info!(args, args.verbose)?;
             crosstool::configure_crosstool(args, &mut msg_info)?;
         }
+        Commands::Changelog(args) => {
+            changelog::changelog(args, &mut msg_info)?;
+        }
+        Commands::Codegen(args) => codegen::codegen(args)?,
     }
 
     Ok(())
diff --git a/xtask/src/target_info.rs b/xtask/src/target_info.rs
index 08bd8c51c..70faaf061 100644
--- a/xtask/src/target_info.rs
+++ b/xtask/src/target_info.rs
@@ -13,15 +13,6 @@ const TARGET_INFO_SCRIPT: &str = include_str!("target_info.sh");
 pub struct TargetInfo {
     /// If not provided, get info for all targets.
     pub targets: Vec<crate::ImageTarget>,
-    /// Provide verbose diagnostic output.
-    #[clap(short, long)]
-    pub verbose: bool,
-    /// Do not print cross log messages.
-    #[clap(short, long)]
-    pub quiet: bool,
-    /// Coloring: auto, always, never
-    #[clap(long)]
-    pub color: Option<String>,
     /// Image registry.
     #[clap(long, default_value_t = String::from("ghcr.io"))]
     pub registry: String,
@@ -48,23 +39,21 @@ fn image_info(
         pull_image(engine, image, msg_info)?;
     }
 
-    let mut command = docker::command(engine);
+    let mut command = engine.command();
     command.arg("run");
     command.arg("--rm");
-    command.args(&["-e", &format!("TARGET={}", target.triplet)]);
+    command.args(["-e", &format!("TARGET={}", target.name)]);
     if msg_info.is_verbose() {
-        command.args(&["-e", "VERBOSE=1"]);
+        command.args(["-e", "VERBOSE=1"]);
     }
     if has_test {
-        command.args(&["-e", "HAS_TEST=1"]);
+        command.args(["-e", "HAS_TEST=1"]);
     } else {
-        command.args(&["-e", "HAS_TEST="]);
+        command.args(["-e", "HAS_TEST="]);
     }
     command.arg(image);
-    command.args(&["bash", "-c", TARGET_INFO_SCRIPT]);
-    command
-        .run(msg_info, msg_info.is_verbose())
-        .map_err(Into::into)
+    command.args(["bash", "-c", TARGET_INFO_SCRIPT]);
+    command.run(msg_info, msg_info.is_verbose())
 }
 
 pub fn target_info(
diff --git a/xtask/src/target_info.sh b/xtask/src/target_info.sh
index a371612ba..1056d6743 100755
--- a/xtask/src/target_info.sh
+++ b/xtask/src/target_info.sh
@@ -161,7 +161,7 @@ case "${target}" in
     *-*-netbsd)
         cc_regex=".*gcc \(.*\) ([0-9]+.[0-9]+.[0-9]+).*"
         ;;
-    *-*-dragonfly)
+    *-*-dragonfly)
         cc_regex=".*gcc \(GCC\) ([0-9]+.[0-9]+.[0-9]+).*"
         ;;
     *-*-solaris)
@@ -288,6 +288,9 @@ case "${target}" in
             libc=$(max_glibc_version "${libdir}")
         fi
         ;;
+    riscv64gc-unknown-linux-gnu)
+        libc="$(dpkg-query --showformat='${Version}' --show libc6-riscv64-cross | cut -d- -f1)"
+        ;;
     *-*-linux-gnu*)
         toolchain_prefix="${!cc_var//-gcc/}"
         libdir="/usr/${toolchain_prefix}/lib"
diff --git a/xtask/src/util.rs b/xtask/src/util.rs
index b5884660d..04f1b8cec 100644
--- a/xtask/src/util.rs
+++ b/xtask/src/util.rs
@@ -1,3 +1,4 @@
+use std::env;
 use std::fs;
 use std::io::Write;
 use std::path::{Path, PathBuf};
@@ -5,76 +6,134 @@ use std::process::Command;
 
 use cross::shell::MessageInfo;
 use cross::{docker, CommandExt, ToUtf8};
-use once_cell::sync::OnceCell;
-use serde::Deserialize;
-
-const WORKFLOW: &str = include_str!("../../.github/workflows/ci.yml");
-
-#[derive(Debug, PartialEq, Eq, Deserialize)]
-struct Workflow {
-    jobs: Jobs,
-}
-
-#[derive(Debug, PartialEq, Eq, Deserialize)]
-struct Jobs {
-    #[serde(rename = "generate-matrix")]
-    generate_matrix: GenerateMatrix,
-}
-
-#[derive(Debug, PartialEq, Eq, Deserialize)]
-struct GenerateMatrix {
-    steps: Vec<Steps>,
-}
-
-#[derive(Debug, PartialEq, Eq, Deserialize)]
-struct Steps {
-    env: Env,
-}
+use once_cell::sync::{Lazy, OnceCell};
+use serde::Deserialize;
 
-#[derive(Debug, PartialEq, Eq, Deserialize)]
-struct Env {
-    matrix: String,
+static WORKSPACE: OnceCell<PathBuf> = OnceCell::new();
+
+/// Returns the cargo workspace for the manifest
+pub fn get_cargo_workspace() -> &'static Path {
+    let manifest_dir = env!("CARGO_MANIFEST_DIR");
+    WORKSPACE.get_or_init(|| {
+        cross::cargo_metadata_with_args(
+            Some(manifest_dir.as_ref()),
+            None,
+            &mut MessageInfo::create(2, false, None).expect("should not fail"),
+        )
+        .unwrap()
+        .unwrap()
+        .workspace_root
+    })
 }
 
-#[derive(Debug, PartialEq, Eq, Deserialize)]
-pub struct Matrix {
+#[derive(Debug, Clone, PartialEq, Eq, Deserialize)]
+#[serde(deny_unknown_fields, rename_all = "kebab-case")]
+pub struct CiTarget {
+    /// The name of the target. This can either be a target triple, or if the image is "special", the name of the special thing it does.
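+    /// (for example `aarch64-unknown-linux-gnu`, or `cross` for the image that needs the workspace root)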
     pub target: String,
-    pub sub: Option<String>,
     #[serde(default)]
-    pub run: i64,
+    pub special: bool,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub sub: Option<String>,
+    /// The runner to use in CI, see https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#choosing-github-hosted-runners
+    ///
+    /// if this is not equal to `ubuntu-latest`, no docker image will be built unless it's been special cased.
     pub os: String,
+    /// if `true` test more extensive cargo support, including tests and running binaries
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub run: Option<bool>,
+    /// if `true` publish the generated binaries for cross
+    #[serde(default)]
+    pub deploy: Option<bool>,
+    /// the platform to build this image for, defaults to `["linux/amd64"]`, takes multiple
+    #[serde(skip_serializing_if = "Option::is_none")]
+    platforms: Option<Vec<String>>,
+    /// if `true` signal that this target requires `-Zbuild-std`
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub build_std: Option<bool>,
+    /// test the cpp compiler
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub cpp: Option<bool>,
+    /// test dylib support, requires `run = true`
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub dylib: Option<bool>,
+    /// qemu runners that can be used with this target, space separated.
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub runners: Option<String>,
+    /// if `true` test no std support as if std does exists. If `false` build https://github.com/rust-lang/compiler-builtins
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub std: Option<bool>,
+    #[serde(skip_serializing_if = "is_false", default)]
+    pub disabled: bool,
+}
+
+pub fn is_false(b: &bool) -> bool {
+    !*b
 }
 
-impl Matrix {
+impl CiTarget {
     pub fn has_test(&self, target: &str) -> bool {
         // bare-metal targets don't have unittests right now
-        self.run != 0 && !target.contains("-none-")
+        self.run.unwrap_or_default() && !target.contains("-none-")
     }
 
     pub fn to_image_target(&self) -> crate::ImageTarget {
         crate::ImageTarget {
-            triplet: self.target.clone(),
+            name: self.target.clone(),
             sub: self.sub.clone(),
         }
     }
 
-    fn builds_image(&self) -> bool {
+    pub fn builds_image(&self) -> bool {
         self.os == "ubuntu-latest"
     }
-}
 
-static MATRIX: OnceCell<Vec<Matrix>> = OnceCell::new();
+    pub fn platforms(&self) -> &[String] {
+        self.platforms.as_ref().unwrap_or(&DEFAULT_PLATFORMS_STRING)
+    }
+}
 
-pub fn get_matrix() -> &'static Vec<Matrix> {
+/// Default platforms to build images with
+///
+/// if this is changed, make sure to update documentation on [CiTarget::platforms]
+pub static DEFAULT_PLATFORMS: &[cross::docker::ImagePlatform] =
+    &[cross::docker::ImagePlatform::DEFAULT];
+
+pub static DEFAULT_PLATFORMS_STRING: Lazy<Vec<String>> = Lazy::new(|| {
+    DEFAULT_PLATFORMS
+        .iter()
+        .map(|p| p.target.to_string())
+        .collect()
+});
+
+static MATRIX: OnceCell<Vec<CiTarget>> = OnceCell::new();
+
+pub fn get_matrix() -> &'static Vec<CiTarget> {
+    #[derive(Deserialize)]
+    struct Targets {
+        target: Vec<CiTarget>,
+    }
     MATRIX
         .get_or_try_init::<_, eyre::Report>(|| {
-            let workflow: Workflow = serde_yaml::from_str(WORKFLOW)?;
-            let matrix = &workflow.jobs.generate_matrix.steps[0].env.matrix;
-            serde_yaml::from_str(matrix).map_err(Into::into)
+            let targets: Targets = toml::from_str(std::str::from_utf8(&std::fs::read(
+                get_cargo_workspace().join("targets.toml"),
+            )?)?)?;
+            Ok(targets.target)
         })
         .unwrap()
 }
 
+pub fn with_section_reports(
+    origin: eyre::Report,
+    iter: impl IntoIterator<Item = eyre::Report>,
+) -> eyre::Report {
+    use color_eyre::{Section as _, SectionExt as _};
+    iter.into_iter().fold(origin, |report, e| {
+        report.section(format!("{e:?}").header("Error:"))
+    })
+}
+
 pub fn format_repo(registry: &str, repository: &str) -> String {
     let mut output = String::new();
     if !repository.is_empty() {
@@ -92,7 +151,7 @@ pub fn pull_image(
     image: &str,
     msg_info: &mut MessageInfo,
 ) -> cross::Result<()> {
-    let mut command = docker::subcommand(engine, "pull");
+    let mut command = engine.subcommand("pull");
     command.arg(image);
     let out = command.run_and_get_output(msg_info)?;
     command.status_result(msg_info, out.status, Some(&out))?;
@@ -101,24 +160,20 @@
 
 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
 pub struct ImageTarget {
-    pub triplet: String,
+    pub name: String,
     pub sub: Option<String>,
 }
 
 impl ImageTarget {
     pub fn image_name(&self, repository: &str, tag: &str) -> String {
-        if let Some(sub) = &self.sub {
-            format!("{repository}/{}:{tag}-{sub}", self.triplet)
-        } else {
-            format!("{repository}/{}:{tag}", self.triplet)
-        }
+        cross::docker::image_name(&self.name, self.sub.as_deref(), repository, tag)
     }
 
     pub fn alt(&self) -> String {
         if let Some(sub) = &self.sub {
-            format!("{}:{sub}", self.triplet,)
+            format!("{}:{sub}", self.name)
         } else {
-            self.triplet.to_string()
+            self.name.to_string()
         }
     }
 
@@ -127,17 +182,43 @@ impl ImageTarget {
         let matrix = get_matrix();
         matrix
             .iter()
-            .any(|m| m.builds_image() && m.target == self.triplet && m.sub == self.sub)
+            .any(|m| m.builds_image() && m.target == self.name && m.sub == self.sub)
     }
 
-    /// Determine if this target uses the default test script
-    pub fn is_default_test_image(&self) -> bool {
-        self.triplet != "cross"
+    /// Determine if this target is a "normal" target for a triplet
+    pub fn is_standard_target_image(&self) -> bool {
+        let matrix = get_matrix();
+
+        !matrix
+            .iter()
+            .filter(|m| m.special)
+            .any(|m| m.target == self.name)
+            && self.has_ci_image()
+    }
+
+    // this exists solely for zig, since we also want it as a provided target.
+    /// Determine if this target has a toolchain image
+    pub fn is_toolchain_image(&self) -> bool {
+        !matches!(self.name.as_ref(), "cross") && self.has_ci_image()
     }
 
     /// Determine if this target needs to interact with the project root.
     pub fn needs_workspace_root_context(&self) -> bool {
-        self.triplet == "cross"
+        self.name == "cross"
+    }
+
+    pub fn is_armv6(&self) -> bool {
+        matches!(
+            self.name.as_str(),
+            "arm-unknown-linux-gnueabi" | "arm-unknown-linux-musleabi"
+        )
+    }
+
+    pub fn is_armv7(&self) -> bool {
+        matches!(
+            self.name.as_str(),
+            "armv7-unknown-linux-gnueabihf" | "armv7-unknown-linux-musleabihf"
+        )
     }
 }
 
@@ -145,26 +226,32 @@ impl std::str::FromStr for ImageTarget {
     type Err = std::convert::Infallible;
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if let Some((target, sub)) = s.split_once('.') {
-            Ok(ImageTarget {
-                triplet: target.to_string(),
-                sub: Some(sub.to_string()),
-            })
-        } else {
-            Ok(ImageTarget {
-                triplet: s.to_string(),
-                sub: None,
-            })
+        // we designate certain targets like `x86_64-unknown-linux-gnu.centos`,
+        // where `centos` is a subtype of `x86_64-unknown-linux-gnu`. however,
+        // LLVM triples can also contain `.` characters, such as with
+        // `thumbv8m.main-none-eabihf`, so we make sure it's only at the end.
+        if let Some((target, sub)) = s.rsplit_once('.') {
+            if sub.chars().all(|x| char::is_ascii_alphabetic(&x)) {
+                return Ok(ImageTarget {
+                    name: target.to_string(),
+                    sub: Some(sub.to_string()),
+                });
+            }
         }
+
+        Ok(ImageTarget {
+            name: s.to_string(),
+            sub: None,
+        })
     }
 }
 
 impl std::fmt::Display for ImageTarget {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         if let Some(sub) = &self.sub {
-            write!(f, "{}.{sub}", self.triplet,)
+            write!(f, "{}.{sub}", self.name)
         } else {
-            write!(f, "{}", self.triplet)
+            write!(f, "{}", self.name)
         }
     }
 }
@@ -174,7 +261,6 @@ pub fn has_nightly(msg_info: &mut MessageInfo) -> cross::Result<bool> {
         .arg("+nightly")
         .run_and_get_output(msg_info)
         .map(|o| o.status.success())
-        .map_err(Into::into)
 }
 
 pub fn get_channel_prefer_nightly<'a>(
@@ -191,9 +277,12 @@
 }
 
 pub fn cargo(channel: Option<&str>) -> Command {
-    let mut command = cross::cargo_command();
+    let mut command;
     if let Some(channel) = channel {
-        command.arg(&format!("+{channel}"));
+        command = Command::new("rustup");
+        command.args(["run", channel, "cargo"]);
+    } else {
+        command = cross::cargo_command();
     }
     command
 }
@@ -207,23 +296,33 @@ pub fn project_dir(msg_info: &mut MessageInfo) -> cross::Result<PathBuf> {
     Ok(cargo_metadata(msg_info)?.workspace_root)
 }
 
+macro_rules! gha_output {
+    ($fmt:literal$(, $args:expr)* $(,)?) => {
+        #[cfg(not(test))]
+        println!($fmt $(, $args)*);
+        #[cfg(test)]
+        eprintln!($fmt $(,$args)*);
+    };
+}
+
 // note: for GHA actions we need to output these tags no matter the verbosity level
 pub fn gha_print(content: &str) {
-    println!("{}", content)
+    gha_output!("{}", content);
 }
 
 // note: for GHA actions we need to output these tags no matter the verbosity level
 pub fn gha_error(content: &str) {
-    println!("::error {}", content)
+    gha_output!("::error {}", content);
 }
 
 #[track_caller]
-pub fn gha_output(tag: &str, content: &str) {
+pub fn gha_output(tag: &str, content: &str) -> cross::Result<()> {
     if content.contains('\n') {
         // https://github.com/actions/toolkit/issues/403
-        panic!("output `{tag}` contains newlines, consider serializing with json and deserializing in gha with fromJSON()")
+        eyre::bail!("output `{tag}` contains newlines, consider serializing with json and deserializing in gha with fromJSON()");
     }
-    println!("::set-output name={tag}::{}", content)
+    write_to_gha_env_file("GITHUB_OUTPUT", &format!("{tag}={content}"))?;
+    Ok(())
 }
 
 pub fn read_dockerfiles(msg_info: &mut MessageInfo) -> cross::Result<Vec<(PathBuf, String)>> {
@@ -243,6 +342,33 @@
 }
 
+pub fn write_to_string(path: &Path, contents: &str) -> cross::Result<()> {
+    let mut file = fs::OpenOptions::new()
+        .write(true)
+        .truncate(true)
+        .create(true)
+        .open(path)?;
+    writeln!(file, "{}", contents)?;
+    Ok(())
+}
+
+// https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#environment-files
+pub fn write_to_gha_env_file(env_name: &str, contents: &str) -> cross::Result<()> {
+    eprintln!("{contents}");
+    let path = if let Ok(path) = env::var(env_name) {
+        PathBuf::from(path)
+    } else {
+        eyre::ensure!(
+            env::var("GITHUB_ACTIONS").is_err(),
+            "expected GHA envfile to exist"
+        );
+        return Ok(());
+    };
+    let mut file = fs::OpenOptions::new().append(true).open(path)?;
+    writeln!(file, "{}", contents)?;
+    Ok(())
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
@@ -250,11 +376,45 @@ mod tests {
     use cross::shell::Verbosity;
     use std::collections::BTreeMap;
 
+    #[test]
+    fn test_parse_image_target() {
+        assert_eq!(
+            ImageTarget {
"x86_64-unknown-linux-gnu".to_owned(), + sub: None, + }, + "x86_64-unknown-linux-gnu".parse().unwrap() + ); + assert_eq!( + ImageTarget { + name: "x86_64-unknown-linux-gnu".to_owned(), + sub: Some("centos".to_owned()), + }, + "x86_64-unknown-linux-gnu.centos".parse().unwrap() + ); + assert_eq!( + ImageTarget { + name: "thumbv8m.main-none-eabihf".to_owned(), + sub: None, + }, + "thumbv8m.main-none-eabihf".parse().unwrap() + ); + assert_eq!( + ImageTarget { + name: "thumbv8m.main-unknown-linux-gnueabihf".to_owned(), + sub: Some("alpine".to_owned()), + }, + "thumbv8m.main-unknown-linux-gnueabihf.alpine" + .parse() + .unwrap() + ); + } + #[test] fn check_ubuntu_base() -> cross::Result<()> { // count all the entries of FROM for our images let mut counts = BTreeMap::new(); - let mut msg_info = Verbosity::Verbose.into(); + let mut msg_info = Verbosity::Verbose(2).into(); let dockerfiles = read_dockerfiles(&mut msg_info)?; for (path, dockerfile) in dockerfiles { let lines: Vec<&str> = dockerfile.lines().collect(); @@ -289,13 +449,3 @@ mod tests { } } } - -pub fn write_to_string(path: &Path, contents: &str) -> cross::Result<()> { - let mut file = fs::OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(path)?; - writeln!(file, "{}", contents)?; - Ok(()) -}