This repository has been archived by the owner on Oct 22, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 55
/
Jenkinsfile
664 lines (601 loc) · 31.4 KB
/
Jenkinsfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
/*
 Declarative Jenkins pipeline for PMEM-CSI: builds the builder image and the
 driver images, runs unit tests and the documentation build, then runs the
 E2E suite in parallel against several Kubernetes versions (via TestInVM),
 and finally publishes images, merged coverage data, and - for release and
 "devel" builds - git updates.
*/
pipeline {
    options {
        // Prefix every line of console output with a timestamp.
        timestamps()
    }
    agent {
        label "pmem-csi"
    }
    environment {
        /*
        Change this into "true" to enable capturing the journalctl
        output of the build host and each VM, either by editing the
        Jenkinsfile in a PR or by logging into Jenkins and editing
        the pipeline before running it again.
        */
        LOGGING_JOURNALCTL = "false"
        /*
        Delay in seconds between dumping system statistics.
        "infinity" effectively disables the periodic sampling.
        */
        LOGGING_SAMPLING_DELAY = "infinity"
        /*
        Pod names in the kube-system namespace for which
        log output is to be captured. Empty by default,
        valid values:
        etcd kube-apiserver kube-controller-manager kube-scheduler
        */
        LOGGING_PODS = " " // the space is intentional, otherwise ${env.LOGGING_PODS} expands to null below
        /*
        For each major Kubernetes release we need one version of Clear Linux
        which had that release. Installing different Kubernetes releases
        on the latest Clear Linux is not supported because we always
        use the Clear Linux kubelet, and a more recent kubelet than
        the control plane is unsupported.
        */
        CLEAR_LINUX_VERSION_1_17 = "32690" // current release at the moment
        CLEAR_LINUX_VERSION_1_16 = "31760"
        CLEAR_LINUX_VERSION_1_15 = "31070"
        REGISTRY_NAME = "cloud-native-image-registry.westus.cloudapp.azure.com"
        // Per-branch build environment, marked as "do not promote to public registry".
        // Set below via a script, must *not* be set here as it can't be overwritten.
        // BUILD_IMAGE = ""
        // A running container based on BUILD_IMAGE, with volumes for everything that we
        // need from the build host.
        BUILD_CONTAINER = "builder"
        // Tag or branch name that is getting built, depending on the job.
        // Set below via a script, must *not* be set here as it can't be overwritten.
        // BUILD_TARGET = ""
        // CACHEBUST is passed when building images to ensure that the base layer gets
        // updated when building releases.
        // CACHEBUST = ""
        // This image is pulled at the beginning and used as cache.
        // TODO: Here we use "canary" which is correct for the "devel" branch, but other
        // branches may need something else to get better caching.
        PMEM_CSI_IMAGE = "${env.REGISTRY_NAME}/pmem-csi-driver:canary"
        // A file stored on a sufficiently large tmpfs for use as etcd volume
        // and its size. It has to be inside the data directory of the master node.
        CLUSTER = "govm"
        TEST_ETCD_TMPFS = "${WORKSPACE}/_work/${env.CLUSTER}/data/pmem-csi-${env.CLUSTER}-master/etcd-tmpfs"
        TEST_ETCD_VOLUME = "${env.TEST_ETCD_TMPFS}/etcd-volume"
        TEST_ETCD_VOLUME_SIZE = "1073741824" // 1GB
        // Tests that will get skipped when collecting coverage information.
        //
        // The operator itself installs without enabling coverage collection,
        // so running those tests doesn't help us. The relevant test is
        // "operator API".
        //
        // Testing with OLM doesn't add much additional coverage.
        //
        // NOTE(review): the first element of this value looks mangled by
        // e-mail obfuscation during extraction ("[email protected]"); the original
        // was presumably a test-name pattern (see "operator API" above) - confirm
        // against repository history before relying on it.
        COVERAGE_SKIP = "[email protected]@Top.Level..olm"
    }
    stages {
        stage('Create build environment') {
            options {
                timeout(time: 60, unit: "MINUTES")
            }
            steps {
                SetupHost()
                withDockerRegistry([ credentialsId: "${env.DOCKER_REGISTRY}", url: "https://${REGISTRY_NAME}" ]) {
                    script {
                        env.CACHEBUST = ""
                        // Despite its name, GIT_LOCAL_BRANCH contains the tag name when building a tag.
                        // At some point it also contained the branch name when building
                        // a branch, but not anymore, therefore we fall back to BRANCH_NAME
                        // if unset. Even that isn't set in non-multibranch jobs
                        // (https://issues.jenkins-ci.org/browse/JENKINS-47226), but at least
                        // then we have GIT_BRANCH.
                        if (env.GIT_LOCAL_BRANCH != null) {
                            env.BUILD_TARGET = env.GIT_LOCAL_BRANCH
                            env.CACHEBUST = env.GIT_LOCAL_BRANCH
                        } else if ( env.BRANCH_NAME != null ) {
                            env.BUILD_TARGET = env.BRANCH_NAME
                        } else {
                            env.BUILD_TARGET = env.GIT_BRANCH - 'origin/' // Strip prefix.
                        }
                        if (env.CHANGE_ID != null) {
                            // PR build: base the builder image on the PR's target branch.
                            env.BUILD_IMAGE = "${env.REGISTRY_NAME}/pmem-clearlinux-builder:${env.CHANGE_TARGET}-rejected"
                            // Pull previous image and use it as cache (https://andrewlock.net/caching-docker-layers-on-serverless-build-hosts-with-multi-stage-builds---target,-and---cache-from/).
                            sh ( script: "docker image pull ${env.BUILD_IMAGE} || true")
                            sh ( script: "docker image pull ${env.PMEM_CSI_IMAGE} || true")
                        } else {
                            env.BUILD_IMAGE = "${env.REGISTRY_NAME}/pmem-clearlinux-builder:${env.BRANCH_NAME}-rejected"
                        }
                    }
                    sh "env; echo Building BUILD_IMAGE=${env.BUILD_IMAGE} for BUILD_TARGET=${env.BUILD_TARGET}, CHANGE_ID=${env.CHANGE_ID}, CACHEBUST=${env.CACHEBUST}."
                    sh "docker build --cache-from ${env.BUILD_IMAGE} --label cachebust=${env.CACHEBUST} --target build --build-arg CACHEBUST=${env.CACHEBUST} -t ${env.BUILD_IMAGE} ."
                    PrepareEnv()
                }
            }
        }
        stage('docsite') {
            steps {
                // Build the documentation site and publish it as a Jenkins HTML report.
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} env GITHUB_SHA=${GIT_COMMIT} GITHUB_REPOSITORY=${SourceRepo()} make vhtml"
                publishHTML([allowMissing: false, alwaysLinkToLastBuild: false, keepAll: false, reportDir: '_output/html', reportFiles: 'index.html', reportName: 'Doc Site', reportTitles: ''])
            }
        }
        stage('make test') {
            options {
                timeout(time: 40, unit: "MINUTES")
            }
            steps {
                // Unit tests inside the build container.
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} make test"
            }
        }
        stage('Build test image') {
            options {
                timeout(time: 60, unit: "MINUTES")
            }
            steps {
                // This builds images for REGISTRY_NAME with the version automatically determined by
                // the make rules.
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} make build-images CACHEBUST=${env.CACHEBUST}"
                // For testing we have to have those same images also in a registry. Tag and push for
                // localhost, which is the default test registry.
                sh "imageversion=\$(${RunInBuilder()} ${env.BUILD_CONTAINER} make print-image-version) && \
                    for suffix in '' '-test'; do \
                        docker tag ${env.REGISTRY_NAME}/pmem-csi-driver\$suffix:\$imageversion localhost:5000/pmem-csi-driver\$suffix:\$imageversion && \
                        docker push localhost:5000/pmem-csi-driver\$suffix:\$imageversion; \
                    done"
            }
        }
        // In order to enable running on additional Jenkins workers in parallel, we
        // need to save and stash the images, then (if needed for a new worker) restore
        // the build environment.
        //
        // lz4 is used because compression with gzip slowed down creating the archive too much.
        //
        // Alternatively, we could transmit images through the shared registry, but then would
        // need to solve assigning a per-job tag and garbage collection of those images.
        stage('Stash images') {
            steps {
                sh "imageversion=\$(${RunInBuilder()} ${env.BUILD_CONTAINER} make print-image-version) && \
                    docker save localhost:5000/pmem-csi-driver:\$imageversion \
                        localhost:5000/pmem-csi-driver-test:\$imageversion \
                        ${env.BUILD_IMAGE} | \
                        lz4 > _work/images.tar.lz4 && \
                    ls -l -h _work/images.tar.lz4"
                stash includes: '_work/images.tar.lz4', name: 'images'
            }
        }
        // Some stages are skipped entirely when testing PRs, the
        // others skip certain tests in that case:
        // - production deployment is tested on the oldest supported Kubernetes
        //   (less tests, runs faster)
        // - testing deployment is tested on the newest supported Kubernetes
        //   (more tests, runs longer, thus gets to use the existing worker)
        stage('Testing') {
            parallel {
                stage('1.25') {
                    steps {
                        // Skip production, i.e. run testing.
                        TestInVM("", "", "fedora", "", "1.25", "Top.Level..[[:alpha:]]*-production[[:space:]]", "")
                    }
                }
                // When adding or removing coverage workers, update the "Code Coverage" step below!
                stage('coverage-1.25') {
                    when {
                        beforeAgent true
                        not { changeRequest() }
                    }
                    agent {
                        label "pmem-csi"
                    }
                    steps {
                        TestInVM("fedora-coverage-1.25", "coverage-", "fedora", "", "1.25", "", "${env.COVERAGE_SKIP}")
                    }
                }
                // All others set up their own worker.
                stage('1.24') {
                    when {
                        beforeAgent true
                        not { changeRequest() }
                    }
                    agent {
                        label "pmem-csi"
                    }
                    steps {
                        TestInVM("fedora-1.24", "", "fedora", "", "1.24", "", "")
                    }
                }
                stage('1.23') {
                    when {
                        beforeAgent true
                        not { changeRequest() }
                    }
                    agent {
                        label "pmem-csi"
                    }
                    steps {
                        TestInVM("fedora-1.23", "", "fedora", "", "1.23", "", "")
                    }
                }
                stage('1.22') {
                    when {
                        beforeAgent true
                        not { changeRequest() }
                    }
                    agent {
                        label "pmem-csi"
                    }
                    steps {
                        TestInVM("fedora-1.22", "", "fedora", "", "1.22", "", "")
                    }
                }
                stage('1.21') {
                    agent {
                        label "pmem-csi"
                    }
                    steps {
                        // Skip testing, i.e. run production.
                        TestInVM("fedora-1.21", "", "fedora", "", "1.21", "Top.Level..[[:alpha:]]*-testing[[:space:]]", "")
                    }
                }
                stage('coverage-1.21') {
                    when {
                        beforeAgent true
                        not { changeRequest() }
                    }
                    agent {
                        label "pmem-csi"
                    }
                    steps {
                        TestInVM("fedora-coverage-1.21", "coverage-", "fedora", "", "1.21", "", "${env.COVERAGE_SKIP}")
                    }
                }
            }
        }
        // This doesn't do anything. It's just serves as a reminder that "unstable"
        // test steps are not the same as "successful". We had those for a while when
        // accidentally ignoring the "make test_e2e" return code.
        stage('Testing succeeded') {
            steps {
                echo "Testing succeeded."
            }
        }
        stage('Push new release') {
            when {
                environment name: 'JOB_BASE_NAME', value: 'pmem-csi-release'
            }
            steps{
                sshagent(['9b2359bb-540b-4df3-a4b7-d304a426b2db']) {
                    // We build a branch, but have it checked out by commit (detached head).
                    // Therefore we have to specify the branch name explicitly when pushing.
                    sh "git push origin --follow-tags HEAD:${env.BUILD_TARGET}"
                }
            }
        }
        stage('Update master branch') {
            // This stage runs each time "devel" is rebuilt after a merge.
            when {
                environment name: 'BUILD_TARGET', value: 'devel'
                environment name: 'JOB_NAME', value: 'pmem-csi/devel'
            }
            steps{
                sshagent(['9b2359bb-540b-4df3-a4b7-d304a426b2db']) {
                    // All tests have passed on the "devel" branch, we can now fast-forward "master" to it.
                    sh '''
                        head=$(git rev-parse HEAD) &&
                        git diff &&
                        git reset --hard &&
                        git fetch origin master &&
                        git checkout FETCH_HEAD &&
                        git merge --ff-only $head &&
                        git push origin HEAD:master
                    '''
                }
            }
        }
        // Pushing images uses the DOCKER_CONFIG set up inside the build container earlier.
        stage('Push images') {
            when {
                not { changeRequest() }
                not { environment name: 'JOB_BASE_NAME', value: 'pmem-csi-release' } // New release will be built and pushed normally.
            }
            steps {
                // Push PMEM-CSI images without rebuilding them.
                //
                // When building a tag, we expect the code to contain that version as image version.
                // When building a branch, we expect "canary" for the "devel" branch and (currently) don't publish
                // canary images for other branches.
                sh "imageversion=\$(${RunInBuilder()} ${env.BUILD_CONTAINER} make print-image-version) && \
                    expectedversion=\$(echo '${env.BUILD_TARGET}' | sed -e 's/devel/canary/') && \
                    if [ \"\$imageversion\" = \"\$expectedversion\" ] ; then \
                        ${RunInBuilder()} ${env.BUILD_CONTAINER} make push-images CACHEBUST=${env.CACHEBUST} PUSH_IMAGE_DEP=; \
                    else \
                        echo \"Skipping the pushing of PMEM-CSI driver images with version \$imageversion because this build is for ${env.BUILD_TARGET}.\"; \
                    fi"
                // Also push the build image, for later reuse in PR jobs.
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} docker image push ${env.BUILD_IMAGE}"
            }
        }
        // Merge and publish coverage data.
        stage('Code Coverage') {
            when {
                not { changeRequest() }
            }
            steps {
                // Restore <cluster>-coverage.out files.
                // The stash names must match what TestInVM stashes for the
                // coverage workers above ("<kubernetesVersion>-coverage").
                unstash '1.25-coverage'
                unstash '1.21-coverage'
                // Merge and convert to Cobertura XML.
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} make _work/gocovmerge _work/gocover-cobertura"
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} _work/gocovmerge *-coverage.out >coverage.out"
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} go tool cover -func coverage.out"
                sh "${RunInBuilder()} ${env.BUILD_CONTAINER} _work/gocover-cobertura <coverage.out >coverage.xml"
                // Simplify relative paths ("github.com/intel/pmem-csi/...").
                // To view source code (https://stackoverflow.com/a/59951809):
                // - Jenkins users must be logged in.
                // - A job must complete successfully.
                sh "sed -i -e 's;filename=\"github.com/intel/pmem-csi/;filename=\";g' coverage.xml"
                // The relationship between "Code Coverage API Plugin" and "Cobertura" plugin is not clear
                // (https://stackoverflow.com/questions/71133394/what-is-the-relationship-between-the-jenkins-cobertura-and-code-coverage-api).
                //
                // With just "Code Coverage API Plugin" installed, this here works, but doesn't show source code
                // (old UI?):
                //    publishCoverage adapters: [cobertura(path: 'coverage.xml')], tag: 't'
                // When both are installed, this here works (note the different coberura parameter!)
                // and shows source code.
                publishCoverage adapters: [cobertura(coberturaReportFile: 'coverage.xml')]
                // There is also a "coberturaAdapter". That one hasn't been tested.
            }
        }
    }
}
/*
 Returns the command-line prefix for running some command inside the build
 container ("docker exec ..."), with:
 - common Makefile values (cachebust, cache populated from images if available) in environment
 - source in current directory
 - GOPATH alongside it
 - HOME above it
 - same uid as on the host, gid same as for Docker socket
 Using the same uid/gid and auxiliary groups would be nicer, but "docker exec" does not
 support --group-add.
 A function is used because a variable, even one which uses a closure with lazy evaluation,
 didn't actually result in a string with all variables replaced by the current values.
 Do not use lazy evaluation inside the function, that caused steps which use
 this function to get skipped silently?!

 Callers append the container name (usually ${env.BUILD_CONTAINER}) and the
 command to run. Note that `id -nu`, `id -u` and `stat` below are evaluated
 by the shell at the time the resulting command string is executed, not here.
*/
String RunInBuilder() {
    "\
    docker exec \
    -i \
    -e CACHEBUST=${env.CACHEBUST} \
    -e 'BUILD_ARGS=--cache-from ${env.BUILD_IMAGE} --cache-from ${env.PMEM_CSI_IMAGE}' \
    -e DOCKER_CONFIG=${WORKSPACE}/_work/docker-config \
    -e REGISTRY_NAME=${env.REGISTRY_NAME} \
    -e HOME=${WORKSPACE}/.. \
    -e GOPATH=${WORKSPACE}/../gopath \
    -e USER=`id -nu` \
    --user `id -u`:`stat --format %g /var/run/docker.sock` \
    --workdir ${WORKSPACE} \
    "
}
/*
 Returns <owner>/<repo> from which the code was built.
 Falls back to the upstream repository when not building a fork.
*/
String SourceRepo() {
    // Content of CHANGE_FORK varies, see https://issues.jenkins-ci.org/browse/JENKINS-58450.
    if (! env.CHANGE_FORK) {
        // Not a PR from a fork: the code comes from the upstream repository.
        return "github.com/intel/pmem-csi"
    }
    if (env.CHANGE_FORK.matches('.*/.*')) {
        // Already in <owner>/<repo> form.
        return env.CHANGE_FORK
    }
    // Only the owner is known; assume the repository keeps the upstream name.
    return env.CHANGE_FORK + '/pmem-csi'
}
/*
 Dump and/or change the configuration of the host on which the agent runs:
 log tool versions and hardware info, install "top" (procps) if missing,
 and prepare git for pushing via SSH (remote URL, bot identity, known_hosts).
*/
void SetupHost() {
    // NOTE(review): the bot e-mail below was mangled to a literal
    // "[email protected]" by the extraction tooling; restore the real address
    // from repository history. The git remote URL suffered the same mangling
    // and has been restored to the standard GitHub SSH form.
    sh '''
        hostname
        docker version
        git version
        free
        command -v top >/dev/null 2>&1 ||
        if command -v apt-get >/dev/null 2>&1; then
            sudo apt-get install procps
        else
            sudo dnf -y install procps
        fi
        head -n 30 /proc/cpuinfo; echo ...; tail -n 30 /proc/cpuinfo
        git remote set-url origin git@github.com:intel/pmem-csi.git
        git config user.name 'Intel Kubernetes CI/CD Bot'
        git config user.email '[email protected]'
    '''
    // known_hosts entry created and verified as described in https://serverfault.com/questions/856194/securely-add-a-host-e-g-github-to-the-ssh-known-hosts-file
    sh "mkdir -p ~/.ssh && echo 'github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==' >>~/.ssh/known_hosts && chmod -R go-rxw ~/.ssh"
}
/*
 Set up build container and Docker registry.
 Must be called after the build image is ready.
 Side effects on the host: a tmpfs mount for the etcd volume, a long-running
 "builder" container, and a local registry container listening on port 5000.
*/
void PrepareEnv() {
    // Create a tmpfs for use as backing store for a large file that will be passed
    // into QEMU for storing the etcd database.
    sh """
        mkdir -p '${env.TEST_ETCD_TMPFS}'
        sudo mount -osize=${env.TEST_ETCD_VOLUME_SIZE} -t tmpfs none '${env.TEST_ETCD_TMPFS}'
        sudo truncate --size=${env.TEST_ETCD_VOLUME_SIZE} '${env.TEST_ETCD_VOLUME}'
    """
    // Create a running container (https://stackoverflow.com/a/38308399). We keep it running
    // and just "docker exec" commands in it. withDockerRegistry creates the DOCKER_CONFIG directory
    // and deletes it when done, so we have to make a copy for later use inside the container.
    withDockerRegistry([ credentialsId: "${env.DOCKER_REGISTRY}", url: "https://${REGISTRY_NAME}" ]) {
        sh "mkdir -p _work"
        sh "cp -a $DOCKER_CONFIG _work/docker-config"
        // The host's Docker socket and binary are shared into the container so
        // that builds inside it use the host's Docker daemon.
        sh "docker create --name=${env.BUILD_CONTAINER} \
            --volume /var/run/docker.sock:/var/run/docker.sock \
            --volume /usr/bin/docker:/usr/bin/docker \
            --volume ${WORKSPACE}/..:${WORKSPACE}/.. \
            ${env.BUILD_IMAGE} \
            sleep infinity"
    }
    // Start the container and poll (up to ~60s) until Docker reports it running.
    sh "docker start ${env.BUILD_CONTAINER} && \
        timeout=0; \
        while [ \$(docker inspect --format '{{.State.Status}}' ${env.BUILD_CONTAINER}) != running ]; do \
            docker ps; \
            if [ \$timeout -ge 60 ]; then \
                docker inspect ${env.BUILD_CONTAINER}; \
                echo 'ERROR: ${env.BUILD_CONTAINER} container still not running'; \
                exit 1; \
            fi; \
            sleep 10; \
            timeout=\$((timeout + 10)); \
        done"
    // Make /usr/local/bin writable for all users. Used to install kubectl.
    sh "docker exec ${env.BUILD_CONTAINER} sh -c 'mkdir -p /usr/local/bin && chmod a+wx /usr/local/bin'"
    // Some tools expect a user entry for the jenkins user (like govm?)
    sh "echo jenkins:x:`id -u`:0:Jenkins:${WORKSPACE}/..:/bin/bash | docker exec -i ${env.BUILD_CONTAINER} tee --append /etc/passwd >/dev/null"
    sh "echo 'jenkins:*:0:0:99999:0:::' | docker exec -i ${env.BUILD_CONTAINER} tee --append /etc/shadow >/dev/null"
    // Verify that docker works in the updated image.
    sh "${RunInBuilder()} ${env.BUILD_CONTAINER} docker ps"
    // Run a per-test registry on the build host. This is where we
    // will push images for use by the cluster during testing.
    sh "docker run -d -p 5000:5000 --restart=always --name registry registry:2"
}
/*
 Must be called on additional agents to replicate the environment on the main agent:
 host setup, image restore from the 'images' stash, build container + registry
 creation (PrepareEnv), and pushing the test images into the local registry.
*/
void RestoreEnv() {
    SetupHost()
    // Get images, ready for use and/or pushing to localhost:5000.
    unstash 'images'
    sh 'lz4cat _work/images.tar.lz4 | docker load'
    // Set up build container and registry.
    PrepareEnv()
    // Now populate the registry like we did on the master node.
    sh "imageversion=\$(${RunInBuilder()} ${env.BUILD_CONTAINER} make print-image-version) && \
        for suffix in '' '-test'; do \
            docker push localhost:5000/pmem-csi-driver\$suffix:\$imageversion; \
        done"
}
/*
 Brings up a QEMU-based test cluster and runs "make test_e2e" in it.
 Parameters (all strings, empty string disables the corresponding behavior):
 - worker: non-empty when running on a fresh agent; triggers RestoreEnv() first
 - coverage: non-empty ("coverage-") enables coverage instrumentation and
   stashes the coverage data as "<kubernetesVersion>-coverage" at the end
 - distro, distroVersion, kubernetesVersion: exported as TEST_DISTRO,
   TEST_DISTRO_VERSION, TEST_KUBERNETES_VERSION for the cluster scripts
 - skipIfPR: regex of tests to skip only when building a PR (CHANGE_ID set)
 - skipAlways: regex of tests to always skip
 Always archives job logs and junit results, even when tests fail.
*/
void TestInVM(worker, coverage, distro, distroVersion, kubernetesVersion, skipIfPR, skipAlways) {
    if (worker) {
        RestoreEnv()
    }
    if (coverage) {
        // Regenerate deployment files with coverage instrumentation enabled.
        sh "${RunInBuilder()} -e CLUSTER=${env.CLUSTER} ${env.BUILD_CONTAINER} make kustomize KUSTOMIZE_WITH_COVERAGE=true"
    }
    try { timeout(unit: "HOURS", time: TestTimeoutHours()) {
        /*
        We have to run "make start" in the current directory
        because the QEMU instances that it starts under Docker
        run outside of the container and thus paths used inside
        the container have to be the same as outside.
        For "make test_e2e" we then have to switch into the
        GOPATH. Once we can build outside of the GOPATH, we can
        simplify that to build inside one directory.
        This spawns some long running processes. Those do not killed when the
        main process returns when using "docker exec", so we should better clean
        up ourselves. "make stop" was hanging and waiting for these processes to
        exit even though there were from a different "docker exec" invocation.
        The default QEMU cpu enables nested virtualization with "-cpu host".
        However, that fails on some Azure machines:
        `qemu-system-x86_64: error: failed to set MSR 0x48b to 0x1582e00000000`,
        https://www.mail-archive.com/qemu-devel@nongnu.org/msg665051.html,
        so for now we disable VMX with -vmx.
        */
        sh "#!/bin/bash\n \
            echo Note: job output is filtered, see joblog-${BUILD_TAG}-test-${coverage}${kubernetesVersion}.log artifact for full output. && \
            set -o pipefail && \
            ( \
            loggers=; \
            atexit () { set -x; kill \$loggers ||true; killall sleep ||true; }; \
            trap atexit EXIT; \
            mkdir -p build/reports && \
            if ${env.LOGGING_JOURNALCTL}; then sudo journalctl -f; fi & \
            ( set +x; while sleep ${env.LOGGING_SAMPLING_DELAY}; do top -i -b -n 1 -w 120; df -h; done ) & \
            loggers=\"\$loggers \$!\" && \
            ${RunInBuilder()} \
            -e CLUSTER=${env.CLUSTER} \
            -e TEST_LOCAL_REGISTRY=\$(ip addr show dev docker0 | grep ' inet ' | sed -e 's/.* inet //' -e 's;/.*;;'):5000 \
            -e TEST_CHECK_SIGNED_FILES=false \
            -e TEST_CHECK_KVM=false \
            -e TEST_QEMU_CPU=host,-vmx \
            -e TEST_DISTRO=${distro} \
            -e TEST_DISTRO_VERSION=${distroVersion} \
            -e TEST_KUBERNETES_VERSION=${kubernetesVersion} \
            -e TEST_ETCD_VOLUME=${env.TEST_ETCD_VOLUME} \
            ${env.BUILD_CONTAINER} \
            bash -c 'set -x; \
                loggers=; \
                atexit () { set -x; kill \$loggers ||true; }; \
                trap atexit EXIT; \
                make stop && \
                make start && \
                _work/${env.CLUSTER}/ssh.0 kubectl get pods --all-namespaces -o wide && \
                for pod in ${env.LOGGING_PODS}; do \
                    _work/${env.CLUSTER}/ssh.0 kubectl logs -f -n kube-system \$pod-pmem-csi-${env.CLUSTER}-master | sed -e \"s/^/\$pod: /\" & \
                    loggers=\"\$loggers \$!\"; \
                done && \
                _work/${env.CLUSTER}/ssh.0 tar -C / -cf - usr/bin/kubectl | tar -C /usr/local/bin --strip-components=2 -xf - && \
                for ssh in \$(ls _work/${env.CLUSTER}/ssh.[0-9]); do \
                    hostname=\$(\$ssh hostname) && \
                    if ${env.LOGGING_JOURNALCTL}; then \
                        ( set +x; while true; do \$ssh journalctl -f; done ) & \
                        loggers=\"\$loggers \$!\"; \
                    fi; \
                    ( set +x; \
                    while sleep ${env.LOGGING_SAMPLING_DELAY}; do \
                        \$ssh top -i -b -n 1 -w 120 2>&1; \
                    done | sed -e \"s/^/\$hostname: /\" ) & \
                    loggers=\"\$loggers \$!\"; \
                done && \
                testrun=\$(echo '${distro}-${distroVersion}-${coverage}${kubernetesVersion}' | sed -e s/--*/-/g | tr . _ ) && \
                make test_e2e TEST_E2E_REPORT_DIR=${WORKSPACE}/build/reports.tmp/\$testrun \
                    TEST_E2E_SKIP=${skipAlways}@\$(if [ \"${env.CHANGE_ID}\" ] && [ \"${env.CHANGE_ID}\" != null ]; then echo \\\\[Slow\\\\]@${skipIfPR}; fi) \
                    TEST_E2E_TIMEOUT=${TestTimeoutHours()-1}h \
                    TEST_E2E_ARGS=-ginkgo.no-color \
            ') 2>&1 | tee joblog-${BUILD_TAG}-test-${coverage}${kubernetesVersion}.log | grep --line-buffered -E -e 'checking for test|Passed|FAIL:|^ERROR' \
            "
    } } finally {
        echo "Writing cluster state and kubelet logs into files."
        sh "_work/${env.CLUSTER}/ssh.0 kubectl get nodes -o wide > joblog-${BUILD_TAG}-${kubernetesVersion}-nodestate.log"
        sh "_work/${env.CLUSTER}/ssh.0 kubectl get pods --all-namespaces -o wide > joblog-${BUILD_TAG}-${kubernetesVersion}-podstate.log"
        sh "for cmd in `ls _work/${env.CLUSTER}/ssh.*`; do suffix=`basename \$cmd | sed -e s/^ssh.//`; \$cmd sudo journalctl -u kubelet > joblog-${BUILD_TAG}-${kubernetesVersion}-kubelet.\${suffix}.log; \$cmd sudo journalctl > joblog-${BUILD_TAG}-${kubernetesVersion}-journal-\${suffix}.log; done"
        // Each test run produces junit_*.xml files with testsuite name="PMEM E2E suite".
        // To make test names unique in the Jenkins UI, we rename that test suite per run,
        // mangle the <testcase name="..." classname="..."> such that
        // Jenkins shows them group as <testrun>/[sanity|E2E]/<test case>,
        // and place files where the 'junit' step above expects them.
        //
        // Example input and output (note that "gotests" only has two words in the name, not three,
        // to prevent it from being listed under "direct-testing"):
        //
        // < <testcase name="direct-production E2E [Driver: direct-production-pmem-csi] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options" classname="PMEM E2E suite" time="0.021836673">
        // > <testcase name="[Driver: direct-production-pmem-csi] [Testpattern: Dynamic PV (ntfs)][sig-windows] provisioning should provision storage with mount options" classname="fedora-1_16.direct-production.E2E" time="0.021836673">
        //
        // < <testcase name="direct-testing-gotests ./pkg/pmem-csi-driver" classname="PMEM E2E suite" time="69.389477842"></testcase>
        // > <testcase name="./pkg/pmem-csi-driver" classname="fedora-1_16.direct-production-gotests" time="69.389477842"></testcase>
        sh '''set -x
            for i in build/reports.tmp/*/*.xml; do
                if [ -f $i ]; then
                    testrun=$(basename $(dirname $i))
                    sed -e "s/PMEM E2E suite/$testrun/" \
                        -e 's/testcase name="\\([^ ]*\\) \\([^ ]*\\) \\(..*\\)" classname="\\([^"]*\\)"/testcase classname="\\4.\\1.\\2" name="\\3"/' \
                        -e 's/testcase name="\\([^ ]*\\) \\(..*\\)" classname="\\([^"]*\\)"/testcase classname="\\3.\\1" name="\\2"/' \
                        $i >build/reports/$testrun.xml
                    diff $i build/reports/$testrun.xml || true
                fi
            done'''
        archiveArtifacts('**/joblog-*')
        junit 'build/reports/**/*.xml'
        if (coverage) {
            // https://stackoverflow.com/questions/36918370/cobertura-code-coverage-report-for-jenkins-pipeline-jobs
            // https://www.jenkins.io/doc/pipeline/steps/cobertura/
            sh "${RunInBuilder()} -e CLUSTER=${env.CLUSTER} ${env.BUILD_CONTAINER} make _work/coverage/coverage.txt"
            sh "cat _work/coverage/coverage.txt"
            // https://plugins.jenkins.io/code-coverage-api/#plugin-content-reports-combining-support
            // claims that different reports can be merged, but that didn't work in practice
            // (two "coverage reports" listed in the job UI with the same URL and unmerged data from
            // one worker). Therefore we stash the individual results and merge later.
            sh "mv _work/coverage/coverage.out ${kubernetesVersion}-coverage.out"
            stash includes: "${kubernetesVersion}-coverage.out", name: "${kubernetesVersion}-coverage"
        }
    }
}
/*
 Per-job test timeout in hours. PR builds (CHANGE_ID set to something other
 than the literal string "null") get a shorter budget than branch builds.
*/
int TestTimeoutHours() {
    boolean isPR = env.CHANGE_ID && env.CHANGE_ID != "null"
    // Timeout for PRs is lower.
    return isPR ? 10 : 20;
}