GitHub Repository: orangepi-xunlong/orangepi-build
Path: blob/next/scripts/general.sh
#!/bin/bash
#
# Copyright (c) 2015 Igor Pecovnik, igor.pecovnik@gma**.com
#
# This file is licensed under the terms of the GNU General Public
# License version 2. This program is licensed "as is" without any
# warranty of any kind, whether express or implied.


# Functions:
# cleaning
# exit_with_error
# get_package_list_hash
# create_sources_list
# clean_up_git
# waiter_local_git
# fetch_from_repo
# improved_git
# display_alert
# fingerprint_image
# distro_menu
# addtorepo
# repo-remove-old-packages
# wait_for_package_manager
# install_pkg_deb
# prepare_host_basic
# prepare_host
# webseed
# download_and_verify
# show_developer_warning
# show_checklist_variables


# cleaning <target>
#
# target: what to clean
# "make" - "make clean" for selected kernel and u-boot
# "debs" - delete output/debs for board&branch
# "ubootdebs" - delete output/debs for uboot&board&branch
# "extras" - delete output/debs/extra/$RELEASE for all architectures
# "alldebs" - delete output/debs
# "cache" - delete output/cache
# "oldcache" - remove old output/cache
# "images" - delete output/images
# "sources" - delete output/sources
#

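# Illustrative usage (not part of the original script); assumes DEB_STORAGE, BOARD,
# BRANCH and the CHOSEN_* variables are already set by the build configuration:
#   cleaning "debs"      # drop kernel/u-boot/rootfs debs for the current board and branch
#   cleaning "oldcache"  # trim cache/rootfs down to the newest ROOTFS_CACHE_MAX archives
#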
cleaning()
48
{
49
case $1 in
50
debs) # delete ${DEB_STORAGE} for current branch and family
51
if [[ -d "${DEB_STORAGE}" ]]; then
52
display_alert "Cleaning ${DEB_STORAGE} for" "$BOARD $BRANCH" "info"
53
# easier than dealing with variable expansion and escaping dashes in file names
54
find "${DEB_STORAGE}" -name "${CHOSEN_UBOOT}_*.deb" -delete
55
find "${DEB_STORAGE}" \( -name "${CHOSEN_KERNEL}_*.deb" -o \
56
-name "orangepi-*.deb" -o \
57
-name "plymouth-theme-orangepi_*.deb" -o \
58
-name "${CHOSEN_KERNEL/image/dtb}_*.deb" -o \
59
-name "${CHOSEN_KERNEL/image/headers}_*.deb" -o \
60
-name "${CHOSEN_KERNEL/image/source}_*.deb" -o \
61
-name "${CHOSEN_KERNEL/image/firmware-image}_*.deb" \) -delete
62
[[ -n $RELEASE ]] && rm -f "${DEB_STORAGE}/${RELEASE}/${CHOSEN_ROOTFS}"_*.deb
63
[[ -n $RELEASE ]] && rm -f "${DEB_STORAGE}/${RELEASE}/orangepi-desktop-${RELEASE}"_*.deb
64
fi
65
;;
66
67
ubootdebs) # delete ${DEB_STORAGE} for uboot, current branch and family
68
if [[ -d "${DEB_STORAGE}" ]]; then
69
display_alert "Cleaning ${DEB_STORAGE} for u-boot" "$BOARD $BRANCH" "info"
70
# easier than dealing with variable expansion and escaping dashes in file names
71
find "${DEB_STORAGE}" -name "${CHOSEN_UBOOT}_*.deb" -delete
72
fi
73
;;
74
75
extras) # delete ${DEB_STORAGE}/extra/$RELEASE for all architectures
76
if [[ -n $RELEASE && -d ${DEB_STORAGE}/extra/$RELEASE ]]; then
77
display_alert "Cleaning ${DEB_STORAGE}/extra for" "$RELEASE" "info"
78
rm -rf "${DEB_STORAGE}/extra/${RELEASE}"
79
fi
80
;;
81
82
alldebs) # delete output/debs
83
[[ -d "${DEB_STORAGE}" ]] && display_alert "Cleaning" "${DEB_STORAGE}" "info" && rm -rf "${DEB_STORAGE}"/*
84
;;
85
86
cache) # delete output/cache
87
[[ -d $EXTER/cache/rootfs ]] && display_alert "Cleaning" "rootfs cache (all)" "info" && find $EXTER/cache/rootfs -type f -delete
88
;;
89
90
images) # delete output/images
91
[[ -d "${DEST}"/images ]] && display_alert "Cleaning" "output/images" "info" && rm -rf "${DEST}"/images/*
92
;;
93
94
sources) # delete output/sources and output/buildpkg
95
[[ -d $EXTER/cache/sources ]] && display_alert "Cleaning" "sources" "info" && rm -rf $EXTER/cache/sources/* "${DEST}"/buildpkg/*
96
;;
97
98
oldcache) # remove old `cache/rootfs` except for the newest 8 files
99
if [[ -d $EXTER/cache/rootfs && $(ls -1 $EXTER/cache/rootfs/*.lz4 2> /dev/null | wc -l) -gt "${ROOTFS_CACHE_MAX}" ]]; then
100
display_alert "Cleaning" "rootfs cache (old)" "info"
101
(cd $EXTER/cache/rootfs; ls -t *.lz4 | sed -e "1,${ROOTFS_CACHE_MAX}d" | xargs -d '\n' rm -f)
102
# Remove signatures if they are present. We use them for internal purpose
103
(cd $EXTER/cache/rootfs; ls -t *.asc | sed -e "1,${ROOTFS_CACHE_MAX}d" | xargs -d '\n' rm -f)
104
fi
105
;;
106
esac
107
}
108
109
# exit_with_error <message> <highlight>
#
# a way to terminate the build process
# with a verbose error message
#

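# Illustrative usage (not part of the original script; $some_file is a hypothetical variable):
#   [[ ! -f $some_file ]] && exit_with_error "File not found" "$some_file"
#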
exit_with_error()
116
{
117
local _file
118
local _line=${BASH_LINENO[0]}
119
local _function=${FUNCNAME[1]}
120
local _description=$1
121
local _highlight=$2
122
_file=$(basename "${BASH_SOURCE[1]}")
123
local stacktrace="$(get_extension_hook_stracktrace "${BASH_SOURCE[*]}" "${BASH_LINENO[*]}")"
124
125
display_alert "ERROR in function $_function" "$stacktrace" "err"
126
display_alert "$_description" "$_highlight" "err"
127
display_alert "Process terminated" "" "info"
128
129
if [[ "${ERROR_DEBUG_SHELL}" == "yes" ]]; then
130
display_alert "MOUNT" "${MOUNT}" "err"
131
display_alert "SDCARD" "${SDCARD}" "err"
132
display_alert "Here's a shell." "debug it" "err"
133
bash < /dev/tty || true
134
fi
135
136
# TODO: execute run_after_build here?
137
overlayfs_wrapper "cleanup"
138
# unlock loop device access in case of starvation
139
exec {FD}>/var/lock/orangepi-debootstrap-losetup
140
flock -u "${FD}"
141
142
exit 255
143
}
144
145
# get_package_list_hash
#
# returns md5 hash for current package list and rootfs cache version

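# Illustrative usage (not part of the original script; the cache version passed as $1 is arbitrary here):
#   pkg_hash=$(get_package_list_hash "6")
#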
get_package_list_hash()
150
{
151
local package_arr exclude_arr
152
local list_content
153
read -ra package_arr <<< "${DEBOOTSTRAP_LIST} ${PACKAGE_LIST}"
154
read -ra exclude_arr <<< "${PACKAGE_LIST_EXCLUDE}"
155
( ( printf "%s\n" "${package_arr[@]}"; printf -- "-%s\n" "${exclude_arr[@]}" ) | sort -u; echo "${1}" ) \
156
| md5sum | cut -d' ' -f 1
157
}
158
159
# create_sources_list <release> <basedir>
#
# <release>: stretch|buster|bullseye|bookworm|sid|xenial|bionic|focal|hirsute|impish|jammy|noble|raspi
# <basedir>: path to root directory
#
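# Illustrative usage (not part of the original script); SDCARD is the chroot directory
# used elsewhere in the build scripts:
#   create_sources_list "bookworm" "${SDCARD}"
#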
create_sources_list()
165
{
166
local release=$1
167
local basedir=$2
168
[[ -z $basedir ]] && exit_with_error "No basedir passed to create_sources_list"
169
170
case $release in
171
stretch|buster)
172
cat <<-EOF > "${basedir}"/etc/apt/sources.list
173
deb http://${DEBIAN_MIRROR} $release main contrib non-free
174
#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free
175
176
deb http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
177
#deb-src http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
178
179
deb http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
180
#deb-src http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
181
182
deb http://${DEBIAN_SECURTY} ${release}/updates main contrib non-free
183
#deb-src http://${DEBIAN_SECURTY} ${release}/updates main contrib non-free
184
EOF
185
;;
186
187
bullseye)
188
cat <<-EOF > "${basedir}"/etc/apt/sources.list
189
deb https://${DEBIAN_MIRROR} $release main contrib non-free
190
#deb-src https://${DEBIAN_MIRROR} $release main contrib non-free
191
192
deb https://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
193
#deb-src https://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
194
195
deb https://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
196
#deb-src https://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
197
198
deb https://${DEBIAN_SECURTY} ${release}-security main contrib non-free
199
#deb-src https://${DEBIAN_SECURTY} ${release}-security main contrib non-free
200
EOF
201
;;
202
203
bookworm)
204
cat <<- EOF > "${basedir}"/etc/apt/sources.list
205
deb http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
206
#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
207
208
deb http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free non-free-firmware
209
#deb-src http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free non-free-firmware
210
211
deb http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free non-free-firmware
212
#deb-src http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free non-free-firmware
213
214
deb http://${DEBIAN_SECURTY} ${release}-security main contrib non-free non-free-firmware
215
#deb-src http://${DEBIAN_SECURTY} ${release}-security main contrib non-free non-free-firmware
216
EOF
217
;;
218
219
sid) # sid is permanent unstable development and has no such thing as updates or security
220
cat <<- EOF > "${basedir}"/etc/apt/sources.list
221
deb https://snapshot.debian.org/archive/debian-ports/20221225T084846Z unstable main
222
#deb http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
223
#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
224
225
#deb http://${DEBIAN_MIRROR} unstable main contrib non-free non-free-firmware
226
#deb-src http://${DEBIAN_MIRROR} unstable main contrib non-free non-free-firmware
227
EOF
228
;;
229
230
xenial|bionic|focal|hirsute|impish|jammy)
231
cat <<-EOF > "${basedir}"/etc/apt/sources.list
232
deb http://${UBUNTU_MIRROR} $release main restricted universe multiverse
233
#deb-src http://${UBUNTU_MIRROR} $release main restricted universe multiverse
234
235
deb http://${UBUNTU_MIRROR} ${release}-security main restricted universe multiverse
236
#deb-src http://${UBUNTU_MIRROR} ${release}-security main restricted universe multiverse
237
238
deb http://${UBUNTU_MIRROR} ${release}-updates main restricted universe multiverse
239
#deb-src http://${UBUNTU_MIRROR} ${release}-updates main restricted universe multiverse
240
241
deb http://${UBUNTU_MIRROR} ${release}-backports main restricted universe multiverse
242
#deb-src http://${UBUNTU_MIRROR} ${release}-backports main restricted universe multiverse
243
EOF
244
;;
245
246
noble)
247
distro="ubuntu"
248
# Drop debootstrap sources leftovers
249
rm -f "${basedir}/etc/apt/sources.list"
250
251
cat <<- EOF > "${basedir}/etc/apt/sources.list.d/${distro}.sources"
252
Types: deb
253
URIs: http://${UBUNTU_MIRROR}
254
Suites: ${release} ${release}-security ${release}-updates ${release}-backports
255
Components: main restricted universe multiverse
256
Signed-By: /usr/share/keyrings/ubuntu-archive-keyring.gpg
257
EOF
258
;;
259
260
raspi)
261
cat <<-EOF > "${basedir}"/etc/apt/sources.list
262
deb http://${DEBIAN_MIRROR} bullseye main contrib non-free
263
#deb-src http://${DEBIAN_MIRROR} bullseye main contrib non-free
264
265
deb http://${DEBIAN_MIRROR} bullseye-updates main contrib non-free
266
#deb-src http://${DEBIAN_MIRROR} bullseye-updates main contrib non-free
267
268
deb http://${DEBIAN_MIRROR} bullseye-backports main contrib non-free
269
#deb-src http://${DEBIAN_MIRROR} bullseye-backports main contrib non-free
270
271
deb http://${DEBIAN_SECURTY} bullseye-security main contrib non-free
272
#deb-src http://${DEBIAN_SECURTY} bullseye-security main contrib non-free
273
EOF
274
275
cat <<-EOF > "${basedir}"/etc/apt/sources.list.d/raspi.list
276
deb http://${RASPI_MIRROR} bullseye main
277
# Uncomment line below then 'apt-get update' to enable 'apt-get source'
278
#deb-src http://archive.raspberrypi.org/debian/ bullseye main
279
EOF
280
281
if [ -n "$APT_PROXY" ]; then
282
install -m 644 files/51cache "${APT_PROXY}/etc/apt/apt.conf.d/51cache"
283
sed "${basedir}/etc/apt/apt.conf.d/51cache" -i -e "s|APT_PROXY|${APT_PROXY}|"
284
else
285
rm -f "${basedir}/etc/apt/apt.conf.d/51cache"
286
fi
287
288
cat ${EXTER}/packages/raspi/stage0/00-configure-apt/files/raspberrypi.gpg.key | gpg --dearmor > "${basedir}/raspberrypi-archive-stable.gpg"
289
install -m 644 "${basedir}/raspberrypi-archive-stable.gpg" "${basedir}/etc/apt/trusted.gpg.d/"
290
;;
291
esac
292
293
# stage: add armbian repository and install key
294
#if [[ $DOWNLOAD_MIRROR == "china" ]]; then
295
# echo "deb https://mirrors.tuna.tsinghua.edu.cn/armbian $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
296
#elif [[ $DOWNLOAD_MIRROR == "bfsu" ]]; then
297
# echo "deb http://mirrors.bfsu.edu.cn/armbian $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
298
#else
299
# echo "deb http://"$([[ $BETA == yes ]] && echo "beta" || echo "apt" )".armbian.com $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
300
#fi
301
302
# replace local package server if defined. Suitable for development
303
#[[ -n $LOCAL_MIRROR ]] && echo "deb http://$LOCAL_MIRROR $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
304
305
#display_alert "Adding Armbian repository and authentication key" "/etc/apt/sources.list.d/armbian.list" "info"
306
#cp "${EXTER}"/config/armbian.key "${SDCARD}"
307
#chroot "${SDCARD}" /bin/bash -c "cat armbian.key | apt-key add - > /dev/null 2>&1"
308
#rm "${SDCARD}"/armbian.key
309
}
310
311
312
#
# This function retries Git operations to avoid failure in case the remote is borked.
# If a git command needs to contact a remote server, call it through this function.
#
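# Illustrative usage (not part of the original script); wrap any git call that touches
# the network, e.g.:
#   improved_git fetch --depth 1 origin master
#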
improved_git()
317
{
318
319
local realgit=$(command -v git)
320
local retries=3
321
local delay=10
322
local count=1
323
while [ $count -lt $retries ]; do
324
$realgit "$@"
325
if [[ $? -eq 0 || -f .git/index.lock ]]; then
326
retries=0
327
break
328
fi
329
let count=$count+1
330
sleep $delay
331
done
332
333
}
334
335
clean_up_git ()
336
{
337
local target_dir=$1
338
339
# Files that are not tracked by git and were added
340
# when the patch was applied must be removed.
341
git -C $target_dir clean -qdf
342
343
# Return the files that are tracked by git to the initial state.
344
git -C $target_dir checkout -qf HEAD
345
}
346
347
# usage: waiter_local_git arg1='value' arg2:'value'
# waiter_local_git \
# url='https://github.com/megous/linux' \
# name='megous' \
# dir='linux-mainline/5.14' \
# branch='orange-pi-5.14' \
# obj=<tag|commit> or tag:$tag ...
# An optional parameter for switching to a git object such as a tag, commit,
# or a specific branch. The object must exist in the local repository.
# This optional parameter takes precedence. If it is specified, then
# the commit state corresponding to the specified git object will be extracted
# to the working directory. Otherwise, the commit corresponding to the top of
# the branch will be extracted.
# The settings for the original kernel variables (VAR_SHALLOW_ORIGINAL=var_origin_kernel)
# must be defined in the main script before calling this function.
waiter_local_git ()
364
{
365
for arg in $@;do
366
367
case $arg in
368
url=*|https://*|git://*) eval "local url=${arg/url=/}"
369
;;
370
dir=*|/*/*/*) eval "local dir=${arg/dir=/}"
371
;;
372
*=*|*:*) eval "local ${arg/:/=}"
373
;;
374
esac
375
376
done
377
378
# Required variables cannot be empty.
379
for var in url name dir branch; do
380
[ "${var#*=}" == "" ] && exit_with_error "Error in configuration"
381
done
382
383
local reachability
384
385
# The 'offline' variable must always be set to 'true' or 'false'
386
if [ "$OFFLINE_WORK" == "yes" ]; then
387
local offline=true
388
else
389
local offline=false
390
fi
391
392
local work_dir="$(realpath ${EXTER}/cache/sources)/$dir"
393
mkdir -p $work_dir
394
cd $work_dir || exit_with_error
395
396
display_alert "Checking git sources" "$dir $url$name/$branch" "info"
397
398
if [ "$(git rev-parse --git-dir 2>/dev/null)" != ".git" ]; then
399
git init -q .
400
401
# Run in the sub shell to avoid mixing environment variables.
402
if [ -n "$VAR_SHALLOW_ORIGINAL" ]; then
403
(
404
$VAR_SHALLOW_ORIGINAL
405
406
display_alert "Add original git sources" "$dir $name/$branch" "info"
407
if [ "$(improved_git ls-remote -h $url $branch | \
408
awk -F'/' '{if (NR == 1) print $NF}')" != "$branch" ];then
409
display_alert "Bad $branch for $url in $VAR_SHALLOW_ORIGINAL"
410
exit 177
411
fi
412
413
git remote add -t $branch $name $url
414
415
# Handle an exception if the initial tag is the top of the branch
416
# As v5.16 == HEAD
417
if [ "${start_tag}.1" == "$(improved_git ls-remote -t $url ${start_tag}.1 | \
418
awk -F'/' '{ print $NF }')" ]
419
then
420
improved_git fetch --shallow-exclude=$start_tag $name
421
else
422
improved_git fetch --depth 1 $name
423
fi
424
improved_git fetch --deepen=1 $name
425
# For a shallow clone, this works quickly and saves space.
426
git gc
427
)
428
429
[ "$?" == "177" ] && exit
430
fi
431
fi
432
433
files_for_clean="$(git status -s | wc -l)"
434
if [ "$files_for_clean" != "0" ];then
435
display_alert " Cleaning .... " "$files_for_clean files"
436
clean_up_git $work_dir
437
fi
438
439
if [ "$name" != "$(git remote show | grep $name)" ];then
440
git remote add -t $branch $name $url
441
fi
442
443
if ! $offline; then
444
for t_name in $(git remote show);do
445
improved_git fetch $t_name
446
done
447
fi
448
449
# When switching, we use the concept of only "detached branch". Therefore,
450
# we extract the hash from the tag, the branch name, or from the hash itself.
451
# This serves as a check of the reachability of the extraction.
452
# We do not use variables that characterize the current state of the git,
453
# such as `HEAD` and `FETCH_HEAD`.
454
reachability=false
455
for var in obj tag commit branch;do
456
eval pval=\$$var
457
458
if [ -n "$pval" ] && [ "$pval" != *HEAD ]; then
459
case $var in
460
obj|tag|commit) obj=$pval ;;
461
branch) obj=${name}/$branch ;;
462
esac
463
464
if t_hash=$(git rev-parse $obj 2>/dev/null);then
465
reachability=true
466
break
467
else
468
display_alert "Variable $var=$obj unreachable for extraction"
469
fi
470
fi
471
done
472
473
if $reachability && [ "$t_hash" != "$(git rev-parse @ 2>/dev/null)" ];then
474
# Switch "detached branch" as hash
475
display_alert "Switch $obj = $t_hash"
476
git checkout -qf $t_hash
477
else
478
# the working directory corresponds to the target commit,
479
# nothing needs to be done
480
display_alert "Up to date"
481
fi
482
}
483
484
# fetch_from_repo <url> <directory> <ref> <ref_subdir>
# <url>: remote repository URL
# <directory>: local directory; subdir for branch/tag will be created
# <ref>:
# branch:name
# tag:name
# head(*)
# commit:hash
#
# *: Implies ref_subdir=no
#
# <ref_subdir>: "yes" to create subdirectory for tag or branch name
#
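# Illustrative usage (not part of the original script); the repository and tag below
# are examples only:
#   fetch_from_repo "https://github.com/u-boot/u-boot" "u-boot" "tag:v2022.07" "yes"
#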
fetch_from_repo()
498
{
499
local url=$1
500
local dir=$2
501
local ref=$3
502
local ref_subdir=$4
503
504
# Set GitHub mirror before anything else touches $url
505
url=${url//'https://github.com/'/$GITHUB_SOURCE'/'}
506
507
# The 'offline' variable must always be set to 'true' or 'false'
508
if [ "$OFFLINE_WORK" == "yes" ]; then
509
local offline=true
510
else
511
local offline=false
512
fi
513
514
[[ -z $ref || ( $ref != tag:* && $ref != branch:* && $ref != head && $ref != commit:* ) ]] && exit_with_error "Error in configuration"
515
local ref_type=${ref%%:*}
516
if [[ $ref_type == head ]]; then
517
local ref_name=HEAD
518
else
519
local ref_name=${ref##*:}
520
fi
521
522
display_alert "Checking git sources" "$dir $ref_name" "info"
523
524
# get default remote branch name without cloning
525
# local ref_name=$(git ls-remote --symref $url HEAD | grep -o 'refs/heads/\S*' | sed 's%refs/heads/%%')
526
# for git:// protocol comparing hashes of "git ls-remote -h $url" and "git ls-remote --symref $url HEAD" is needed
527
528
if [[ $ref_subdir == yes ]]; then
529
local workdir=$dir/$ref_name
530
else
531
local workdir=$dir
532
fi
533
534
mkdir -p "${workdir}" 2>/dev/null || \
535
exit_with_error "No path or no write permission" "${workdir}"
536
537
cd "${workdir}" || exit
538
539
# check if existing remote URL for the repo or branch does not match current one
540
# may not be supported by older git versions
541
# Check the folder as a git repository.
542
# Then the target URL matches the local URL.
543
544
if [[ "$(git rev-parse --git-dir 2>/dev/null)" == ".git" && \
545
"$url" != *"$(git remote get-url origin | sed 's/^.*@//' | sed 's/^.*\/\///' 2>/dev/null)" ]]; then
546
display_alert "Remote URL does not match, removing existing local copy"
547
rm -rf .git ./*
548
fi
549
550
if [[ "$(git rev-parse --git-dir 2>/dev/null)" != ".git" ]]; then
551
display_alert "Creating local copy"
552
git init -q .
553
git remote add origin "${url}"
554
# A freshly initialized copy must be fetched from the remote, even in offline mode
555
offline=false
556
fi
557
558
local changed=false
559
560
# when we work offline we simply return the sources to their original state
561
if ! $offline; then
562
local local_hash
563
local_hash=$(git rev-parse @ 2>/dev/null)
564
565
case $ref_type in
566
branch)
567
# TODO: grep refs/heads/$name
568
local remote_hash
569
remote_hash=$(improved_git ls-remote -h "${url}" "$ref_name" | head -1 | cut -f1)
570
[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
571
;;
572
573
tag)
574
local remote_hash
575
remote_hash=$(improved_git ls-remote -t "${url}" "$ref_name" | cut -f1)
576
if [[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]]; then
577
remote_hash=$(improved_git ls-remote -t "${url}" "$ref_name^{}" | cut -f1)
578
[[ -z $remote_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
579
fi
580
;;
581
582
head)
583
local remote_hash
584
remote_hash=$(improved_git ls-remote "${url}" HEAD | cut -f1)
585
[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
586
;;
587
588
commit)
589
[[ -z $local_hash || $local_hash == "@" ]] && changed=true
590
;;
591
esac
592
593
fi # offline
594
595
if [[ $changed == true ]]; then
596
597
# remote was updated, fetch and check out updates
598
display_alert "Fetching updates"
599
case $ref_type in
600
branch) improved_git fetch --depth 200 origin "${ref_name}" ;;
601
tag) improved_git fetch --depth 200 origin tags/"${ref_name}" ;;
602
head) improved_git fetch --depth 200 origin HEAD ;;
603
esac
604
605
# the commit ref type needs a fallback for older git servers that don't support fetching a commit id directly
606
if [[ $ref_type == commit ]]; then
607
608
improved_git fetch --depth 200 origin "${ref_name}"
609
610
# cover old type
611
if [[ $? -ne 0 ]]; then
612
613
display_alert "Commit checkout not supported on this repository. Doing full clone." "" "wrn"
614
improved_git pull
615
git checkout -fq "${ref_name}"
616
display_alert "Checkout out to" "$(git --no-pager log -2 --pretty=format:"$ad%s [%an]" | head -1)" "info"
617
618
else
619
620
display_alert "Checking out"
621
git checkout -f -q FETCH_HEAD
622
git clean -qdf
623
624
fi
625
else
626
627
display_alert "Checking out"
628
git checkout -f -q FETCH_HEAD
629
git clean -qdf
630
631
fi
632
elif [[ -n $(git status -uno --porcelain --ignore-submodules=all) ]]; then
633
# working directory is not clean
634
display_alert " Cleaning .... " "$(git status -s | wc -l) files"
635
636
# Return the files that are tracked by git to the initial state.
637
git checkout -f -q HEAD
638
639
# Files that are not tracked by git and were added
640
# when the patch was applied must be removed.
641
git clean -qdf
642
else
643
# working directory is clean, nothing to do
644
display_alert "Up to date"
645
fi
646
647
if [[ -f .gitmodules ]]; then
648
display_alert "Updating submodules" "" "ext"
649
# FML: http://stackoverflow.com/a/17692710
650
for i in $(git config -f .gitmodules --get-regexp path | awk '{ print $2 }'); do
651
cd "${workdir}" || exit
652
local surl sref
653
surl=$(git config -f .gitmodules --get "submodule.$i.url")
654
sref=$(git config -f .gitmodules --get "submodule.$i.branch")
655
if [[ -n $sref ]]; then
656
sref="branch:$sref"
657
else
658
sref="head"
659
fi
660
fetch_from_repo "$surl" "$workdir/$i" "$sref"
661
done
662
fi
663
} #############################################################################
664
665
#--------------------------------------------------------------------------------------------------------------------------------
666
# Let's have a unique way of displaying alerts
667
#--------------------------------------------------------------------------------------------------------------------------------
668
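# Illustrative usage (not part of the original script); the third argument selects the
# severity (err, wrn, ext, info or empty):
#   display_alert "Preparing" "host" "info"
#   display_alert "Publishing failed" "${release}" "err"
#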
display_alert()
669
{
670
# log function parameters to install.log
671
[[ -n "${DEST}" ]] && echo "Displaying message: $@" >> "${DEST}"/${LOG_SUBPATH}/output.log
672
673
local tmp=""
674
[[ -n $2 ]] && tmp="[\e[0;33m $2 \x1B[0m]"
675
676
case $3 in
677
err)
678
echo -e "[\e[0;31m error \x1B[0m] $1 $tmp"
679
;;
680
681
wrn)
682
echo -e "[\e[0;35m warn \x1B[0m] $1 $tmp"
683
;;
684
685
ext)
686
echo -e "[\e[0;32m o.k. \x1B[0m] \e[1;32m$1\x1B[0m $tmp"
687
;;
688
689
info)
690
echo -e "[\e[0;32m o.k. \x1B[0m] $1 $tmp"
691
;;
692
693
*)
694
echo -e "[\e[0;32m .... \x1B[0m] $1 $tmp"
695
;;
696
esac
697
}
698
699
#--------------------------------------------------------------------------------------------------------------------------------
700
# fingerprint_image <out_txt_file> [image_filename]
701
# Saving build summary to the image
702
#--------------------------------------------------------------------------------------------------------------------------------
703
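# Illustrative usage (not part of the original script; the output path and image name
# are hypothetical):
#   fingerprint_image "${DEST}/images/${version}.img.txt" "${version}"
#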
fingerprint_image()
704
{
705
cat <<-EOF > "${1}"
706
--------------------------------------------------------------------------------
707
Title: ${VENDOR} $REVISION ${BOARD^} $DISTRIBUTION $RELEASE $BRANCH
708
Kernel: Linux $VER
709
Build date: $(date +'%d.%m.%Y')
710
Maintainer: $MAINTAINER <$MAINTAINERMAIL>
711
Sources: https://github.com/orangepi-xunlong/orangepi-build
712
Support: http://www.orangepi.org/
713
EOF
714
715
if [ -n "$2" ]; then
716
cat <<-EOF >> "${1}"
717
--------------------------------------------------------------------------------
718
Partitioning configuration: $IMAGE_PARTITION_TABLE offset: $OFFSET
719
Boot partition type: ${BOOTFS_TYPE:-(none)} ${BOOTSIZE:+"(${BOOTSIZE} MB)"}
720
Root partition type: $ROOTFS_TYPE ${FIXED_IMAGE_SIZE:+"(${FIXED_IMAGE_SIZE} MB)"}
721
722
CPU configuration: $CPUMIN - $CPUMAX with $GOVERNOR
723
--------------------------------------------------------------------------------
724
Verify GPG signature:
725
gpg --verify $2.img.asc
726
727
Verify image file integrity:
728
sha256sum --check $2.img.sha
729
730
Prepare SD card (four methods):
731
zcat $2.img.gz | pv | dd of=/dev/sdX bs=1M
732
dd if=$2.img of=/dev/sdX bs=1M
733
balena-etcher $2.img.gz -d /dev/sdX
734
balena-etcher $2.img -d /dev/sdX
735
EOF
736
fi
737
738
cat <<-EOF >> "${1}"
739
--------------------------------------------------------------------------------
740
$(cat "${SRC}"/LICENSE)
741
--------------------------------------------------------------------------------
742
EOF
743
}
744
745
746
#--------------------------------------------------------------------------------------------------------------------------------
747
# Create kernel boot logo from packages/blobs/splash/logo.png and packages/blobs/splash/spinner.gif (animated)
748
# and place it in /lib/firmware/bootsplash
749
#--------------------------------------------------------------------------------------------------------------------------------
750
function boot_logo ()
751
{
752
display_alert "Building kernel splash logo" "$RELEASE" "info"
753
754
LOGO=${EXTER}/packages/blobs/splash/logo.png
755
LOGO_WIDTH=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 1)
756
LOGO_HEIGHT=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 2)
757
THROBBER=${EXTER}/packages/blobs/splash/spinner.gif
758
THROBBER_WIDTH=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 1)
759
THROBBER_HEIGHT=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 2)
760
convert -alpha remove -background "#000000" $LOGO "${SDCARD}"/tmp/logo.rgb
761
convert -alpha remove -background "#000000" $THROBBER "${SDCARD}"/tmp/throbber%02d.rgb
762
${EXTER}/packages/blobs/splash/bootsplash-packer \
763
--bg_red 0x00 \
764
--bg_green 0x00 \
765
--bg_blue 0x00 \
766
--frame_ms 48 \
767
--picture \
768
--pic_width $LOGO_WIDTH \
769
--pic_height $LOGO_HEIGHT \
770
--pic_position 0 \
771
--blob "${SDCARD}"/tmp/logo.rgb \
772
--picture \
773
--pic_width $THROBBER_WIDTH \
774
--pic_height $THROBBER_HEIGHT \
775
--pic_position 0x05 \
776
--pic_position_offset 200 \
777
--pic_anim_type 1 \
778
--pic_anim_loop 0 \
779
--blob "${SDCARD}"/tmp/throbber00.rgb \
780
--blob "${SDCARD}"/tmp/throbber01.rgb \
781
--blob "${SDCARD}"/tmp/throbber02.rgb \
782
--blob "${SDCARD}"/tmp/throbber03.rgb \
783
--blob "${SDCARD}"/tmp/throbber04.rgb \
784
--blob "${SDCARD}"/tmp/throbber05.rgb \
785
--blob "${SDCARD}"/tmp/throbber06.rgb \
786
--blob "${SDCARD}"/tmp/throbber07.rgb \
787
--blob "${SDCARD}"/tmp/throbber08.rgb \
788
--blob "${SDCARD}"/tmp/throbber09.rgb \
789
--blob "${SDCARD}"/tmp/throbber10.rgb \
790
--blob "${SDCARD}"/tmp/throbber11.rgb \
791
--blob "${SDCARD}"/tmp/throbber12.rgb \
792
--blob "${SDCARD}"/tmp/throbber13.rgb \
793
--blob "${SDCARD}"/tmp/throbber14.rgb \
794
--blob "${SDCARD}"/tmp/throbber15.rgb \
795
--blob "${SDCARD}"/tmp/throbber16.rgb \
796
--blob "${SDCARD}"/tmp/throbber17.rgb \
797
--blob "${SDCARD}"/tmp/throbber18.rgb \
798
--blob "${SDCARD}"/tmp/throbber19.rgb \
799
--blob "${SDCARD}"/tmp/throbber20.rgb \
800
--blob "${SDCARD}"/tmp/throbber21.rgb \
801
--blob "${SDCARD}"/tmp/throbber22.rgb \
802
--blob "${SDCARD}"/tmp/throbber23.rgb \
803
--blob "${SDCARD}"/tmp/throbber24.rgb \
804
--blob "${SDCARD}"/tmp/throbber25.rgb \
805
--blob "${SDCARD}"/tmp/throbber26.rgb \
806
--blob "${SDCARD}"/tmp/throbber27.rgb \
807
--blob "${SDCARD}"/tmp/throbber28.rgb \
808
--blob "${SDCARD}"/tmp/throbber29.rgb \
809
--blob "${SDCARD}"/tmp/throbber30.rgb \
810
--blob "${SDCARD}"/tmp/throbber31.rgb \
811
--blob "${SDCARD}"/tmp/throbber32.rgb \
812
--blob "${SDCARD}"/tmp/throbber33.rgb \
813
--blob "${SDCARD}"/tmp/throbber34.rgb \
814
--blob "${SDCARD}"/tmp/throbber35.rgb \
815
--blob "${SDCARD}"/tmp/throbber36.rgb \
816
--blob "${SDCARD}"/tmp/throbber37.rgb \
817
--blob "${SDCARD}"/tmp/throbber38.rgb \
818
--blob "${SDCARD}"/tmp/throbber39.rgb \
819
--blob "${SDCARD}"/tmp/throbber40.rgb \
820
--blob "${SDCARD}"/tmp/throbber41.rgb \
821
--blob "${SDCARD}"/tmp/throbber42.rgb \
822
--blob "${SDCARD}"/tmp/throbber43.rgb \
823
--blob "${SDCARD}"/tmp/throbber44.rgb \
824
--blob "${SDCARD}"/tmp/throbber45.rgb \
825
--blob "${SDCARD}"/tmp/throbber46.rgb \
826
--blob "${SDCARD}"/tmp/throbber47.rgb \
827
--blob "${SDCARD}"/tmp/throbber48.rgb \
828
--blob "${SDCARD}"/tmp/throbber49.rgb \
829
--blob "${SDCARD}"/tmp/throbber50.rgb \
830
--blob "${SDCARD}"/tmp/throbber51.rgb \
831
--blob "${SDCARD}"/tmp/throbber52.rgb \
832
--blob "${SDCARD}"/tmp/throbber53.rgb \
833
--blob "${SDCARD}"/tmp/throbber54.rgb \
834
--blob "${SDCARD}"/tmp/throbber55.rgb \
835
--blob "${SDCARD}"/tmp/throbber56.rgb \
836
--blob "${SDCARD}"/tmp/throbber57.rgb \
837
--blob "${SDCARD}"/tmp/throbber58.rgb \
838
--blob "${SDCARD}"/tmp/throbber59.rgb \
839
--blob "${SDCARD}"/tmp/throbber60.rgb \
840
--blob "${SDCARD}"/tmp/throbber61.rgb \
841
--blob "${SDCARD}"/tmp/throbber62.rgb \
842
--blob "${SDCARD}"/tmp/throbber63.rgb \
843
--blob "${SDCARD}"/tmp/throbber64.rgb \
844
--blob "${SDCARD}"/tmp/throbber65.rgb \
845
--blob "${SDCARD}"/tmp/throbber66.rgb \
846
--blob "${SDCARD}"/tmp/throbber67.rgb \
847
--blob "${SDCARD}"/tmp/throbber68.rgb \
848
--blob "${SDCARD}"/tmp/throbber69.rgb \
849
--blob "${SDCARD}"/tmp/throbber70.rgb \
850
--blob "${SDCARD}"/tmp/throbber71.rgb \
851
--blob "${SDCARD}"/tmp/throbber72.rgb \
852
--blob "${SDCARD}"/tmp/throbber73.rgb \
853
--blob "${SDCARD}"/tmp/throbber74.rgb \
854
"${SDCARD}"/lib/firmware/bootsplash.orangepi >/dev/null 2>&1
855
if [[ $BOOT_LOGO == yes || $BOOT_LOGO == desktop && $BUILD_DESKTOP == yes && $RELEASE != buster ]]; then
856
[[ -f "${SDCARD}"/boot/orangepiEnv.txt ]] && grep -q '^bootlogo' "${SDCARD}"/boot/orangepiEnv.txt && \
857
sed -i 's/^bootlogo.*/bootlogo=true/' "${SDCARD}"/boot/orangepiEnv.txt || echo 'bootlogo=true' >> "${SDCARD}"/boot/orangepiEnv.txt
858
[[ -f "${SDCARD}"/boot/boot.ini ]] && sed -i 's/^setenv bootlogo.*/setenv bootlogo "true"/' "${SDCARD}"/boot/boot.ini
859
fi
860
# enable additional services
861
chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-ask-password-console.path >/dev/null 2>&1"
862
chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-hide-when-booted.service >/dev/null 2>&1"
863
chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-show-on-shutdown.service >/dev/null 2>&1"
864
}
865
866
867
868
DISTRIBUTIONS_DESC_DIR="external/config/distributions"
869
870
function distro_menu ()
871
{
872
# create a select menu for choosing a distribution based on EXPERT status
873
874
local distrib_dir="${1}"
875
876
if [[ -d "${distrib_dir}" && -f "${distrib_dir}/support" ]]; then
877
local support_level="$(cat "${distrib_dir}/support")"
878
if [[ "${support_level}" != "supported" && $EXPERT != "yes" ]]; then
879
:
880
else
881
local distro_codename="$(basename "${distrib_dir}")"
882
local distro_fullname="$(cat "${distrib_dir}/name")"
883
local expert_infos=""
884
[[ $EXPERT == "yes" ]] && expert_infos="(${support_level})"
885
886
if [[ "${BRANCH}" == "legacy" ]]; then
887
DISTRIB_TYPE="${DISTRIB_TYPE_LEGACY}"
888
[[ -z "${DISTRIB_TYPE_LEGACY}" ]] && DISTRIB_TYPE="buster bionic focal"
889
elif [[ "${BRANCH}" == "current" ]]; then
890
DISTRIB_TYPE="${DISTRIB_TYPE_CURRENT}"
891
[[ -z "${DISTRIB_TYPE_CURRENT}" ]] && DISTRIB_TYPE="bullseye bookworm focal jammy noble"
892
elif [[ "${BRANCH}" == "next" ]]; then
893
if [[ -n "${DISTRIB_TYPE_NEXT}" ]]; then
894
DISTRIB_TYPE="${DISTRIB_TYPE_NEXT}"
895
else
896
DISTRIB_TYPE="${DISTRIB_TYPE_CURRENT}"
897
[[ -z "${DISTRIB_TYPE_CURRENT}" ]] && DISTRIB_TYPE="bullseye bookworm focal jammy noble"
898
fi
899
fi
900
901
if [[ "${DISTRIB_TYPE}" =~ "${distro_codename}" ]]; then
902
options+=("${distro_codename}" "${distro_fullname} ${expert_infos}")
903
fi
904
fi
905
fi
906
}
907
908
function distros_options() {
909
for distrib_dir in "${DISTRIBUTIONS_DESC_DIR}/"*; do
910
distro_menu "${distrib_dir}"
911
done
912
}
913
914
function set_distribution_status() {
915
916
local distro_support_desc_filepath="${SRC}/${DISTRIBUTIONS_DESC_DIR}/${RELEASE}/support"
917
if [[ ! -f "${distro_support_desc_filepath}" ]]; then
918
exit_with_error "Distribution ${distribution_name} does not exist"
919
else
920
DISTRIBUTION_STATUS="$(cat "${distro_support_desc_filepath}")"
921
fi
922
923
[[ "${DISTRIBUTION_STATUS}" != "supported" ]] && [[ "${EXPERT}" != "yes" ]] && exit_with_error "Orange Pi ${RELEASE} is unsupported and, therefore, only available to experts (EXPERT=yes)"
924
925
}
926
927
adding_packages()
928
{
929
# add deb files to repository if they are not already there
930
931
display_alert "Checking and adding to repository $release" "$3" "ext"
932
for f in "${DEB_STORAGE}${2}"/*.deb
933
do
934
local name version arch
935
name=$(dpkg-deb -I "${f}" | grep Package | awk '{print $2}')
936
version=$(dpkg-deb -I "${f}" | grep Version | awk '{print $2}')
937
arch=$(dpkg-deb -I "${f}" | grep Architecture | awk '{print $2}')
938
# add if not already there
939
aptly repo search -architectures="${arch}" -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${1}" 'Name (% '${name}'), $Version (='${version}'), $Architecture (='${arch}')' &>/dev/null
940
if [[ $? -ne 0 ]]; then
941
display_alert "Adding ${1}" "$name" "info"
942
aptly repo add -force-replace=true -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${1}" "${f}" &>/dev/null
943
fi
944
done
945
946
}
947
948
949
950
951
addtorepo()
952
{
953
# create repository
# parameter "remove" dumps all and creates new
# parameter "delete" removes the incoming directory if publishing is successful
# function: cycle through distributions

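# Illustrative usage (not part of the original script); mirrors how repo-manipulate
# calls it below:
#   addtorepo "update" ""
#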
958
local distributions=("stretch" "bionic" "buster" "bullseye" "bookworm" "focal" "hirsute" "jammy" "noble" "sid")
959
#local distributions=($(grep -rw config/distributions/*/ -e 'supported' | cut -d"/" -f3))
960
local errors=0
961
962
for release in "${distributions[@]}"; do
963
964
local forceoverwrite=""
965
966
# let's drop it from publish if it exists
967
if [[ -n $(aptly publish list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}") ]]; then
968
aptly publish drop -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" > /dev/null 2>&1
969
fi
970
971
# create local repository if it does not exist
972
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}") ]]; then
973
display_alert "Creating section" "main" "info"
974
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="main" \
975
-comment="Armbian main repository" "${release}" >/dev/null
976
fi
977
978
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "^utils") ]]; then
979
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="utils" \
980
-comment="Armbian utilities (backwards compatibility)" utils >/dev/null
981
fi
982
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}-utils") ]]; then
983
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="${release}-utils" \
984
-comment="Armbian ${release} utilities" "${release}-utils" >/dev/null
985
fi
986
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}-desktop") ]]; then
987
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="${release}-desktop" \
988
-comment="Armbian ${release} desktop" "${release}-desktop" >/dev/null
989
fi
990
991
992
# adding main
993
if find "${DEB_STORAGE}"/ -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
994
adding_packages "$release" "" "main"
995
else
996
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
997
fi
998
999
local COMPONENTS="main"
1000
1001
# adding main distribution packages
1002
if find "${DEB_STORAGE}/${release}" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
1003
adding_packages "${release}-utils" "/${release}" "release packages"
1004
else
1005
# workaround - add dummy package to not trigger error
1006
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
1007
fi
1008
1009
# adding release-specific utils
1010
if find "${DEB_STORAGE}/extra/${release}-utils" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
1011
adding_packages "${release}-utils" "/extra/${release}-utils" "release utils"
1012
else
1013
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-utils" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
1014
fi
1015
COMPONENTS="${COMPONENTS} ${release}-utils"
1016
1017
# adding desktop
1018
if find "${DEB_STORAGE}/extra/${release}-desktop" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
1019
adding_packages "${release}-desktop" "/extra/${release}-desktop" "desktop"
1020
else
1021
# workaround - add dummy package to not trigger error
1022
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
1023
fi
1024
COMPONENTS="${COMPONENTS} ${release}-desktop"
1025
1026
local mainnum utilnum desknum
1027
mainnum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | grep "Number of packages" | awk '{print $NF}')
1028
utilnum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | grep "Number of packages" | awk '{print $NF}')
1029
desknum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-utils" | grep "Number of packages" | awk '{print $NF}')
1030
1031
if [ $mainnum -gt 0 ] && [ $utilnum -gt 0 ] && [ $desknum -gt 0 ]; then
1032
1033
# publish
1034
aptly publish \
1035
-acquire-by-hash \
1036
-passphrase="${GPG_PASS}" \
1037
-origin="Armbian" \
1038
-label="Armbian" \
1039
-config="${SCRIPTPATH}config/${REPO_CONFIG}" \
1040
-component="${COMPONENTS// /,}" \
1041
-distribution="${release}" repo "${release}" ${COMPONENTS//main/} >/dev/null
1042
1043
if [[ $? -ne 0 ]]; then
1044
display_alert "Publishing failed" "${release}" "err"
1045
errors=$((errors+1))
1046
exit 0
1047
fi
1048
else
1049
errors=$((errors+1))
1050
local err_txt=": All components must be present: main, utils and desktop for first build"
1051
fi
1052
1053
done
1054
1055
# cleanup
1056
display_alert "Cleaning repository" "${DEB_STORAGE}" "info"
1057
aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}"
1058
1059
# display what we have
1060
echo ""
1061
display_alert "List of local repos" "local" "info"
1062
(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}") | grep -E packages
1063
1064
# remove debs if no errors found
1065
if [[ $errors -eq 0 ]]; then
1066
if [[ "$2" == "delete" ]]; then
1067
display_alert "Purging incoming debs" "all" "ext"
1068
find "${DEB_STORAGE}" -name "*.deb" -type f -delete
1069
fi
1070
else
1071
display_alert "There were some problems $err_txt" "leaving incoming directory intact" "err"
1072
fi
1073
1074
}
1075
1076
1077
1078
1079
repo-manipulate()
1080
{
1081
# repository manipulation
# "show" displays packages in each repository
# "serve" serves the repository - useful for local diagnostics
# "unique" manually select which package should be removed from all repositories
# "update" searches for new files in output/debs* and adds them to the repository
# "purge" keeps only the last 5 versions

local DISTROS=("stretch" "bionic" "buster" "bullseye" "bookworm" "focal" "hirsute" "jammy" "noble" "sid")
1089
#local DISTROS=($(grep -rw config/distributions/*/ -e 'supported' | cut -d"/" -f3))
1090
1091
case $@ in
1092
1093
serve)
1094
# display repository content
1095
display_alert "Serving content" "common utils" "ext"
1096
aptly serve -listen=$(ip -f inet addr | grep -Po 'inet \K[\d.]+' | grep -v 127.0.0.1 | head -1):80 -config="${SCRIPTPATH}config/${REPO_CONFIG}"
1097
exit 0
1098
;;
1099
1100
show)
1101
# display repository content
1102
for release in "${DISTROS[@]}"; do
1103
display_alert "Displaying repository contents for" "$release" "ext"
1104
aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | tail -n +7
1105
aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | tail -n +7
1106
done
1107
display_alert "Displaying repository contents for" "common utils" "ext"
1108
aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" utils | tail -n +7
1109
echo "done."
1110
exit 0
1111
;;
1112
1113
unique)
1114
# which package should be removed from all repositories
1115
IFS=$'\n'
1116
while true; do
1117
LIST=()
1118
for release in "${DISTROS[@]}"; do
1119
LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | tail -n +7) )
1120
LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | tail -n +7) )
1121
done
1122
LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" utils | tail -n +7) )
1123
LIST=( $(echo "${LIST[@]}" | tr ' ' '\n' | sort -u))
1124
new_list=()
1125
# create a human readable menu
1126
for ((n=0;n<$((${#LIST[@]}));n++));
1127
do
1128
new_list+=( "${LIST[$n]}" )
1129
new_list+=( "" )
1130
done
1131
LIST=("${new_list[@]}")
1132
LIST_LENGTH=$((${#LIST[@]}/2));
1133
exec 3>&1
1134
TARGET_VERSION=$(dialog --cancel-label "Cancel" --backtitle "BACKTITLE" --no-collapse --title "Remove packages from repositories" --clear --menu "Delete" $((9+${LIST_LENGTH})) 82 65 "${LIST[@]}" 2>&1 1>&3)
1135
exitstatus=$?;
1136
exec 3>&-
1137
if [[ $exitstatus -eq 0 ]]; then
1138
for release in "${DISTROS[@]}"; do
1139
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "$TARGET_VERSION"
1140
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" "$TARGET_VERSION"
1141
done
1142
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "utils" "$TARGET_VERSION"
1143
else
1144
exit 1
1145
fi
1146
aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}" > /dev/null 2>&1
1147
done
1148
;;
1149
1150
update)
1151
# display full help test
1152
# run repository update
1153
addtorepo "update" ""
1154
# add a key to repo
1155
cp "${SCRIPTPATH}"config/armbian.key "${REPO_STORAGE}"/public/
1156
exit 0
1157
;;
1158
1159
purge)
1160
for release in "${DISTROS[@]}"; do
1161
repo-remove-old-packages "$release" "armhf" "5"
1162
repo-remove-old-packages "$release" "arm64" "5"
1163
repo-remove-old-packages "$release" "amd64" "5"
1164
repo-remove-old-packages "$release" "all" "5"
1165
aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&1
1166
done
1167
exit 0
1168
;;
1169
1170
purgeedge)
1171
for release in "${DISTROS[@]}"; do
1172
repo-remove-old-packages "$release" "armhf" "3" "edge"
1173
repo-remove-old-packages "$release" "arm64" "3" "edge"
1174
repo-remove-old-packages "$release" "amd64" "3" "edge"
1175
repo-remove-old-packages "$release" "all" "3" "edge"
1176
aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&1
1177
done
1178
exit 0
1179
;;
1180
1181
1182
purgesource)
1183
for release in "${DISTROS[@]}"; do
1184
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" 'Name (% *-source*)'
1185
aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&1
1186
done
1187
aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}" > /dev/null 2>&1
1188
exit 0
1189
;;
1190
*)
1191
1192
echo -e "Usage: repository show | serve | unique | create | update | purge | purgesource\n"
1193
echo -e "\n show = display repository content"
1194
echo -e "\n serve = publish your repositories on current server over HTTP"
1195
echo -e "\n unique = manually select which package should be removed from all repositories"
1196
echo -e "\n update = updating repository"
1197
echo -e "\n purge = removes all but last 5 versions"
1198
echo -e "\n purgeedge = removes all but last 3 edge versions"
1199
echo -e "\n purgesource = removes all sources\n\n"
1200
exit 0
1201
;;
1202
1203
esac
1204
1205
}
1206
1207
1208
1209
1210
# Removes old packages from the given repo
#
# $1: Repository
# $2: Architecture
# $3: Number of package versions to keep
# $4: Additional search pattern
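# Illustrative usage (not part of the original script); mirrors the "purge" case above:
#   repo-remove-old-packages "bookworm" "arm64" "5"
#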
repo-remove-old-packages() {
1217
local repo=$1
1218
local arch=$2
1219
local keep=$3
1220
for pkg in $(aptly repo search -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Architecture ($arch)" | grep -v "ERROR: no results" | sort -t '.' -nk4 | grep -e "$4"); do
1221
local pkg_name
1222
count=0
1223
pkg_name=$(echo "${pkg}" | cut -d_ -f1)
1224
for subpkg in $(aptly repo search -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Name ($pkg_name)" | grep -v "ERROR: no results" | sort -rt '.' -nk4); do
1225
((count+=1))
1226
if [[ $count -gt $keep ]]; then
1227
pkg_version=$(echo "${subpkg}" | cut -d_ -f2)
1228
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Name ($pkg_name), Version (= $pkg_version)"
1229
fi
1230
done
1231
done
1232
}
1233
1234
1235
1236
1237
# wait_for_package_manager
#
# * installation will break if we try to install while another package manager is running
#
wait_for_package_manager()
1242
{
1243
# wait while a package manager is running in the background
1244
while true; do
1245
if [[ "$(fuser /var/lib/dpkg/lock 2>/dev/null; echo $?)" != 1 && "$(fuser /var/lib/dpkg/lock-frontend 2>/dev/null; echo $?)" != 1 ]]; then
1246
display_alert "Package manager is running in the background." "Please wait! Retrying in 30 sec" "wrn"
1247
sleep 30
1248
else
1249
break
1250
fi
1251
done
1252
}
1253
1254
1255
1256
# Installs Debian packages in the orangepi build system.
# The function accepts four optional parameters:
# autoupdate - If the installation list is not empty, update the package index first.
# upgrade, clean - same meaning as the corresponding apt operations
# verbose - detailed log for the function
#
# list="pkg1 pkg2 pkg3 pkgbadname pkg-1.0 | pkg-2.0 pkg5 (>= 9)"
# install_pkg_deb upgrade verbose $list
# or
# install_pkg_deb autoupdate $list
#
# If a package has a bad name, we will see it in the log file.
# If the LOG_OUTPUT_FILE variable is set to the full path of a log file,
# all the information will be written there.
#
# The LOG_OUTPUT_FILE variable must be defined in the calling function
# before calling install_pkg_deb and unset afterwards.
#
install_pkg_deb ()
1275
{
1276
local list=""
1277
local log_file
1278
local for_install
1279
local need_autoup=false
1280
local need_upgrade=false
1281
local need_clean=false
1282
local need_verbose=false
1283
local _line=${BASH_LINENO[0]}
1284
local _function=${FUNCNAME[1]}
1285
local _file=$(basename "${BASH_SOURCE[1]}")
1286
local tmp_file=$(mktemp /tmp/install_log_XXXXX)
1287
export DEBIAN_FRONTEND=noninteractive
1288
1289
list=$(
1290
for p in $*;do
1291
case $p in
1292
autoupdate) need_autoup=true; continue ;;
1293
upgrade) need_upgrade=true; continue ;;
1294
clean) need_clean=true; continue ;;
1295
verbose) need_verbose=true; continue ;;
1296
\||\(*|*\)) continue ;;
1297
esac
1298
echo " $p"
1299
done
1300
)
1301
1302
if [ -d $(dirname $LOG_OUTPUT_FILE) ]; then
1303
log_file=${LOG_OUTPUT_FILE}
1304
else
1305
log_file="${SRC}/output/${LOG_SUBPATH}/install.log"
1306
fi
1307
1308
# This is necessary first when there is no apt cache.
1309
if $need_upgrade; then
1310
apt-get -q update || echo "apt cannot update" >>$tmp_file
1311
apt-get -y upgrade || echo "apt cannot upgrade" >>$tmp_file
1312
fi
1313
1314
# If the package is not installed, check the latest
1315
# up-to-date version in the apt cache.
1316
# Exclude bad package names and send a message to the log.
1317
for_install=$(
1318
for p in $list;do
1319
if $(dpkg-query -W -f '${db:Status-Abbrev}' $p |& awk '/ii/{exit 1}');then
1320
apt-cache show $p -o APT::Cache::AllVersions=no |& \
1321
awk -v p=$p -v tmp_file=$tmp_file \
1322
'/^Package:/{print $2} /^E:/{print "Bad package name: ",p >>tmp_file}'
1323
fi
1324
done
1325
)
1326
1327
# This information should be logged.
1328
if [ -s $tmp_file ]; then
1329
echo -e "\nInstalling packages in function: $_function" "[$_file:$_line]" \
1330
>>$log_file
1331
echo -e "\nIncoming list:" >>$log_file
1332
printf "%-30s %-30s %-30s %-30s\n" $list >>$log_file
1333
echo "" >>$log_file
1334
cat $tmp_file >>$log_file
1335
fi
1336
1337
if [ -n "$for_install" ]; then
1338
if $need_autoup; then
1339
apt-get -q update
1340
apt-get -y upgrade
1341
fi
1342
apt-get install -qq -y --no-install-recommends $for_install
1343
echo -e "\nPackages installed:" >>$log_file
1344
dpkg-query -W \
1345
-f '${binary:Package;-27} ${Version;-23}\n' \
1346
$for_install >>$log_file
1347
1348
fi
1349
1350
# We will show the status after installing all listed packages
1351
if $need_verbose; then
1352
echo -e "\nstatus after installation:" >>$log_file
1353
dpkg-query -W \
1354
-f '${binary:Package;-27} ${Version;-23} [ ${Status} ]\n' \
1355
$list >>$log_file
1356
fi
1357
1358
if $need_clean;then apt-get clean; fi
1359
rm $tmp_file
1360
}
1361
1362
1363
1364
# prepare_host_basic
1365
#
1366
# * installs only basic packages
1367
#
1368
prepare_host_basic()
1369
{
1370
1371
# command:package1 package2 ...
1372
# list of commands that are needed : packages where this command is found
1373
local check_pack install_pack
1374
local checklist=(
1375
"whiptail:whiptail"
1376
"dialog:dialog"
1377
"fuser:psmisc"
1378
"getfacl:acl"
1379
"uuid:uuid uuid-runtime"
1380
"curl:curl"
1381
"gpg:gnupg"
1382
"gawk:gawk"
1383
"git:git"
1384
)
1385
1386
for check_pack in "${checklist[@]}"; do
1387
if ! which ${check_pack%:*} >/dev/null; then local install_pack+=${check_pack#*:}" "; fi
1388
done
1389
1390
if [[ -n $install_pack ]]; then
1391
display_alert "Installing basic packages" "$install_pack"
1392
sudo bash -c "apt-get -qq update && apt-get install -qq -y --no-install-recommends $install_pack"
1393
fi
1394
1395
}
1396
1397
1398
1399
1400
# prepare_host
1401
#
1402
# * checks and installs necessary packages
1403
# * creates directory structure
1404
# * changes system settings
1405
#
1406
prepare_host()
1407
{
1408
display_alert "Preparing" "host" "info"
1409
1410
# The 'offline' variable must always be set to 'true' or 'false'
1411
if [ "$OFFLINE_WORK" == "yes" ]; then
1412
local offline=true
1413
else
1414
local offline=false
1415
fi
1416
1417
# wait until the package manager finishes possible system maintenance
1418
wait_for_package_manager
1419
1420
# fix for Locales settings
1421
if ! grep -q "^en_US.UTF-8 UTF-8" /etc/locale.gen; then
1422
sudo sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen
1423
sudo locale-gen
1424
fi
1425
1426
export LC_ALL="en_US.UTF-8"
1427
1428
# packages list for host
1429
# NOTE: please sync any changes here with the Dockerfile and Vagrantfile
1430
1431
local hostdeps="acl aptly aria2 bc binfmt-support bison btrfs-progs \
1432
build-essential ca-certificates ccache cpio cryptsetup curl \
1433
debian-archive-keyring debian-keyring debootstrap device-tree-compiler \
1434
dialog dirmngr dosfstools dwarves f2fs-tools fakeroot flex gawk \
1435
gcc-arm-linux-gnueabihf gdisk gpg imagemagick jq kmod libbison-dev \
1436
libc6-dev-armhf-cross libelf-dev libfdt-dev libfile-fcntllock-perl \
1437
libfl-dev liblz4-tool libncurses-dev libpython2.7-dev libssl-dev \
1438
libusb-1.0-0-dev linux-base locales lzop ncurses-base ncurses-term \
1439
nfs-kernel-server ntpdate p7zip-full parted patchutils pigz pixz \
1440
pkg-config pv python3-dev python3-distutils qemu-user-static rsync swig \
1441
systemd-container u-boot-tools udev unzip uuid-dev wget whiptail zip \
1442
zlib1g-dev gcc-riscv64-linux-gnu uuid-runtime fatattr git-lfs scons \
1443
mtools"
1444
1445
if [[ $(dpkg --print-architecture) == amd64 ]]; then
1446
1447
hostdeps+=" distcc lib32ncurses-dev lib32stdc++6 libc6-i386"
1448
grep -q i386 <(dpkg --print-foreign-architectures) || dpkg --add-architecture i386
1449
1450
elif [[ $(dpkg --print-architecture) == arm64 ]]; then
1451
1452
hostdeps+=" gcc-arm-linux-gnueabi gcc-arm-none-eabi libc6 libc6-amd64-cross qemu"
1453
1454
else
1455
1456
display_alert "Please read documentation to set up proper compilation environment"
1457
display_alert "https://www.armbian.com/using-armbian-tools/"
1458
exit_with_error "Running this tool on non x86_64 build host is not supported"
1459
1460
fi
1461
1462
# Add support for Ubuntu 20.04, 21.04 and Mint 20.x
1463
if [[ $HOSTRELEASE =~ ^(focal|hirsute|jammy|noble|ulyana|ulyssa|bullseye|bookworm|uma)$ ]]; then
1464
hostdeps+=" python2 python3"
1465
ln -fs /usr/bin/python2.7 /usr/bin/python2
1466
ln -fs /usr/bin/python2.7 /usr/bin/python
1467
else
1468
hostdeps+=" python libpython-dev"
1469
fi
1470
1471
display_alert "Build host OS release" "${HOSTRELEASE:-(unknown)}" "info"
1472
1473
# Ubuntu Focal, Jammy and Noble x86_64 are the only fully supported host OS releases
1474
# Using Docker/VirtualBox/Vagrant is the only supported way to run the build script on other Linux distributions
1475
#
1476
# NO_HOST_RELEASE_CHECK overrides the check for a supported host system
1477
# Disable host OS check at your own risk. Any issues reported with unsupported releases will be closed without discussion
1478
if [[ -z $HOSTRELEASE || "focal jammy noble" != *"$HOSTRELEASE"* ]]; then
1479
if [[ $NO_HOST_RELEASE_CHECK == yes ]]; then
1480
display_alert "You are running on an unsupported system" "${HOSTRELEASE:-(unknown)}" "wrn"
1481
display_alert "Do not report any errors, warnings or other issues encountered beyond this point" "" "wrn"
1482
else
1483
exit_with_error "It seems you ignore documentation and run an unsupported build system: ${HOSTRELEASE:-(unknown)}"
1484
fi
1485
fi
1486
1487
if grep -qE "(Microsoft|WSL)" /proc/version; then
1488
if [ -f /.dockerenv ]; then
1489
display_alert "Building images using Docker on WSL2 may fail" "" "wrn"
1490
else
1491
exit_with_error "Windows subsystem for Linux is not a supported build environment"
1492
fi
1493
fi
1494
1495
if systemd-detect-virt -q -c; then
1496
display_alert "Running in container" "$(systemd-detect-virt)" "info"
1497
# disable apt-cacher unless NO_APT_CACHER=no is specified explicitly
1498
if [[ $NO_APT_CACHER != no ]]; then
1499
display_alert "apt-cacher is disabled in containers, set NO_APT_CACHER=no to override" "" "wrn"
1500
NO_APT_CACHER=yes
1501
fi
1502
CONTAINER_COMPAT=yes
1503
# trying to use nested containers is not a good idea, so don't permit EXTERNAL_NEW=compile
1504
if [[ $EXTERNAL_NEW == compile ]]; then
1505
display_alert "EXTERNAL_NEW=compile is not available when running in container, setting to prebuilt" "" "wrn"
1506
EXTERNAL_NEW=prebuilt
1507
fi
1508
SYNC_CLOCK=no
1509
fi
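
# Example (sketch): when building inside a container but pointing apt at an apt-cacher-ng
# instance running outside of it, export
#   NO_APT_CACHER=no
# before starting the build to keep the proxy enabled despite the container detection above.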

# when working offline, skip host dependency installation and toolchain downloads
if ! $offline; then

# warning: apt-cacher-ng will fail if installed and used both on host and in
# container/chroot environment with shared network
# set NO_APT_CACHER=yes to prevent installation errors in such case
if [[ $NO_APT_CACHER != yes ]]; then hostdeps+=" apt-cacher-ng"; fi

export EXTRA_BUILD_DEPS=""
call_extension_method "add_host_dependencies" <<- 'ADD_HOST_DEPENDENCIES'
*run before installing host dependencies*
you can add packages to install, space separated, to ${EXTRA_BUILD_DEPS} here.
ADD_HOST_DEPENDENCIES
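
# A minimal sketch of an "add_host_dependencies" hook, kept commented out on purpose.
# It assumes the Armbian-style extension convention where functions whose names start with
# the hook name are picked up by call_extension_method; the function and package names
# below are illustrative only:
#
#   add_host_dependencies__install_extra_tools() {
#       EXTRA_BUILD_DEPS+=" vim tree"
#   }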

if [ -n "${EXTRA_BUILD_DEPS}" ]; then hostdeps+=" ${EXTRA_BUILD_DEPS}"; fi

display_alert "Installing build dependencies"
# don't prompt for apt cacher selection
echo "apt-cacher-ng apt-cacher-ng/tunnelenable boolean false" | sudo debconf-set-selections

LOG_OUTPUT_FILE="${DEST}"/${LOG_SUBPATH}/hostdeps.log
install_pkg_deb "autoupdate $hostdeps"
unset LOG_OUTPUT_FILE

update-ccache-symlinks

export FINAL_HOST_DEPS="$hostdeps ${EXTRA_BUILD_DEPS}"
call_extension_method "host_dependencies_ready" <<- 'HOST_DEPENDENCIES_READY'
*run after all host dependencies are installed*
At this point we can read `${FINAL_HOST_DEPS}`, but changing it won't have any effect.
All the dependencies, including the default/core deps and the ones added via `${EXTRA_BUILD_DEPS}`
are installed at this point. The system clock has not yet been synced.
HOST_DEPENDENCIES_READY
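
# A matching "host_dependencies_ready" hook sketch (commented out; same naming assumption
# as above, function name illustrative) that only logs the final dependency list:
#
#   host_dependencies_ready__log_deps() {
#       display_alert "Host dependencies installed" "${FINAL_HOST_DEPS}" "info"
#   }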

# sync clock
if [[ $SYNC_CLOCK != no ]]; then
display_alert "Syncing clock" "${NTP_SERVER:-pool.ntp.org}" "info"
ntpdate -s "${NTP_SERVER:-pool.ntp.org}"
fi
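
# Example (sketch): the NTP server can be overridden, or the sync skipped entirely, via
#   NTP_SERVER=time.example.org   # any reachable NTP host (hostname illustrative)
#   SYNC_CLOCK=no                 # skip the ntpdate call (forced automatically in containers)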

# create directory structure
mkdir -p $SRC/output $EXTER/cache $USERPATCHES_PATH
if [[ -n $SUDO_USER ]]; then
chgrp --quiet sudo cache output "${USERPATCHES_PATH}"
# SGID bit on cache/sources breaks kernel dpkg packaging
chmod --quiet g+w,g+s output "${USERPATCHES_PATH}"
# fix existing permissions
find "${SRC}"/output "${USERPATCHES_PATH}" -type d ! -group sudo -exec chgrp --quiet sudo {} \;
find "${SRC}"/output "${USERPATCHES_PATH}" -type d ! -perm -g+w,g+s -exec chmod --quiet g+w,g+s {} \;
fi
mkdir -p $DEST/debs/{extra,u-boot} $DEST/{config,debug,patch,images} $USERPATCHES_PATH/overlay $EXTER/cache/{debs,sources,hash} $SRC/toolchains $SRC/.tmp

# download external cross-toolchains (only done on x86_64 build hosts)
if [[ $(dpkg --print-architecture) == amd64 ]]; then
if [[ "${SKIP_EXTERNAL_TOOLCHAINS}" != "yes" ]]; then

# bind mount toolchain if defined
if [[ -d "${ARMBIAN_CACHE_TOOLCHAIN_PATH}" ]]; then
mountpoint -q "${SRC}"/cache/toolchain && umount -l "${SRC}"/cache/toolchain
mount --bind "${ARMBIAN_CACHE_TOOLCHAIN_PATH}" "${SRC}"/cache/toolchain
fi

display_alert "Checking for external GCC compilers" "" "info"
# download the external Linaro/Arm compilers and other special dependencies needed by certain sources

case ${BOARDFAMILY} in
cix)
local toolchains=(
"arm-gnu-toolchain-12.3.rel1-x86_64-aarch64-none-linux-gnu.tar.xz"
)
;;
*)
local toolchains=(
"ky-toolchain-linux-glibc-x86_64-v1.0.1.tar.xz"
"gcc-linaro-aarch64-none-elf-4.8-2013.11_linux.tar.xz"
"gcc-linaro-arm-none-eabi-4.8-2014.04_linux.tar.xz"
"gcc-linaro-arm-linux-gnueabihf-4.8-2014.04_linux.tar.xz"
"gcc-linaro-4.9.4-2017.01-x86_64_arm-linux-gnueabi.tar.xz"
"gcc-linaro-4.9.4-2017.01-x86_64_aarch64-linux-gnu.tar.xz"
"gcc-linaro-5.5.0-2017.10-x86_64_arm-linux-gnueabihf.tar.xz"
"gcc-linaro-7.4.1-2019.02-x86_64_arm-linux-gnueabi.tar.xz"
"gcc-linaro-7.4.1-2019.02-x86_64_aarch64-linux-gnu.tar.xz"
"gcc-arm-9.2-2019.12-x86_64-arm-none-linux-gnueabihf.tar.xz"
"gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz"
"gcc-arm-11.2-2022.02-x86_64-arm-none-linux-gnueabihf.tar.xz"
"gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
)
;;
esac

USE_TORRENT_STATUS=${USE_TORRENT}
USE_TORRENT="no"
for toolchain in ${toolchains[@]}; do
download_and_verify "_toolchain" "${toolchain##*/}"
done
USE_TORRENT=${USE_TORRENT_STATUS}

rm -rf $SRC/toolchains/*.tar.xz*
local existing_dirs=( $(ls -1 $SRC/toolchains) )
for dir in ${existing_dirs[@]}; do
local found=no
for toolchain in ${toolchains[@]}; do
local filename=${toolchain##*/}
local dirname=${filename//.tar.xz}
[[ $dir == $dirname ]] && found=yes
done
if [[ $found == no ]]; then
display_alert "Removing obsolete toolchain" "$dir"
rm -rf $SRC/toolchains/$dir
fi
done
else
display_alert "Ignoring toolchains" "SKIP_EXTERNAL_TOOLCHAINS: ${SKIP_EXTERNAL_TOOLCHAINS}" "info"
fi
fi

fi # check offline
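
# For reference, each toolchain tarball listed above is expected to unpack into a directory
# named after the file with the ".tar.xz" suffix stripped, which is what the cleanup loop
# compares against, e.g.
#   gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz
#   -> $SRC/toolchains/gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu/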

# enable arm binary format so that the cross-architecture chroot environment will work
if [[ $BUILD_OPT == "image" || $BUILD_OPT == "rootfs" ]]; then
modprobe -q binfmt_misc
mountpoint -q /proc/sys/fs/binfmt_misc/ || mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
if [[ "$(arch)" != "aarch64" ]]; then
test -e /proc/sys/fs/binfmt_misc/qemu-arm || update-binfmts --enable qemu-arm
test -e /proc/sys/fs/binfmt_misc/qemu-aarch64 || update-binfmts --enable qemu-aarch64
fi
fi
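
# A quick way to check that the qemu binfmt handlers are actually registered (sketch):
#   cat /proc/sys/fs/binfmt_misc/qemu-aarch64
# should start with "enabled" and list the interpreter (typically qemu-aarch64-static; the
# exact path depends on the host's qemu-user-static package).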

[[ ! -f "${USERPATCHES_PATH}"/customize-image.sh ]] && cp "${EXTER}"/config/templates/customize-image.sh.template "${USERPATCHES_PATH}"/customize-image.sh

if [[ ! -f "${USERPATCHES_PATH}"/README ]]; then
rm -f "${USERPATCHES_PATH}"/readme.txt
echo 'Please read documentation about customizing build configuration' > "${USERPATCHES_PATH}"/README
echo 'https://www.orangepi.org' >> "${USERPATCHES_PATH}"/README

# create patches directory structure under USERPATCHES_PATH
find $EXTER/patch -maxdepth 2 -type d ! -name . | sed "s%/.*patch%/$USERPATCHES_PATH%" | xargs mkdir -p
fi

# check free space (basic)
local freespace=$(findmnt --target "${SRC}" -n -o AVAIL -b 2>/dev/null) # in bytes
if [[ -n $freespace && $(( $freespace / 1073741824 )) -lt 10 ]]; then
display_alert "Low free space left" "$(( $freespace / 1073741824 )) GiB" "wrn"
# pause here since dialog-based menu will hide this message otherwise
echo -e "Press \e[0;33m<Ctrl-C>\x1B[0m to abort compilation, \e[0;33m<Enter>\x1B[0m to ignore and continue"
read
fi
}



function webseed ()
{
# list of mirrors that host our files
unset text
# pick the mirror list for the continent the build host is in, as reported by the geoip service
local CCODE=$(curl -s redirect.armbian.com/geoip | jq '.continent.code' -r)
WEBSEED=($(curl -s https://redirect.armbian.com/mirrors | jq -r '.'${CCODE}' | .[] | values'))
# aria2 simply splits chunks based on the number of sources, not on download speed
# when the china mirror is selected, use only that mirror; the others are very slow from there
if [[ $DOWNLOAD_MIRROR == china ]]; then
WEBSEED=(
https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/
)
elif [[ $DOWNLOAD_MIRROR == bfsu ]]; then
WEBSEED=(
https://mirrors.bfsu.edu.cn/armbian-releases/
)
fi
if [[ ${filename} == *ky* ]] || [[ ${filename} == *arm-gnu-toolchain* ]]; then
WEBSEED=(
http://www.iplaystore.cn/upload/
)
fi
for mirror in ${WEBSEED[@]}; do
text="${text} ${mirror}${1}"
done
text="${text:1}"
echo "${text}"
}
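
# Example (illustrative output shape; the actual URLs come from redirect.armbian.com):
#   webseed "_toolchain/gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
# prints a space-separated list like
#   https://mirror-a.example/armbian-releases/_toolchain/gcc-arm-... https://mirror-b.example/armbian-releases/_toolchain/gcc-arm-...
# which download_and_verify below passes to aria2c as additional sources.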



download_and_verify()
{

local remotedir=$1
local filename=$2
local localdir=$SRC/toolchains
local dirname=${filename//.tar.xz}

if [[ $DOWNLOAD_MIRROR == china ]]; then
local server="https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/"
elif [[ $DOWNLOAD_MIRROR == bfsu ]]; then
local server="https://mirrors.bfsu.edu.cn/armbian-releases/"
else
local server=${ARMBIAN_MIRROR}
fi

if [[ ${filename} == *ky* ]] || [[ ${filename} == *arm-gnu-toolchain* ]]; then
server="http://www.iplaystore.cn/upload/"
fi

if [[ -f ${localdir}/${dirname}/.download-complete ]]; then
return
fi

# switch to the china mirror if the default (US) mirror times out
timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} >/dev/null 2>&1
if [[ $? -ne 7 && $? -ne 22 && $? -ne 0 ]]; then
display_alert "Timeout from $server" "retrying" "info"
server="https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/"
if [[ ${filename} == *ky* ]] || [[ ${filename} == *arm-gnu-toolchain* ]]; then
server="http://www.iplaystore.cn/upload/"
fi

# switch to another china mirror if tuna times out
timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} >/dev/null 2>&1
if [[ $? -ne 7 && $? -ne 22 && $? -ne 0 ]]; then
display_alert "Timeout from $server" "retrying" "info"
server="https://mirrors.bfsu.edu.cn/armbian-releases/"
if [[ ${filename} == *ky* ]] || [[ ${filename} == *arm-gnu-toolchain* ]]; then
server="http://www.iplaystore.cn/upload/"
fi
fi
fi

# check if file exists on remote server before running aria2 downloader
[[ ! `timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename}` ]] && return

cd "${localdir}" || exit

# use local control file
if [[ -f "${EXTER}"/config/torrents/${filename}.asc ]]; then
local torrent="${EXTER}"/config/torrents/${filename}.torrent
ln -sf "${EXTER}/config/torrents/${filename}.asc" "${localdir}/${filename}.asc"
elif [[ ! `timeout 10 curl --head --fail --silent "${server}${remotedir}/${filename}.asc"` ]]; then
return
else
# download control file
local torrent=${server}$remotedir/${filename}.torrent
aria2c --download-result=hide --disable-ipv6=true --summary-interval=0 --console-log-level=error --auto-file-renaming=false \
--continue=false --allow-overwrite=true --dir="${localdir}" ${server}${remotedir}/${filename}.asc $(webseed "$remotedir/${filename}.asc") -o "${filename}.asc"
[[ $? -ne 0 ]] && display_alert "Failed to download control file" "" "wrn"
fi

# download torrent first
if [[ ${USE_TORRENT} == "yes" ]]; then

display_alert "downloading using torrent network" "$filename"
local ariatorrent="--summary-interval=0 --auto-save-interval=0 --seed-time=0 --bt-stop-timeout=120 --console-log-level=error \
--allow-overwrite=true --download-result=hide --rpc-save-upload-metadata=false --auto-file-renaming=false \
--file-allocation=trunc --continue=true ${torrent} \
--dht-file-path=$EXTER/cache/.aria2/dht.dat --disable-ipv6=true --stderr --follow-torrent=mem --dir=${localdir}"

# exception: aria2 prints an error when the dht.dat file does not exist yet, so suppress its output on the first download only
if [[ -f $EXTER/cache/.aria2/dht.dat ]]; then
# shellcheck disable=SC2086
aria2c ${ariatorrent}
else
# shellcheck disable=SC2035
aria2c ${ariatorrent} &> "${DEST}"/${LOG_SUBPATH}/torrent.log
fi
# mark complete
[[ $? -eq 0 ]] && touch "${localdir}/${filename}.complete"

fi

# direct download if torrent fails
if [[ ! -f "${localdir}/${filename}.complete" ]]; then
if [[ ! `timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null` ]]; then
display_alert "downloading using http(s) network" "$filename"
aria2c --download-result=hide --rpc-save-upload-metadata=false --console-log-level=error \
--dht-file-path="${SRC}"/cache/.aria2/dht.dat --disable-ipv6=true --summary-interval=0 --auto-file-renaming=false --dir="${localdir}" ${server}${remotedir}/${filename} $(webseed "${remotedir}/${filename}") -o "${filename}"
# mark complete
[[ $? -eq 0 ]] && touch "${localdir}/${filename}.complete" && echo ""

fi
fi

if [[ -f ${localdir}/${filename}.asc ]]; then

if grep -q 'BEGIN PGP SIGNATURE' "${localdir}/${filename}.asc"; then

if [[ ! -d $EXTER/cache/.gpg ]]; then
mkdir -p $EXTER/cache/.gpg
chmod 700 $EXTER/cache/.gpg
touch $EXTER/cache/.gpg/gpg.conf
chmod 600 $EXTER/cache/.gpg/gpg.conf
fi

# Verify archives with Linaro and Armbian GPG keys

if [ x"" != x"${http_proxy}" ]; then
(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="${http_proxy}" \
--recv-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)

(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="${http_proxy}" \
--recv-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)
else
(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)

(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)
fi

gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --verify \
--trust-model always -q "${localdir}/${filename}.asc" >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1
[[ ${PIPESTATUS[0]} -eq 0 ]] && verified=true && display_alert "Verified" "PGP" "info"

else

md5sum -c --status "${localdir}/${filename}.asc" && verified=true && display_alert "Verified" "MD5" "info"

fi

if [[ $verified == true ]]; then
if [[ "${filename:(-6)}" == "tar.xz" ]]; then

display_alert "decompressing"
pv -p -b -r -c -N "[ .... ] ${filename}" "${filename}" | xz -dc | tar xp --xattrs --no-same-owner --overwrite
[[ $? -eq 0 ]] && touch "${localdir}/${dirname}/.download-complete"
fi
else
exit_with_error "verification failed"
fi

fi
}
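
# Example invocation (this is how the toolchain loop in prepare_host uses it):
#   download_and_verify "_toolchain" "gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
# fetches the tarball plus its .asc control file into $SRC/toolchains, verifies it (PGP or MD5),
# unpacks it and drops a .download-complete marker so later runs can skip it.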



show_developer_warning()
{
local temp_rc
temp_rc=$(mktemp)
cat <<-'EOF' > "${temp_rc}"
screen_color = (WHITE,RED,ON)
EOF
local warn_text="You are switching to the \Z1EXPERT MODE\Zn

This allows building experimental configurations that are provided
\Z1AS IS\Zn to developers and expert users,
\Z1WITHOUT ANY RESPONSIBILITIES\Zn from the Armbian team:

- You are using these configurations \Z1AT YOUR OWN RISK\Zn
- Bug reports related to the dev kernel, CSC, WIP and EOS boards
\Z1will be closed without a discussion\Zn
- Forum posts related to dev kernel, CSC, WIP and EOS boards
should be created in the \Z2\"Community forums\"\Zn section
"
DIALOGRC=$temp_rc dialog --title "Expert mode warning" --backtitle "${backtitle}" --colors --defaultno --no-label "I do not agree" \
--yes-label "I understand and agree" --yesno "$warn_text" "${TTY_Y}" "${TTY_X}"
[[ $? -ne 0 ]] && exit_with_error "Error switching to the expert mode"
SHOW_WARNING=no
}

# show_checklist_variables prints a formatted dump of the values of the
# variables named in its argument list, as seen at the call site.
#
# The LOG_OUTPUT_FILE variable must be defined in the calling function
# before calling the `show_checklist_variables` function and unset after.
#
show_checklist_variables ()
{
local checklist=$*
local var pval
local log_file=${LOG_OUTPUT_FILE:-"${SRC}"/output/${LOG_SUBPATH}/trash.log}
local _line=${BASH_LINENO[0]}
local _function=${FUNCNAME[1]}
local _file=$(basename "${BASH_SOURCE[1]}")

echo -e "Show variables in function: $_function" "[$_file:$_line]\n" >>$log_file

for var in $checklist; do
eval pval=\$$var
echo -e "\n$var =:" >>$log_file
if [ $(echo "$pval" | awk -F"/" '{print NF}') -ge 4 ]; then
printf "%s\n" $pval >>$log_file
else
printf "%-30s %-30s %-30s %-30s\n" $pval >>$log_file
fi
done
}
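
# Example usage (variable names illustrative), following the LOG_OUTPUT_FILE contract above:
#   LOG_OUTPUT_FILE="${DEST}/${LOG_SUBPATH}/debootstrap.log"
#   show_checklist_variables "RELEASE ARCH BOARD BRANCH"
#   unset LOG_OUTPUT_FILE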

get_orangepi_url()
{
if [[ ${GITEE_SERVER} == yes ]]; then
echo "https://gitee.com/orangepi-xunlong"
else
echo "https://github.com/orangepi-xunlong"
fi
}
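
# Example: url=$(get_orangepi_url) yields https://gitee.com/orangepi-xunlong when
# GITEE_SERVER=yes and https://github.com/orangepi-xunlong otherwise; install_wiringop
# below uses it as the base URL for fetch_from_repo.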

install_wiringop()
{
install_deb_chroot "$EXTER/cache/debs/${ARCH}/wiringpi-2.58-1.deb"
chroot "${SDCARD}" /bin/bash -c "apt-mark hold wiringpi" >> "${DEST}"/${LOG_SUBPATH}/install.log 2>&1

if [[ ${IGNORE_UPDATES} != yes ]]; then

local url=$(get_orangepi_url)
fetch_from_repo "${url}/wiringOP.git" "${EXTER}/cache/sources/wiringOP" "branch:next" "yes"
fetch_from_repo "${url}/wiringOP-Python.git" "${EXTER}/cache/sources/wiringOP-Python" "branch:next" "yes"

fi

cp ${EXTER}/cache/sources/wiringOP/next ${SDCARD}/usr/src/wiringOP -rfa
cp ${EXTER}/cache/sources/wiringOP-Python/next ${SDCARD}/usr/src/wiringOP-Python -rfa

rm $SDCARD/root/*.deb >/dev/null 2>&1
}


install_310b-npu-driver()
{
local driver_path="$EXTER/cache/sources/ascend-driver"
local driver_name="Ascend-hdk-310b-npu-driver_23.0.5_linux-aarch64-opiaimax.run"
local driver=${driver_path}/${driver_name}

if [[ -f "${driver}" ]]; then
display_alert "Installing" "$driver_name" "info"
cp "${driver}" "${SDCARD}/opt/"
chmod +x "${SDCARD}/opt/${driver_name}"
chroot "${SDCARD}" /bin/bash -c "/opt/${driver_name} --chroot --full --install-username=orangepi --install-usergroup=orangepi --install-for-all"
fi
}


install_docker() {

[[ $install_docker != yes ]] && return

display_alert "Installing" "docker" "info"
chroot "${SDCARD}" /bin/bash -c "apt-get install -y -qq apt-transport-https ca-certificates curl gnupg2 software-properties-common >/dev/null 2>&1"

case ${RELEASE} in
buster|bullseye|bookworm)
distributor_id="debian"
;;
xenial|bionic|focal|jammy|noble)
distributor_id="ubuntu"
;;
esac

#if [[ ${SELECTED_CONFIGURATION} == desktop ]]; then
# mirror_url=https://repo.huaweicloud.com
#else
mirror_url=https://mirrors.aliyun.com
#fi

chroot "${SDCARD}" /bin/bash -c "curl -fsSL ${mirror_url}/docker-ce/linux/${distributor_id}/gpg | apt-key add -"
echo "deb [arch=${ARCH}] ${mirror_url}/docker-ce/linux/${distributor_id} ${RELEASE} stable" > "${SDCARD}"/etc/apt/sources.list.d/docker.list

chroot "${SDCARD}" /bin/bash -c "apt-get update"
chroot "${SDCARD}" /bin/bash -c "apt-get install -y -qq docker-ce docker-ce-cli containerd.io"
chroot "${SDCARD}" /bin/bash -c "sudo groupadd docker"

run_on_sdcard "systemctl --no-reload disable docker.service"
}
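
# For reference, with RELEASE=jammy and ARCH=arm64 the docker.list entry written above is:
#   deb [arch=arm64] https://mirrors.aliyun.com/docker-ce/linux/ubuntu jammy stable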


#function run_after_build()
#{
# chown -R $(logname).$(logname) $BOOTSOURCEDIR
# chown -R $(logname).$(logname) $LINUXSOURCEDIR
# chown -R $(logname).$(logname) $USERPATCHES_PATH
# chown -R $(logname).$(logname) $DEST/{config,debs,debug,images,patch}
#
# if [[ $DEBUG_DEB == yes && $BUILD_OPT =~ u-boot|kernel ]]; then
#
# [[ -z $REMOTEIP ]] && exit_with_error "The remote IP address has not been set" ""
# [[ -z $PASS_ROOT ]] && PASS_ROOT="orangepi"
# [[ -z $MMC_DEV ]] && MMC_DEV="tfcard"
#
# #ssh-keygen -f "~/.ssh/known_hosts" -R ${REMOTEIP}
# local num=0
# while true;
# do
# ping ${REMOTEIP} -c 1 > /dev/null 2>&1
#
# if [[ $? == 0 ]]; then
# echo " "
# break
# fi
#
# if [[ $num == 0 ]]; then
# display_alert "${BOARD} network cannot be connected" "${REMOTEIP}" "wrn"
# ((num++))
# fi
#
# echo -e ".\c"
# done
# display_alert "${BOARD} network is connected" "${REMOTEIP}" "info"
#
# if [[ $BUILD_OPT == u-boot ]]; then
# sshpass -p ${PASS_ROOT} scp ${DEB_STORAGE}/u-boot/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb root@${REMOTEIP}:/root
# display_alert "Uninstall deb package" "linux-u-boot-${BOARD}-${BRANCH}" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "apt-get purge -y linux-u-boot-${BOARD}-${BRANCH}"
# display_alert "Install deb package" "${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dpkg -i /root/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb"
#
# if [[ $MMC_DEV == emmc ]]; then
# display_alert "Burn the U-Boot into EMMC" "" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dd bs=1k seek=8 if=/usr/lib/linux-u-boot-legacy-orangepi400_2.1.0_arm64/boot0_sdcard.fex of=/dev/mmcblk0"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dd bs=1k seek=16400 if=/usr/lib/linux-u-boot-legacy-orangepi400_2.1.0_arm64/boot_package.fex of=/dev/mmcblk0"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"
# else
# display_alert "Burn the U-Boot into TF card" "" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "nand-sata-install DEBUG_UBOOT"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "reboot"
# fi
# fi
#
# if [[ $BUILD_OPT == kernel ]]; then
# sshpass -p ${PASS_ROOT} scp ${DEB_STORAGE}/linux-image-${BRANCH}-${LINUXFAMILY}_${REVISION}_${ARCH}.deb root@${REMOTEIP}:/root
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "apt-get purge -y linux-image-${BRANCH}-${LINUXFAMILY}"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dpkg -i /root/linux-image-${BRANCH}-${LINUXFAMILY}_${REVISION}_${ARCH}.deb"
# if [[ $BRANCH == current && $BOARD =~ orangepizero2|orangepi400 ]]; then
# sshpass -p ${PASS_ROOT} scp ${LINUXSOURCEDIR}/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-*.dtb root@${REMOTEIP}:/boot/dtb/allwinner/
# fi
#
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "reboot"
# fi
# fi
#
# if [[ $DEBUG_DEB == yes && $BUILD_OPT == image ]]; then
# scp ${destimg}/*.img ${PC_NAME}@${PC_IP}:${PC_DIR}
# fi
#}