summaryrefslogtreecommitdiff
path: root/sh/upd
diff options
context:
space:
mode:
Diffstat (limited to 'sh/upd')
-rwxr-xr-xsh/upd284
1 files changed, 180 insertions, 104 deletions
diff --git a/sh/upd b/sh/upd
index d8defd0..133ba1c 100755
--- a/sh/upd
+++ b/sh/upd
@@ -12,39 +12,45 @@ all () {
if [ "$hostname" = xyzib ]; then
qb
fi
+ # After linux upgrade, if not reboot, plug in external HDD will not be
+ # detected, not sure about USB thumb drive tho. monthly_misc() needs to
+ # plug in USB thumb drive, so monthly_misc() needs to run before pac()
+ # which is in fast().
+ monthly_misc
fast
clean
- # don't run reflector if it is pp or aa or {it is studio and insp is in the same network}
- # somehow ping local hostname via ipv6 is very slow, so I use ipv4 to ping
- if ! { [ "$hostname" = xyzpp ] || [ "$hostname" = xyzaa ] || { [ "$hostname" = xyzstudio ] && ping -4qc1 xyzinsp > /dev/null;};}; then
+ # don't run reflector if it is pp or insp, because they all VPN to ba now
+ if ! { [ "$hostname" = xyzpp ] || [ "$hostname" = xyzinsp ];}; then
refl
fi
- monthly_misc
if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzpp ]; then
userjs
fi
}
backup () {
- case "$hostname" in
- xyzinsp) backup_branch=master;;
- *) backup_branch=${hostname#xyz};;
- esac
+ #case "$hostname" in
+ # xyzinsp) backup_branch=master;;
+ # *) backup_branch=${hostname#xyz};;
+ #esac
if [ "$hostname" = xyzinsp ]; then
- # Pull repos and branches from studio, so one more backup on insp or duplicity will backup to ib,for one more backup for 321 backup rule
- # cfgl master and pp branches already satisfy 321 rule. Here are three copies: 1. master/pp, 2. studio, 3. gitlab or alternative
- # But I still copy those on insp because one more copy will not hurt. And all my other data still satisfy 3 copies without backup to gitlab, so why left out cfgl? Also it may be nice if I what to use that local dir for searching things.
+ # Pull repos and branches to insp, so one more backup on insp or duplicity will backup to ib
+ # Also sync the local dir to work on it
cd "$HOME/programs/config_local_arch" || exit
git branch | awk '{print ($1=="*")?$2:$1}' | while read -r branch; do
git checkout "$branch"
git pull
- # Push any branches other than master/studio/pp to codeberg or any future alternative website for mirroring. Needed for public codes/data, considering my death may result in all copies on my computers being lost.
+ # Push any branches other than master/pp to codeberg or any future alternative website for mirroring. Needed for public codes/data, considering my death may result in all copies on my computers being lost.
case "$branch" in
- master|studio|pp) ;;
+ master|pp) ;;
*) git push;;
esac
done
+ # always go back to a specific branch, because future new branches may
+ # make the previous git checkout land on that new branch, causing rsync
+ # to have to sync that new branch instead
+ git checkout master
# git checkout will change mtime, need to change back so rsync won't think it needs to backup these files
# https://stackoverflow.com/q/1964470/9008720
# https://stackoverflow.com/q/21735435/9008720
@@ -54,37 +60,81 @@ backup () {
(
umask 077
- # backup studio and other branches' cfgs, note: it backup a branch by clone/checkout to that branch first
+ # need to pull VPS cfgs repos (ca, ib, ba, etc.) to /root/programs
+ # for insp to duplicity backup to ca and ib to satisfy 321 backup
+ # rule
for dir in $(sudo find /root/programs -maxdepth 1 -mindepth 1 -type d -name 'config_local_arch_secrets*'); do
sudo -E git -C "$dir" pull
done
)
- # rsync backup from studio to insp
- rsync -avPR --delete studio:/home/xyz/.config/qBittorrent :/home/xyz/.local/share/qBittorrent/BT_backup "$HOME/backup/studio"
-
- fi
-
- if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzstudio ]; then
- # rsync backup from ib
+ # rsync backup from ib to insp ~/backup/ib
+ # Note: don't use things like `:.config/qBittorrent` because it will sync
+ # to /home/xyz/backup/ib/.config, which is not what I want. I want
+ # /home/xyz/backup/ib/home/xyz/.config
rsync -avPR --delete xyz@ib.flylightning.xyz:/home/xyz/.config/qBittorrent :/home/xyz/.local/share/qBittorrent/BT_backup "$HOME/backup/ib"
- # In the future, maybe other VPS servers also need bakcup to insp and studio.
- fi
- if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzpp ]; then
- # rsync backup to studio
+ # rsync backup from pp to insp ~/backup/pp
+ rsync -vPu pp:.config/myconf/upd_rsync_files "$XDG_CONFIG_HOME/myconf/upd_rsync_files_pp"
# --files-from make -a not imply -r, so need to specify explicitly
- rsync -avPRr --delete --files-from="$XDG_CONFIG_HOME/myconf/upd_rsync_files" / "studio:/home/xyz/backup/$backup_branch"
- fi
+ rsync -avPRr --delete --files-from="$XDG_CONFIG_HOME/myconf/upd_rsync_files_pp" pp:/ "$HOME/backup/pp"
+
+ # backup ib to pp, not much storage needed and I don't want to use duplicity to backup to ca yet again
+ rsync -avP --delete "$HOME/backup/ib" pp:backup
- if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzstudio ] || [ "$hostname" = xyzpp ]; then
# duplicity backup to ib
# https://wiki.archlinux.org/title/Duplicity
# Need // for absolute path, see manpage URL FORMAT section. If not use //, will store to /home/xyz/home/xyz/...
# --files-from has a bug, this bug makes source url that is / not working while /home works, more see vq notes
- # --use-agent not working when ssh to pp and insp, works on insp, not sure why
# --use-agent maybe timeout on gpg key and failed when do full backup, maybe due to key stored in gpg agent timeout, so I'm not using --use-agent on insp now
- sudo duplicity --ssh-askpass --encrypt-key 9790577D2BE328D46838117ED3F54FE03F3C68D6 --sign-key 05899270DF25BB1EEDF57BE824F769E5D08C9E9A --full-if-older-than 2Y --include /etc/.cfgs --include-filelist "/home/xyz/.config/myconf/upd_rsync_files" --exclude / / "sftp://xyz@ib.flylightning.xyz//home/xyz/backup/$backup_branch"
+ #
+ # maybe due to upgrade duplicity from 3.0.5.1-1 to 3.0.6-1 at
+ # 2025-11-05T16:51:50+0800, without --use-agent will error gpg log "gpg:
+ # public key decryption failed: No passphrase given" and "gpg:
+ # decryption failed: No passphrase given", not sure why, maybe it is a
+ # bug. I decided to workaround it with --use-agent for now
+ sudo duplicity --ssh-askpass --use-agent --encrypt-key 9790577D2BE328D46838117ED3F54FE03F3C68D6 --sign-key 05899270DF25BB1EEDF57BE824F769E5D08C9E9A --full-if-older-than 2Y --include /etc/.cfgs --include /root/archive --include /root/programs --include-filelist "/home/xyz/.config/myconf/upd_rsync_files" --exclude / / "sftp://xyz@ib.flylightning.xyz//home/xyz/backup/master"
+
+ # duplicity backup to ca
+ sudo duplicity --ssh-askpass --use-agent --encrypt-key 9790577D2BE328D46838117ED3F54FE03F3C68D6 --sign-key 05899270DF25BB1EEDF57BE824F769E5D08C9E9A --full-if-older-than 2Y --include /etc/.cfgs --include /root/archive --include /root/programs --include-filelist "/home/xyz/.config/myconf/upd_rsync_files" --exclude / / "sftp://xyz@ca.flylightning.xyz//home/xyz/backup/master"
+ fi
+
+ if [ "$hostname" = xyzpp ]; then
+ # duplicity backup to ib
+ #
+ # --use-agent not working when ssh to pp and insp, works on insp, not
+ # sure why. The gpg error logs are "gpg: public key decryption failed:
+ # Inappropriate ioctl for device" and "gpg: decryption failed:
+ # Inappropriate ioctl for device". It seems related to I did not put
+ # `export GPG_TTY=$(tty)` in pp xyz user's .profile, putting that in
+ # .profile is suggested in `man gpg-agent`. After putting that, test
+ # with `echo | gpg -s >/dev/null` works. but `echo | sudo gpg -s >
+ # /dev/null` does not work even if I put `export GPG_TTY=$(tty)` in
+ # root user's .profile file. `sudo -i` then `echo | gpg -s >/dev/null`
+ # works for root if put `export GPG_TTY=$(tty)` in root .profile. With
+ # sudo, it needs `sudo --preserve-env=GPG_TTY`, test with `echo | sudo
+ # --preserve-env=GPG_TTY gpg -s >/dev/null` works even if I do not put
+ # `export GPG_TTY=$(tty)` in root .profile. using `sudo
+ # --preserve-env=GPG_TTY` with `duplicity --use-agent` for pp also
+ # works. However, using `sudo --preserve-env=GPG_TTY` may be not secure
+ # because I noticed that root user's `tty` output is different, but my
+ # root user and xyz user's gpg keys are the same, so whatever for now.
+ # There maybe a better way instead.
+ #
+ # Note that input password to `sudo --preserve-env=GPG_TTY duplicity
+ # --use-agent` with my mod-a dwm hotkey, which uses sth. like `pass
+ # master | ... | xdotool ...`, will have issues of timeout or it shows
+ # I input wrong password, I need to typing password manually instead,
+ # not sure why, maybe due to xdotool input too fast. It can be
+ # worked around by adding the gpg key to the agent before running duplicity,
+ # somehow doing this with xdotool does not have issues. This works
+ # because my root and xyz user use the same gpg key.
+ echo | sudo --preserve-env=GPG_TTY gpg -s >/dev/null
+ sudo --preserve-env=GPG_TTY duplicity --ssh-askpass --use-agent --encrypt-key 9790577D2BE328D46838117ED3F54FE03F3C68D6 --sign-key 05899270DF25BB1EEDF57BE824F769E5D08C9E9A --full-if-older-than 2Y --include /etc/.cfgs --include-filelist "/home/xyz/.config/myconf/upd_rsync_files" --exclude / / "sftp://xyz@ib.flylightning.xyz//home/xyz/backup/pp"
+ # pp no need duplicity backup to ca, because 321 backup rule is already
+ # satisfied: pp files in ~/.config/myconf/upd_rsync_files are backed up
+ # in pp, insp, and ib duplicity; pp /etc/.cfgs is backed up in pp, insp
+ # gitolite, and ib duplicity.
fi
}
@@ -118,15 +168,23 @@ clean () {
# https://wiki.archlinux.org/title/Pacman/Tips_and_tricks#Removing_unused_packages_(orphans)
pacman -Qdttq | sudo pacman -Rns --noconfirm -
+ # https://wiki.archlinux.org/title/Pacman/Tips_and_tricks#Detecting_more_unneeded_packages
+ # remove special orphan pkgs: dependency cycle, excessive dependencies etc.
+ pacman -Qqd | sudo pacman -Rsu --noconfirm -
}
# basic daily stuff
fast () {
- pac
- misc
- if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzstudio ] || [ "$hostname" = xyzpp ]; then
+ # After linux upgrade, if not reboot, plug in external HDD will not be
+ # detected. backup() needs to plug in external HDD, so backup() needs to run
+ # before pac(). Now, I do not use an external HDD for frequent backup any
+ # more, but I still would like to put it in this order to consider in the
+ # future I maybe use one again.
+ if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzpp ]; then
backup
fi
+ pac
+ misc
}
userjs () {
@@ -139,6 +197,8 @@ userjs () {
}
misc () {
+ nv=
+
"$EDITOR" +PlugClean! +PlugUpdate +qa
if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzpp ]; then
@@ -146,6 +206,16 @@ misc () {
fi
if [ "$hostname" = xyzinsp ]; then
+ # nvchecker pkgctl updated pkgbuilds. Need test, commit, push, and aurpublish.
+ # Test shows pkgctl version upgrade need 2>&1, else awk not work, not
+ # sure why, maybe related to pkgctl's pretty outputs.
+ # Test shows awk need $3 instead of $2, not sure why.
+ nv="$(pkgctl version upgrade "$HOME/programs/repos/aur/stardict-wikt-en-all" \
+ "$HOME/programs/repos/aur/stardict-wikt-en-en" 2>&1 \
+ | tee /dev/tty \
+ | awk -F'[: ]' '/upgraded/{printf(" %s",$3)}')"
+ log="${log}nvchecker:$nv
+"
sudo hardcode-fixer
ssh pp '[ -s "$HOME/.local/share/sdcv_history" ] && cat "$HOME/.local/share/sdcv_history" && rm "$HOME/.local/share/sdcv_history"' >> "$XDG_DATA_HOME/sdcv_history"
awk '!a[$0]++' "$XDG_DATA_HOME/sdcv_history" | sponge "$XDG_DATA_HOME/sdcv_history"
@@ -157,11 +227,18 @@ misc () {
git -C "$HOME/programs/reminders" push
pass git push
argospm update
+ rsync -vPu "$HOME/.abook/addressbook" pp:.abook/addressbook
+ rsync -avP --delete "$XDG_DOCUMENTS_DIR/ids" pp:documents
+ rsync -vPu "$HOME/programs/roff/myresume/resume_chinese.odt" \
+ "$HOME/programs/roff/myresume/resume_chinese.pdf" \
+ pp:programs/roff/myresume
+ rsync -avP --delete "$XDG_DOCUMENTS_DIR/books/novels" pp:documents/books
fi
if [ "$hostname" = xyzpp ]; then
git -C "$XDG_DOCUMENTS_DIR/notes" pull
git -C "$HOME/programs/reminders" pull
+ git -C "$HOME/programs/roff/myresume" pull
pass git pull
fi
}
@@ -171,8 +248,9 @@ pac () {
pacpacs="$(echo "$pacout" | grep -m1 '^Packages' | cut -d' ' -f3-)"
# Update rust toolchains before paru so paru can compile things in newest rust if needed.
[ "$hostname" = xyzinsp ] && rustup update
- aurout="$(paru --color never --noconfirm -aSu | tee /dev/tty)"
+ aurout="$(paru --color never -aSu | tee /dev/tty)"
aurpacs="$(echo "$aurout" | grep '^Aur' | cut -d' ' -f3-)"
+ [ "$hostname" = xyzpp ] && sudo flatpak update -y
# /usr/share/libalpm/hooks/rebuild-detector.hook has a line `NeedsTargets` shows it maybe checkrebuild only upgraded packages by `printf 'zoom\nminiconda3' | checkrebuild` instead of maybe check all by `checkrebuild`, so I think query pacman hook will be faster than run another `checkrebuild`
# notes about awk f=1 things see https://git.flylightning.xyz/public_archive_codes/tree/sh/mrt
# about `/^(\(|:|=)/ {f=0}`:
@@ -185,7 +263,7 @@ pac () {
/^\([0-9]+\/[0-9]+\) Checking which packages need to be rebuilt$/ {f=1; next}
/^(\(|:)|==>/ {f=0}
f {
- if($2!~"zoom|miniconda3")
+ if($2!~"miniconda3")
if(!a[$2]++)
printf("%s ",$2)
}')"
@@ -207,54 +285,13 @@ pac () {
systemctl list-unit-files --state=enabled > "$XDG_CONFIG_HOME/myconf/sye"
systemctl --user list-unit-files --state=enabled > "$XDG_CONFIG_HOME/myconf/syue"
- if echo "$aurpacs" | grep -q 'zoom-[0-9]'; then
- new_schemes="$(grep -m1 '^X-KDE-Protocols=' /usr/share/applications/Zoom.desktop)"
- old_schemes="$(grep -m1 '^X-KDE-Protocols=' "$HOME/programs/repos/aur/zoom-firejail/ZoomFirejail.desktop")"
-
- if [ "$new_schemes" != "$old_schemes" ]; then
- zoom_firejail_log="zoom-firejail updated MIME type, test then aurpublish"
- # aur/zoom-firejail author snowball did not include
- # application/x-zoom in MimeType, not sure why, I decided to follow
- # what he did anyway
- new_mime="$(awk -F';' '
- /^MimeType=/ {
- for(i=1;i<=NF;i++)
- if($i != "application/x-zoom" && $i != "")
- printf("%s;",$i)
- exit
- }' /usr/share/applications/Zoom.desktop)"
- schemes="$(echo "$new_schemes" | awk -F'[=;]' '
- {
- for(i=2;i<=NF;i++)
- if($i != "")
- printf(" %s",$i)
- }
- ')"
- old_pkgrel="$(awk -F= '/^pkgrel=/{printf("%d",$2);exit}' "$HOME/programs/repos/aur/zoom-firejail/PKGBUILD")"
-
- sed -i -e "s/^X-KDE-Protocols=.*/$new_schemes/" \
- -e "s#^MimeType=.*#$new_mime#" \
- "$HOME/programs/repos/aur/zoom-firejail/ZoomFirejail.desktop"
- # must checksum after write new desktop entry because PKGBUILD need new checksum
- desktop_sum="$(sha512sum "$HOME/programs/repos/aur/zoom-firejail/ZoomFirejail.desktop" | awk '{printf("%s",$1)}')"
- sed -i -e "s/^\( for scheme in\).*\(; do\)$/\1${schemes}\2/" \
- -e "s/^\( if \[ \$(vercmp \$2 [0-9]\+\.[0-9]\+\.[0-9]\+-\)[0-9]\+\() -le 0 \]; then\)$/\1${old_pkgrel}\2/" \
- "$HOME/programs/repos/aur/zoom-firejail/zoom-firejail.install"
- sed -i -e "s/^pkgrel=.*/pkgrel=$((old_pkgrel+1))/" \
- -e "s/^sha512sums=.*/sha512sums=('$desktop_sum'/" \
- "$HOME/programs/repos/aur/zoom-firejail/PKGBUILD"
- fi
- fi
-
# pacdiff default use pacman database, so no need `sudo -E` for find, but will be a little bit slower
# [^-] consider util-linux; \(^\|[^-]\) consider linux is the first package, ex: pacout is only 'linux-6.6.6'
- log="$log
-updated pacman packages: $pacpacs
+ log="${log}updated pacman packages: $pacpacs
updated aur packages: $aurpacs
pacdiff: $(pacdiff -o | tr '\n' ' ')
checkrebuild: $checkrebuild_pacs
$(if echo "$pacpacs" | grep -q '\(^\|[^-]\)linux-\(megi-\)\?[0-9]'; then echo 'kernel upgraded, need reboot'; fi)
-$zoom_firejail_log
"
}
@@ -274,50 +311,81 @@ refl () {
# need --delay so no super out of sync mirrors
case "$hostname" in
xyzib) sudo reflector --verbose --save /etc/pacman.d/mirrorlist --country ro --protocol https --delay 1 --fastest 3;;
- # aa now default wireguard to ca, so I choose to copy ca mirrorlist to aa instead
- # old cn mirrorlists code, only 2 cn mirror servers satisfied `--delay 1`, https://mirrors.jlu.edu.cn/archlinux/$repo/os/$arch is better than https://mirrors.neusoft.edu.cn/archlinux/$repo/os/$arch , the latter had issues
- #xyzaa) sudo reflector --verbose --save /etc/pacman.d/mirrorlist --country cn --protocol https --delay 1 --fastest 2;;
*) sudo reflector --verbose --save /etc/pacman.d/mirrorlist --country us --protocol https --delay 1 --latest 25 --score 25 --fastest 10;;
esac
}
music () {
yt-dlp -f 'bestaudio[ext=opus]/bestaudio' --match-filter 'license=cc-by' --match-filter 'title~=(?i)cc-by' -P "$XDG_MUSIC_DIR/cc-by/scott_buckley" https://soundcloud.com/scottbuckley/tracks
- rsync -avP --delete "$XDG_MUSIC_DIR/cc-by/scott_buckley" pp:/home/xyz/music/cc-by
- rsync -avP --delete "$XDG_MUSIC_DIR/favorite" pp:/home/xyz/music
+ rsync -avP --delete "$XDG_MUSIC_DIR/cc-by/scott_buckley" pp:music/cc-by
+ rsync -avP --delete "$XDG_MUSIC_DIR/gpl-3.0-only" pp:music
+ rsync -avP --delete "$XDG_MUSIC_DIR/favorite" pp:music
}
monthly_misc () {
if [ "$hostname" = xyzinsp ]; then
- if scp ib:/etc/pacman.d/mirrorlist aawg:/tmp; then
- echo 'Wait for aa sudo prompt and enter password:'
- ssh -t aa 'sudo mv /tmp/mirrorlist /etc/pacman.d/mirrorlist && sudo chown root:root /etc/pacman.d/mirrorlist'
- fi
- # if insp can ping studio, which means insp and studio is in the same network
- if ping -4qc1 xyzstudio > /dev/null && scp /etc/pacman.d/mirrorlist studio:/tmp; then
- echo 'Wait for studio sudo prompt and enter password:'
- ssh -t studio 'sudo mv /tmp/mirrorlist /etc/pacman.d/mirrorlist && sudo chown root:root /etc/pacman.d/mirrorlist'
- fi
+ sudo scp xyz@ba.flylightning.xyz:/etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist
+
+ # Monthly backup gpg key, ssh key, ~/.password-store, and notes for
+ # backup disaster recovery in case both my insp laptop and pp phone are
+ # lost. I prefer monthly backup those instead of backup those more
+ # frequently, because plug in and unplug drives more frequently is
+ # tedious and things backed up in bd2 does not need to be very recent.
+ while ! [ -d /run/media/xyz/bd2 ]; do
+ alarm 0 'Plug in bd2 usb flash drive'
+ echo 'Plug in bd2 usb flash drive'
+ sleep 10
+ done
+ git -C /run/media/xyz/bd2/backup/notes pull
+ git -C /run/media/xyz/bd2/backup/.password-store pull
+ # consider password change for the keys and I forget to backup the keys
+ # encrypted with new password
+ #
+ # these two files are not directories so I think --delete is unnecessary
+ rsync -avP "$XDG_DOCUMENTS_DIR/paperwork/sec_pgp_key.asc" "$HOME/.ssh/id_rsa" /run/media/xyz/bd2/backup
+ # bd2 is the label name of the unlocked ext4 fs, it was configured with
+ # sth. like `sudo e2label /dev/mapper/luks-... bd2`. ebd2 is the label
+ # name of the locked crypto_LUKS fs, it was configured with `sudo
+ # cryptsetup config /dev/sdx --label ebd2`.
+ #
+ # Another way to represent disk is using uuid. ebd2 locked crypto_LUKS
+ # fs uuid is 0c2fceb2-9743-4872-99f7-8d87d53c9270, so I can use
+ # /dev/disk/by-uuid/0c2fceb2-9743-4872-99f7-8d87d53c9270 to represent
+ # it. bd2 unlocked ext4 fs uuid is
+ # f7da70df-94a1-478d-98a2-86af8be54c56, so I can use
+ # /dev/disk/by-uuid/f7da70df-94a1-478d-98a2-86af8be54c56 for it, I can
+ # also use /dev/mapper/luks-0c2fceb2-9743-4872-99f7-8d87d53c9270 for
+ # bd2, note here 0c2fceb2-9743-4872-99f7-8d87d53c9270 is the ebd2
+ # locked crypto_LUKS fs uuid. However, udisksctl does not recognize
+ # /dev/mapper/luks-..., but recognize /dev/disk/...; cryptsetup close
+ # and umount recognize both. More ways see dirs in /dev/disk, and see
+ # output of press tab after `udisksctl info -b`.
+ #
+ # Note `udisksctl lock` need ebd2 locked crypto_LUKS fs, but `sudo
+ # cryptsetup close` need bd2 unlocked ext4 fs.
+ umount /run/media/xyz/bd2
+ udisksctl lock -b /dev/disk/by-label/ebd2
+ udisksctl power-off -b /dev/disk/by-label/ebd2
+ alarm 0 'Unplug bd2 usb flash drive'
# https://github.com/qbittorrent/qBittorrent/wiki#webui-api
curl -sS -X POST 'ibwg:57151/api/v2/search/updatePlugins' &
- for qbt_wireguard_host in stwg ibwg; do
- # another way: `jq -r '.[]|"\(.added_on)\t\(.hash)\t\(.name)"'`
- curl -sS "$qbt_wireguard_host:57151/api/v2/torrents/info?category=useful" | jq -r '.[]|[.added_on,.hash,.name]|@tsv' | grep 'archlinux.*\.iso' | sort -n | head -n-1 | awk '{print $2}' | while read -r hash; do
- # need POST to delete torrents. -d default POST, so no need `-X POST`
- curl -sS -d "hashes=$hash" -d deleteFiles=true "$qbt_wireguard_host:57151/api/v2/torrents/delete"
- done &
- done
+ # in the past, I have both stwg and ibwg, now only one
+ # another way: `jq -r '.[]|"\(.added_on)\t\(.hash)\t\(.name)"'`
+ curl -sS 'ibwg:57151/api/v2/torrents/info?category=useful' | jq -r '.[]|[.added_on,.hash,.name]|@tsv' | grep 'archlinux.*\.iso' | sort -n | head -n-1 | awk '{print $2}' | while read -r hash; do
+ # need POST to delete torrents. -d default POST, so no need `-X POST`
+ curl -sS -d "hashes=$hash" -d deleteFiles=true 'ibwg:57151/api/v2/torrents/delete'
+ done &
wait
while ! [ -d /run/media/xyz/Ventoy ]; do
- alarm 0 'Plug in usb flash drive'
- echo 'Plug in usb flash drive'
+ alarm 0 'Plug in ventoy usb flash drive'
+ echo 'Plug in ventoy usb flash drive'
sleep 10
done
#[ -d /run/media/xyz/Ventoy ] || die "No usb flash drive"
- rsync -vPu studio:~/downloads/torrents/useful/archlinux*.iso /run/media/xyz/Ventoy/archlinux-x86_64.iso
+ rsync -vPu ib:downloads/torrents/useful/archlinux*.iso /run/media/xyz/Ventoy/archlinux-x86_64.iso
# only check checksum and gpg signature on insp is sufficient for me, else too much work
if curl -sS -o /run/media/xyz/Ventoy/archlinux-x86_64.iso.sig https://mirror.fcix.net/archlinux/iso/latest/archlinux-x86_64.iso.sig; then
gpg --verify /run/media/xyz/Ventoy/archlinux-x86_64.iso.sig || die 'Arch iso gpg signature check failed'
@@ -331,11 +399,19 @@ monthly_misc () {
# need to be after `wait`, because checksum need to be at ventoy dir
cd || exit
- disk="$(df /run/media/xyz/Ventoy/ | awk 'END{sub(/[[:digit:]]+$/,"",$1);print $1}')"
+ # With busybox df, if the filesystem name is too long, it will put the
+ # filesystem name in a separate line which normally is in the second line and
+ # there's total three lines. So I use df -P option which seems busybox
+ # will also not put filesystem name in a separate line and behave same
+ # as gnu df.
+ disk="$(df -P /run/media/xyz/Ventoy/ | awk 'END{sub(/[[:digit:]]+$/,"",$1);print $1}')"
+ # another way:
+ #disk="$(realpath /dev/disk/by-label/Ventoy | awk 'END{sub(/[[:digit:]]+$/,"",$0);print}')"
sudo ventoy -l "$disk" | awk '/Ventoy:/{a=$2} /Ventoy Version in Disk:/{b=$NF;exit} END{exit((a==b)?1:0)}' && echo y | sudo ventoy -u "$disk"
umount /run/media/xyz/Ventoy /run/media/xyz/FAT32DIR
+ udisksctl power-off -b /dev/disk/by-label/Ventoy
- alarm 0 'Unplug usb flash drive'
+ alarm 0 'Unplug ventoy usb flash drive'
fi
}
@@ -361,7 +437,7 @@ else
esac
done
fi
-[ "$log" ] && printf '%s' "$log" | tee "$XDG_DOCUMENTS_DIR/logs/upd.log"
+[ "$log" ] && printf '\n%s' "$log" | tee "$XDG_DOCUMENTS_DIR/logs/upd.log"
[ "$hostname" = xyzpp ] && gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-type suspend
# `[ "$hostname" = xyzpp ] && ...` if check failed will have exit status of 1, unlike check with `if`
# I decided to always `exit 0` if reached end, so commands like `upd -p && ...` can keep running