1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
|
#!/bin/sh
die () {
	# Print an error message to stderr and exit the script with status 1.
	# Use printf instead of echo: under /bin/sh (e.g. dash), echo's
	# treatment of backslash escapes is implementation-defined, so a
	# message containing backslashes could be mangled.
	printf '%s\n' "error: $*" >&2
	exit 1
}
all () {
	# Run every maintenance task, ordered so cross-host dependencies are
	# satisfied (see the notes on each step).
	# pp may finish `upd -a` first and begin suspending, which would make
	# the music() rsync from insp to pp fail; run music() first so the
	# transfer completes before pp suspends. Not perfect, but good enough.
	case "$hostname" in
		xyzinsp) music;;
	esac
	# monthly_misc() uses the qbittorrent-nox APIs, so qbt must be
	# restarted before it runs; hence qb() goes near the top. Not perfect
	# either, but good enough.
	case "$hostname" in
		xyzib) qb;;
	esac
	fast
	clean
	# skip reflector on pp and insp: both route through the VPN to ba now
	case "$hostname" in
		xyzpp|xyzinsp) ;;
		*) refl;;
	esac
	monthly_misc
	case "$hostname" in
		xyzinsp|xyzpp) userjs;;
	esac
}
backup () {
# Back up code and data. On insp: sync every config_local_arch branch,
# mirror ib and pp into ~/backup, copy everything onto the external LUKS
# HDD, then run an encrypted duplicity backup to ib. On pp: duplicity
# backup to ib only.
#case "$hostname" in
# xyzinsp) backup_branch=master;;
# *) backup_branch=${hostname#xyz};;
#esac
if [ "$hostname" = xyzinsp ]; then
# Pull repos and branches to insp, so one more backup on insp or duplicity will backup to ib
# Also sync the local dir to work on it
cd "$HOME/programs/config_local_arch" || exit
# iterate over every local branch; the awk strips the "*" marker that
# `git branch` puts in front of the currently checked-out branch
git branch | awk '{print ($1=="*")?$2:$1}' | while read -r branch; do
git checkout "$branch"
git pull
# Push any branches other than master/pp to codeberg or any future alternative website for mirroring. Needed for public codes/data, considering my death may result in all copies on my computers being lost.
case "$branch" in
master|pp) ;;
*) git push;;
esac
done
# always go back to a specific branch, because future new branches may
# make the previous git checkout land on that new branch and cause rsync
# to sync that new branch instead
git checkout master
# git checkout will change mtime, need to change back so rsync won't think it needs to backup these files
# https://stackoverflow.com/q/1964470/9008720
# https://stackoverflow.com/q/21735435/9008720
# https://github.com/MestreLion/git-tools
# aur/git-tools-git
git restore-mtime
(
# subshell so the restrictive umask only applies to the root-repo pulls
umask 077
# need to pull VPS cfgs repos (ca, ib, ba, etc.) to /root/programs for insp to rsync to external HDD to satisfy 321 backup rule
for dir in $(sudo find /root/programs -maxdepth 1 -mindepth 1 -type d -name 'config_local_arch_secrets*'); do
sudo -E git -C "$dir" pull
done
)
# rsync backup from ib to insp ~/backup/ib
# NOTE(review): the bare ":/..." second source reuses the remote host of
# the preceding source argument (rsync multi-source shorthand); -R
# preserves the full source path under the destination — confirm against
# rsync(1) before changing.
rsync -avPR --delete xyz@ib.flylightning.xyz:/home/xyz/.config/qBittorrent :/home/xyz/.local/share/qBittorrent/BT_backup "$HOME/backup/ib"
# rsync backup from pp to insp ~/backup/pp
# first fetch pp's own file list, then use it to pull those paths from pp
rsync -vPu pp:.config/myconf/upd_rsync_files "$XDG_CONFIG_HOME/myconf/upd_rsync_files_pp"
# --files-from make -a not imply -r, so need to specify explicitly
rsync -avPRr --delete --files-from="$XDG_CONFIG_HOME/myconf/upd_rsync_files_pp" pp:/ "$HOME/backup/pp"
# rsync backup to external drive; poll until the HDD is mounted
while ! [ -d /run/media/xyz/bd0 ]; do
alarm 0 'Plug in external HDD'
echo 'Plug in external HDD'
sleep 10
done
# --files-from make -a not imply -r, so need to specify explicitly
# TODO: first time mkdir, chown, chmod proper dirs in /run/media/xyz/bd0, see vq notes
# backup insp
rsync -avPRr --delete --files-from="$XDG_CONFIG_HOME/myconf/upd_rsync_files" / "/run/media/xyz/bd0/master"
# need to backup VPS cfgs repos (ca, ib, ba, etc.) in /root/programs to external HDD to satisfy 321 backup rule
# need to backup insp cfgs repo to external HDD to satisfy 321 backup rule
sudo rsync -avPR --delete /etc/.cfgs /root/archive /root/programs "/run/media/xyz/bd0/master_root"
# backup ib
rsync -avP --delete "$HOME/backup/ib/" "/run/media/xyz/bd0/ib"
# backup pp
rsync -avP --delete "$HOME/backup/pp/" "/run/media/xyz/bd0/pp"
# bd0 is the label name of the unlocked ext4 fs, maybe was configured
# with sth. like `sudo e2label /dev/mapper/luks-... bd0`. ebd0 is the
# label name of the locked crypto_LUKS fs, maybe was configured with
# `sudo cryptsetup config /dev/sdx --label ebd0`.
#
# Another way to represent disk is using uuid. ebd0 locked crypto_LUKS
# fs uuid is cf91dfaa-35dc-4ce4-b333-df15966f34a2, so I can use
# /dev/disk/by-uuid/cf91dfaa-35dc-4ce4-b333-df15966f34a2 to represent
# it. bd0 unlocked ext4 fs uuid is
# 93670fb4-af1d-43aa-a8cc-b7ac5c05e766, so I can use
# /dev/disk/by-uuid/93670fb4-af1d-43aa-a8cc-b7ac5c05e766 for it, I can
# also use /dev/mapper/luks-cf91dfaa-35dc-4ce4-b333-df15966f34a2 for
# bd0, note here cf91dfaa-35dc-4ce4-b333-df15966f34a2 is the ebd0
# locked crypto_LUKS fs uuid. However, udisksctl does not recognize
# /dev/mapper/luks-..., but recognize /dev/disk/...; cryptsetup close
# and umount recognize both. More ways see dirs in /dev/disk, and see
# output of press tab after `udisksctl info -b`.
#
# Note `udisksctl lock` need ebd0 locked crypto_LUKS fs, but `sudo
# cryptsetup close` need bd0 unlocked ext4 fs.
umount /run/media/xyz/bd0
udisksctl lock -b /dev/disk/by-label/ebd0
udisksctl power-off -b /dev/disk/by-label/ebd0
alarm 0 'Unplug external HDD'
# duplicity backup to ib
# note no need to duplicity backup /root/programs to ib for now, because /root/programs now only has cfgs repos which already satisfied 321 backup rule, e.g, insp cfgs is backed up with: insp, duplicity backed up with /etc/.cfgs, rsync backup to external HDD; ib cfgs is backed up with: ib, insp gitolite, insp rsync backup /root/programs to external HDD
# https://wiki.archlinux.org/title/Duplicity
# Need // for absolute path, see manpage URL FORMAT section. If not use //, will store to /home/xyz/home/xyz/...
# --files-from has a bug, this bug makes source url that is / not working while /home works, more see vq notes
# --use-agent not working when ssh to pp and insp, works on insp, not sure why
# --use-agent maybe timeout on gpg key and failed when do full backup, maybe due to key stored in gpg agent timeout, so I'm not using --use-agent on insp now
sudo duplicity --ssh-askpass --encrypt-key 9790577D2BE328D46838117ED3F54FE03F3C68D6 --sign-key 05899270DF25BB1EEDF57BE824F769E5D08C9E9A --full-if-older-than 2Y --include /etc/.cfgs --include /root/archive --include-filelist "/home/xyz/.config/myconf/upd_rsync_files" --exclude / / "sftp://xyz@ib.flylightning.xyz//home/xyz/backup/master"
fi
if [ "$hostname" = xyzpp ]; then
# duplicity backup to ib
sudo duplicity --ssh-askpass --encrypt-key 9790577D2BE328D46838117ED3F54FE03F3C68D6 --sign-key 05899270DF25BB1EEDF57BE824F769E5D08C9E9A --full-if-older-than 2Y --include /etc/.cfgs --include-filelist "/home/xyz/.config/myconf/upd_rsync_files" --exclude / / "sftp://xyz@ib.flylightning.xyz//home/xyz/backup/pp"
fi
}
clean () {
# Housekeeping: prune the nsxiv thumbnail cache, rotate firefox
# prefs.js/user.js backups, reset the at(1) job counter, drop temporary
# recordings, clean the AUR build cache, and remove orphaned packages.
if [ "$hostname" = xyzinsp ]; then
# nsxiv -c removes stale cache files; the finds below remove the
# now-empty directories left behind
nsxiv -c
# my ways
# -exec can't replace -execdir here
find "$XDG_CACHE_HOME/nsxiv/" -depth -type d -empty -execdir rmdir -- '{}' \+
# -exec can replace -execdir here
#find "$XDG_CACHE_HOME/nsxiv/" -depth -type d -execdir rmdir --ignore-fail-on-non-empty -- '{}' \+
# nsxiv man page way
#find "$XDG_CACHE_HOME/nsxiv/" -depth -type d -empty ! -name '.' -exec rmdir -- '{}' \;
fi
if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzpp ]; then
# keep only the 10 newest files in each backup dir (tail -n +11 skips
# the first 10 of the newest-first listing); parsing ls is acceptable
# here because these generated backup names contain no newlines
cd "$HOME/.mozilla/firefox/xxxxxxxx.fly/prefsjs_backups" || exit
# https://stackoverflow.com/a/34862475/9008720
ls -t | tail -n +11 | tr '\n' '\0' | xargs -0 rm --
cd "$HOME/.mozilla/firefox/xxxxxxxx.fly/userjs_backups" || exit
ls -t | tail -n +11 | tr '\n' '\0' | xargs -0 rm --
# reset the at(1) queue sequence number while atd is stopped
# https://unix.stackexchange.com/questions/92095/reset-atq-list-to-zero
sudo systemctl stop atd
echo 0 | sudo tee /var/spool/atd/.SEQ > /dev/null
sudo systemctl start atd
rm -rf "$XDG_VIDEOS_DIR/recordings/tmp/"
fi
# clean the paru/AUR build cache
paru -aSc --noconfirm
# https://wiki.archlinux.org/title/Pacman/Tips_and_tricks#Removing_unused_packages_(orphans)
# NOTE(review): when there are no orphans -Qdttq prints nothing and the
# -Rns side complains about empty stdin — harmless but noisy; confirm
# whether that is intended best-effort behavior.
pacman -Qdttq | sudo pacman -Rns --noconfirm -
}
# basic daily stuff
fast () {
	# Daily essentials: package upgrades, misc sync chores, and (on the
	# machines that hold the data) backups.
	pac
	misc
	case "$hostname" in
		xyzinsp|xyzpp) backup;;
	esac
}
userjs () {
	# Update the arkenfox user.js of the firefox profile. Firefox must not
	# be running while its prefs are rewritten, so kill it first.
	# pidof may print several pids, so $pids is intentionally unquoted
	# (word splitting wanted). Guarding on pidof's exit status also fixes
	# the original `kill $(pidof ...)`, which invoked kill with no
	# operands (printing a usage error) when the browser was not running.
	# shellcheck disable=SC2086
	pids="$(pidof "$BROWSER")" && kill $pids
	# change working dir for cleaner
	cd "$HOME/.mozilla/firefox/xxxxxxxx.fly" || exit
	arkenfox-cleaner -s
	# when multiple firefox profiles, it will prompt me to choose, which breaks automation, so I explicitly specify one profile
	arkenfox-updater -s -p "$HOME/.mozilla/firefox/xxxxxxxx.fly"
}
misc () {
# Miscellaneous daily sync: update editor plugins and tldr pages, merge
# pp's dictionary history into insp's, push/pull the notes, reminders and
# pass repos so insp publishes and pp consumes.
"$EDITOR" +PlugClean! +PlugUpdate +qa
if [ "$hostname" = xyzinsp ] || [ "$hostname" = xyzpp ]; then
tldr --update
fi
if [ "$hostname" = xyzinsp ]; then
sudo hardcode-fixer
# drain pp's sdcv lookup history into the local history file; the remote
# file is only cat+removed when it is non-empty
ssh pp '[ -s "$HOME/.local/share/sdcv_history" ] && cat "$HOME/.local/share/sdcv_history" && rm "$HOME/.local/share/sdcv_history"' >> "$XDG_DATA_HOME/sdcv_history"
# deduplicate while preserving first-seen order; sponge lets the file be
# rewritten in place
awk '!a[$0]++' "$XDG_DATA_HOME/sdcv_history" | sponge "$XDG_DATA_HOME/sdcv_history"
# temporary solution before finding a way of using git submodule or subtree with `cfg -l`
git -C "$HOME/.mozilla/firefox/xxxxxxxx.fly/chrome/firefox-csshacks" pull
# insp is the writer: commit and push notes/reminders/pass
git -C "$XDG_DOCUMENTS_DIR/notes" commit --all -m 'update'
git -C "$XDG_DOCUMENTS_DIR/notes" push
git -C "$HOME/programs/reminders" commit --all -m 'update'
git -C "$HOME/programs/reminders" push
pass git push
argospm update
fi
if [ "$hostname" = xyzpp ]; then
# pp is the reader: pull what insp pushed above
git -C "$XDG_DOCUMENTS_DIR/notes" pull
git -C "$HOME/programs/reminders" pull
pass git pull
fi
}
pac () {
# Upgrade repo and AUR packages, collect which packages checkrebuild
# flags for rebuilding, export package/service lists, regenerate the
# zoom-firejail AUR package when zoom's desktop entry changes, and append
# a summary to $log (printed/saved by the caller at script end).
# tee /dev/tty streams pacman's output to the terminal while it is also
# captured for parsing below
pacout="$(sudo pacman --noconfirm -Syu | tee /dev/tty)"
# "Packages (N) a b c ..." -> "a b c ..."
pacpacs="$(echo "$pacout" | grep -m1 '^Packages' | cut -d' ' -f3-)"
# Update rust toolchains before paru so paru can compile things in newest rust if needed.
[ "$hostname" = xyzinsp ] && rustup update
aurout="$(paru --color never --noconfirm -aSu | tee /dev/tty)"
aurpacs="$(echo "$aurout" | grep '^Aur' | cut -d' ' -f3-)"
# /usr/share/libalpm/hooks/rebuild-detector.hook has a line `NeedsTargets` shows it maybe checkrebuild only upgraded packages by `printf 'zoom\nminiconda3' | checkrebuild` instead of maybe check all by `checkrebuild`, so I think query pacman hook will be faster than run another `checkrebuild`
# notes about awk f=1 things see https://git.flylightning.xyz/public_archive_codes/tree/sh/mrt
# about `/^(\(|:|=)/ {f=0}`:
# - consider $aurout start with `^:: Looking for devel upgrades...` , rebuild-detector.hook maybe the last hook to run for $pacout
# - consider ^(4/5), the hook is not the last
# - consider paru `==> Making package: ...`, the hook maybe followed by this. Note: paru somehow still gives color output even if I use --color never, so I can't check with ^=, so I choose to check with ==>
# awk use `if(!a[$2]++)` to check if package name is repeated in multiple checkrebuild pacman hook run, happened when upgrade python cause all python packages need to be rebuilt
# TODO: Some packages maybe are rebuilt later on when paru upgrade packages, but those will still got shown in upd log. Try consider this situation. e.g., when pacman upgrade packages, checkrebuild hook output a b c d packages, then paru upgrade d, now checkrebuild hook output a b c, the final upd log will have a b c d all packages instead of a b c
# f is a flag: set after the "Checking which packages need to be
# rebuilt" hook header, cleared at the next hook/paru header line;
# zoom/miniconda3 are known false positives and are skipped
checkrebuild_pacs="$(echo "$pacout$aurout" | awk '
/^\([0-9]+\/[0-9]+\) Checking which packages need to be rebuilt$/ {f=1; next}
/^(\(|:)|==>/ {f=0}
f {
if($2!~"zoom|miniconda3")
if(!a[$2]++)
printf("%s ",$2)
}')"
# part steal from aur comment
# sometimes "ERROR: Failure while downloading": https://github.com/neovim/neovim/issues/15709
# echo 1, printf 1 and yes 1 all works? not sure why
# aur neovim-nightly-bin has some issue on 12/26/2021? switch to community repo neovim temporary
#rm -rf ~/.cache/paru/clone/neovim-nightly-bin/ && echo 1 | PARU_PAGER=cat paru --rebuild --redownload neovim-nightly-bin
if [ "$hostname" = xyzinsp ]; then
# the gtk2-style plugins must be rebuilt whenever their qt base updates
case "$pacpacs" in
*qt5-base*) echo 1 | PARU_PAGER=cat paru --rebuild qt5-styleplugins;;
esac
case "$pacpacs" in
*qt6-base*) echo 1 | PARU_PAGER=cat paru --rebuild qt6gtk2;;
esac
fi
# snapshot explicitly-installed package lists (foreign/native) and
# enabled systemd units so they get picked up by the backups
pacman -Qqme > "$XDG_CONFIG_HOME/myconf/pacman_Qqme"
pacman -Qqne > "$XDG_CONFIG_HOME/myconf/pacman_Qqne"
systemctl list-unit-files --state=enabled > "$XDG_CONFIG_HOME/myconf/sye"
systemctl --user list-unit-files --state=enabled > "$XDG_CONFIG_HOME/myconf/syue"
# when zoom itself updated, diff its desktop entry against the local
# zoom-firejail AUR checkout and regenerate the package files if the
# protocol list changed
if echo "$aurpacs" | grep -q 'zoom-[0-9]'; then
new_schemes="$(grep -m1 '^X-KDE-Protocols=' /usr/share/applications/Zoom.desktop)"
old_schemes="$(grep -m1 '^X-KDE-Protocols=' "$HOME/programs/repos/aur/zoom-firejail/ZoomFirejail.desktop")"
if [ "$new_schemes" != "$old_schemes" ]; then
zoom_firejail_log="zoom-firejail updated MIME type, test then aurpublish"
# aur/zoom-firejail author snowball did not include
# application/x-zoom in MimeType, not sure why, I decided to follow
# what he did anyway
new_mime="$(awk -F';' '
/^MimeType=/ {
for(i=1;i<=NF;i++)
if($i != "application/x-zoom" && $i != "")
printf("%s;",$i)
exit
}' /usr/share/applications/Zoom.desktop)"
# turn "X-KDE-Protocols=a;b;c" into " a b c" for the .install loop
schemes="$(echo "$new_schemes" | awk -F'[=;]' '
{
for(i=2;i<=NF;i++)
if($i != "")
printf(" %s",$i)
}
')"
old_pkgrel="$(awk -F= '/^pkgrel=/{printf("%d",$2);exit}' "$HOME/programs/repos/aur/zoom-firejail/PKGBUILD")"
sed -i -e "s/^X-KDE-Protocols=.*/$new_schemes/" \
-e "s#^MimeType=.*#$new_mime#" \
"$HOME/programs/repos/aur/zoom-firejail/ZoomFirejail.desktop"
# must checksum after write new desktop entry because PKGBUILD need new checksum
desktop_sum="$(sha512sum "$HOME/programs/repos/aur/zoom-firejail/ZoomFirejail.desktop" | awk '{printf("%s",$1)}')"
sed -i -e "s/^\( for scheme in\).*\(; do\)$/\1${schemes}\2/" \
-e "s/^\( if \[ \$(vercmp \$2 [0-9]\+\.[0-9]\+\.[0-9]\+-\)[0-9]\+\() -le 0 \]; then\)$/\1${old_pkgrel}\2/" \
"$HOME/programs/repos/aur/zoom-firejail/zoom-firejail.install"
sed -i -e "s/^pkgrel=.*/pkgrel=$((old_pkgrel+1))/" \
-e "s/^sha512sums=.*/sha512sums=('$desktop_sum'/" \
"$HOME/programs/repos/aur/zoom-firejail/PKGBUILD"
fi
fi
# pacdiff default use pacman database, so no need `sudo -E` for find, but will be a little bit slower
# [^-] consider util-linux; \(^\|[^-]\) consider linux is the first package, ex: pacout is only 'linux-6.6.6'
log="$log
updated pacman packages: $pacpacs
updated aur packages: $aurpacs
pacdiff: $(pacdiff -o | tr '\n' ' ')
checkrebuild: $checkrebuild_pacs
$(if echo "$pacpacs" | grep -q '\(^\|[^-]\)linux-\(megi-\)\?[0-9]'; then echo 'kernel upgraded, need reboot'; fi)
$zoom_firejail_log
"
}
qb () {
	# Refresh the qbittorrent-nox search plugins: stop the daemon, delete
	# every engine file except the jackett plugins and __init__.py,
	# re-download the plugins, then restart the daemon.
	sudo systemctl stop qbittorrent-nox@xyz.service
	# juxtaposed find primaries AND together; the obsolescent -a operator
	# (POSIX marks it for removal) is redundant, so it is dropped
	find "$XDG_DATA_HOME/qBittorrent/nova3/engines" -maxdepth 1 -type f ! -name 'jackett*' ! -name '__init__.py' -delete
	curlqb "$XDG_DATA_HOME/qBittorrent/nova3/engines"
	sudo systemctl start qbittorrent-nox@xyz.service
}
refl () {
	# Regenerate /etc/pacman.d/mirrorlist with reflector.
	# why not use http:
	# https://www.reddit.com/r/archlinux/comments/kx149z/should_i_use_http_mirrors/
	# https://www.reddit.com/r/archlinux/comments/ej4k4d/is_it_safe_to_use_not_secured_http_pacman_mirrors/
	# rsync may need to change XferCommand in /etc/pacman.conf
	# https://www.reddit.com/r/archlinux/comments/mynw6e/rsync_mirrors_with_pacman/
	# need --delay so no super out of sync mirrors
	if [ "$hostname" = xyzib ]; then
		sudo reflector --verbose --save /etc/pacman.d/mirrorlist --country ro --protocol https --delay 1 --fastest 3
	else
		sudo reflector --verbose --save /etc/pacman.d/mirrorlist --country us --protocol https --delay 1 --latest 25 --score 25 --fastest 10
	fi
}
music () {
# Download new CC-BY tracks from Scott Buckley's soundcloud into the
# local music dir, then mirror the cc-by and favorite dirs to pp.
# NOTE(review): this relies on repeated --match-filter options being
# OR'ed (keep tracks whose license field is cc-by OR whose title mentions
# cc-by, case-insensitively) — confirm against yt-dlp docs.
yt-dlp -f 'bestaudio[ext=opus]/bestaudio' --match-filter 'license=cc-by' --match-filter 'title~=(?i)cc-by' -P "$XDG_MUSIC_DIR/cc-by/scott_buckley" https://soundcloud.com/scottbuckley/tracks
rsync -avP --delete "$XDG_MUSIC_DIR/cc-by/scott_buckley" pp:/home/xyz/music/cc-by
rsync -avP --delete "$XDG_MUSIC_DIR/favorite" pp:/home/xyz/music
}
monthly_misc () {
# Monthly chores (insp only): copy ba's mirrorlist locally, update the
# qbittorrent search plugins and prune old Arch ISO torrents on ib via
# the WebUI API, then refresh the Ventoy USB drive with a gpg- and
# checksum-verified Arch ISO and upgrade Ventoy itself if needed.
if [ "$hostname" = xyzinsp ]; then
sudo scp xyz@ba.flylightning.xyz:/etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist
# https://github.com/qbittorrent/qBittorrent/wiki#webui-api
curl -sS -X POST 'ibwg:57151/api/v2/search/updatePlugins' &
# in the past, I have both stwg and ibwg, now only one
# another way: `jq -r '.[]|"\(.added_on)\t\(.hash)\t\(.name)"'`
# sort by added_on and drop the newest (head -n-1), deleting every older
# archlinux iso torrent by hash
curl -sS 'ibwg:57151/api/v2/torrents/info?category=useful' | jq -r '.[]|[.added_on,.hash,.name]|@tsv' | grep 'archlinux.*\.iso' | sort -n | head -n-1 | awk '{print $2}' | while read -r hash; do
# need POST to delete torrents. -d default POST, so no need `-X POST`
curl -sS -d "hashes=$hash" -d deleteFiles=true 'ibwg:57151/api/v2/torrents/delete'
done &
# both API calls above were backgrounded; wait for them before touching
# the USB drive
wait
while ! [ -d /run/media/xyz/Ventoy ]; do
alarm 0 'Plug in usb flash drive'
echo 'Plug in usb flash drive'
sleep 10
done
#[ -d /run/media/xyz/Ventoy ] || die "No usb flash drive"
rsync -vPu ib:~/downloads/torrents/useful/archlinux*.iso /run/media/xyz/Ventoy/archlinux-x86_64.iso
# only check checksum and gpg signature on insp is sufficient for me, else too much work
if curl -sS -o /run/media/xyz/Ventoy/archlinux-x86_64.iso.sig https://mirror.fcix.net/archlinux/iso/latest/archlinux-x86_64.iso.sig; then
gpg --verify /run/media/xyz/Ventoy/archlinux-x86_64.iso.sig || die 'Arch iso gpg signature check failed'
else
die 'Arch iso gpg signature download failed'
fi
# need to cd to iso file dir to checksum
cd /run/media/xyz/Ventoy || exit
curl -sS https://mirror.fcix.net/archlinux/iso/latest/sha256sums.txt | grep archlinux-x86_64\.iso | tee /run/media/xyz/Ventoy/archlinux-x86_64.iso.sha256 | sha256sum -c || die 'Arch iso checksum does not match'
# if stay at /run/media/xyz/Ventoy, will cause it be busy and can't be umount, which will cause `ventoy -u` fail
# need to be after `wait`, because checksum need to be at ventoy dir
cd || exit
# busybox df if filesystem name too long, it will put the filesystem
# name in a separate line which normally is in the second line and
# there's total three lines. So I use df -P option which seems busybox
# will also not put filesystem name in a separate line and behave same
# as gnu df.
disk="$(df -P /run/media/xyz/Ventoy/ | awk 'END{sub(/[[:digit:]]+$/,"",$1);print $1}')"
# another way:
#disk="$(realpath /dev/disk/by-label/Ventoy | awk 'END{sub(/[[:digit:]]+$/,"",$0);print}')"
# compare the Ventoy version on the drive with the installed tool's
# version; only run the upgrade (`ventoy -u`) when they differ
sudo ventoy -l "$disk" | awk '/Ventoy:/{a=$2} /Ventoy Version in Disk:/{b=$NF;exit} END{exit((a==b)?1:0)}' && echo y | sudo ventoy -u "$disk"
umount /run/media/xyz/Ventoy /run/media/xyz/FAT32DIR
udisksctl power-off -b /dev/disk/by-label/Ventoy
alarm 0 'Unplug usb flash drive'
fi
}
hostname="$(hostname)"
# keep pp awake for the whole run; restored to suspend near the bottom
[ "$hostname" = xyzpp ] && gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-type nothing
# no arguments: run the basic daily tasks; otherwise dispatch per flag
if [ $# -eq 0 ]; then
fast
else
while getopts abcfjmMopqr opt; do
case $opt in
a)all;;
b)backup;;
c)clean;;
f)fast;;
j)userjs;;
m)misc;;
M)music;;
o)monthly_misc;;
p)pac;;
q)qb;;
r)refl;;
\?)exit 1;;
esac
done
fi
# $log is appended to by pac(); only print/save it when non-empty
[ "$log" ] && printf '%s' "$log" | tee "$XDG_DOCUMENTS_DIR/logs/upd.log"
[ "$hostname" = xyzpp ] && gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-type suspend
# `[ "$hostname" = xyzpp ] && ...` if check failed will have exit status of 1, unlike check with `if`
# I decided to always `exit 0` if reached end, so commands like `upd -p && ...` can keep running
exit 0
|