dash-contrib-docset-feeds/update.sh

#!/bin/bash
INBUILT_FEED='https://github.com/Kapeli/feeds'
CONTRIB_FEED='http://sanfrancisco.kapeli.com/feeds/zzz/user_contributed/build/index.json'
# Update the INBUILT_FEED from upstream
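# (clone on the first run, check out master and pull on subsequent runs)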
if [ -d "$(basename "$INBUILT_FEED")" ]; then
    cd "$(basename "$INBUILT_FEED")" && git checkout master && git pull && cd ..
else
    git clone "$INBUILT_FEED"
fi
# Update the CONTRIB_FEED from upstream
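# index.json is assumed to contain one block per docset, roughly
# (layout inferred from the sed patterns below, not verified):
#     "Some Docset" : {
#         "archive" : "Some_Docset.tgz",
#         ...
#         "version" : "1.0"
#     },
# sed keeps only the name, "archive" and "version" lines, so awk sees them in
# groups of three (NR%3) and writes one <docset name>.xml feed file per docset.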
wget -qO - "$CONTRIB_FEED" | \
    sed -n -e '/^ \{4\}"/p' \
           -e '/^ "archive" :.*tgz"/p' \
           -e '/^ "version" :/p' | \
    awk -F '"' 'NR%3==1 { nm = $2 ; next }
                NR%3==2 { ar = $4 ; next }
                NR%3==0 { vr = $4 ;
                          of = nm ".xml"
                          print "<entry>" > of
                          print "<version>" vr "</version>" >> of
                          print "<url>http://frankfurt.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "<url>http://london.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "<url>http://newyork.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "<url>http://sanfrancisco.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "<url>http://singapore.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "<url>http://tokyo.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "<url>http://sydney.kapeli.com/feeds/zzz/user_contributed/build/" nm "/" ar "</url>" >> of
                          print "</entry>" >> of
                          close(of)
                          ar = ""; vr = ""; nm = ""; next
                        }'
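# Each generated feed file should now look roughly like this (illustrative
# values, not real output):
#   <entry>
#   <version>1.0</version>
#   <url>http://frankfurt.kapeli.com/feeds/zzz/user_contributed/build/Some_Docset/Some_Docset.tgz</url>
#   ...one <url> line per mirror...
#   </entry>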
# Remove duplicate files and keep only the more recent versions
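# (find + uniq -d lists basenames that occur more than once under ./ -- e.g. a
# generated feed that also exists in the feeds/ checkout; rm is given the bare
# basename, so the copy in the current directory is the one that gets removed)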
DUPLICATED_FILES=( $(find . -type f -name "*.xml" -printf "%f\n" | sort | uniq -d) )
for file in "${DUPLICATED_FILES[@]}"; do
    rm "$file"
done
# The generated files are bound to have some errors
# Detect erroneous files: collect every file that lacks a well-formed .tgz URL
MALFORMED_FILES=( $(grep -L "http://.*\.tgz" ./*.xml) )
# Fix MALFORMED_FILES using some regex magic (need to make this better and not look stupid)
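# (FixFileUrl is expected to be defined in the ./.vimrc that -u loads below)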
for file in "${MALFORMED_FILES[@]}"; do
    vim "$file" -u ./.vimrc +'call FixFileUrl()' +wq
done
# Extract URLs from all files and create a wget input file
WGET_URLS='/tmp/docsets_url'
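# Only the london mirror URL is extracted so that each docset is listed (and
# later downloaded) once rather than once per mirror.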
grep "http://london\..*\.tgz" ./**/*.xml -o --no-filename > "$WGET_URLS"
# Download the archives and extract them into the proper docsets directory
#cd "${1:-/tmp/}" && \
#    wget --continue -i "$WGET_URLS" && \
#    for archive in ./*.tgz; do
#        tar xzf "$archive" -C "$HOME/.local/share/Zeal/Zeal/docsets/"
#    done