mirror of
https://github.com/foo-dogsquared/dotfiles.git
synced 2025-01-30 22:57:54 +00:00
Update custom scripts
This commit is contained in:
parent
888b452c9a
commit
1cf031e170
54258
bangs/config.json
Normal file
54258
bangs/config.json
Normal file
File diff suppressed because it is too large
Load Diff
93
bin/bangs
Executable file
93
bin/bangs
Executable file
@@ -0,0 +1,93 @@
|
||||
#!/usr/bin/env nix-shell
#! nix-shell -i oil -p coreutils ripgrep handlr gnused xxd

# A ripoff from Duckduckgo bangs.
#
# Examples:
# ```
# bangs hello there !g !aw
# ```
# will open a search result page on Google and Arch Wiki

# These are the default bangs available.
# Bangs are alphanumeric keys that shouldn't have whitespace characters.
const config_dir = "${XDG_CONFIG_HOME:-"$HOME/.config"}/bangs"
const config_file = "${config_dir}/config.json"
const default_config = {
    'aw': {
        'name': 'Arch Wiki',
        'url': 'https://wiki.archlinux.org/index.php?title=Special%3ASearch&search={{{s}}}'
    },
    'gh': {
        'name': 'GitHub',
        'url': 'https://github.com/search?utf8=%E2%9C%93&q={{{s}}}'
    },
    'g': {
        'name': 'Google',
        'url': 'https://www.google.com/search?q={{{s}}}'
    },
    'so': {
        'name': 'Stack Overflow',
        'url': 'http://stackoverflow.com/search?q={{{s}}}'
    },
    'w': {
        'name': 'Wikipedia',
        'url': 'https://en.wikipedia.org/wiki/Special:Search?search={{{s}}}'
    }
}

# Prefix that marks a CLI argument as a bang (default '~'), and the
# placeholder inside a bang URL that is replaced with the encoded query.
const bangs_prefix = "${BANGS_PREFIX:-~}"
const bangs_placeholder = "${BANGS_PLACEHOLDER:-{{{s}}}}"

# An argument is a bang when it is exactly the prefix followed by one or
# more word characters.
const bangs_format = / %start $bangs_prefix word+ %end /

const valid_bangs = %()
const search_query = %()

# Load the user's bang database when present, otherwise use the defaults.
if test -f $config_file {
    json read :bangs < $config_file
} else {
    var bangs = default_config
}

# Percent-encode a string for embedding in a URL query.
# Stolen from https://gist.github.com/cdown/1163649 and https://gist.github.com/cdown/1163649#gistcomment-1256298
proc urlencode(msg) {
    for (i in 0:len(msg)) {
        # FIX: the original read `msg[i - 1]`, which at i=0 indexes -1 (the
        # last character) and so encodes the string rotated by one position.
        var char = msg[i]

        case $char {
            # RFC 3986 unreserved characters pass through untouched.
            [a-zA-Z0-9.~_-])
                printf '%s' $char
                ;;
            # Everything else is emitted as %XX per byte via xxd.
            *)
                printf '%s' $char | xxd -plain -cols 1 | while read :hex { printf '%%%s' $hex }
                ;;
        }
    }
}

# Print a printf-style warning on stderr.
proc warnf(format, @msg) {
    >&2 printf "$format\\n" @msg
}

# Partition the arguments: bangs go to valid_bangs, everything else is
# collected as the search query.
for i in @ARGV {
    write -- $i | rg --quiet $bangs_format || {
        push :search_query $i
        continue
    }

    # Strip the prefix to get the bare bang key.
    var bang = $(write -- $i | sed --regexp-extended --expression "s/^${bangs_prefix}//")
    if (bang in bangs) {
        push :valid_bangs $bang
        warnf "%s will be used to search." $bang
    } else {
        warnf "%s is not found in the database." $bang
    }
}

var query = $(printf "%s " @search_query)
warnf "Search query is '%s'" $query

# Open the search URL of every recognized bang with the preferred handler.
for bang in @valid_bangs {
    var metadata = bangs[bang]
    var url = $(write -- ${metadata['url']} | sed --expression "s/${bangs_placeholder}/$(urlencode $query)/")

    handlr open $url
}
|
@ -39,38 +39,38 @@ var skip = false
|
||||
var json_data = false
|
||||
var move = false
|
||||
|
||||
while test $# -gt 0 {
|
||||
case $1 {
|
||||
while test $len(ARGV) -gt 0 {
|
||||
case $[ARGV[0]] {
|
||||
-h|--help)
|
||||
echo $help_section
|
||||
exit 0
|
||||
;;
|
||||
-a|--author)
|
||||
set author = $2
|
||||
setvar author = ARGV[1]
|
||||
shift 2
|
||||
;;
|
||||
-t|--title)
|
||||
set title = $2
|
||||
setvar title = ARGV[1]
|
||||
shift 2
|
||||
;;
|
||||
-d|--date)
|
||||
set pub_date = $2
|
||||
setvar pub_date = ARGV[1]
|
||||
shift 2
|
||||
;;
|
||||
--skip)
|
||||
set skip = true
|
||||
setvar skip = true
|
||||
shift
|
||||
;;
|
||||
--json)
|
||||
set json_data = true
|
||||
setvar json_data = true
|
||||
shift
|
||||
;;
|
||||
--move)
|
||||
set move = true
|
||||
setvar move = true
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
set path = $1
|
||||
setvar path = ARGV[0]
|
||||
shift
|
||||
;;
|
||||
}
|
||||
@ -78,13 +78,13 @@ while test $# -gt 0 {
|
||||
|
||||
proc kebab-case(word) {
|
||||
# Convert into lower case.
|
||||
set word = $(echo $word | tr '[:upper:]' '[:lower:]')
|
||||
setvar word = $(echo $word | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
# What happens to this line:
|
||||
# * Convert all whitespace and dashes into a single dash.
|
||||
# * Remove all invalid characters (all that are not alphanumeric characters and dashes).
|
||||
# * Remove leading and trailing dashes.
|
||||
set word = $(echo $word | sed --regexp-extended --expression 's/\s+|-+/-/g' --expression 's/[^.a-z0-9-]//g' --expression 's/^-+|-+\$//g')
|
||||
setvar word = $(echo $word | sed --regexp-extended --expression 's/\s+|-+/-/g' --expression 's/[^.a-z0-9-]//g' --expression 's/^-+|-+\$//g')
|
||||
|
||||
echo $word
|
||||
}
|
||||
@ -98,10 +98,10 @@ proc prompt(string, :out, prefix = ">> ") {
|
||||
proc file_parse(path, :out) {
|
||||
var extension_regex = / '.' ![ '.' ]+ %end /
|
||||
var file = {}
|
||||
set file['dir'] = $(dirname $path)
|
||||
set file['name'] = $(basename $path | sed --regexp-extended "s|(.+)${extension_regex}|\\1|")
|
||||
set file['ext'] = $(basename $path | sed --regexp-extended "s|.+(${extension_regex})|\\1|")
|
||||
set file['mime_type'] = $(file --mime-type --brief $path)
|
||||
setvar file['dir'] = $(dirname $path)
|
||||
setvar file['name'] = $(basename $path | sed --regexp-extended "s|(.+)${extension_regex}|\\1|")
|
||||
setvar file['ext'] = $(basename $path | sed --regexp-extended "s|.+(${extension_regex})|\\1|")
|
||||
setvar file['mime_type'] = $(file --mime-type --brief $path)
|
||||
|
||||
setref out = file
|
||||
}
|
||||
@ -135,10 +135,10 @@ file_parse $path :file_info
|
||||
|
||||
if (json_data) {
|
||||
var metadata = {}
|
||||
set metadata['file'] = file_info
|
||||
set metadata['title'] = $title
|
||||
set metadata['author'] = $author
|
||||
set metadata['date'] = $pub_date
|
||||
setvar metadata['file'] = file_info
|
||||
setvar metadata['title'] = $title
|
||||
setvar metadata['author'] = $author
|
||||
setvar metadata['date'] = $pub_date
|
||||
json write :metadata
|
||||
}
|
||||
|
||||
|
23
bin/generate-bangs
Executable file
23
bin/generate-bangs
Executable file
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env nix-shell
|
||||
#! nix-shell -i oil -p coreutils curl jq
|
||||
|
||||
# Generate the config with Duckduckgo bangs.
|
||||
# Very useful if you really want just to search as you would in Duckduckgo.
|
||||
|
||||
# Source of the bang database; override with the BANGS_URL environment variable.
const bang_url = "${BANGS_URL:-https://duckduckgo.com/bang.js}"
|
||||
const config = {}
|
||||
|
||||
# We still have to use some external tools like jq since Oil doesn't support nested objects yet.
# For more information, see https://github.com/oilshell/oil/issues/741
|
||||
# jq sorts the bang entries by trigger ('t') and emits one compact JSON object
# per line; `read --line` consumes each line into the implicit `_line` variable.
# NOTE(review): this relies on the loop (the last pipeline segment) running in
# the main shell so the `config` mutations persist — confirm for the Oil
# version in use.
curl --silent --location $bang_url | jq 'sort_by(.t) | .[]' --raw-output --compact-output --sort-keys | while read --line {
|
||||
write -- $_line | json read :bang
|
||||
var _data = {}
|
||||
# 't' is the bang trigger; 's' its display name; 'u' the URL template
# (keys as served by bang.js).
var trigger = bang['t']
|
||||
|
||||
setvar _data['name'] = bang['s']
|
||||
setvar _data['url'] = bang['u']
|
||||
setvar config[trigger] = _data
|
||||
}
|
||||
|
||||
# Emit the assembled bang database as JSON on stdout.
json write :config
|
|
@@ -1,24 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
#!/usr/bin/env nix-shell
|
||||
#! nix-shell --pure -i bash -p curl jq fzf coreutils findutils iputils
|
||||
|
||||
# A quick command line interface for creating a gitignore with the API from https://gitignore.io.
|
||||
# This script comes with a simple caching to avoid creating too much requests.
|
||||
|
||||
# Dependencies:
|
||||
# * bash
|
||||
# * curl
|
||||
# * fzf
|
||||
# * jq
|
||||
# * paste
|
||||
# * xargs
|
||||
|
||||
set -eo pipefail
|
||||
|
||||
CACHE_FILE="${XDG_CACHE_DIR:-$HOME/.cache}/gitignore-io.langs.json"
|
||||
|
||||
# Check if the language list is downloaded for the last hour (3600 seconds).
|
||||
if [ ! -e $CACHE_FILE ] || test $(expr $(date "+%s") - $(date -r $CACHE_FILE "+%s")) -gt 3600
|
||||
if [[ ! -e $CACHE_FILE ]] || test $(expr $(date "+%s") - $(date -r $CACHE_FILE "+%s")) -gt 3600
|
||||
then
|
||||
ping "gitignore.io" --count 4 && curl --silent --location --output $CACHE_FILE "https://gitignore.io/api/list?format=json"
|
||||
ping -q -c 4 "gitignore.io" && curl --silent --location "https://gitignore.io/api/list?format=json" --output $CACHE_FILE
|
||||
fi
|
||||
|
||||
KEYS=$(jq 'keys | .[] | @text' --raw-output $CACHE_FILE | fzf --multi | while read lang; do echo " .[\"$lang\"].contents"; done | paste -s -d ',')
|
||||
|
@ -10,8 +10,8 @@ split-album [options...] [\$ALBUM_FILE \$TIMESTAMP_FILE]
|
||||
Options:
|
||||
-h, --help Show the help section.
|
||||
--tutorial Show a helpful tutorial-esque description of the program.
|
||||
-af, --audio-file [file] Set the audio file to be split.
|
||||
-tf, --timestamp-file [file] Set the timestamp file to be used for splitting.
|
||||
--audio-file [file] Set the audio file to be split.
|
||||
--timestamp-file [file] Set the timestamp file to be used for splitting.
|
||||
-t, --title [title] Set the title of the album.
|
||||
-d, --date [date] Set the publication date of the album.
|
||||
-a, --author [author] Set the author of the album.
|
||||
|
Loading…
Reference in New Issue
Block a user