author     TheSiahxyz <164138827+TheSiahxyz@users.noreply.github.com>    2025-06-30 21:14:58 +0900
committer  TheSiahxyz <164138827+TheSiahxyz@users.noreply.github.com>    2025-06-30 21:14:58 +0900
commit     d8661bdfa20db5f682bc7aa9dd212c59e3073ef4 (patch)
tree       4f45c06c75a39e93fa8376ba3e96644818aaa01c
parent     34dde4c6819f59c2bac300cf98fffb2349d09aba (diff)
updates
-rw-r--r--  ar/.config/newsboat/urls                          62
-rw-r--r--  ar/.config/qutebrowser/config.py                 113
-rwxr-xr-x  ar/.config/qutebrowser/userscripts/code_select    64
-rwxr-xr-x  ar/.config/qutebrowser/userscripts/gitclone       74
-rwxr-xr-x  ar/.config/qutebrowser/userscripts/qute-pass       6
-rwxr-xr-x  ar/.config/qutebrowser/userscripts/tab-manager   529
-rw-r--r--  ar/.config/zsh/scripts.zsh                         6
-rwxr-xr-x  ar/.local/bin/rssadd                               2
-rwxr-xr-x  ar/.local/bin/rssget                             149
9 files changed, 890 insertions, 115 deletions
diff --git a/ar/.config/newsboat/urls b/ar/.config/newsboat/urls
index a142b08..656947f 100644
--- a/ar/.config/newsboat/urls
+++ b/ar/.config/newsboat/urls
@@ -7,45 +7,47 @@
"query:No Content:content = \"\"" "Query"
" "
"--- TheSiahxyz ---"
-https://github.com/TheSiahxyz/.dotfiles/commits/master.atom "~TheSiahxyz dotfiles" Git
-https://github.com/TheSiahxyz/suckless/commits/master.atom "~TheSiahxyz suckless" Git
+https://github.com/TheSiahxyz/.dotfiles/commits/master.atom "~TheSiahxyz dotfiles" Git
+https://github.com/TheSiahxyz/suckless/commits/master.atom "~TheSiahxyz suckless" Git
" "
"--- News ---"
-http://rss.cnn.com/rss/edition_us.rss "~US" News
-http://rss.cnn.com/rss/edition_world.rss "~World" News
-https://feeds.bbci.co.uk/news/technology/rss.xml "~AI" AI News
-https://www.koreaherald.com/rss/kh_Business "~Business (KOR)" Business News
-https://www.archlinux.org/feeds/news/ "~Arch" Distro Linux Tech
-https://artixlinux.org/feed.php "~Artix" Distro Linux Tech
+http://rss.cnn.com/rss/edition_us.rss "~US" News
+http://rss.cnn.com/rss/edition_world.rss "~World" News
+https://feeds.bbci.co.uk/news/technology/rss.xml "~AI" AI News
+https://www.koreaherald.com/rss/kh_Business "~Business (KOR)" Business News
+https://www.archlinux.org/feeds/news/ "~Arch" Distro Linux Tech
+https://artixlinux.org/feed.php "~Artix" Distro Linux Tech
" "
"--- Blog ---"
-https://lukesmith.xyz/rss.xml "~Luke Smith" Blog
+https://lukesmith.xyz/rss.xml "~Luke Smith" Blog
" "
"--- Git ---"
-https://github.com/LukeSmithxyz/voidrice/commits/master.atom "~Luke Smith dotfiles" Git
-https://github.com/LukeSmithxyz/mutt-wizard/commits/master.atom "~Luke Smith mutt-wizard" Git
-https://github.com/LukeSmithxyz/emailwiz/commits/master.atom "~Luke Smith email-wizard" Git
-https://github.com/Piotr1215/dotfiles/commits/master.atom "~Piotr1215 dotfiles" Git
-https://github.com/linkarzu/dotfiles-latest/commits/main.atom "~Linkarzu dotfiles" Git
-https://github.com/Gavinok/scripts/commits/master.atom "~Gavin scripts" Git
-https://gitlab.com/rwxrob/dotfiles.atom "~Rob Muhlestein dotfiles" Git
-https://github.com/sunaku/glove80-keymaps/commits/main.atom "~Glove80 keymaps"
+https://github.com/LukeSmithxyz/voidrice/commits/master.atom "~Luke Smith dotfiles" Git
+https://github.com/LukeSmithxyz/mutt-wizard/commits/master.atom "~Luke Smith mutt-wizard" Git
+https://github.com/LukeSmithxyz/emailwiz/commits/master.atom "~Luke Smith email-wizard" Git
+https://github.com/BreadOnPenguins/dots/commits.atom "~Bread on Penguins dotfiles" Git
+https://github.com/Piotr1215/dotfiles/commits/master.atom "~Piotr1215 dotfiles" Git
+https://github.com/linkarzu/dotfiles-latest/commits/main.atom "~Linkarzu dotfiles" Git
+https://github.com/Gavinok/scripts/commits/master.atom "~Gavin scripts" Git
+https://gitlab.com/rwxrob/dotfiles.atom "~Rob Muhlestein dotfiles" Git
+https://github.com/sunaku/glove80-keymaps/commits/main.atom "~Glove80 keymaps" Git
" "
"--- Suckless ---"
-https://git.suckless.org/dmenu/atom.xml "~Dmenu" Suckless
-https://git.suckless.org/dwm/atom.xml "~Dwm" Suckless
-https://git.suckless.org/slock/atom.xml "~Slock" Suckless
-https://git.suckless.org/st/atom.xml "~St" Suckless
-https://git.suckless.org/surf/atom.xml "~Surf" Suckless
-https://git.suckless.org/tabbed/atom.xml "~Tabbed" Suckless
+https://git.suckless.org/dmenu/atom.xml "~Dmenu" Suckless
+https://git.suckless.org/dwm/atom.xml "~Dwm" Suckless
+https://git.suckless.org/slock/atom.xml "~Slock" Suckless
+https://git.suckless.org/st/atom.xml "~St" Suckless
+https://git.suckless.org/surf/atom.xml "~Surf" Suckless
+https://git.suckless.org/tabbed/atom.xml "~Tabbed" Suckless
" "
"--- Odysee ---"
-https://odysee.com/$/rss/@Odysee:8 "~Odysee"
-https://odysee.com/$/rss/@Luke:7 "~Luke Smith"
-https://odysee.com/$/rss/@AlphaNerd:8 "~Mental Outlaw"
-https://odysee.com/$/rss/@DistroTube:2 "~DistroTube"
+https://odysee.com/$/rss/@Odysee:8 "~Odysee" Odysee
+https://odysee.com/$/rss/@Luke:7 "~Luke Smith" Odysee
+https://odysee.com/$/rss/@AlphaNerd:8 "~Mental Outlaw" Odysee
+https://odysee.com/$/rss/@DistroTube:2 "~DistroTube" Odysee
" "
"--- Youtube ---"
-https://www.youtube.com/feeds/videos.xml?channel_id=UCevUmOfLTUX9MNGJQKsPdIA "~Neetcode" Algorithm Python Tech Youtube
-https://www.youtube.com/feeds/videos.xml?channel_id=UCkWVN7H3JqGtJ5Pv5bvCrAw "~Piotr1215" Neovim Shell Tech Youtube
-https://www.youtube.com/feeds/videos.xml?channel_id=UCrSIvbFncPSlK6AdwE2QboA "~Linkarzu" Mac Neovim Tech Youtube
+https://www.youtube.com/feeds/videos.xml?channel_id=UCwHwDuNd9lCdA7chyyquDXw "~Linux" Linux Shell Tech Youtube
+https://www.youtube.com/feeds/videos.xml?channel_id=UCevUmOfLTUX9MNGJQKsPdIA "~Neetcode" Algorithm Python Tech Youtube
+https://www.youtube.com/feeds/videos.xml?channel_id=UCkWVN7H3JqGtJ5Pv5bvCrAw "~Piotr1215" Neovim Shell Tech Youtube
+https://www.youtube.com/feeds/videos.xml?channel_id=UCrSIvbFncPSlK6AdwE2QboA "~Linkarzu" Mac Neovim Tech Youtube
diff --git a/ar/.config/qutebrowser/config.py b/ar/.config/qutebrowser/config.py
index ae4dccd..8ace1af 100644
--- a/ar/.config/qutebrowser/config.py
+++ b/ar/.config/qutebrowser/config.py
@@ -10,6 +10,8 @@
## qute://help/configuring.html
## qute://help/settings.html
+import os
+
# pylint: disable=C0111
c = c # noqa: F821 pylint: disable=E0602,C0103
config = config # noqa: F821 pylint: disable=E0602,C0103
@@ -2325,7 +2327,7 @@ config.bind("<Ctrl-C>", "cmd-set-text :")
# config.bind('[[', 'navigate prev')
# config.bind(']]', 'navigate next')
# config.bind('`', 'mode-enter set_mark')
-# config.bind('ad', 'download-cancel')
+config.bind("xd", "download-cancel")
# config.bind('b', 'cmd-set-text -s :quickmark-load')
# config.bind('cd', 'download-clear')
# config.bind('co', 'tab-only')
@@ -2423,16 +2425,16 @@ config.bind("<Ctrl-C>", "cmd-set-text :")
# config.bind('yp', 'yank pretty-url')
# config.bind('yt', 'yank title')
# config.bind('yy', 'yank')
-config.bind("YD", "hint yank domain -s")
-config.bind("YM", "hint yank inline [{title}]({url:yank}) -s")
-config.bind("YP", "hint yank pretty-url -s")
-config.bind("YT", "hint yank title -s")
-config.bind("YY", "hint yank -s")
-config.bind("Yd", "hint yank domain")
-config.bind("Ym", "hint yank inline [{title}]({url:yank})")
-config.bind("Yp", "hint yank pretty-url")
-config.bind("Yt", "hint yank title")
-config.bind("Yy", "hint yank")
+config.bind("YD", "hint links yank domain -s")
+config.bind("YM", "hint links yank inline [{title}]({url:yank}) -s")
+config.bind("YP", "hint links yank pretty-url -s")
+config.bind("YT", "hint links yank title -s")
+config.bind("YY", "hint links yank -s")
+config.bind("Yd", "hint links yank domain")
+config.bind("Ym", "hint links yank inline [{title}]({url:yank})")
+config.bind("Yp", "hint links yank pretty-url")
+config.bind("Yt", "hint links yank title")
+config.bind("Yy", "hint links yank")
# config.bind('{{', 'navigate prev -t')
# config.bind('}}', 'navigate next -t')
@@ -2561,6 +2563,8 @@ config.bind("<Ctrl-]>", "command-history-prev", mode="command")
# config.bind('y', 'prompt-accept yes', mode='yesno')
## userscripts
+os.environ["QUTE_POST_CLONE"] = 'notify-send "cloned!" "${QUTE_URL}"'
+config.bind("gc", "spawn -u -- gitclone")
config.bind(",msg", "open -t qute://log/?level=info")
config.bind(",nb", "spawn --userscript add-nextcloud-bookmarks")
config.bind(",nB", "hint links userscript add-nextcloud-bookmarks")
@@ -2579,8 +2583,8 @@ config.bind(",qc", "open -t qute://help/commands.html#cmd-repeat")
config.bind(",qr", "spawn --userscript qr")
config.bind(",rs", "restart")
config.bind(",tk", "spawn --userscript translate --target_lang ko")
-config.bind(",tw", "spawn --userscript translate")
config.bind(",tk", "spawn --userscript translate --target_lang ko", mode="caret")
+config.bind(",tw", "spawn --userscript translate")
config.bind(",tw", "spawn --userscript translate", mode="caret")
config.bind(",vd", "spawn qndl -v {url}")
config.bind(",vD", "hint links spawn qndl -v {hint-url}")
@@ -2590,7 +2594,92 @@ config.bind(
"za",
"config-cycle statusbar.show always never;; config-cycle tabs.show always never",
)
+c.hints.selectors["code"] = [
+ # Selects all code tags whose direct parent is not a pre tag
+ ":not(pre) > code",
+ "pre",
+]
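+# "yc" hints the elements matched above; code_select (added below in this commit) reads the picked
+# element's HTML from QUTE_SELECTED_HTML and copies its text (clipboard via pyperclip if available,
+# otherwise qutebrowser's :yank inline)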
+config.bind("yc", "hint code userscript code_select")
config.bind("zs", "config-cycle statusbar.show always never")
config.bind("zt", "config-cycle tabs.show always never")
+session_path = os.path.join(os.environ["HOME"], ".local/share/qutebrowser/sessions/")
+
+# general bind, to manually enter commands, flags and arguments
+config.bind(
+ ",sg",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path}",
+)
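+# e.g. the prompt can then be completed by hand to something like
+#   :spawn --userscript tab-manager <session dir> save -f work   ("work" is just an example session name)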
+
+# append current focused tab to specified session
+config.bind(
+ ",ss",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} save -f",
+)
+
+# remove
+config.bind(
+ ",sd",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} remove -f",
+)
+
+# save all and overwrite specified session (update session, don't append):
+config.bind(
+ ",sa",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} save-all -o -f",
+)
+
+# open one or more sessions as HTML, or open index
+config.bind(
+ ",so",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} open -f",
+)
+
+# restore specified sessions, or the current open HTML file if it is a valid session
+config.bind(
+ ",sr",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} restore -f",
+)
+
+# restore, same as above but close all open tabs first
+config.bind(
+ ",sR",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} restore -c -f",
+)
+
+# merge
+config.bind(
+ ",sm",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} merge -f",
+)
+
+# delete session
+config.bind(
+ ",sx",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} delete -f",
+)
+
+# rename
+config.bind(
+ ",sn",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} rename -f",
+)
+config.bind(
+ ",sN",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} rename -f -n",
+)
+
+# export
+config.bind(
+ ",se",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} export -f",
+)
+config.bind(
+ ",sE",
+ f"cmd-set-text -s :spawn --userscript tab-manager {session_path} export -f -w",
+)
+
+# open help file
+config.bind(",sh", f"spawn --userscript tab-manager {session_path} help")
+
config.source("themes/gruvbox.py")
diff --git a/ar/.config/qutebrowser/userscripts/code_select b/ar/.config/qutebrowser/userscripts/code_select
new file mode 100755
index 0000000..8f7fc31
--- /dev/null
+++ b/ar/.config/qutebrowser/userscripts/code_select
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+
+import os
+import html
+import re
+import sys
+import xml.etree.ElementTree as ET
+try:
+ import pyperclip
+except ImportError:
+ try:
+ import pyclip as pyperclip
+ except ImportError:
+ PYPERCLIP = False
+ else:
+ PYPERCLIP = True
+else:
+ PYPERCLIP = True
+
+
+def parse_text_content(element):
+ # https://stackoverflow.com/a/35591507/15245191
+ magic = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd" [
+ <!ENTITY nbsp ' '>
+ ]>'''
+ root = ET.fromstring(magic + element)
+ text = ET.tostring(root, encoding="unicode", method="text")
+ text = html.unescape(text)
+ return text
+
+
+def send_command_to_qute(command):
+ with open(os.environ.get("QUTE_FIFO"), "w") as f:
+ f.write(command)
+
+
+def main():
+ delimiter = sys.argv[1] if len(sys.argv) > 1 else ";"
+ # For info on qute environment variables, see
+ # https://github.com/qutebrowser/qutebrowser/blob/master/doc/userscripts.asciidoc
+ element = os.environ.get("QUTE_SELECTED_HTML")
+ code_text = parse_text_content(element)
+ re_remove_dollars = re.compile(r"^(\$ )", re.MULTILINE)
+ code_text = re.sub(re_remove_dollars, '', code_text)
+ if PYPERCLIP:
+ pyperclip.copy(code_text)
+ send_command_to_qute(
+ "message-info 'copied to clipboard: {info}{suffix}'".format(
+ info=code_text.splitlines()[0].replace("'", "\""),
+ suffix="..." if len(code_text.splitlines()) > 1 else ""
+ )
+ )
+ else:
+ # Qute's yank command won't copy across multiple lines so we
+ # compromise by placing lines on a single line separated by the
+ # specified delimiter
+ code_text = re.sub("(\n)+", delimiter, code_text)
+ code_text = code_text.replace("'", "\"")
+ send_command_to_qute("yank inline '{code}'\n".format(code=code_text))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ar/.config/qutebrowser/userscripts/gitclone b/ar/.config/qutebrowser/userscripts/gitclone
new file mode 100755
index 0000000..ad62d17
--- /dev/null
+++ b/ar/.config/qutebrowser/userscripts/gitclone
@@ -0,0 +1,74 @@
+#!/bin/sh
+#
+# Author: palb91
+# Date: 2022
+#
+# Clone a git repository directly from qutebrowser
+#
+# In config.py:
+# bind('gc', 'spawn -u -- gitclone')
+#
+# # Run a shell command after successful clone
+# import os
+# os.environ['QUTE_POST_CLONE'] = 'notify-send "git clone" "${QUTE_URL}"'
+
+set -e
+
+# Local storage
+BASE_DIR="${HOME}"/Public/repos
+TEMP_DIR="$(mktemp -d)"
+
+# Get information from the userscript environment variables
+QUTE_URL="${QUTE_URL%%\#*}"
+QUTE_URL="${QUTE_URL%%\?*}"
+QUTE_URL="${QUTE_URL%%\&*}"
+
+DOMAIN="${QUTE_URL#*//}"
+DOMAIN="${DOMAIN%%/*}"
+
+REPO="${QUTE_URL#*"${DOMAIN}"/}"
+REPO="${REPO%/}"
+REPO="${REPO##*/}"
+[ "${REPO#\.}" != "${REPO}" ] && REPO="_${REPO}"
+
+BASE_REPO="${BASE_DIR}/${DOMAIN}/${REPO}"
+TEMP_REPO="${TEMP_DIR}/${DOMAIN}/${REPO}"
+
+# logging
+info() { printf 'message-info "%s"\n' "${*}" >>"${QUTE_FIFO}"; }
+warn() { printf 'message-warning "%s"\n' "${*}" >>"${QUTE_FIFO}"; }
+err() {
+ printf 'message-error "%s"\n' "${*}" >>"${QUTE_FIFO}"
+ return 1
+}
+clean() {
+ rm -rf "${TEMP_DIR}"
+ exit "${1:-0}"
+}
+
+# Check repo exists
+if [ -d "${BASE_REPO}"/.git ]; then
+ warn "${REPO} already cloned in ${BASE_REPO}"
+ clean 0
+
+# Try cloning
+else
+ info "Cloning ${DOMAIN}/${REPO}..."
+ git clone "${QUTE_URL}" "${TEMP_REPO}" ||
+ err "Error while cloning ${DOMAIN}/${REPO}, is it a repository?"
+
+ if [ ! -d "${TEMP_REPO}"/.git ]; then
+ err 'An error occurred, cloning failed...'
+ clean 2
+ fi
+fi
+
+# Move the temp folder to its final destination
+[ -d "${BASE_REPO%/*}" ] || mkdir -p "${BASE_REPO%/*}"
+mv "${TEMP_REPO}" "${BASE_REPO}"
+info "${REPO} successfully cloned in ${BASE_REPO}"
+
+# Run post hook
+if [ -n "${QUTE_POST_CLONE}" ]; then
+ eval "${QUTE_POST_CLONE}"
+fi
diff --git a/ar/.config/qutebrowser/userscripts/qute-pass b/ar/.config/qutebrowser/userscripts/qute-pass
index 6b071b8..064bd88 100755
--- a/ar/.config/qutebrowser/userscripts/qute-pass
+++ b/ar/.config/qutebrowser/userscripts/qute-pass
@@ -332,7 +332,11 @@ def main(arguments):
None,
[
extract_result.fqdn,
- extract_result.registered_domain,
+ (
+ extract_result.top_domain_under_public_suffix
+ if hasattr(extract_result, "top_domain_under_public_suffix")
+ else extract_result.registered_domain
+ ),
extract_result.ipv4,
private_domain,
netloc,
diff --git a/ar/.config/qutebrowser/userscripts/tab-manager b/ar/.config/qutebrowser/userscripts/tab-manager
new file mode 100755
index 0000000..fc244f7
--- /dev/null
+++ b/ar/.config/qutebrowser/userscripts/tab-manager
@@ -0,0 +1,529 @@
+#!/usr/bin/python3
+
+from datetime import datetime as datetime
+from os import environ as environ
+from os import listdir as listdir
+from os import path as path
+from os import remove as remove
+from sys import argv as argv
+from sys import exit as exit
+from time import sleep as sleep
+
+from yaml import safe_load as yaml_load
+
+
+# this splits all args passed to the script so they can be used.
+# args are passed like this "/path/to/session/files/dir/ COMMAND flag <?filename> flag <?filename> flag <filename> <?filename>"
+# since some flags can be given filenames, they must be split out to be parsed
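+# e.g. (illustrative) ["-f", "mysession", "-c"] is split into [["-f", "mysession"], ["-c"]]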
+def split_out_args(args):
+ split_args = []
+ # flip args list so as to iterate backwards, saves iteration
+ args.reverse()
+ # split the list by flag
+ for arg in args:
+ if arg[0] == "-":
+ split_args.append(args[: (args.index(arg) + 1)])
+ args = args[(args.index(arg) + 1) :]
+ # flip every list in the master list back to normal
+ for lst in split_args:
+ lst.reverse()
+ # reverse master list so that all elements are in order
+ split_args.reverse()
+ return split_args
+
+
+# generic function to check for a specified flag and return its arguments
+def check_flags(args, flag):
+ flag_found = False
+ flag_args = []
+ for arg in args:
+ if arg[0] == flag:
+ flag_found = True
+ flag_args = arg
+ break
+ else:
+ flag_args = []
+ return flag_found, flag_args
+
+
+# read HTML, return list of tabs
+def parse_html(html):
+ # get the tabs from the body
+ tabs = (
+ html.split('<p style="word-wrap: break-word; word-break: break-all;">')[1]
+ .split("</p>")[0]
+ .split("<br>\n<br>\n")
+ )
+ tabs = tabs[:-1]
+ tabs_list = []
+ # parse out the tabs from the body into list of tabs, [[url,title],[url,title]...]
+ for tab in tabs:
+ stuff = tab.split(" | ")[0].split(">")
+ title = stuff[1]
+ url = stuff[0].split('"')[1]
+ tab_info = [url, title]
+ tabs_list.append(tab_info)
+ return tabs_list
+
+
+# open and read an HTML file
+def read_html(session_path):
+ with open(session_path + ".html", "r") as html_file:
+ html = html_file.read()
+ return html
+
+
+# build HTML from list of tabs
+def build_html(session_path, open_tabs):
+ # get the file name from session path
+ doc_title = session_path.split("/")[-1].split(".")[0]
+ # build html document title from document title, number of tabs and date and time
+ title = str(
+ doc_title
+ + ", "
+ + str(len(open_tabs))
+ + " tabs, last updated "
+ + str(datetime.now()).split(".")[0]
+ )
+ doc_body = str()
+ # iterate over tabs, build HTML for the body of the HTML file
+ for tab in open_tabs:
+ tab_line = (
+ str(open_tabs.index(tab) + 1)
+ + '. <a href="'
+ + tab[0]
+ + '">'
+ + tab[1]
+ + " | "
+ + tab[0]
+ + "</a>\n<br>\n<br>\n"
+ )
+ doc_body = doc_body + tab_line
+ # build the HTML document
+ html_doc = (
+ "<html>\n<head>\n<title>"
+ + title
+ + '</title>\n</head>\n<body>\n<p style="word-wrap: break-word; word-break: break-all;">\n'
+ + doc_body
+ + "</p>\n</body>\n</html>"
+ )
+ return html_doc
+
+
+# writes HTML to specified file
+def write_html(session_path, html):
+ with open(session_path + ".html", "w+") as html_file:
+ html_file.write(html)
+
+
+# takes 1 list of tabs, checks for duplicate tabs in the same list
+# turns the tabs list [[url,title],[url,title]...] into a dict keyed by URL, so a duplicate URL simply overwrites the earlier entry
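+# e.g. [["https://a.example", "A"], ["https://a.example", "A (new)"]] collapses to [["https://a.example", "A (new)"]]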
+def check_for_duplicate_tabs(tabs_list):
+ tabs_list_dict = {}
+ tabs_list_new = []
+ for tab in tabs_list:
+ tabs_list_dict[tab[0]] = tab[1]
+ for item in tabs_list_dict.items():
+ tabs_list_new.append([item[0], item[1]])
+ return tabs_list_new
+
+
+# update the index file, to be called any time a modification to a tab session occurs
+def update_index_file(session_path):
+ sessions_list = []
+ for item in listdir(session_path):
+ filename = item.split(".")
+ if len(filename) > 1 and filename[1] == "html" and item != "index.html":
+ sessions_list.append(
+ [
+ "file:///" + session_path + item,
+ read_html(session_path + filename[0])
+ .split("</title>")[0]
+ .split("<title>")[1],
+ ]
+ )
+ index_path = session_path + "index"
+ write_html(index_path, build_html(index_path, sessions_list))
+
+
+# inform of an error, open the usage.txt file in browser, exit the program
+def inform_error(toast):
+ # the script is installed as "tab-manager" (no .py), so derive usage.txt from the script's directory instead
+ usage_file_path = path.join(path.dirname(script_path), "usage.txt")
+ run_command("open -t file://" + usage_file_path)
+ run_command("message-error '" + toast + "'")
+ exit()
+
+
+# run a command in qutebrowser
+def run_command(command):
+ with open(environ["QUTE_FIFO"], "a") as output:
+ output.write(command)
+ sleep(wait_time)
+
+
+# reads the qutebrowser session file, return the yaml inside
+def read_qutebrowser_session_file(session_path):
+ with open(session_path, "r") as session:
+ session_data = session.read()
+ return session_data
+
+
+# parse qutebrowser session data (hyphen-delimited yaml); "export_history" and "all_windows" are booleans, return the requested tabs
+# if export_history is true, return each tab's full history, otherwise only its active entry;
+# if all_windows is true, return tabs from every window, otherwise only from the active window
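+# session shape as read below (roughly): windows -> [{active, tabs -> [{history -> [{url, title, active}]}]}]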
+def get_qbsession_tabs(session_data, export_history=False, all_windows=False):
+ # for history in ['windows']['tabs']['history']
+ yaml_content = yaml_load(session_data)
+ tabs = []
+ for window in yaml_content["windows"]:
+ if all_windows == False:
+ if "active" in window:
+ for tab in window["tabs"]:
+ for history in tab["history"]:
+ if export_history == False:
+ if "active" in history:
+ tabs.append([history["url"], history["title"]])
+ else:
+ tabs.append([history["url"], history["title"]])
+ else:
+ for tab in window["tabs"]:
+ for history in tab["history"]:
+ if export_history == False:
+ if "active" in history:
+ tabs.append([history["url"], history["title"]])
+ else:
+ tabs.append([history["url"], history["title"]])
+ return tabs
+
+
+# saves focused tab to specified session file unless it is already in there
+def save(session_path, args):
+ tab = [[environ["QUTE_URL"], environ["QUTE_TITLE"]]]
+ check_f = check_flags(args, "-f")
+ if not check_f[0] or len(check_f[1]) < 2:
+ inform_error("no -f or no output session specified!")
+ if "index" in check_f[1]:
+ inform_error("cannot modify index!")
+ file_path = session_path + check_f[1][1]
+ if path.exists(file_path + ".html"):
+ tabs_list = parse_html(read_html(file_path))
+ tabs_list = tabs_list + tab
+ tabs_list = check_for_duplicate_tabs(tabs_list)
+ else:
+ tabs_list = tab
+ write_html(file_path, build_html(file_path, tabs_list))
+ update_index_file(session_path)
+ run_command(
+ "message-info 'focused tab successfully saved to " + check_f[1][1] + "'"
+ )
+
+
+# saves all open tabs to a session file, removes duplicates
+def save_all(session_path, args):
+ open_tabs = []
+ check_f = check_flags(args, "-f")
+ if not check_f[0] or len(check_f[1]) < 2:
+ inform_error("no -f or no output session specified!")
+ if "index" in check_f[1]:
+ inform_error("cannot modify index!")
+ file_path = session_path + check_f[1][1]
+ close_tabs = check_flags(args, "-c")[0]
+ overwrite = check_flags(args, "-o")[0]
+ run_command("session-save " + file_path)
+ open_tabs = get_qbsession_tabs(read_qutebrowser_session_file(file_path))
+ # remove the recently created qutebrowser session file that has no extension, not the .html file.
+ remove(file_path)
+ if overwrite == True:
+ open_tabs = check_for_duplicate_tabs(open_tabs)
+ write_html(file_path, build_html(file_path, open_tabs))
+ run_command("message-info '-o found, overwriting specified session'")
+ else:
+ if not path.exists(file_path + ".html"):
+ run_command(
+ "message-info 'session "
+ + check_f[1][1]
+ + " does not exist; creating...'"
+ )
+ with open(file_path + ".html", "w"):
+ pass
+ else:
+ open_tabs = parse_html(read_html(file_path)) + open_tabs
+ open_tabs = check_for_duplicate_tabs(open_tabs)
+ write_html(file_path, build_html(file_path, open_tabs))
+ update_index_file(session_path)
+ run_command(
+ "message-info 'all open tabs successfully saved to " + check_f[1][1] + "'"
+ )
+ if close_tabs == True:
+ run_command("tab-only")
+ run_command("open " + file_path + ".html")
+ else:
+ run_command("open -t " + file_path + ".html")
+
+
+# open command, opens one or more html files
+def open_session_files(session_path, args):
+ check_f = check_flags(args, "-f")
+ files_specified = check_f[0]
+ if len(check_f[1]) < 2:
+ files_specified = False
+ if files_specified == True:
+ for file in check_f[1][1:]:
+ run_command("open -t " + "file:///" + session_path + file + ".html")
+ run_command("message-info 'successfully opened " + file + "'")
+ else:
+ run_command("open -t " + "file:///" + session_path + "index.html")
+ run_command("message-info 'no session file specified, opening index.'")
+
+
+# restore command, restores one or more sessions to one window
+def restore_sessions(session_path, args):
+ check_f = check_flags(args, "-f")
+ files_list = []
+ # if no file is specified, fall back to the focused tab; this works when the focused tab is a session file from the session directory
+ if not check_f[0] or len(check_f[1]) < 2:
+ open_session_file = environ["QUTE_URL"]
+ if session_path in open_session_file:
+ files_list = [open_session_file.split("/")[-1].split(".")[0]]
+ else:
+ inform_error(
+ "you must specify sessions to restore or have a session file open in browser and in focus!"
+ )
+ else:
+ files_list = check_f[1][1:]
+ close_tabs = check_flags(args, "-c")[0]
+ new_window = check_flags(args, "-n")[0]
+ if close_tabs == True:
+ run_command("tab-only")
+ run_command(
+ "message-info '-c found, closing all open tabs before restoring...'"
+ )
+ for file in files_list:
+ tab_list = parse_html(read_html(session_path + file))
+ for tab in tab_list:
+ if close_tabs == True:
+ run_command("open " + tab[0])
+ close_tabs = False
+ else:
+ run_command("open -t " + tab[0])
+ run_command("message-info 'successfully restored " + file + "'")
+
+
+# removes the specified session files
+def delete_sessions(session_path, args):
+ check_f = check_flags(args, "-f")
+ if not check_f[0] or len(check_f[1]) < 2:
+ inform_error("you must specify sessions to delete!")
+ for file in check_f[1][1:]:
+ if file == "index":
+ inform_error("cannot modify index!")
+ file_path = session_path + file
+ remove(file_path + ".html")
+ run_command("message-info 'session " + file + " successfully deleted.'")
+ update_index_file(session_path)
+
+
+# merge sessions into one file: -i merges the listed sessions, -a merges every session in the directory except the listed ones
+def merge_sessions(session_path, args):
+ sessions_to_merge = []
+ final_session_tabs = []
+ check_f = check_flags(args, "-f")
+ if not check_f[0] or len(check_f[1]) < 2:
+ inform_error("missing -f or no output session name specified!")
+ file_path = session_path + check_f[1][1]
+ check_i = check_flags(args, "-i")
+ check_a = check_flags(args, "-a")
+ if "index" in check_i[1]:
+ inform_error("cannot modify index!")
+ if "index" in check_f[1]:
+ inform_error("cannot modify index!")
+ if check_i[0] and check_a[0]:
+ inform_error("cannot use -i and -a at the same time!")
+ elif not check_i[0]:
+ if not check_a[0]:
+ inform_error("must use -a or -i flag in merge command!")
+ else:
+ if len(check_a[1]) < 2:
+ inform_error("-a found but no files specified")
+ for item in listdir(session_path):
+ if (
+ item.split(".")[1] == "html"
+ and item != "index.html"
+ and item.split(".")[0] not in check_a[1][1:]
+ ):
+ sessions_to_merge.append(item.split(".")[0])
+ else:
+ if len(check_i[1]) < 2:
+ inform_error("-i found but no files specified!")
+ for item in check_i[1][1:]:
+ sessions_to_merge.append(item)
+ for item in sessions_to_merge:
+ for tab in parse_html(read_html(session_path + item)):
+ final_session_tabs.append(tab)
+ # if -k flag not found, delete merged sessions
+ if not check_flags(args, "-k")[0]:
+ for item in sessions_to_merge:
+ remove(session_path + item + ".html")
+ run_command("message-info '" + item + " deleted.'")
+ else:
+ run_command("message-info '-k found, input sessions not deleted.'")
+ final_session_tabs = check_for_duplicate_tabs(final_session_tabs)
+ write_html(file_path, build_html(file_path, final_session_tabs))
+ run_command("message-info 'specified sessions merged to " + check_f[1][1] + "'")
+ update_index_file(session_path)
+
+
+# export specified qutebrowser session file into html session file
+def export_session(session_path, args):
+ check_f = check_flags(args, "-f")
+ check_p = check_flags(args, "-p")
+ if not check_f[0] or len(check_f[1]) < 2:
+ file_path = session_path + check_p[1][1].split("/")[-1].split(".")[0]
+ else:
+ file_path = session_path + check_f[1][1]
+ if "index" == file_path.split("/")[-1]:
+ inform_error("cannot modify index!")
+ if path.exists(file_path + ".html"):
+ inform_error(
+ "a file with the same name as the output file already exists. Please specify a different file name for export."
+ )
+ if not check_p[0] or len(check_p[1]) < 2:
+ inform_error("missing -p or no qutebrowser session file specified!")
+ session_file = check_p[1][1]
+ session_tabs = []
+ export_history = check_flags(args, "-h")[0]
+ all_windows = check_flags(args, "-w")[0]
+ session_tabs = get_qbsession_tabs(
+ read_qutebrowser_session_file(session_file), export_history, all_windows
+ )
+ session_tabs = check_for_duplicate_tabs(session_tabs)
+ write_html(file_path, build_html(file_path, session_tabs))
+ run_command(
+ "message-info 'specified qutebrowser session successfully exported to "
+ + file_path
+ + ".html'"
+ )
+ if check_flags(args, "-r")[0] == True:
+ remove(session_file)
+ run_command("message-info '-r found, deleting specified qutebrowser session'")
+ update_index_file(session_path)
+ run_command("open -t file:///" + file_path + ".html")
+
+
+# remove a tab from a session file by its index
+def remove_tab(session_path, args):
+ check_f = check_flags(args, "-f")
+ check_t = check_flags(args, "-t")
+ if not check_t[0]:
+ inform_error(
+ "-t missing or no index specified! You must specify one or more links to remove from the session!"
+ )
+ if not check_f[0] or len(check_f[1]) < 2:
+ open_tab = environ["QUTE_URL"]
+ if session_path in open_tab:
+ file_path = open_tab.split(".")[0].split("//")[1]
+ else:
+ inform_error(
+ "you must specify a session to modify or have a session file open in browser and in focus!"
+ )
+ else:
+ file_path = session_path + check_f[1][1]
+ if "index" == file_path.split("/")[-1]:
+ inform_error("cannot modify index!")
+ tab_list = parse_html(read_html(file_path))
+ indexes_list = check_t[1][1:]
+ indexes_int = []
+ for ind in indexes_list:
+ indexes_int.append(int(ind))
+ indexes_int.sort(reverse=True)
+ for ind in indexes_int:
+ tab_list.pop(ind - 1)
+ write_html(file_path, build_html(file_path, tab_list))
+ update_index_file(session_path)
+ if not check_f[0] or len(check_f[1]) < 2:
+ run_command("reload")
+ # TODO check if session is now empty, if so load index file and delete the session
+
+
+# changes the title in the file, changes the name of the file, updates index
+def rename_session(session_path, args):
+ check_f = check_flags(args, "-f")
+ if not check_f[0] or len(check_f[1]) < 2:
+ open_tab = environ["QUTE_URL"]
+ if session_path in open_tab:
+ file_path = open_tab.split(".")[0].split("//")[1]
+ old_filename = file_path.split("/")[-1]
+ else:
+ inform_error(
+ "you must specify a session to modify or have a session file open in browser and in focus!"
+ )
+ else:
+ old_filename = check_f[1][1]
+ file_path = session_path + old_filename
+ if "index" == old_filename:
+ inform_error("cannot modify index!")
+ check_n = check_flags(args, "-n")
+ if not check_n[0] or len(check_n[1]) < 2:
+ inform_error(
+ "missing -n or no output session specified! what do you want to change the name to?"
+ )
+ new_filename = check_n[1][1]
+ html_doc = read_html(file_path).replace(
+ "<title>" + old_filename, "<title>" + new_filename
+ )
+ write_html(session_path + new_filename, html_doc)
+ remove(file_path + ".html")
+ update_index_file(session_path)
+ run_command(
+ "message-info '"
+ + old_filename
+ + " successfully renamed to "
+ + new_filename
+ + "'"
+ )
+ if not check_f[0] or len(check_f[1]) < 2:
+ run_command("open " + session_path + new_filename + ".html")
+
+
+def run():
+ sessions_path = argv[1]
+ if len(argv) < 3:
+ inform_error("no command given!")
+ command = argv[2]
+ if len(argv) > 3:
+ args = split_out_args(argv[3:])
+ else:
+ args = []
+ if command == "save":
+ save(sessions_path, args)
+ elif command == "save-all":
+ save_all(sessions_path, args)
+ elif command == "open":
+ open_session_files(sessions_path, args)
+ elif command == "restore":
+ restore_sessions(sessions_path, args)
+ elif command == "merge":
+ merge_sessions(sessions_path, args)
+ elif command == "delete":
+ delete_sessions(sessions_path, args)
+ elif command == "export":
+ export_session(sessions_path, args)
+ elif command == "remove":
+ remove_tab(sessions_path, args)
+ elif command == "rename":
+ rename_session(sessions_path, args)
+ elif command == "update-index":
+ update_index_file(sessions_path)
+ run_command("message-info 'index updated.'")
+ elif command == "help":
+ inform_error("everybody needs a little help sometimes.")
+ else:
+ inform_error("invalid command!")
+
+
+script_path = argv[0]
+wait_time = 0.3
+print(argv)
+run()
diff --git a/ar/.config/zsh/scripts.zsh b/ar/.config/zsh/scripts.zsh
index 3de8a14..6c217e8 100644
--- a/ar/.config/zsh/scripts.zsh
+++ b/ar/.config/zsh/scripts.zsh
@@ -450,8 +450,10 @@ function fzf_kill_process() {
awk '{print $2}' |
xargs -r bash -c '
if ! kill "$1" 2>/dev/null; then
- echo "Regular kill failed. Attempting with sudo..."
- sudo kill "$1" || echo "Failed to kill process $1" >&2
+ if ! kill -9 "$1"; then
+ echo "Regular kill failed. Attempting with sudo..."
+ sudo kill "$1" || echo "Failed to kill process $1" >&2
+ fi
fi
' --
}
diff --git a/ar/.local/bin/rssadd b/ar/.local/bin/rssadd
index a49c5f8..f78a538 100755
--- a/ar/.local/bin/rssadd
+++ b/ar/.local/bin/rssadd
@@ -14,5 +14,5 @@ rssfile="${XDG_CONFIG_HOME:-${HOME}/.config}/newsboat/urls"
if awk '{print $1}' "$rssfile" | grep "^$url$" >/dev/null; then
notify-send "You already have this RSS feed."
else
- echo "$url" >>"$rssfile" && notify-send "RSS feed added."
+ echo "$url $2" >>"$rssfile" && notify-send "RSS feed added."
fi
diff --git a/ar/.local/bin/rssget b/ar/.local/bin/rssget
index 5d470b4..f51ecb3 100755
--- a/ar/.local/bin/rssget
+++ b/ar/.local/bin/rssget
@@ -12,104 +12,115 @@
# This script requires rssadd to add feeds to the list.
-getlink () {
- local url="$1"
- feeds="$(curl -s "$url" | grep -Ex '.*type=.*(rss|rdf|atom).*' | sed 's/ //g')"
- url="$(echo $url | sed 's|^\(https://[^/]*/\).*|\1|')"
-
- for rsspath in $feeds; do
- rsspath="$(echo $rsspath | sed -n "s|.*href=['\"]\([^'\"]*\)['\"].*|\1|p")"
- if echo "$rsspath" | grep "http" > /dev/null; then
- link="$rsspath"
- elif echo "$rsspath" | grep -E "^/" > /dev/null; then
- link="$url$(echo $rsspath | sed 's|^/||')"
- else
- link="$url$rsspath"
- fi
- echo $link
- done
+getlink() {
+ local url="$1"
+ feeds="$(curl -s "$url" | grep -Ex '.*type=.*(rss|rdf|atom).*' | sed 's/ //g')"
+ url="$(echo $url | sed 's|^\(https://[^/]*/\).*|\1|')"
+
+ for rsspath in $feeds; do
+ rsspath="$(echo $rsspath | sed -n "s|.*href=['\"]\([^'\"]*\)['\"].*|\1|p")"
+ if echo "$rsspath" | grep "http" >/dev/null; then
+ link="$rsspath"
+ elif echo "$rsspath" | grep -E "^/" >/dev/null; then
+ link="$url$(echo $rsspath | sed 's|^/||')"
+ else
+ link="$url$rsspath"
+ fi
+ echo $link
+ done
}
getRedditRss() {
- echo "${1%/}.rss"
+ echo "${1%/}.rss"
}
getYoutubeRss() {
- local url="$1"
- path=$(echo "$url" | sed -e 's|^http[s]*://||')
- case "$path" in
- *"/channel/"*) channel_id="$(echo $path | sed -r 's|.*channel/([^/]*).*|\1|')" && feed="https://www.youtube.com/feeds/videos.xml?channel_id=${channel_id}" ;;
- *"/c/"*|*"/user/"*)
- feed=$(wget -q "$url" -O tmp_rssget_yt \
- && sed -n 's|.*\("rssUrl":"[^"]*\).*|\1|; p' tmp_rssget_yt \
- | grep rssUrl \
- | sed 's|"rssUrl":"||') ;;
- esac
- echo "$feed"
+ local url="$1"
+ path=$(echo "$url" | sed -e 's|^http[s]*://||')
+ case "$path" in
+ *"/channel/"*) channel_id="$(echo $path | sed -r 's|.*channel/([^/]*).*|\1|')" && feed="https://www.youtube.com/feeds/videos.xml?channel_id=${channel_id}" ;;
+ *"/c/"* | *"/user/"*)
+ feed=$(wget -q "$url" -O tmp_rssget_yt &&
+ sed -n 's|.*\("rssUrl":"[^"]*\).*|\1|; p' tmp_rssget_yt |
+ grep rssUrl |
+ sed 's|"rssUrl":"||')
+ ;;
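+ # fallback for other URL forms (e.g. /@handle pages): scrape the page for the embedded "rssUrl" to recover the channel_id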
+ *)
+ channel_id="$(curl -sA "Mozilla/5.0" "$url" | grep -Po '"rssUrl":"https://www.youtube.com/feeds/videos.xml\?channel_id=\K(UC[0-9A-Za-z_-]+)')"
+ feed="https://www.youtube.com/feeds/videos.xml?channel_id=${channel_id}"
+ ;;
+ esac
+ echo "$feed"
}
getVimeoRss() {
- local url="$1"
- if echo "$url" | grep -q "/videos$"; then
- feed_url=$(echo "$url" | sed 's/\/videos$//' | sed 's/\/$/\/rss/')
- else
- feed_url="${url}/videos/rss"
- fi
- echo "$feed_url"
+ local url="$1"
+ if echo "$url" | grep -q "/videos$"; then
+ feed_url=$(echo "$url" | sed 's/\/videos$//' | sed 's/\/$/\/rss/')
+ else
+ feed_url="${url}/videos/rss"
+ fi
+ echo "$feed_url"
}
-getGithubRss () {
- local url="${1%/}"
- if echo $url | grep -E "github.com/[^/]*/[a-zA-Z0-9].*" >/dev/null ; then
- echo "${url}/commits.atom"
- echo "${url}/releases.atom"
- echo "${url}/tags.atom"
- elif echo $url | grep -E "github.com/[^/]*(/)" >/dev/null ; then
- echo "${url}.atom"
- fi
+getGithubRss() {
+ local url="${1%/}"
+ if echo $url | grep -E "github.com/[^/]*/[a-zA-Z0-9].*" >/dev/null; then
+ echo "${url}/commits.atom"
+ echo "${url}/releases.atom"
+ echo "${url}/tags.atom"
+ elif echo $url | grep -E "github.com/[^/]*(/)" >/dev/null; then
+ echo "${url}.atom"
+ fi
}
-getGitlabRss () {
- local url="${1%/}"
- echo "${url}.atom"
+getGitlabRss() {
+ local url="${1%/}"
+ echo "${url}.atom"
}
-getMediumRss () {
- echo $1 | sed 's|/tag/|/feed/|'
+getMediumRss() {
+ echo $1 | sed 's|/tag/|/feed/|'
}
-
-if [ -n "$1" ] ; then
- url="$1"
+if [ -n "$1" ]; then
+ url="$1"
else
- url="$(xclip -selection clipboard -o)"
- [ -z "$url" ] && echo "usage: $0 url 'tag1 tag2 tag3'" && exit 1
+ url="$(xclip -selection clipboard -o)"
+ [ -z "$url" ] && echo "usage: $0 url 'tag1 tag2 tag3'" && exit 1
fi
+tags="$2"
+
declare -a list=()
-yt_regex="^(http(s)?://)?((w){3}\.)?(youtube\.com|invidio\.us|invidious\.flokinet\.to|invidious\.materialio\.us|iv\.datura\.network|invidious\.perennialte\.ch|invidious\.fdn\.fr|invidious\.private\.coffee|invidious\.protokolla\.fi|invidious\.privacyredirect\.com|yt\.artemislena\.eu|yt\.drgnz\.club|invidious\.incogniweb\.net|yewtu\.be|inv\.tux\.pizza|invidious\.reallyaweso\.me|iv\.melmac\.space|inv\.us\.projectsegfau\.lt|inv\.nadeko\.net|invidious\.darkness\.services|invidious\.jing\.rocks|invidious\.privacydev\.net|inv\.in\.projectsegfau\.lt|invidious\.drgns\.space)/(channel|user|c).+"
+yt_regex="^(http(s)?://)?((w){3}\.)?(youtube\.com|invidio\.us|invidious\.flokinet\.to|invidious\.materialio\.us|iv\.datura\.network|invidious\.perennialte\.ch|invidious\.fdn\.fr|invidious\.private\.coffee|invidious\.protokolla\.fi|invidious\.privacyredirect\.com|yt\.artemislena\.eu|yt\.drgnz\.club|invidious\.incogniweb\.net|yewtu\.be|inv\.tux\.pizza|invidious\.reallyaweso\.me|iv\.melmac\.space|inv\.us\.projectsegfau\.lt|inv\.nadeko\.net|invidious\.darkness\.services|invidious\.jing\.rocks|invidious\.privacydev\.net|inv\.in\.projectsegfau\.lt|invidious\.drgns\.space)/(@|(channel|user|c)).+"
reddit_regex="^(http(s)?://)?((w){3}\.)?reddit\.com.*"
vimeo_regex="^(http(s)?://)?((w){3}.)?vimeo\.com.*"
-if echo $url | grep -Ex "$yt_regex" >/dev/null ; then
- list="$(getYoutubeRss "$url")"
-elif echo $url | grep -Ex "$reddit_regex" >/dev/null ; then
- list="$(getRedditRss "$url")"
+if echo $url | grep -Ex "$yt_regex" >/dev/null; then
+ list="$(getYoutubeRss "$url")"
+ channel_name="${url##*@}"
+ [ -z "$tags" ] && tags="\"~$channel_name\" Youtube"
+elif echo $url | grep -Ex "$reddit_regex" >/dev/null; then
+ list="$(getRedditRss "$url")"
# vimeo actually works with getlink
-elif echo $url | grep -E "$vimeo_regex" >/dev/null ; then
- list="$(getVimeoRss "$url")"
-elif echo $url | grep -E "github.com" >/dev/null ; then
- list="$(getGithubRss "$url")"
+elif echo $url | grep -E "$vimeo_regex" >/dev/null; then
+ list="$(getVimeoRss "$url")"
+elif echo $url | grep -E "github.com" >/dev/null; then
+ list="$(getGithubRss "$url")"
+ repo="${url##*/}"
+ author="${url%/*}"
+ author="${author##*/}"
+ [ -z "$tags" ] && tags="\"~$author's $repo\" Git"
# gitlab also works with getlink
-elif echo $url | grep -E "gitlab.com/[a-zA-Z0-9].*" >/dev/null ; then
- list="$(getGitlabRss "$url")"
-elif echo $url | grep -E "medium.com/tag" >/dev/null ; then
- list="$(getMediumRss "$url")"
+elif echo $url | grep -E "gitlab.com/[a-zA-Z0-9].*" >/dev/null; then
+ list="$(getGitlabRss "$url")"
+elif echo $url | grep -E "medium.com/tag" >/dev/null; then
+ list="$(getMediumRss "$url")"
else
- list="$(getlink "$url")"
+ list="$(getlink "$url")"
fi
[ "$(echo "$list" | wc -l)" -eq 1 ] && chosen_link="$list" || chosen_link=$(printf '%s\n' "${list[@]}" | dmenu -p "Choose a feed:")
-tags="$2"
ifinstalled rssadd && rssadd "$chosen_link" "$tags"
echo "$chosen_link" "$tags"