import requests
import sys
import subprocess
from datetime import date, datetime
from bs4 import BeautifulSoup
from config import config
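
# Helpers that fetch or build the individual HTML snippets for a daily personal
# start page: century-old Chicago news, today's Wikipedia and Wikiquote pages,
# comics, upcoming calendar(1) entries, a Thoreau journal entry, and a
# year-progress bar.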


def get_old_news():
    print("getting old news")
    # Search Chronicling America for Chicago-area papers published 100 years ago today.
    year = int(date.today().strftime("%Y")) - 100
    # URL-encoded dd/mm/yyyy ("%%" renders as a literal "%", so this yields dd%2Fmm%2Fyyyy).
    century_ago = date.today().strftime("%d%%2F%m%%2F") + str(year)
    url = f"https://chroniclingamerica.loc.gov/search/pages/results/?state=Illinois&lccn=sn84031489&lccn=sn84031490&lccn=sn84031492&lccn=sn84025828&lccn=sn90053126&lccn=sn91055768&lccn=sn87062321&lccn=sn82014064&lccn=sn86056950&lccn=sn84025819&lccn=sn91055455&dateFilterType=range&date1={century_ago}&date2={century_ago}&language=&ortext=&andtext=&phrasetext=&proxtext=&proxdistance=5&rows=1&searchType=advanced&sequence=1"
    r = requests.get(url)
    soup = BeautifulSoup(r.text, features="lxml")
    try:
        href = soup.select(".highlite a")[0]["href"]
        link = "https://chroniclingamerica.loc.gov" + href
        return f'<div><a href="{link}">Old Chicago News from {year}</a></div>'
    except (IndexError, KeyError):
        pass
    return "No old news"


def get_today_wikipedia():
    print("getting today's wikipedia")
    full_url = "https://en.wikipedia.org/wiki/%s" % date.today().strftime("%B_%d")
    return '<div><a href="%s">Today\'s Wikipedia</a></div>' % (full_url)


def get_week_holidays():
    print("getting holidays")
    # calendar(1) with a custom calendar file, looking ahead 14 days.
    command = "calendar -f /data/calendar.mark -A 14".split(" ")
    output = subprocess.check_output(command)
    output = output.decode("utf-8").strip().split("\n")
    return "</li>\n<li>".join(output) or "No holidays this fortnight."


def get_crossword():
    date_str = date.today().strftime("%Y-%m-%d")
    url = f"https://simplydailypuzzles.com/daily-cryptic/index.html?puzz=dc1-{date_str}"
    return f'<div><a href="{url}">Cryptic Crossword</a></div>'


def get_today_wikiquote():
    print("getting today's wikiquote")
    full_url = "https://en.wikiquote.org/wiki/%s" % date.today().strftime("%B_%d")
    r = requests.get(full_url)
    soup = BeautifulSoup(r.text, features="lxml")
    # Locate the day's quote table by anchoring on the "2020" text; fragile if the page layout changes.
    table = str(soup.find(text="2020").parent.parent.next_sibling.next_sibling)
    # Make relative links absolute.
    table = table.replace('href="/', 'href="https://en.wikiquote.org/')
    return '<div style="border: 1px solid black">%s</div>' % table


def get_calvin_and_hobbes():
    print("getting calvin and hobbes")
    # Map the current year onto a year during the strip's original run.
    year = int(date.today().strftime("%Y")) % 9 + 1986
    comic_date = str(year) + date.today().strftime("/%m/%d")
    full_url = "https://www.gocomics.com/calvinandhobbes/%s" % comic_date
    r = requests.get(full_url)
    soup = BeautifulSoup(r.text, features="lxml")
    if "Today on" not in str(soup.title):  # gocomics serves today's comic instead of a 404
        comic_src = soup.select(".item-comic-image")[0].img["src"]
        return '<div><a href="%s">Calvin and Hobbes</a></div>' % (comic_src)
    else:
        return ""


def get_peanuts():
    print("getting peanuts")
    comic_date = date.today().strftime("%Y/%m/%d")
    full_url = "https://www.gocomics.com/peanuts/%s" % comic_date
    r = requests.get(full_url)
    soup = BeautifulSoup(r.text, features="lxml")
    comic_src = soup.select(".item-comic-image")[0].img["src"]
    return '<div><a href="%s">Peanuts</a></div>' % (comic_src)


# Sites without feeds that need to be checked themselves
def get_homepage_links():
    pages = [
        {"url": "https://aldaily.com/", "title": "Arts and Letters Daily"},
        {"url": "https://www.powerlanguage.co.uk/wordle/", "title": "Wordle"},
    ]
    page_links = [
        f'<div><a href="{item["url"]}">{item["title"]}</a></div>'
        for item in pages
    ]
    return "</li>\n<li>".join(page_links)


def check_for_starts_with_line(lst, line):
    # True if the line starts with any of the prefixes in lst.
    for md in lst:
        if line.startswith(md):
            return True
    return False


def get_thoreau():
    print("getting thoreau")
    # Read the journal entry from this date 183 years ago (the journal begins in 1837).
    year_int = int(date.today().strftime("%Y")) - 183
    year = str(year_int)
    year_stop = str(year_int + 1)
    # The journal abbreviates date headings inconsistently, so try several formats.
    month_days = [
        date.today().strftime("_%b %-d."),
        date.today().strftime("_%b. %-d."),
        date.today().strftime("_%B %-d."),
        date.today().strftime("_%B. %-d."),
    ]
    filename = config["thoreau"]["journal1"]
    with open(filename) as f:
        lines = f.readlines()
    # Find the span of lines covered by the target year
    year_start_idx = -1
    for i in range(len(lines)):
        if lines[i].startswith(year):
            year_start_idx = i + 1
            break
    year_stop_idx = -1
    for i in range(year_start_idx, len(lines)):
        if lines[i].startswith(year_stop):
            year_stop_idx = i - 2
            break
    entry_start_idx = -1
    # Find the lines inside the year that the date lies on
    i = year_start_idx
    while i < year_stop_idx:
        if check_for_starts_with_line(month_days, lines[i]):
            entry_start_idx = i - 2
            i += 1
            break
        i += 1
    entry_end_idx = -1
    while i < year_stop_idx:
        if lines[i].startswith("_"):
            entry_end_idx = i - 2
            break
        i += 1
    # If no later heading follows, the entry runs to the end of the year
    if entry_start_idx != -1 and entry_end_idx == -1:
        entry_end_idx = year_stop_idx
    # If the date was found, join the lines of the entry
    if entry_start_idx != -1 and entry_end_idx != -1:
        return "".join(lines[entry_start_idx:entry_end_idx])
    return "No Thoreau entry on " + month_days[0] + " " + year


def year_progress_bar(width=50):
    # Render an ASCII progress bar showing how far through the year we are.
    day_of_year = float(datetime.now().strftime("%j"))
    count = width * (day_of_year / 365)
    year_string = "["
    for i in range(width):
        if i < count:
            year_string += "#"
        else:
            year_string += "_"
    year_string += "]"
    return f"<pre>Year: {year_string}</pre>"
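

# A minimal sketch of how these snippet functions might be assembled into the
# homepage HTML. This assembly step is not part of the listing above; the
# build_page() name, the <ul>/<li> wrapping, and the output path are assumptions
# for illustration only.
def build_page():
    sections = [
        get_old_news(),
        get_today_wikipedia(),
        get_crossword(),
        get_today_wikiquote(),
        get_calvin_and_hobbes(),
        get_peanuts(),
        get_homepage_links(),
        # get_week_holidays() returns items pre-joined with </li>\n<li>,
        # so wrap the whole string in a list.
        "<ul><li>" + get_week_holidays() + "</li></ul>",
        "<pre>" + get_thoreau() + "</pre>",
        year_progress_bar(),
    ]
    return "\n".join(sections)


if __name__ == "__main__":
    with open("index.html", "w") as f:
        f.write(build_page())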