import requests, os, re
from bs4 import BeautifulSoup
from datetime import date, datetime
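# Scrapes the current writing prompts from a number of Dreamwidth communities
# and collects them as an Org-mode list in prompts.org. Every community is
# handled the same way: fetch a tag/feed page in ?style=light markup, take the
# first matching <h3> heading, follow its link to the entry, and pull the
# prompt text out of whichever element that community uses (<strong>, <center>,
# <i>, a table cell, ...). Each section is wrapped in try/except so one broken
# community doesn't stop the rest of the run.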
# a logged-in session is needed for communities with nsfw content
login_url = "https://www.dreamwidth.org/login?ret=1"
data = {
    "user": "fakeapi",
    "password": "thisap1isfalse"
}
if os.path.exists("prompts.org"):
    os.remove("prompts.org")
thefile = open("prompts.org", "a")
today = int(date.today().strftime("%d"))
month = str(date.today().strftime("%B"))
monthstring = ".*" + month + ".*"
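# monthstring matches entry titles that contain the current month's name
# (several communities put the month in their challenge post titles).
# 100words: latest !prompt-tagged post; the prompt itself is in a <strong> tag.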
try:
    cent = "https://100words.dreamwidth.org/tag/!prompt?style=light&tag=%21prompt"
    centpage = requests.get(cent)
    centsoup = BeautifulSoup(centpage.content, "html.parser")
    centprompts = centsoup.find_all("h3", string=lambda text: "prompt:" in text.lower())
    centsubsoup = BeautifulSoup(str(centprompts[0]), "html.parser")
    centurl = centsubsoup.find("a")
    centprompt = (centurl["href"])
    centpromptnew = (centurl["href"] + "?style=light")
    centpromptpage = requests.get(centpromptnew)
    centpromptsoup = BeautifulSoup(centpromptpage.content, "html.parser")
    centprompttext = centpromptsoup.find(class_="entry-content")
    centtheprompt = centprompttext.find("strong")
    print("100words (100 words): \033[1m" + centtheprompt.text.lower() + "\033[0m (" + centprompt + ")\n")
    thefile.write("- [[" + centprompt + "][100words]] (100 words): *" + centtheprompt.text.lower() + "*\n")
except:
    pass
# for this one we need to extract the right entry from a list, which may or may not be an <ol>;
# also need to use the right month, as the next month's prompts are posted in advance
# now defunct??
# monthstring = ".*" + month + ".*"
# thirtyone = "https://31-days.dreamwidth.org/tag/!prompts?style=light&tag=%21prompts"
# thirtyonepage = requests.get(thirtyone)
# thirtyonesoup = BeautifulSoup(thirtyonepage.content, "html.parser")
# thirtyoneprompts = thirtyonesoup.find_all("h3", string = re.compile(monthstring))
# thirtyonesubsoup = BeautifulSoup(str(thirtyoneprompts[0]), "html.parser")
# thirtyoneurl = thirtyonesubsoup.find("a")
# thirtyoneprompt = (thirtyoneurl["href"])
# thirtyonepromptnew = (thirtyoneurl["href"] + "?style=light")
# thirtyonepromptpage = requests.get(thirtyonepromptnew)
# thirtyonepromptsoup = BeautifulSoup(thirtyonepromptpage.content, "html.parser")
# thirtyoneprompttext = thirtyonepromptsoup.find(class_="entry-content")
# if "<ol>" in str(thirtyoneprompttext):
# thirtyonetheprompt = thirtyoneprompttext.select("ol > li")[today - 1].get_text(strip=True)
# else:
# interprompt = list(thirtyoneprompttext.stripped_strings)
# thirtyonelist = []
# for prompt in interprompt:
# if len(prompt) < 5:
# promptnum = interprompt.index(prompt)
# newnum = promptnum + 1
# thirtyonelist.append(prompt + interprompt[newnum])
# else:
# thirtyonelist.append(prompt)
# intsearch = str(today) + "."
# thirtyonetheprompt = str([item for item in thirtyonelist if item.startswith(intsearch)])[2:-2]
# print("31-days (any): \033[1m" + thirtyonetheprompt.lower() + "\033[0m (" + thirtyoneprompt + ")\n")
# thefile.write("- [[" + thirtyoneprompt + "][31-days]] (any): *" + thirtyonetheprompt.lower() + "*\n")
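# anythingdrabble: latest mod post; the prompt is in a <center> tag, with any
# trailing spaces stripped off before printing.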
try:
    ad = "https://anythingdrabble.dreamwidth.org/tag/mod!+post?style=light&tag=mod%21+post"
    adpage = requests.get(ad)
    adsoup = BeautifulSoup(adpage.content, "html.parser")
    adprompts = adsoup.find_all("h3", string=lambda text: "prompt post" in text.lower())
    adsubsoup = BeautifulSoup(str(adprompts[0]), "html.parser")
    adurl = adsubsoup.find("a")
    adprompt = (adurl["href"])
    adpromptnew = (adurl["href"] + "?style=light")
    adpromptpage = requests.get(adpromptnew)
    adpromptsoup = BeautifulSoup(adpromptpage.content, "html.parser")
    adprompttext = adpromptsoup.find(class_="entry-content")
    adtheprompt = adprompttext.find("center")
    adstrippable = str(adtheprompt.text)
    while adstrippable[-1] == " ":
        adstrippable = adstrippable[:-1]
    print("anythingdrabble (100, 200, 300, 400, or 500 words): \033[1m" + adstrippable.lower() + "\033[0m (" + adprompt + ")\n")
    thefile.write("- [[" + adprompt + "][anythingdrabble]] (100, 200, 300, 400, or 500 words): *" + adstrippable.lower() + "*\n")
except:
    pass
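# dove-drabbles: latest prompt post on the community front page; the prompt is in an <i> tag.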
try:
    dove = "https://dove-drabbles.dreamwidth.org/?style=light"
    dovepage = requests.get(dove)
    dovesoup = BeautifulSoup(dovepage.content, "html.parser")
    doveprompts = dovesoup.find_all("h3", string=lambda text: "prompt post" in text.lower())
    dovesubsoup = BeautifulSoup(str(doveprompts[0]), "html.parser")
    doveurl = dovesubsoup.find("a")
    doveprompt = (doveurl["href"])
    dovepromptnew = (doveurl["href"] + "?style=light")
    dovepromptpage = requests.get(dovepromptnew)
    dovepromptsoup = BeautifulSoup(dovepromptpage.content, "html.parser")
    doveprompttext = dovepromptsoup.find(class_="entry-content")
    dovetheprompt = doveprompttext.find("i")
    print("dove-drabbles (any): \033[1m" + dovetheprompt.text.lower() + "\033[0m (" + doveprompt + ")\n")
    thefile.write("- [[" + doveprompt + "][dove-drabbles]] (any): *" + dovetheprompt.text.lower() + "*\n")
except:
    pass
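# drabble-zone and emotion100 are fetched with the logged-in session; the prompt
# is in a <strong> tag for drabble-zone and in the last <span> for emotion100.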
try:
    with requests.Session() as s:
        response = s.post(login_url, data)
        zone = "https://drabble-zone.dreamwidth.org/tag/mod-post?style=light&tag=mod-post"
        zonepage = s.get(zone)
        zonesoup = BeautifulSoup(zonepage.content, "html.parser")
        zoneprompts = zonesoup.find_all("h3", string=lambda text: "challenge" in text.lower())
        zonesubsoup = BeautifulSoup(str(zoneprompts[0]), "html.parser")
        zoneurl = zonesubsoup.find("a")
        zoneprompt = (zoneurl["href"])
        zonepromptnew = (zoneurl["href"] + "?style=light")
        zonepromptpage = s.get(zonepromptnew)
        zonepromptsoup = BeautifulSoup(zonepromptpage.content, "html.parser")
        zoneprompttext = zonepromptsoup.find(class_="entry-content")
        zonetheprompt = zoneprompttext.find("strong")
        print("drabble-zone (100 or 200 words): \033[1m" + zonetheprompt.text.lower() + "\033[0m (" + zoneprompt + ")\n")
        thefile.write("- [[" + zoneprompt + "][drabble-zone]] (100 or 200 words): *" + zonetheprompt.text.lower() + "*\n")
        emotion = "https://emotion100.dreamwidth.org/tag/*modpost?style=light&tag=%2Amodpost"
        emotionpage = s.get(emotion)
        emotionsoup = BeautifulSoup(emotionpage.content, "html.parser")
        emotionprompts = emotionsoup.find_all("h3", string=lambda text: "prompt" in text.lower())
        emotionsubsoup = BeautifulSoup(str(emotionprompts[0]), "html.parser")
        emotionurl = emotionsubsoup.find("a")
        emotionprompt = (emotionurl["href"])
        emotionpromptnew = (emotionurl["href"] + "?style=light")
        emotionpromptpage = s.get(emotionpromptnew)
        emotionpromptsoup = BeautifulSoup(emotionpromptpage.content, "html.parser")
        emotionprompttext = emotionpromptsoup.find(class_="entry-content")
        emotiontheprompt = emotionprompttext.find_all("span")[-1]
        print("emotion100 (100 words or a multiple of 100): \033[1m" + emotiontheprompt.text.lower() + "\033[0m (" + emotionprompt + ")\n")
        thefile.write("- [[" + emotionprompt + "][emotion100]] (100 words or a multiple of 100): *" + emotiontheprompt.text.lower() + "*\n")
except:
    pass
# for this one, have to get prompts from comments
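# fail-fandomanon: find the latest non-placeholder "FFA DW Post", then walk the
# first seven comment pages collecting "100 Words of ..." thread headers,
# skipping replies ("Re:") and catch-up threads.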
try:
    ffa = "https://fail-fandomanon.dreamwidth.org/?style=light"
    ffapage = requests.get(ffa)
    ffasoup = BeautifulSoup(ffapage.content, "html.parser")
    ffaprompts = ffasoup.find_all("h3", string=lambda text: "ffa dw post" in text.lower())
    ffapromptstrim = [x for x in ffaprompts if "Placeholder" not in str(x)]
    ffasubsoup = BeautifulSoup(str(ffapromptstrim[0]), "html.parser")
    ffaurl = ffasubsoup.find("a")
    ffaprompt = (ffaurl["href"])
    ffapromptnew = (ffaprompt + "?style=light")
    ffapromptpage = requests.get(ffapromptnew)
    ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
    ffaprompttext = ffapromptsoup.find(id="comments")
    ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
    ffatheprompt = ffaresoup.find_all("h4", text=True)
    ffacent = []
    i = 1
    while i < 8:
        ffapromptnew = (ffaprompt + "?page=" + str(i) + "&style=light")
        ffapromptpage = requests.get(ffapromptnew)
        ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
        ffaprompttext = ffapromptsoup.find(id="comments")
        ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
        ffatheprompt = ffaresoup.find_all("h4", text=True)
        for each in ffatheprompt:
            eachtext = str(each.get_text())
            if "100 words of" in eachtext or "100 Words of" in eachtext or "100 Words Of" in eachtext:
                if "Re:" not in eachtext and "catch-up" not in eachtext and "Catch-Up" not in eachtext:
                    ffacent.append(eachtext)
        i += 1
    if ffacent:
        ffacent = list(dict.fromkeys(ffacent))
        ffacentnew = []
        for x in ffacent:
            x = x[13:]
            if x != "" and not x.startswith(" Fills"):
                ffacentnew.append(x)
        ffaformat = "; ".join(ffacentnew)
        print("fail-fandomanon (any): \033[1m" + ffaformat.lower() + "\033[0m (" + ffaprompt + ")\n")
        thefile.write("- [[" + ffaprompt + "][fail-fandomanon]] (any): *" + ffaformat.lower() + "*\n")
except:
    pass
# for this one, prompts are unavailable on tuesdays and wednesdays
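# datetime.weekday() counts Monday as 0, so weekday 1 or 2 (Tuesday/Wednesday)
# skips the scrape; the prompt is in the first table cell of the challenge post.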
try:
    weekprogress = datetime.now().weekday()
    if not 0 < weekprogress < 3:
        fandom = "https://fandomweekly.dreamwidth.org/?style=light&tag=%23challenge"
        fandompage = requests.get(fandom)
        fandomsoup = BeautifulSoup(fandompage.content, "html.parser")
        fandomprompts = fandomsoup.find_all("h3", string=lambda text: "challenge post" in text.lower())
        fandomsubsoup = BeautifulSoup(str(fandomprompts[0]), "html.parser")
        fandomurl = fandomsubsoup.find("a")
        fandomprompt = (fandomurl["href"])
        fandompromptnew = (fandomurl["href"] + "?style=light")
        fandompromptpage = requests.get(fandompromptnew)
        fandompromptsoup = BeautifulSoup(fandompromptpage.content, "html.parser")
        fandomprompttext = fandompromptsoup.find(class_="entry-content")
        fandomtheprompt = fandomprompttext.find("td")
        print("fandomweekly (any, competitive): \033[1m" + fandomtheprompt.text.lower() + "\033[0m (" + fandomprompt + ")\n")
        thefile.write("- [[" + fandomprompt + "][fandomweekly]] (any, competitive): *" + fandomtheprompt.text.lower() + "*\n")
except:
    pass
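# fan-flashworks: latest admin-tagged challenge post; the prompt is in a <center> tag.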
try:
    flash = "https://fan-flashworks.dreamwidth.org/?style=light&tag=admin"
    flashpage = requests.get(flash)
    flashsoup = BeautifulSoup(flashpage.content, "html.parser")
    flashprompts = flashsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
    flashsubsoup = BeautifulSoup(str(flashprompts[0]), "html.parser")
    flashurl = flashsubsoup.find("a")
    flashprompt = (flashurl["href"])
    flashpromptnew = (flashurl["href"] + "?style=light")
    flashpromptpage = requests.get(flashpromptnew)
    flashpromptsoup = BeautifulSoup(flashpromptpage.content, "html.parser")
    flashprompttext = flashpromptsoup.find(class_="entry-content")
    flashtheprompt = flashprompttext.find("center")
    print("fan-flashworks (any, can't post elsewhere until round is closed): \033[1m" + flashtheprompt.text.lower() + "\033[0m (" + flashprompt + ")\n")
    thefile.write("- [[" + flashprompt + "][fan-flashworks]] (any, can't post elsewhere until round is closed): *" + flashtheprompt.text.lower() + "*\n")
except:
    pass
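# femslashficlets: latest challenge-tagged post; the prompt is in an <i> tag
# and may be missing, hence the None check.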
try:
    femslash = "https://femslashficlets.dreamwidth.org/tag/challenges?style=light&tag=challenges"
    femslashpage = requests.get(femslash)
    femslashsoup = BeautifulSoup(femslashpage.content, "html.parser")
    femslashprompts = femslashsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
    femslashsubsoup = BeautifulSoup(str(femslashprompts[0]), "html.parser")
    femslashurl = femslashsubsoup.find("a")
    femslashprompt = (femslashurl["href"])
    femslashpromptnew = (femslashurl["href"] + "?style=light")
    femslashpromptpage = requests.get(femslashpromptnew)
    femslashpromptsoup = BeautifulSoup(femslashpromptpage.content, "html.parser")
    femslashprompttext = femslashpromptsoup.find(class_="entry-content")
    femslashtheprompt = femslashprompttext.find("i")
    if femslashtheprompt is not None:
        print("femslash-ficlets (100-1000 words, F/F): \033[1m" + femslashtheprompt.text.lower() + "\033[0m (" + femslashprompt + ")\n")
        thefile.write("- [[" + femslashprompt + "][femslashficlets]] (100 words or a multiple of 100): *" + femslashtheprompt.text.lower() + "*\n")
except:
    pass
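# fffc runs several concurrent challenges from the same tag page: the little
# special (only days 10-17), froday madness, the monthly special (matched by
# the current month's name), and the regular challenge.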
try:
    with requests.Session() as s:
        response = s.post(login_url, data)
        fffc = "https://fffc.dreamwidth.org/tag/!challenges?style=light&tag=%21challenges"
        fffcpage = s.get(fffc)
        fffcsoup = BeautifulSoup(fffcpage.content, "html.parser")
        if 18 > today > 9:
            fffclittleprompts = fffcsoup.find_all("h3", string=lambda text: "little special" in text.lower())
            fffclittlesubsoup = BeautifulSoup(str(fffclittleprompts[0]), "html.parser")
            fffclittleurl = fffclittlesubsoup.find("a")
            fffclittleprompt = (fffclittleurl["href"])
            fffclittlepromptnew = (fffclittleurl["href"] + "?style=light")
            fffclittlepromptpage = s.get(fffclittlepromptnew)
            fffclittlepromptsoup = BeautifulSoup(fffclittlepromptpage.content, "html.parser")
            fffclittleprompttext = fffclittlepromptsoup.find("h3")
            print("fffc little special (at least 100 words): \033[1m" + fffclittleprompttext.text.lower() + "\033[0m (" + fffclittleprompt + ")\n")
            thefile.write("- [[" + fffclittleprompt + "][fffc little special]] (at least 100 words): *" + fffclittleprompttext.text.lower() + "*\n")
        fffcmadnessprompts = fffcsoup.find_all("h3", string=lambda text: "froday madness" in text.lower())
        fffcmadnesssubsoup = BeautifulSoup(str(fffcmadnessprompts[0]), "html.parser")
        fffcmadnessurl = fffcmadnesssubsoup.find("a")
        fffcmadnessprompt = (fffcmadnessurl["href"])
        fffcmadnesspromptnew = (fffcmadnessurl["href"] + "?style=light")
        fffcmadnesspromptpage = s.get(fffcmadnesspromptnew)
        fffcmadnesspromptsoup = BeautifulSoup(fffcmadnesspromptpage.content, "html.parser")
        fffcmadnessprompttext = fffcmadnesspromptsoup.find(class_="entry-content")
        fffcmadnesstheprompt = fffcmadnessprompttext.find("b")
        print("fffc madness (at least 2000 words): \033[1m" + fffcmadnesstheprompt.text.lower() + "\033[0m (" + fffcmadnessprompt + ")\n")
        thefile.write("- [[" + fffcmadnessprompt + "][fffc madness]] (at least 2000 words): *" + fffcmadnesstheprompt.text.lower() + "*\n")
        fffcmonthlyprompts = fffcsoup.find_all("h3", string=re.compile(monthstring))
        fffcmonthlysubsoup = BeautifulSoup(str(fffcmonthlyprompts[0]), "html.parser")
        fffcmonthlyurl = fffcmonthlysubsoup.find("a")
        fffcmonthlyprompt = (fffcmonthlyurl["href"])
        fffcmonthlypromptnew = (fffcmonthlyurl["href"] + "?style=light")
        fffcmonthlypromptpage = s.get(fffcmonthlypromptnew)
        fffcmonthlypromptsoup = BeautifulSoup(fffcmonthlypromptpage.content, "html.parser")
        fffcmonthlyprompttext = fffcmonthlypromptsoup.find("h3")
        print("fffc monthly special (usually at least 500 words): \033[1m" + fffcmonthlyprompttext.text.lower() + "\033[0m (" + fffcmonthlyprompt + ")\n")
        thefile.write("- [[" + fffcmonthlyprompt + "][fffc monthly special]] (usually at least 500 words): *" + fffcmonthlyprompttext.text.lower() + "*\n")
        fffcregularprompts = fffcsoup.find_all("h3", string=lambda text: "regular challenge" in text.lower())
        fffcregularsubsoup = BeautifulSoup(str(fffcregularprompts[0]), "html.parser")
        fffcregularurl = fffcregularsubsoup.find("a")
        fffcregularprompt = (fffcregularurl["href"])
        fffcregularpromptnew = (fffcregularurl["href"] + "?style=light")
        fffcregularpromptpage = s.get(fffcregularpromptnew)
        fffcregularpromptsoup = BeautifulSoup(fffcregularpromptpage.content, "html.parser")
        fffcregularprompttext = fffcregularpromptsoup.find(class_="entry-content")
        fffcregulartheprompt = fffcregularprompttext.find("b")
        print("fffc regular challenge (at least 100 words): \033[1m" + fffcregulartheprompt.text.lower() + "\033[0m (" + fffcregularprompt + ")\n")
        thefile.write("- [[" + fffcregularprompt + "][fffc regular challenge]] (at least 100 words): *" + fffcregulartheprompt.text.lower() + "*\n")
except:
    pass
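# ficlet-zone: latest challenge post; the prompt is the first link in the entry.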
try:
    ficlet = "https://ficlet-zone.dreamwidth.org/tag/challenge+post?style=light&tag=challenge+post"
    ficletpage = requests.get(ficlet)
    ficletsoup = BeautifulSoup(ficletpage.content, "html.parser")
    ficletprompts = ficletsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
    ficletsubsoup = BeautifulSoup(str(ficletprompts[0]), "html.parser")
    ficleturl = ficletsubsoup.find("a")
    ficletprompt = (ficleturl["href"])
    ficletpromptnew = (ficleturl["href"] + "?style=light")
    ficletpromptpage = requests.get(ficletpromptnew)
    ficletpromptsoup = BeautifulSoup(ficletpromptpage.content, "html.parser")
    ficletprompttext = ficletpromptsoup.find(class_="entry-content")
    ficlettheprompt = ficletprompttext.find("a")
    print("ficlet-zone (any): \033[1m" + ficlettheprompt.text.lower() + "\033[0m (" + ficletprompt + ")\n")
    thefile.write("- [[" + ficletprompt + "][ficlet-zone]] (any): *" + ficlettheprompt.text.lower() + "*\n")
except:
    pass
# first calculate the hour of the month …
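# e.g. on the 3rd of the month at 14:00 local time: (3 - 1) * 24 + 14 = hour 62.
# the hourlyprompts entry appears to number one prompt per hour of the month,
# so that number is pulled out of the entry with a regex.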
try:
    hourselapsed = (today - 1) * 24
    hourstoday = int(datetime.now().strftime("%H"))
    currenthour = (hourselapsed + hourstoday)
    with requests.Session() as s:
        response = s.post(login_url, data)
        hourly = "https://hourlyprompts.dreamwidth.org/?style=light"
        hourlypage = s.get(hourly)
        hourlysoup = BeautifulSoup(hourlypage.content, "html.parser")
        hourlyprompts = hourlysoup.find_all("h3", string=re.compile(monthstring))
        hourlysubsoup = BeautifulSoup(str(hourlyprompts[0]), "html.parser")
        hourlyurl = hourlysubsoup.find("a")
        hourlyprompt = (hourlyurl["href"])
        hourlypromptnew = (hourlyurl["href"] + "?style=light")
        hourlypromptpage = s.get(hourlypromptnew)
        hourlypromptsoup = BeautifulSoup(hourlypromptpage.content, "html.parser")
        hourlyprompttext = hourlypromptsoup.find(class_="entry-content")
        searchstring = r"<br/>" + re.escape(str(currenthour)) + r"\. .*?<br/>"
        hourlypromptmedian = re.findall(searchstring, str(hourlyprompttext))
        hourlypromptthishour = str(hourlypromptmedian[0])[5:-5]
        print("hourlyprompts (any): \033[1m" + hourlypromptthishour.lower() + "\033[0m (" + hourlyprompt + ")\n")
        thefile.write("- [[" + hourlyprompt + "][hourlyprompts]] (any): *" + hourlypromptthishour.lower() + "*\n")
except:
    pass
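# sweet and short bingo: only runs days 22-29; the bingo card is an HTML table,
# so every <td> is collected and the tags stripped out.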
try:
    if 30 > today > 21:
        ssbingo = "https://sweetandshort.dreamwidth.org/tag/challenge:+bingo?style=light&tag=challenge:+bingo"
        ssbingopage = requests.get(ssbingo)
        ssbingosoup = BeautifulSoup(ssbingopage.content, "html.parser")
        ssbingoprompts = ssbingosoup.find_all("h3")
        ssbingosubsoup = BeautifulSoup(str(ssbingoprompts[0]), "html.parser")
        ssbingourl = ssbingosubsoup.find("a")
        ssbingoprompt = (ssbingourl["href"])
        ssbingopromptnew = (ssbingourl["href"] + "?style=light")
        ssbingopromptpage = requests.get(ssbingopromptnew)
        ssbingopromptsoup = BeautifulSoup(ssbingopromptpage.content, "html.parser")
        ssbingoprompttext = ssbingopromptsoup.find(class_="entry-content")
        ssbingotheprompt = ssbingoprompttext.find_all("td")
        ssbingoclean = []
        for prompt in ssbingotheprompt:
            newprompt = re.sub("<.*?>", "", str(prompt))
            ssbingoclean.append(newprompt)
        ssbingofinal = "; ".join(ssbingoclean).lower()
        print("sweet and short bingo (up to 300 words for two prompts, up to 600 words for four prompts): \033[1m" + ssbingofinal + "\033[0m (" + ssbingoprompt + ")\n")
        thefile.write("- [[" + ssbingoprompt + "][sweet and short bingo]] (up to 300 words for two prompts, up to 600 words for four prompts): *" + ssbingofinal + "*\n")
except:
    pass
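# sweet and short comment quicky: only runs days 8-15; prompts are gathered
# from the "New Prompts Here" comment threads on the challenge post.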
try:
    if 16 > today > 7:
        ssquicky = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+comment+quicky?mode=and&style=light&tag=%21new+challenge,challenge:+comment+quicky"
        ssquickypage = requests.get(ssquicky)
        ssquickysoup = BeautifulSoup(ssquickypage.content, "html.parser")
        ssquickyprompts = ssquickysoup.find_all("h3")
        ssquickysubsoup = BeautifulSoup(str(ssquickyprompts[0]), "html.parser")
        ssquickyurl = ssquickysubsoup.find("a")
        ssquickyprompt = (ssquickyurl["href"])
        # deliberately not using style=light here so we can get at the comment contents
        ssquickypromptnew = (ssquickyurl["href"])
        ssquickypromptpage = requests.get(ssquickypromptnew)
        ssquickypromptsoup = BeautifulSoup(ssquickypromptpage.content, "html.parser")
        promptcatch = ".*New Prompts Here"
        # ssquickytheprompt = ssquickypromptsoup.find_all("h4", string=re.compile(promptcatch))
        ssquickytheprompt = ssquickypromptsoup.find_all(class_="comment")
        ssquickycomments = []
        for comment in ssquickytheprompt:
            if re.search("New Prompts Here", str(comment)):
                commenttext = re.findall(r"<div class=\"comment-content\".*?</div>", str(comment))
                commentprompt = re.sub("<.*?>", "", str(commenttext))
                ssquickycomments.append(str(commentprompt)[2:-2])
        ssquickycprompt = "; ".join(ssquickycomments)
        print("sweet and short comment quicky (up to 99 words): \033[1m" + ssquickycprompt.lower() + "\033[0m (" + ssquickyprompt + ")\n")
        thefile.write("- [[" + ssquickyprompt + "][sweet and short comment quicky]] (up to 99 words): *" + ssquickycprompt.lower() + "*\n")
except:
    pass
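# sweet and short monthly prompts: the prompt list sits behind the <a name="cutid1">
# cut, so everything after it is captured and the HTML tags stripped out.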
try:
    ssmonthly = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+10+out+of+20?mode=and&style=light&tag=%21new+challenge,challenge:+10+out+of+20"
    ssmonthlypage = requests.get(ssmonthly)
    ssmonthlysoup = BeautifulSoup(ssmonthlypage.content, "html.parser")
    ssmonthlyprompts = ssmonthlysoup.find_all("h3")
    ssmonthlysubsoup = BeautifulSoup(str(ssmonthlyprompts[0]), "html.parser")
    ssmonthlyurl = ssmonthlysubsoup.find("a")
    ssmonthlyprompt = (ssmonthlyurl["href"])
    ssmonthlypromptnew = (ssmonthlyurl["href"] + "?style=light")
    ssmonthlypromptpage = requests.get(ssmonthlypromptnew)
    ssmonthlypromptsoup = BeautifulSoup(ssmonthlypromptpage.content, "html.parser")
    ssmonthlyprompttext = ssmonthlypromptsoup.find(class_="entry-content")
    ssmonthlypromptmedian = re.findall(r"<a name=\"cutid1\">.*", str(ssmonthlyprompttext))
    ssmonthlypromptstripone = re.sub("<.*?>", "", str(ssmonthlypromptmedian))
    ssmonthlypromptstriptwo = re.sub("([a-z])- ", "\\1; ", str(ssmonthlypromptstripone))
    ssmonthlypromptstripthree = re.sub("- ", "", str(ssmonthlypromptstriptwo))
    ssmonthlypromptfinal = str(ssmonthlypromptstripthree)[2:-2]
    print("sweet and short monthly prompts (up to 300 words [0-9 prompts], up to 900 words [10-19 prompts], any [20 prompts]): \033[1m" + ssmonthlypromptfinal + "\033[0m (" + ssmonthlyprompt + ")\n")
    thefile.write("- [[" + ssmonthlyprompt + "][sweet and short monthly prompts]] (up to 300 words [0-9 prompts], up to 900 words [10-19 prompts], any [20 prompts]): *" + ssmonthlypromptfinal + "*\n")
except:
    pass
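# sweet and short picture prompts: only runs after the 14th; the entry title
# (first <h3> on the page, matched by month name) is used as the prompt text.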
try:
    if today > 14:
        sspicture = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+picture+prompt+fun?mode=and&style=light&tag=%21new+challenge,challenge:+picture+prompt+fun"
        sspicturepage = requests.get(sspicture)
        sspicturesoup = BeautifulSoup(sspicturepage.content, "html.parser")
        monthstring = ".*" + month + ".*"
        sspictureprompts = sspicturesoup.find_all("h3", string=re.compile(monthstring))
        sspicturesubsoup = BeautifulSoup(str(sspictureprompts[0]), "html.parser")
        sspictureurl = sspicturesubsoup.find("a")
        sspictureprompt = (sspictureurl["href"])
        sspicturepromptnew = (sspictureurl["href"] + "?style=light")
        sspicturepromptpage = requests.get(sspicturepromptnew)
        sspicturepromptsoup = BeautifulSoup(sspicturepromptpage.content, "html.parser")
        sspictureprompttext = sspicturepromptsoup.find("h3")
        print("sweet and short picture prompts (up to 300 words): \033[1m" + sspictureprompttext.text.lower() + "\033[0m (" + sspictureprompt + ")\n")
        thefile.write("- [[" + sspictureprompt + "][sweet and short picture prompts]] (up to 300 words): *" + sspictureprompttext.text.lower() + "*\n")
except:
    pass
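# vocab-drabbles: latest challenge post; the prompt word is in a <strong> tag.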
try:
    vocab = "https://vocab-drabbles.dreamwidth.org/?style=light&tag=challenge"
    vocabpage = requests.get(vocab)
    vocabsoup = BeautifulSoup(vocabpage.content, "html.parser")
    vocabprompts = vocabsoup.find_all("h3")
    vocabsubsoup = BeautifulSoup(str(vocabprompts[0]), "html.parser")
    vocaburl = vocabsubsoup.find("a")
    vocabprompt = (vocaburl["href"])
    vocabpromptnew = (vocaburl["href"] + "?style=light")
    vocabpromptpage = requests.get(vocabpromptnew)
    vocabpromptsoup = BeautifulSoup(vocabpromptpage.content, "html.parser")
    vocabprompttext = vocabpromptsoup.find(class_="entry-content")
    vocabtheprompt = vocabprompttext.find("strong")
    print("vocab-drabbles (50-500 words): \033[1m" + vocabtheprompt.text.lower() + "\033[0m (" + vocabprompt + ")\n")
    thefile.write("- [[" + vocabprompt + "][vocab-drabbles]] (50-500 words): *" + vocabtheprompt.text.lower() + "*\n")
except:
    pass