import requests, os, re
from bs4 import BeautifulSoup
from datetime import date, datetime

# needed for nsfw content
login_url = "https://www.dreamwidth.org/login?ret=1"
data = {
    "user": "fakeapi",
    "password": "thisap1isfalse"
}
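
# the credentials above are placeholders; a real run would substitute an actual
# account, e.g. (environment variable names here are just an illustration):
# data = {"user": os.environ["DW_USER"], "password": os.environ["DW_PASSWORD"]}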

if os.path.exists("prompts.org"):
    os.remove("prompts.org")

thefile = open("prompts.org", "a")

today = int(date.today().strftime("%d"))
month = str(date.today().strftime("%B"))
monthstring = ".*" + month + ".*"

prompts = []
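
# Every community block below repeats the same pattern: fetch a tag listing,
# take the first matching <h3>, follow its link with ?style=light, and dig the
# prompt out of the entry content. A helper along these lines captures that
# shape (an illustrative sketch only: it is not used below, and the names are
# invented):
def latest_entry(tag_url, heading_filter, session=None):
    getter = session if session is not None else requests
    listing = BeautifulSoup(getter.get(tag_url).content, "html.parser")
    heading = listing.find_all("h3", string=heading_filter)[0]
    entry_url = heading.find("a")["href"]
    entry_page = getter.get(entry_url + "?style=light")
    entry_soup = BeautifulSoup(entry_page.content, "html.parser")
    return entry_url, entry_soup.find(class_="entry-content")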

try:
    cent = "https://100words.dreamwidth.org/tag/!prompt?style=light&tag=%21prompt"
    centpage = requests.get(cent)
    centsoup = BeautifulSoup(centpage.content, "html.parser")
    centprompts = centsoup.find_all("h3", string=lambda text: "prompt:" in text.lower())
    centsubsoup = BeautifulSoup(str(centprompts[0]), "html.parser")
    centurl = centsubsoup.find("a")
    centprompt = (centurl["href"])
    centpromptnew = (centurl["href"] + "?style=light")
    centpromptpage = requests.get(centpromptnew)
    centpromptsoup = BeautifulSoup(centpromptpage.content, "html.parser")
    centprompttext = centpromptsoup.find(class_="entry-content")
    centtheprompt = centprompttext.find("strong")
    print("100words (100 words): \033[1m" + centtheprompt.text.lower() + "\033[0m (" + centprompt + ")\n")
    thefile.write("- [[" + centprompt + "][100words]] (100 words): *" + centtheprompt.text.lower() + "*\n")
    prompts.append({"source":"100words","type":"comm","notes":"100 words","prompt":centtheprompt.text.lower(),"url":centprompt})
except:
    pass
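
# each block prints the prompt to the terminal and appends an Org-mode list item
# to prompts.org; a written line looks roughly like this (values invented):
# - [[https://100words.dreamwidth.org/123456.html][100words]] (100 words): *example prompt*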

# for this one we need to extract the right entry from a list, which may be an <ol> but may not be.
# also, need to use the right month, as next month’s prompts are posted in advance
# now defunct??
# monthstring = ".*" + month + ".*"
# thirtyone = "https://31-days.dreamwidth.org/tag/!prompts?style=light&tag=%21prompts"
# thirtyonepage = requests.get(thirtyone)
# thirtyonesoup = BeautifulSoup(thirtyonepage.content, "html.parser")
# thirtyoneprompts = thirtyonesoup.find_all("h3", string=re.compile(monthstring))
# thirtyonesubsoup = BeautifulSoup(str(thirtyoneprompts[0]), "html.parser")
# thirtyoneurl = thirtyonesubsoup.find("a")
# thirtyoneprompt = (thirtyoneurl["href"])
# thirtyonepromptnew = (thirtyoneurl["href"] + "?style=light")
# thirtyonepromptpage = requests.get(thirtyonepromptnew)
# thirtyonepromptsoup = BeautifulSoup(thirtyonepromptpage.content, "html.parser")
# thirtyoneprompttext = thirtyonepromptsoup.find(class_="entry-content")
# if "<ol>" in str(thirtyoneprompttext):
#     thirtyonetheprompt = thirtyoneprompttext.select("ol > li")[today - 1].get_text(strip=True)
# else:
#     interprompt = list(thirtyoneprompttext.stripped_strings)
#     thirtyonelist = []
#     for prompt in interprompt:
#         if len(prompt) < 5:
#             promptnum = interprompt.index(prompt)
#             newnum = promptnum + 1
#             thirtyonelist.append(prompt + interprompt[newnum])
#         else:
#             thirtyonelist.append(prompt)
#     intsearch = str(today) + "."
#     thirtyonetheprompt = str([item for item in thirtyonelist if item.startswith(intsearch)])[2:-2]
# print("31-days (any): \033[1m" + thirtyonetheprompt.lower() + "\033[0m (" + thirtyoneprompt + ")\n")
# thefile.write("- [[" + thirtyoneprompt + "][31-days]] (any): *" + thirtyonetheprompt.lower() + "*\n")

try:
    ad = "https://anythingdrabble.dreamwidth.org/tag/mod!+post?style=light&tag=mod%21+post"
    adpage = requests.get(ad)
    adsoup = BeautifulSoup(adpage.content, "html.parser")
    adprompts = adsoup.find_all("h3", string=lambda text: "prompt post" in text.lower())
    adsubsoup = BeautifulSoup(str(adprompts[0]), "html.parser")
    adurl = adsubsoup.find("a")
    adprompt = (adurl["href"])
    adpromptnew = (adurl["href"] + "?style=light")
    adpromptpage = requests.get(adpromptnew)
    adpromptsoup = BeautifulSoup(adpromptpage.content, "html.parser")
    adprompttext = adpromptsoup.find(class_="entry-content")
    adtheprompt = adprompttext.find("center")
    # trim stray leading and trailing spaces around the prompt
    adstrippable = str(adtheprompt.text).strip(" ")
    print("anythingdrabble (100, 200, 300, 400, or 500 words): \033[1m" + adstrippable.lower() + "\033[0m (" + adprompt + ")\n")
    thefile.write("- [[" + adprompt + "][anythingdrabble]] (100, 200, 300, 400, or 500 words): *" + adstrippable.lower() + "*\n")
    prompts.append({"source":"anythingdrabble","type":"comm","notes":"100, 200, 300, 400, or 500 words","prompt":adstrippable.lower(),"url":adprompt})
except:
    pass

try:
    dove = "https://dove-drabbles.dreamwidth.org/?style=light"
    dovepage = requests.get(dove)
    dovesoup = BeautifulSoup(dovepage.content, "html.parser")
    doveprompts = dovesoup.find_all("h3", string=lambda text: "prompt" in text.lower())
    dovesubsoup = BeautifulSoup(str(doveprompts[0]), "html.parser")
    doveurl = dovesubsoup.find("a")
    doveprompt = (doveurl["href"])
    dovepromptnew = (doveurl["href"] + "?style=light")
    dovepromptpage = requests.get(dovepromptnew)
    dovepromptsoup = BeautifulSoup(dovepromptpage.content, "html.parser")
    doveprompttext = dovepromptsoup.find(class_="entry-content")
    dovetheprompt = doveprompttext.find("i")
    print("dove-drabbles (any): \033[1m" + dovetheprompt.text.lower() + "\033[0m (" + doveprompt + ")\n")
    thefile.write("- [[" + doveprompt + "][dove-drabbles]] (any): *" + dovetheprompt.text.lower() + "*\n")
    prompts.append({"source":"dove-drabbles","type":"comm","notes":"any length","prompt":dovetheprompt.text.lower(),"url":doveprompt})
except:
    pass

try:
    with requests.Session() as s:
        response = s.post(login_url, data)
        zone = "https://drabble-zone.dreamwidth.org/tag/mod-post?style=light&tag=mod-post"
        zonepage = s.get(zone)
        zonesoup = BeautifulSoup(zonepage.content, "html.parser")
        zoneprompts = zonesoup.find_all("h3", string=lambda text: "challenge" in text.lower())
        zonesubsoup = BeautifulSoup(str(zoneprompts[0]), "html.parser")
        zoneurl = zonesubsoup.find("a")
        zoneprompt = (zoneurl["href"])
        zonepromptnew = (zoneurl["href"] + "?style=light")
        zonepromptpage = s.get(zonepromptnew)
        zonepromptsoup = BeautifulSoup(zonepromptpage.content, "html.parser")
        zoneprompttext = zonepromptsoup.find(class_="entry-content")
        zonetheprompt = zoneprompttext.find("div")
        print("drabble-zone (100 or 200 words): \033[1m" + zonetheprompt.text.lower() + "\033[0m (" + zoneprompt + ")\n")
        thefile.write("- [[" + zoneprompt + "][drabble-zone]] (100 or 200 words): *" + zonetheprompt.text.lower() + "*\n")
        prompts.append({"source":"drabble-zone","type":"comm","notes":"100 or 200 words","prompt":zonetheprompt.text.lower(),"url":zoneprompt})
except:
    pass

try:
    with requests.Session() as s:
        response = s.post(login_url, data)
        emotion = "https://emotion100.dreamwidth.org/tag/*modpost?style=light&tag=%2Amodpost"
        emotionpage = s.get(emotion)
        emotionsoup = BeautifulSoup(emotionpage.content, "html.parser")
        emotionprompts = emotionsoup.find_all("h3", string=lambda text: "prompt" in text.lower())
        emotionsubsoup = BeautifulSoup(str(emotionprompts[0]), "html.parser")
        emotionurl = emotionsubsoup.find("a")
        emotionprompt = (emotionurl["href"])
        emotionpromptnew = (emotionurl["href"] + "?style=light")
        emotionpromptpage = s.get(emotionpromptnew)
        emotionpromptsoup = BeautifulSoup(emotionpromptpage.content, "html.parser")
        emotionprompttext = emotionpromptsoup.find(class_="entry-content")
        emotiontheprompt = emotionprompttext.find_all("span")[-1]
        print("emotion100 (100 words or a multiple of 100): \033[1m" + emotiontheprompt.text.lower() + "\033[0m (" + emotionprompt + ")\n")
        thefile.write("- [[" + emotionprompt + "][emotion100]] (100 words or a multiple of 100): *" + emotiontheprompt.text.lower() + "*\n")
        prompts.append({"source":"emotion100","type":"comm","notes":"100 words or a multiple of 100","prompt":emotiontheprompt.text.lower(),"url":emotionprompt})
except:
    pass

# for this one, have to get prompts from comments
try:
    ffa = "https://fail-fandomanon.dreamwidth.org/?style=light"
    ffapage = requests.get(ffa)
    ffasoup = BeautifulSoup(ffapage.content, "html.parser")
    ffaprompts = ffasoup.find_all("h3")
    ffapromptstrim = [x for x in ffaprompts if "Placeholder" not in str(x)]
    ffasubsoup = BeautifulSoup(str(ffapromptstrim[0]), "html.parser")
    ffaurl = ffasubsoup.find("a")
    ffaprompt = (ffaurl["href"])
    ffapromptnew = (ffaprompt + "?style=light")
    ffapromptpage = requests.get(ffapromptnew)
    ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
    ffaprompttext = ffapromptsoup.find(id="comments")
    ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
    ffatheprompt = ffaresoup.find_all("h4", string=True)
    ffacent = []
    # walk the first few comment pages looking for "100 words of ..." threads
    i = 1
    while i < 8:
        ffapromptnew = (ffaprompt + "?page=" + str(i) + "&style=light")
        ffapromptpage = requests.get(ffapromptnew)
        ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
        ffaprompttext = ffapromptsoup.find(id="comments")
        ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
        ffatheprompt = ffaresoup.find_all("h4", string=True)
        for each in ffatheprompt:
            if "100 words of" in (str(each.get_text())) or "100 Words of" in (str(each.get_text())) or "100 Words Of" in (str(each.get_text())):
                if "Re:" not in (str(each.get_text())) and "catch-up" not in (str(each.get_text())) and "Catch-Up" not in (str(each.get_text())):
                    ffacent.append(str(each.get_text()))
        i += 1
    if ffacent:
        ffacent = list(dict.fromkeys(ffacent))
        ffacentnew = []
        for x in ffacent:
            # drop the leading "100 words of " (13 characters)
            x = x[13:]
            if x != "" and not x.startswith(" Fills"):
                ffacentnew.append(x)
        ffaformat = "; ".join(ffacentnew)
        print("fail-fandomanon (any): \033[1m" + ffaformat.lower() + "\033[0m (" + ffaprompt + ")\n")
        thefile.write("- [[" + ffaprompt + "][fail-fandomanon]] (any): *" + ffaformat.lower() + "*\n")
        prompts.append({"source":"fail-fandomanon","type":"comm","notes":"any length","prompt":ffacentnew,"url":ffaprompt})
except:
    pass

# for this one, prompts are unavailable on tuesdays and wednesdays
try:
    weekprogress = datetime.now().weekday()
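    # datetime.weekday() counts Monday as 0, so 1 and 2 are Tuesday and Wednesday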
    if not 0 < weekprogress < 3:
        fandom = "https://fandomweekly.dreamwidth.org/?style=light&tag=%23challenge"
        fandompage = requests.get(fandom)
        fandomsoup = BeautifulSoup(fandompage.content, "html.parser")
        fandomprompts = fandomsoup.find_all("h3", string=lambda text: "challenge post" in text.lower())
        fandomsubsoup = BeautifulSoup(str(fandomprompts[0]), "html.parser")
        fandomurl = fandomsubsoup.find("a")
        fandomprompt = (fandomurl["href"])
        fandompromptnew = (fandomurl["href"] + "?style=light")
        fandompromptpage = requests.get(fandompromptnew)
        fandompromptsoup = BeautifulSoup(fandompromptpage.content, "html.parser")
        fandomprompttext = fandompromptsoup.find(class_="entry-content")
        fandomtheprompt = fandomprompttext.find("td")
        print("fandomweekly (any, competitive): \033[1m" + fandomtheprompt.text.lower() + "\033[0m (" + fandomprompt + ")\n")
        thefile.write("- [[" + fandomprompt + "][fandomweekly]] (any, competitive): *" + fandomtheprompt.text.lower() + "*\n")
        prompts.append({"source":"fandomweekly","type":"comm","notes":"any length, competitive","prompt":fandomtheprompt.text.lower(),"url":fandomprompt})
except:
    pass

try:
    flash = "https://fan-flashworks.dreamwidth.org/?style=light&tag=admin"
    flashpage = requests.get(flash)
    flashsoup = BeautifulSoup(flashpage.content, "html.parser")
    flashprompts = flashsoup.find_all(lambda tag: tag.name == "h3" and "Challenge" in tag.text)
    flashsubsoup = BeautifulSoup(str(flashprompts[0]), "html.parser")
    flashurl = flashsubsoup.find("a")
    flashprompt = (flashurl["href"])
    flashpromptnew = (flashurl["href"] + "?style=light")
    flashpromptpage = requests.get(flashpromptnew)
    flashpromptsoup = BeautifulSoup(flashpromptpage.content, "html.parser")
    flashprompttext = flashpromptsoup.find(class_="entry-content")
    flashtheprompt = flashprompttext.find("center")
    print("fan-flashworks (any, can’t post elsewhere until round is closed): \033[1m" + flashtheprompt.text.lower() + "\033[0m (" + flashprompt + ")\n")
    thefile.write("- [[" + flashprompt + "][fan-flashworks]] (any, can’t post elsewhere until round is closed): *" + flashtheprompt.text.lower() + "*\n")
    prompts.append({"source":"fan-flashworks","type":"comm","notes":"any length, can’t post elsewhere until round is closed","prompt":flashtheprompt.text.lower(),"url":flashprompt})
except:
    pass

try:
    with requests.Session() as s:
        response = s.post(login_url, data)
        fsf = "https://femslashfete.dreamwidth.org/tag/admin?style=light&tag=admin"
        fsfpage = s.get(fsf)
        fsfsoup = BeautifulSoup(fsfpage.content, "html.parser")
        fsfprompts = fsfsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
        fsfsubsoup = BeautifulSoup(str(fsfprompts[0]), "html.parser")
        fsfurl = fsfsubsoup.find("a")
        fsfprompt = (fsfurl["href"])
        fsfpromptnew = (fsfurl["href"] + "?style=light")
        fsfpromptpage = s.get(fsfpromptnew)
        fsfpromptsoup = BeautifulSoup(fsfpromptpage.content, "html.parser")
        fsfprompttext = fsfpromptsoup.find(class_="entry-content")
        fsftheprompt = fsfprompttext.find("b")
        print("femslashfete (at least 100 words, must be femslash): \033[1m" + fsftheprompt.text.lower() + "\033[0m (" + fsfprompt + ")\n")
        thefile.write("- [[" + fsfprompt + "][femslashfete]] (at least 100 words, must be femslash): *" + fsftheprompt.text.lower() + "*\n")
        prompts.append({"source":"femslashfete","type":"comm","notes":"at least 100 words, must be femslash","prompt":fsftheprompt.text.lower(),"url":fsfprompt})
except:
    pass

# seems dead
# try:
#     femslash = "https://femslashficlets.dreamwidth.org/tag/challenges?style=light&tag=challenges"
#     femslashpage = requests.get(femslash)
#     femslashsoup = BeautifulSoup(femslashpage.content, "html.parser")
#     femslashprompts = femslashsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
#     femslashsubsoup = BeautifulSoup(str(femslashprompts[0]), "html.parser")
#     femslashurl = femslashsubsoup.find("a")
#     femslashprompt = (femslashurl["href"])
#     femslashpromptnew = (femslashurl["href"] + "?style=light")
#     femslashpromptpage = requests.get(femslashpromptnew)
#     femslashpromptsoup = BeautifulSoup(femslashpromptpage.content, "html.parser")
#     femslashprompttext = femslashpromptsoup.find(class_="entry-content")
#     femslashtheprompt = femslashprompttext.find("i")
#     if femslashtheprompt is not None:
#         print("femslash-ficlets (100–1000 words, F/F): \033[1m" + femslashtheprompt.text.lower() + "\033[0m (" + femslashprompt + ")\n")
#         thefile.write("- [[" + femslashprompt + "][femslashficlets]] (100 words or a multiple of 100): *" + femslashtheprompt.text.lower() + "*\n")
# except:
#     pass

try:
    with requests.Session() as s:
        response = s.post(login_url, data)
        fffc = "https://fffc.dreamwidth.org/tag/!challenges?style=light&tag=%21challenges"
        fffcpage = s.get(fffc)
        fffcsoup = BeautifulSoup(fffcpage.content, "html.parser")

        # the little special is only looked for in the middle of the month
        if 18 > today > 9:
            fffclittleprompts = fffcsoup.find_all("h3", string=lambda text: "little special" in text.lower())
            fffclittlesubsoup = BeautifulSoup(str(fffclittleprompts[0]), "html.parser")
            fffclittleurl = fffclittlesubsoup.find("a")
            fffclittleprompt = (fffclittleurl["href"])
            fffclittlepromptnew = (fffclittleurl["href"] + "?style=light")
            fffclittlepromptpage = s.get(fffclittlepromptnew)
            fffclittlepromptsoup = BeautifulSoup(fffclittlepromptpage.content, "html.parser")
            fffclittleprompttext = fffclittlepromptsoup.find("h3")
            print("fffc little special (at least 100 words): \033[1m" + fffclittleprompttext.text.lower() + "\033[0m (" + fffclittleprompt + ")\n")
            thefile.write("- [[" + fffclittleprompt + "][fffc little special]] (at least 100 words): *" + fffclittleprompttext.text.lower() + "*\n")
            prompts.append({"source":"fffc","type":"comm","notes":"at least 100 words","prompt":fffclittleprompttext.text.lower(),"url":fffclittleprompt,"challenge":"little special"})

        fffcmadnessprompts = fffcsoup.find_all("h3", string=lambda text: "froday madness" in text.lower())
        fffcmadnesssubsoup = BeautifulSoup(str(fffcmadnessprompts[0]), "html.parser")
        fffcmadnessurl = fffcmadnesssubsoup.find("a")
        fffcmadnessprompt = (fffcmadnessurl["href"])
        fffcmadnesspromptnew = (fffcmadnessurl["href"] + "?style=light")
        fffcmadnesspromptpage = s.get(fffcmadnesspromptnew)
        fffcmadnesspromptsoup = BeautifulSoup(fffcmadnesspromptpage.content, "html.parser")
        fffcmadnessprompttext = fffcmadnesspromptsoup.find(class_="entry-content")
        fffcmadnesstheprompt = fffcmadnessprompttext.find("b")
        print("fffc madness (at least 2000 words): \033[1m" + fffcmadnesstheprompt.text.lower() + "\033[0m (" + fffcmadnessprompt + ")\n")
        thefile.write("- [[" + fffcmadnessprompt + "][fffc madness]] (at least 2000 words): *" + fffcmadnesstheprompt.text.lower() + "*\n")
        prompts.append({"source":"fffc","type":"comm","notes":"at least 2000 words","prompt":fffcmadnesstheprompt.text.lower(),"url":fffcmadnessprompt,"challenge":"froday madness"})

        fffcmonthlyprompts = fffcsoup.find_all("h3", string=re.compile(monthstring))
        fffcmonthlysubsoup = BeautifulSoup(str(fffcmonthlyprompts[0]), "html.parser")
        fffcmonthlyurl = fffcmonthlysubsoup.find("a")
        fffcmonthlyprompt = (fffcmonthlyurl["href"])
        fffcmonthlypromptnew = (fffcmonthlyurl["href"] + "?style=light")
        fffcmonthlypromptpage = s.get(fffcmonthlypromptnew)
        fffcmonthlypromptsoup = BeautifulSoup(fffcmonthlypromptpage.content, "html.parser")
        fffcmonthlyprompttext = fffcmonthlypromptsoup.find("h3")
        print("fffc monthly special (usually at least 500 words): \033[1m" + fffcmonthlyprompttext.text.lower() + "\033[0m (" + fffcmonthlyprompt + ")\n")
        thefile.write("- [[" + fffcmonthlyprompt + "][fffc monthly special]] (usually at least 500 words): *" + fffcmonthlyprompttext.text.lower() + "*\n")
        prompts.append({"source":"fffc","type":"comm","notes":"usually at least 500 words","prompt":fffcmonthlyprompttext.text.lower(),"url":fffcmonthlyprompt,"challenge":"monthly challenge"})

        fffcregularprompts = fffcsoup.find_all("h3", string=lambda text: "regular challenge" in text.lower())
        fffcregularsubsoup = BeautifulSoup(str(fffcregularprompts[0]), "html.parser")
        fffcregularurl = fffcregularsubsoup.find("a")
        fffcregularprompt = (fffcregularurl["href"])
        fffcregularpromptnew = (fffcregularurl["href"] + "?style=light")
        fffcregularpromptpage = s.get(fffcregularpromptnew)
        fffcregularpromptsoup = BeautifulSoup(fffcregularpromptpage.content, "html.parser")
        fffcregularprompttext = fffcregularpromptsoup.find(class_="entry-content")
        fffcregulartheprompt = fffcregularprompttext.find("b")
        print("fffc regular challenge (at least 100 words): \033[1m" + fffcregulartheprompt.text.lower() + "\033[0m (" + fffcregularprompt + ")\n")
        thefile.write("- [[" + fffcregularprompt + "][fffc regular challenge]] (at least 100 words): *" + fffcregulartheprompt.text.lower() + "*\n")
        prompts.append({"source":"fffc","type":"comm","notes":"at least 100 words","prompt":fffcregulartheprompt.text.lower(),"url":fffcregularprompt,"challenge":"regular challenge"})
except:
    pass

try:
    ficlet = "https://ficlet-zone.dreamwidth.org/tag/challenge+post?style=light&tag=challenge+post"
    ficletpage = requests.get(ficlet)
    ficletsoup = BeautifulSoup(ficletpage.content, "html.parser")
    ficletprompts = ficletsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
    ficletsubsoup = BeautifulSoup(str(ficletprompts[0]), "html.parser")
    ficleturl = ficletsubsoup.find("a")
    ficletprompt = (ficleturl["href"])
    ficletpromptnew = (ficleturl["href"] + "?style=light")
    ficletpromptpage = requests.get(ficletpromptnew)
    ficletpromptsoup = BeautifulSoup(ficletpromptpage.content, "html.parser")
    ficletprompttext = ficletpromptsoup.find(class_="entry-content")
    ficlettheprompt = ficletprompttext.find("a")
    print("ficlet-zone (any): \033[1m" + ficlettheprompt.text.lower() + "\033[0m (" + ficletprompt + ")\n")
    thefile.write("- [[" + ficletprompt + "][ficlet-zone]] (any): *" + ficlettheprompt.text.lower() + "*\n")
    prompts.append({"source":"ficlet-zone","type":"comm","notes":"any length","prompt":ficlettheprompt.text.lower(),"url":ficletprompt})
except:
    pass

# first work out which hour of the month we're in, since hourlyprompts numbers its prompts by hour
try:
    hourselapsed = (today - 1) * 24
    hourstoday = int(datetime.now().strftime("%H"))
    currenthour = (hourselapsed + hourstoday)
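    # e.g. (illustrative values) on the 5th of the month at 14:00,
    # hourselapsed = 4 * 24 = 96 and currenthour = 96 + 14 = 110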
    with requests.Session() as s:
        response = s.post(login_url, data)
        hourly = "https://hourlyprompts.dreamwidth.org/?style=light"
        hourlypage = s.get(hourly)
        hourlysoup = BeautifulSoup(hourlypage.content, "html.parser")
        hourlyprompts = hourlysoup.find_all("h3", string=re.compile(monthstring))
        hourlysubsoup = BeautifulSoup(str(hourlyprompts[0]), "html.parser")
        hourlyurl = hourlysubsoup.find("a")
        hourlyprompt = (hourlyurl["href"])
        hourlypromptnew = (hourlyurl["href"] + "?style=light")
        hourlypromptpage = s.get(hourlypromptnew)
        hourlypromptsoup = BeautifulSoup(hourlypromptpage.content, "html.parser")
        hourlyprompttext = hourlypromptsoup.find(class_="entry-content")
        searchstring = r"<br/>" + re.escape(str(currenthour)) + r"\. .*?<br/>"
        hourlypromptmedian = re.findall(searchstring, str(hourlyprompttext))
        # [5:-5] strips the surrounding <br/> tags from the matched line
        hourlypromptthishour = str(hourlypromptmedian[0])[5:-5]
        print("hourlyprompts (any): \033[1m" + hourlypromptthishour.lower() + "\033[0m (" + hourlyprompt + ")\n")
        thefile.write("- [[" + hourlyprompt + "][hourlyprompts]] (any): *" + hourlypromptthishour.lower() + "*\n")
        prompts.append({"source":"hourlyprompts","type":"comm","notes":"any length","prompt":hourlypromptthishour.lower(),"url":hourlyprompt})
except:
    pass

# sweet and short: complex and time-dependent rules
# first need to work out which of the two alternating monthly challenges we're on

themonth = date.today().month
thisyear = date.today().year
# the comment-quicky and picture-prompt months swap over from year to year,
# so key the mapping off the parity of the year
if thisyear % 2:
    if themonth == 1 or themonth == 3 or themonth == 6 or themonth == 9 or themonth == 11:
        alternate = "comment"
    else:
        alternate = "picture"
else:
    if themonth == 1 or themonth == 3 or themonth == 6 or themonth == 9 or themonth == 11:
        alternate = "picture"
    else:
        alternate = "comment"
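
# so, for example, in a year where the first branch applies, months 1, 3, 6, 9
# and 11 are comment-quicky months and the rest take the picture prompt; the
# mapping flips in the following year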

# the sweet and short challenges below are skipped in April, August, and December
if themonth != 4 and themonth != 8 and themonth != 12:
    try:
        if today > 21:
            ssbingo = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+bingo?style=light&tag=!new+challenge,challenge:+bingo&mode=and"
            ssbingopage = requests.get(ssbingo)
            ssbingosoup = BeautifulSoup(ssbingopage.content, "html.parser")
            ssbingoprompts = ssbingosoup.find_all("h3")
            ssbingosubsoup = BeautifulSoup(str(ssbingoprompts[0]), "html.parser")
            ssbingourl = ssbingosubsoup.find("a")
            ssbingoprompt = (ssbingourl["href"])
            ssbingopromptnew = (ssbingourl["href"] + "?style=light")
            ssbingopromptpage = requests.get(ssbingopromptnew)
            ssbingopromptsoup = BeautifulSoup(ssbingopromptpage.content, "html.parser")
            ssbingoprompttext = ssbingopromptsoup.find(class_="entry-content")
            ssbingotheprompt = ssbingoprompttext.find_all("td")
            ssbingoclean = []
            for prompt in ssbingotheprompt:
                newprompt = re.sub("<.*?>","",str(prompt))
                ssbingoclean.append(newprompt)
            ssbingofinal = "; ".join(ssbingoclean).lower()
            print("sweet and short bingo (up to 500 words, separate or combined): \033[1m" + ssbingofinal + "\033[0m (" + ssbingoprompt + ")\n")
            thefile.write("- [[" + ssbingoprompt + "][sweet and short bingo]] (up to 500 words, separate or combined): *" + ssbingofinal + "*\n")
            prompts.append({"source":"sweetandshort","type":"comm","notes":"up to 500 words, separate or combined","prompt":ssbingoclean,"url":ssbingoprompt,"challenge":"bingo"})
    except:
        pass

    try:
        if today > 7:
            if alternate == "comment":
                ssquicky = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+comment+quicky?mode=and&style=light&tag=%21new+challenge,challenge:+comment+quicky"
                ssquickypage = requests.get(ssquicky)
                ssquickysoup = BeautifulSoup(ssquickypage.content, "html.parser")
                ssquickyprompts = ssquickysoup.find_all("h3")
                ssquickysubsoup = BeautifulSoup(str(ssquickyprompts[0]), "html.parser")
                ssquickyurl = ssquickysubsoup.find("a")
                ssquickyprompt = (ssquickyurl["href"])
                # deliberately not using style=light here so we can get at the comment contents
                ssquickypromptnew = (ssquickyurl["href"])
                ssquickypromptpage = requests.get(ssquickypromptnew)
                ssquickypromptsoup = BeautifulSoup(ssquickypromptpage.content, "html.parser")
                ssquickytheprompt = ssquickypromptsoup.find_all(class_="comment")
                ssquickycomments = []
                for comment in ssquickytheprompt:
                    if re.search("new prompts here",str(comment),re.IGNORECASE):
                        commenttext = re.findall(r"<div class=\"comment-content\".*?</div>",str(comment))
                        commentprompt = re.sub("<.*?>","",str(commenttext)).replace("\\'","'")
                        ssquickycomments.append(str(commentprompt)[2:-2])
                ssquickycomments = ssquickycomments[1:]
                ssquickycprompt = "; ".join(ssquickycomments)
                print("sweet and short comment quicky (up to 100 words): \033[1m" + ssquickycprompt.lower() + "\033[0m (" + ssquickyprompt + ")\n")
                thefile.write("- [[" + ssquickyprompt + "][sweet and short comment quicky]] (up to 100 words): *" + ssquickycprompt.lower() + "*\n")
                prompts.append({"source":"sweetandshort","type":"comm","notes":"up to 100 words","prompt":ssquickycomments,"url":ssquickyprompt,"challenge":"comment quicky"})
            elif alternate == "picture":
                sspicture = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+picture+prompt+fun?mode=and&style=light&tag=%21new+challenge,challenge:+picture+prompt+fun&mode=and"
                sspicturepage = requests.get(sspicture)
                sspicturesoup = BeautifulSoup(sspicturepage.content, "html.parser")
                monthstring = ".*" + month + ".*"
                sspictureprompts = sspicturesoup.find_all("h3", string=re.compile(monthstring))
                sspicturesubsoup = BeautifulSoup(str(sspictureprompts[0]), "html.parser")
                sspictureurl = sspicturesubsoup.find("a")
                sspictureprompt = (sspictureurl["href"])
                sspicturepromptnew = (sspictureurl["href"] + "?style=light")
                sspicturepromptpage = requests.get(sspicturepromptnew)
                sspicturepromptsoup = BeautifulSoup(sspicturepromptpage.content, "html.parser")
                sspictureprompttext = sspicturepromptsoup.find("h3")
                print("sweet and short picture prompts (up to 300 words): \033[1m" + sspictureprompttext.text.lower() + "\033[0m (" + sspictureprompt + ")\n")
                thefile.write("- [[" + sspictureprompt + "][sweet and short picture prompts]] (up to 300 words): *" + sspictureprompttext.text.lower() + "*\n")
                prompts.append({"source":"sweetandshort","type":"comm","notes":"up to 300 words","prompt":sspictureprompttext.text.lower(),"url":sspictureprompt,"challenge":"picture prompt fun"})
    except:
        pass

    try:
        ssmonthly = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+10+out+of+20?mode=and&style=light&tag=%21new+challenge,challenge:+10+out+of+20&mode=and"
        ssmonthlypage = requests.get(ssmonthly)
        ssmonthlysoup = BeautifulSoup(ssmonthlypage.content, "html.parser")
        ssmonthlyprompts = ssmonthlysoup.find_all("h3")
        ssmonthlysubsoup = BeautifulSoup(str(ssmonthlyprompts[0]), "html.parser")
        ssmonthlyurl = ssmonthlysubsoup.find("a")
        ssmonthlyprompt = (ssmonthlyurl["href"])
        ssmonthlypromptnew = (ssmonthlyurl["href"] + "?style=light")
        ssmonthlypromptpage = requests.get(ssmonthlypromptnew)
        ssmonthlypromptsoup = BeautifulSoup(ssmonthlypromptpage.content, "html.parser")
        ssmonthlyprompttext = ssmonthlypromptsoup.find(class_="entry-content")
        # the prompt list sits behind a cut; grab everything after the cut anchor,
        # strip the tags, and tidy up the separators
        ssmonthlypromptmedian = re.findall(r"<a name=\"cutid1\">.*", str(ssmonthlyprompttext))
        ssmonthlypromptstripone = re.sub("<.*?>","",str(ssmonthlypromptmedian))
        ssmonthlypromptstriptwo = re.sub("([a-z])- ","\\1; ",str(ssmonthlypromptstripone))
        ssmonthlypromptstripthree = re.sub("- ","",str(ssmonthlypromptstriptwo))
        ssmonthlypromptfinal = str(ssmonthlypromptstripthree)[2:-2]
        print("sweet and short monthly prompts (up to 500 words based on at least 10 prompts): \033[1m" + ssmonthlypromptfinal + "\033[0m (" + ssmonthlyprompt + ")\n")
        thefile.write("- [[" + ssmonthlyprompt + "][sweet and short monthly prompts]] (up to 500 words based on at least 10 prompts): *" + ssmonthlypromptfinal + "*\n")
        prompts.append({"source":"sweetandshort","type":"comm","notes":"up to 500 words based on at least 10 prompts","prompt":ssmonthlypromptfinal,"url":ssmonthlyprompt,"challenge":"monthly prompt"})
    except:
        pass

    try:
        if today > 14:
            ssone = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+only+one?mode=and&style=light&tag=%21new+challenge,challenge:+only+one&mode=and"
            ssonepage = requests.get(ssone)
            ssonesoup = BeautifulSoup(ssonepage.content, "html.parser")
            ssoneprompts = ssonesoup.find_all("h3")
            ssonesubsoup = BeautifulSoup(str(ssoneprompts[0]), "html.parser")
            ssoneurl = ssonesubsoup.find("a")
            ssoneprompt = (ssoneurl["href"])
            ssonepromptnew = (ssoneurl["href"] + "?style=light")
            ssonepromptpage = requests.get(ssonepromptnew)
            ssonepromptsoup = BeautifulSoup(ssonepromptpage.content, "html.parser")
            ssoneprompttext = ssonepromptsoup.find("i")
            ssonepromptstripone = re.sub("<.*?>","",str(ssoneprompttext))
            # turn the numbered "1." and "2." lines into a single "; "-separated string
            ssonepromptstriptwo = re.sub(r"1\. ","",ssonepromptstripone)
            ssonepromptfinal = re.sub(r"2\. ","; ",ssonepromptstriptwo)
            print("sweet and short one sentence (up to 500 words, use one or both lines as the start and/or end): \033[1m" + ssonepromptfinal + "\033[0m (" + ssoneprompt + ")\n")
            thefile.write("- [[" + ssoneprompt + "][sweet and short one sentence]] (up to 500 words, use one or both lines as the start and/or end): *" + ssonepromptfinal + "*\n")
            prompts.append({"source":"sweetandshort","type":"comm","notes":"up to 500 words, use one or both lines as the start and/or end","prompt":ssonepromptfinal,"url":ssoneprompt,"challenge":"only one"})
    except:
        pass

try:
    vocab = "https://vocab-drabbles.dreamwidth.org/?style=light&tag=challenge"
    vocabpage = requests.get(vocab)
    vocabsoup = BeautifulSoup(vocabpage.content, "html.parser")
    vocabprompts = vocabsoup.find_all("h3")
    vocabsubsoup = BeautifulSoup(str(vocabprompts[0]), "html.parser")
    vocaburl = vocabsubsoup.find("a")
    vocabprompt = (vocaburl["href"])
    vocabpromptnew = (vocaburl["href"] + "?style=light")
    vocabpromptpage = requests.get(vocabpromptnew)
    vocabpromptsoup = BeautifulSoup(vocabpromptpage.content, "html.parser")
    vocabprompttext = vocabpromptsoup.find(class_="entry-content")
    vocabtheprompt = vocabprompttext.find("strong")
    print("vocab-drabbles (50–500 words): \033[1m" + vocabtheprompt.text.lower() + "\033[0m (" + vocabprompt + ")\n")
    thefile.write("- [[" + vocabprompt + "][vocab-drabbles]] (50–500 words): *" + vocabtheprompt.text.lower() + "*\n")
    prompts.append({"source":"vocab-drabbles","type":"comm","notes":"50–500 words","prompt":vocabtheprompt.text.lower(),"url":vocabprompt})
except:
    pass
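
# at this point `prompts` holds one dict per scraped challenge, with the keys
# source, type, notes, prompt, and url (plus challenge where relevant); the
# HTML report below is generated from that list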

# finished with the Org file; now build the HTML report
thefile.close()

if os.path.exists("prompts.html"):
    os.remove("prompts.html")

htmlfile = open("prompts.html", "a")
htmlfile.write("<!DOCTYPE html>\n<html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width,initial-scale=1\">\n <link rel=\"stylesheet\" href=\"prompts.css\">\n <link rel=\"preconnect\" href=\"https://fonts.googleapis.com\">\n <link rel=\"preconnect\" href=\"https://fonts.gstatic.com\" crossorigin>\n <link href=\"https://fonts.googleapis.com/css2?family=Poppins:wght@400;700&display=swap\" rel=\"stylesheet\">\n <meta name=\"theme-color\" content=\"#d5d5ef\">\n <title>Multifandom prompt communities on DW</title>\n </head>\n <body>\n <h1>Multifandom prompt communities on DW</h1>\n <div class=\"promptwrapper\">\n")

for prompt in prompts:
    htmlfile.write(" <div class=\"promptcomm\"><p><span style=\"white-space: nowrap;\" class=\"ljuser\"><a href=\"https://" + prompt["source"] + ".dreamwidth.org/profile\"><img src=\"https://www.dreamwidth.org/img/silk/identity/community.png\" alt=\"[community profile]\" width=\"17\" height=\"17\" style=\"border: 0; padding-right: 1px;\" /></a><a href=\"https://" + prompt["source"] + ".dreamwidth.org/\"><b>" + prompt["source"].replace("-","_") + "</b></a></span>")
    # add the challenge name when the entry has one
    if "challenge" in prompt:
        htmlfile.write(" " + prompt["challenge"])
    # list-valued prompts get their own <ul>; everything else is a single link
    if isinstance(prompt["prompt"], list):
        htmlfile.write(": <a href=\"" + prompt["url"] + "\">prompt post</a> <span class=\"notes\">(" + prompt["notes"] + ")</span></p>\n <ul>\n")
        for theprompt in prompt["prompt"]:
            htmlfile.write(" <li><span class=\"prompt\">" + theprompt + "</span></li>\n")
        htmlfile.write(" </ul>")
    else:
        htmlfile.write(": <a href=\"" + prompt["url"] + "\"><span class=\"prompt\">" + prompt["prompt"] + "</span></a> <span class=\"notes\">(" + prompt["notes"] + ")</span></p>")
    htmlfile.write("</div>\n")

htmlfile.write(" </div>\n <div class=\"timestamp\"><p>Generated " + datetime.now().strftime("%H:%M (UK time), %-d %B %Y") + "</p></div>\n </body>\n</html>")
htmlfile.close()