Handle exceptions properly in prompt scraper

master
trémeur 11 months ago
parent bd85f72186
commit 31f4a80ddc
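
Every community section in this script follows the same fetch-parse-write sequence, and this commit wraps each one in its own try/except so that a single unreachable page or reshaped layout skips that community instead of crashing the whole run. A minimal sketch of the guard pattern being applied, assuming an illustrative fetch_prompt helper that is not part of this codebase:

import requests
from bs4 import BeautifulSoup

def fetch_prompt(tag_url, marker):
    # illustrative helper: find the newest h3 headline containing the marker,
    # follow its link, and return the link plus that entry's body
    soup = BeautifulSoup(requests.get(tag_url).content, "html.parser")
    headline = soup.find_all("h3", string=lambda text: marker in text.lower())[0]
    link = headline.find("a")["href"]
    entry = BeautifulSoup(requests.get(link + "?style=light").content, "html.parser")
    return link, entry.find(class_="entry-content")

try:
    link, body = fetch_prompt("https://100words.dreamwidth.org/tag/!prompt?style=light", "prompt:")
    print(body.find("strong").text.lower())
except:
    # the commit uses this same bare guard: any failure just skips the community
    pass

The trade-off of the bare except is that real bugs are silenced along with network hiccups; catching Exception (or requests.RequestException plus the AttributeError/IndexError a parse miss raises) would at least leave KeyboardInterrupt and SystemExit working normally.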

@@ -18,20 +18,23 @@ today = int(date.today().strftime("%d"))
 month = str(date.today().strftime("%B"))
 monthstring = ".*" + month + ".*"

-cent = "https://100words.dreamwidth.org/tag/!prompt?style=light&tag=%21prompt"
-centpage = requests.get(cent)
-centsoup = BeautifulSoup(centpage.content, "html.parser")
-centprompts = centsoup.find_all("h3", string=lambda text: "prompt:" in text.lower())
-centsubsoup = BeautifulSoup(str(centprompts[0]), "html.parser")
-centurl = centsubsoup.find("a")
-centprompt = (centurl["href"])
-centpromptnew = (centurl["href"] + "?style=light")
-centpromptpage = requests.get(centpromptnew)
-centpromptsoup = BeautifulSoup(centpromptpage.content, "html.parser")
-centprompttext = centpromptsoup.find(class_="entry-content")
-centtheprompt = centprompttext.find("strong")
-print("100words (100 words): \033[1m" + centtheprompt.text.lower() + "\033[0m (" + centprompt + ")\n")
-thefile.write("- [[" + centprompt + "][100words]] (100 words): *" + centtheprompt.text.lower() + "*\n")
+try:
+    cent = "https://100words.dreamwidth.org/tag/!prompt?style=light&tag=%21prompt"
+    centpage = requests.get(cent)
+    centsoup = BeautifulSoup(centpage.content, "html.parser")
+    centprompts = centsoup.find_all("h3", string=lambda text: "prompt:" in text.lower())
+    centsubsoup = BeautifulSoup(str(centprompts[0]), "html.parser")
+    centurl = centsubsoup.find("a")
+    centprompt = (centurl["href"])
+    centpromptnew = (centurl["href"] + "?style=light")
+    centpromptpage = requests.get(centpromptnew)
+    centpromptsoup = BeautifulSoup(centpromptpage.content, "html.parser")
+    centprompttext = centpromptsoup.find(class_="entry-content")
+    centtheprompt = centprompttext.find("strong")
+    print("100words (100 words): \033[1m" + centtheprompt.text.lower() + "\033[0m (" + centprompt + ")\n")
+    thefile.write("- [[" + centprompt + "][100words]] (100 words): *" + centtheprompt.text.lower() + "*\n")
+except:
+    pass

 # for this one we need to extract the right entry from a list, which may be an <ol> but may not be. also, need to use the right month, as next month's prompts are posted in advance
 # now defunct??
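
The comment above also flags the month problem: several of these communities post next month's prompts ahead of time, so the scraper filters headlines through the monthstring regex built at the top of the file before following any link. A toy demonstration of that filter, with invented sample headlines:

import re
from datetime import date

month = str(date.today().strftime("%B"))  # e.g. "March"
monthstring = ".*" + month + ".*"         # same pattern the script passes to re.compile()

headlines = ["April Challenge", month + " Challenge"]  # invented examples
current = [h for h in headlines if re.match(monthstring, h)]
print(current)  # only the current month's headline survives the filter
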
@@ -65,339 +68,384 @@ thefile.write("- [[" + centprompt + "][100words]] (100 words): *" + centthepromp
 # thefile.write("- [[" + thirtyoneprompt + "][31-days]] (any): *" + thirtyonetheprompt.lower() + "*\n")

ad = "https://anythingdrabble.dreamwidth.org/tag/mod!+post?style=light&tag=mod%21+post" try:
adpage = requests.get(ad) ad = "https://anythingdrabble.dreamwidth.org/tag/mod!+post?style=light&tag=mod%21+post"
adsoup = BeautifulSoup(adpage.content, "html.parser") adpage = requests.get(ad)
adprompts = adsoup.find_all("h3", string=lambda text: "prompt post" in text.lower()) adsoup = BeautifulSoup(adpage.content, "html.parser")
adsubsoup = BeautifulSoup(str(adprompts[0]), "html.parser") adprompts = adsoup.find_all("h3", string=lambda text: "prompt post" in text.lower())
adurl = adsubsoup.find("a") adsubsoup = BeautifulSoup(str(adprompts[0]), "html.parser")
adprompt = (adurl["href"]) adurl = adsubsoup.find("a")
adpromptnew = (adurl["href"] + "?style=light") adprompt = (adurl["href"])
adpromptpage = requests.get(adpromptnew) adpromptnew = (adurl["href"] + "?style=light")
adpromptsoup = BeautifulSoup(adpromptpage.content, "html.parser") adpromptpage = requests.get(adpromptnew)
adprompttext = adpromptsoup.find(class_="entry-content") adpromptsoup = BeautifulSoup(adpromptpage.content, "html.parser")
adtheprompt = adprompttext.find("center") adprompttext = adpromptsoup.find(class_="entry-content")
adstrippable = str(adtheprompt.text) adtheprompt = adprompttext.find("center")
while adstrippable[-1] == " ": adstrippable = str(adtheprompt.text)
adstrippable = adstrippable[:-1] while adstrippable[-1] == " ":
print("anythingdrabble (100, 200, 300, 400, or 500 words): \033[1m" + adstrippable.lower() + "\033[0m (" + adprompt + ")\n") adstrippable = adstrippable[:-1]
thefile.write("- [[" + adprompt + "][anythingdrabble]] (100, 200, 300, 400, or 500 words): *" + adstrippable.lower() + "*\n") print("anythingdrabble (100, 200, 300, 400, or 500 words): \033[1m" + adstrippable.lower() + "\033[0m (" + adprompt + ")\n")
thefile.write("- [[" + adprompt + "][anythingdrabble]] (100, 200, 300, 400, or 500 words): *" + adstrippable.lower() + "*\n")
except:
pass
dove = "https://dove-drabbles.dreamwidth.org/?style=light" try:
dovepage = requests.get(dove) dove = "https://dove-drabbles.dreamwidth.org/?style=light"
dovesoup = BeautifulSoup(dovepage.content, "html.parser") dovepage = requests.get(dove)
doveprompts = dovesoup.find_all("h3", string=lambda text: "prompt post" in text.lower()) dovesoup = BeautifulSoup(dovepage.content, "html.parser")
dovesubsoup = BeautifulSoup(str(doveprompts[0]), "html.parser") doveprompts = dovesoup.find_all("h3", string=lambda text: "prompt post" in text.lower())
doveurl = dovesubsoup.find("a") dovesubsoup = BeautifulSoup(str(doveprompts[0]), "html.parser")
doveprompt = (doveurl["href"]) doveurl = dovesubsoup.find("a")
dovepromptnew = (doveurl["href"] + "?style=light") doveprompt = (doveurl["href"])
dovepromptpage = requests.get(dovepromptnew) dovepromptnew = (doveurl["href"] + "?style=light")
dovepromptsoup = BeautifulSoup(dovepromptpage.content, "html.parser") dovepromptpage = requests.get(dovepromptnew)
doveprompttext = dovepromptsoup.find(class_="entry-content") dovepromptsoup = BeautifulSoup(dovepromptpage.content, "html.parser")
dovetheprompt = doveprompttext.find("i") doveprompttext = dovepromptsoup.find(class_="entry-content")
print("dove-drabbles (any): \033[1m" + dovetheprompt.text.lower() + "\033[0m (" + doveprompt + ")\n") dovetheprompt = doveprompttext.find("i")
thefile.write("- [[" + doveprompt + "][dove-drabbles]] (any): *" + dovetheprompt.text.lower() + "*\n") print("dove-drabbles (any): \033[1m" + dovetheprompt.text.lower() + "\033[0m (" + doveprompt + ")\n")
thefile.write("- [[" + doveprompt + "][dove-drabbles]] (any): *" + dovetheprompt.text.lower() + "*\n")
except:
pass
-with requests.Session() as s:
-    response = s.post(login_url , data)
-    zone = "https://drabble-zone.dreamwidth.org/tag/mod-post?style=light&tag=mod-post"
-    zonepage = s.get(zone)
-    zonesoup = BeautifulSoup(zonepage.content, "html.parser")
-    zoneprompts = zonesoup.find_all("h3", string=lambda text: "challenge" in text.lower())
-    zonesubsoup = BeautifulSoup(str(zoneprompts[0]), "html.parser")
-    zoneurl = zonesubsoup.find("a")
-    zoneprompt = (zoneurl["href"])
-    zonepromptnew = (zoneurl["href"] + "?style=light")
-    zonepromptpage = s.get(zonepromptnew)
-    zonepromptsoup = BeautifulSoup(zonepromptpage.content, "html.parser")
-    zoneprompttext = zonepromptsoup.find(class_="entry-content")
-    zonetheprompt = zoneprompttext.find("strong")
-    print("drabble-zone (100 or 200 words): \033[1m" + zonetheprompt.text.lower() + "\033[0m (" + zoneprompt + ")\n")
-    thefile.write("- [[" + zoneprompt + "][drabble-zone]] (100 or 200 words): *" + zonetheprompt.text.lower() + "*\n")
-    emotion = "https://emotion100.dreamwidth.org/tag/*modpost?style=light&tag=%2Amodpost"
-    emotionpage = s.get(emotion)
-    emotionsoup = BeautifulSoup(emotionpage.content, "html.parser")
-    emotionprompts = emotionsoup.find_all("h3", string=lambda text: "prompt" in text.lower())
-    emotionsubsoup = BeautifulSoup(str(emotionprompts[0]), "html.parser")
-    emotionurl = emotionsubsoup.find("a")
-    emotionprompt = (emotionurl["href"])
-    emotionpromptnew = (emotionurl["href"] + "?style=light")
-    emotionpromptpage = s.get(emotionpromptnew)
-    emotionpromptsoup = BeautifulSoup(emotionpromptpage.content, "html.parser")
-    emotionprompttext = emotionpromptsoup.find(class_="entry-content")
-    emotiontheprompt = emotionprompttext.find_all("span")[-1]
-    print("emotion100 (100 words or a multiple of 100): \033[1m" + emotiontheprompt.text.lower() + "\033[0m (" + emotionprompt + ")\n")
-    thefile.write("- [[" + emotionprompt + "][emotion100]] (100 words or a multiple of 100): *" + emotiontheprompt.text.lower() + "*\n")
+try:
+    with requests.Session() as s:
+        response = s.post(login_url , data)
+        zone = "https://drabble-zone.dreamwidth.org/tag/mod-post?style=light&tag=mod-post"
+        zonepage = s.get(zone)
+        zonesoup = BeautifulSoup(zonepage.content, "html.parser")
+        zoneprompts = zonesoup.find_all("h3", string=lambda text: "challenge" in text.lower())
+        zonesubsoup = BeautifulSoup(str(zoneprompts[0]), "html.parser")
+        zoneurl = zonesubsoup.find("a")
+        zoneprompt = (zoneurl["href"])
+        zonepromptnew = (zoneurl["href"] + "?style=light")
+        zonepromptpage = s.get(zonepromptnew)
+        zonepromptsoup = BeautifulSoup(zonepromptpage.content, "html.parser")
+        zoneprompttext = zonepromptsoup.find(class_="entry-content")
+        zonetheprompt = zoneprompttext.find("strong")
+        print("drabble-zone (100 or 200 words): \033[1m" + zonetheprompt.text.lower() + "\033[0m (" + zoneprompt + ")\n")
+        thefile.write("- [[" + zoneprompt + "][drabble-zone]] (100 or 200 words): *" + zonetheprompt.text.lower() + "*\n")
+        emotion = "https://emotion100.dreamwidth.org/tag/*modpost?style=light&tag=%2Amodpost"
+        emotionpage = s.get(emotion)
+        emotionsoup = BeautifulSoup(emotionpage.content, "html.parser")
+        emotionprompts = emotionsoup.find_all("h3", string=lambda text: "prompt" in text.lower())
+        emotionsubsoup = BeautifulSoup(str(emotionprompts[0]), "html.parser")
+        emotionurl = emotionsubsoup.find("a")
+        emotionprompt = (emotionurl["href"])
+        emotionpromptnew = (emotionurl["href"] + "?style=light")
+        emotionpromptpage = s.get(emotionpromptnew)
+        emotionpromptsoup = BeautifulSoup(emotionpromptpage.content, "html.parser")
+        emotionprompttext = emotionpromptsoup.find(class_="entry-content")
+        emotiontheprompt = emotionprompttext.find_all("span")[-1]
+        print("emotion100 (100 words or a multiple of 100): \033[1m" + emotiontheprompt.text.lower() + "\033[0m (" + emotionprompt + ")\n")
+        thefile.write("- [[" + emotionprompt + "][emotion100]] (100 words or a multiple of 100): *" + emotiontheprompt.text.lower() + "*\n")
+except:
+    pass

 # for this one, have to get prompts from comments
ffa = "https://fail-fandomanon.dreamwidth.org/?style=light" try:
ffapage = requests.get(ffa) ffa = "https://fail-fandomanon.dreamwidth.org/?style=light"
ffasoup = BeautifulSoup(ffapage.content, "html.parser") ffapage = requests.get(ffa)
ffaprompts = ffasoup.find_all("h3", string=lambda text: "ffa dw post" in text.lower()) ffasoup = BeautifulSoup(ffapage.content, "html.parser")
ffapromptstrim = [x for x in ffaprompts if "Placeholder" not in str(x)] ffaprompts = ffasoup.find_all("h3", string=lambda text: "ffa dw post" in text.lower())
ffasubsoup = BeautifulSoup(str(ffapromptstrim[0]), "html.parser") ffapromptstrim = [x for x in ffaprompts if "Placeholder" not in str(x)]
ffaurl = ffasubsoup.find("a") ffasubsoup = BeautifulSoup(str(ffapromptstrim[0]), "html.parser")
ffaprompt = (ffaurl["href"]) ffaurl = ffasubsoup.find("a")
ffapromptnew = (ffaprompt + "?style=light") ffaprompt = (ffaurl["href"])
ffapromptpage = requests.get(ffapromptnew) ffapromptnew = (ffaprompt + "?style=light")
ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
ffaprompttext = ffapromptsoup.find(id="comments")
ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
ffatheprompt = ffaresoup.find_all("h4",text=True)
ffacent = []
i = 1
while i < 8:
ffapromptnew = (ffaprompt + "?page=" + str(i) + "&style=light")
ffapromptpage = requests.get(ffapromptnew) ffapromptpage = requests.get(ffapromptnew)
ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser") ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
ffaprompttext = ffapromptsoup.find(id="comments") ffaprompttext = ffapromptsoup.find(id="comments")
ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser") ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
ffatheprompt = ffaresoup.find_all("h4",text=True) ffatheprompt = ffaresoup.find_all("h4",text=True)
for each in ffatheprompt: ffacent = []
if "100 words of" in (str(each.get_text())) or "100 Words of" in (str(each.get_text())) or "100 Words Of" in (str(each.get_text())): i = 1
if "Re:" not in (str(each.get_text())) and "catch-up" not in (str(each.get_text())) and "Catch-Up" not in (str(each.get_text())): while i < 8:
ffacent.append(str(each.get_text())) ffapromptnew = (ffaprompt + "?page=" + str(i) + "&style=light")
i += 1 ffapromptpage = requests.get(ffapromptnew)
if ffacent: ffapromptsoup = BeautifulSoup(ffapromptpage.content, "html.parser")
ffacent = list(dict.fromkeys(ffacent)) ffaprompttext = ffapromptsoup.find(id="comments")
ffacentnew = [] ffaresoup = BeautifulSoup(str(ffaprompttext), "html.parser")
for x in ffacent: ffatheprompt = ffaresoup.find_all("h4",text=True)
x = x[13:] for each in ffatheprompt:
if x != "" and not x.startswith(" Fills"): if "100 words of" in (str(each.get_text())) or "100 Words of" in (str(each.get_text())) or "100 Words Of" in (str(each.get_text())):
ffacentnew.append(x) if "Re:" not in (str(each.get_text())) and "catch-up" not in (str(each.get_text())) and "Catch-Up" not in (str(each.get_text())):
ffaformat = "; ".join(ffacentnew) ffacent.append(str(each.get_text()))
print("fail-fandomanon (any): \033[1m" + ffaformat.lower() + "\033[0m (" + ffaprompt + ")\n") i += 1
thefile.write("- [[" + ffaprompt + "][fail-fandomanon]] (any): *" + ffaformat.lower() + "*\n") if ffacent:
ffacent = list(dict.fromkeys(ffacent))
ffacentnew = []
for x in ffacent:
x = x[13:]
if x != "" and not x.startswith(" Fills"):
ffacentnew.append(x)
ffaformat = "; ".join(ffacentnew)
print("fail-fandomanon (any): \033[1m" + ffaformat.lower() + "\033[0m (" + ffaprompt + ")\n")
thefile.write("- [[" + ffaprompt + "][fail-fandomanon]] (any): *" + ffaformat.lower() + "*\n")
except:
pass
 # for this one, prompts are unavailable on tuesdays and wednesdays
-weekprogress = datetime.now().weekday()
-if not 0 < weekprogress < 3:
-    fandom = "https://fandomweekly.dreamwidth.org/?style=light&tag=%23challenge"
-    fandompage = requests.get(fandom)
-    fandomsoup = BeautifulSoup(fandompage.content, "html.parser")
-    fandomprompts = fandomsoup.find_all("h3", string=lambda text: "challenge post" in text.lower())
-    fandomsubsoup = BeautifulSoup(str(fandomprompts[0]), "html.parser")
-    fandomurl = fandomsubsoup.find("a")
-    fandomprompt = (fandomurl["href"])
-    fandompromptnew = (fandomurl["href"] + "?style=light")
-    fandompromptpage = requests.get(fandompromptnew)
-    fandompromptsoup = BeautifulSoup(fandompromptpage.content, "html.parser")
-    fandomprompttext = fandompromptsoup.find(class_="entry-content")
-    fandomtheprompt = fandomprompttext.find("td")
-    print("fandomweekly (any, competitive): \033[1m" + fandomtheprompt.text.lower() + "\033[0m (" + fandomprompt + ")\n")
-    thefile.write("- [[" + fandomprompt + "][fandomweekly]] (any, competitive): *" + fandomtheprompt.text.lower() + "*\n")
+try:
+    weekprogress = datetime.now().weekday()
+    if not 0 < weekprogress < 3:
+        fandom = "https://fandomweekly.dreamwidth.org/?style=light&tag=%23challenge"
+        fandompage = requests.get(fandom)
+        fandomsoup = BeautifulSoup(fandompage.content, "html.parser")
+        fandomprompts = fandomsoup.find_all("h3", string=lambda text: "challenge post" in text.lower())
+        fandomsubsoup = BeautifulSoup(str(fandomprompts[0]), "html.parser")
+        fandomurl = fandomsubsoup.find("a")
+        fandomprompt = (fandomurl["href"])
+        fandompromptnew = (fandomurl["href"] + "?style=light")
+        fandompromptpage = requests.get(fandompromptnew)
+        fandompromptsoup = BeautifulSoup(fandompromptpage.content, "html.parser")
+        fandomprompttext = fandompromptsoup.find(class_="entry-content")
+        fandomtheprompt = fandomprompttext.find("td")
+        print("fandomweekly (any, competitive): \033[1m" + fandomtheprompt.text.lower() + "\033[0m (" + fandomprompt + ")\n")
+        thefile.write("- [[" + fandomprompt + "][fandomweekly]] (any, competitive): *" + fandomtheprompt.text.lower() + "*\n")
+except:
+    pass

flash = "https://fan-flashworks.dreamwidth.org/?style=light&tag=admin" try:
flashpage = requests.get(flash) flash = "https://fan-flashworks.dreamwidth.org/?style=light&tag=admin"
flashsoup = BeautifulSoup(flashpage.content, "html.parser") flashpage = requests.get(flash)
flashprompts = flashsoup.find_all("h3", string=lambda text: "challenge" in text.lower()) flashsoup = BeautifulSoup(flashpage.content, "html.parser")
flashsubsoup = BeautifulSoup(str(flashprompts[0]), "html.parser") flashprompts = flashsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
flashurl = flashsubsoup.find("a") flashsubsoup = BeautifulSoup(str(flashprompts[0]), "html.parser")
flashprompt = (flashurl["href"]) flashurl = flashsubsoup.find("a")
flashpromptnew = (flashurl["href"] + "?style=light") flashprompt = (flashurl["href"])
flashpromptpage = requests.get(flashpromptnew) flashpromptnew = (flashurl["href"] + "?style=light")
flashpromptsoup = BeautifulSoup(flashpromptpage.content, "html.parser") flashpromptpage = requests.get(flashpromptnew)
flashprompttext = flashpromptsoup.find(class_="entry-content") flashpromptsoup = BeautifulSoup(flashpromptpage.content, "html.parser")
flashtheprompt = flashprompttext.find("center") flashprompttext = flashpromptsoup.find(class_="entry-content")
print("fan-flashworks (any, cant post elsewhere until round is closed): \033[1m" + flashtheprompt.text.lower() + "\033[0m (" + flashprompt + ")\n") flashtheprompt = flashprompttext.find("center")
thefile.write("- [[" + flashprompt + "][fan-flashworks]] (any, cant post elsewhere until round is closed): *" + flashtheprompt.text.lower() + "*\n") print("fan-flashworks (any, cant post elsewhere until round is closed): \033[1m" + flashtheprompt.text.lower() + "\033[0m (" + flashprompt + ")\n")
thefile.write("- [[" + flashprompt + "][fan-flashworks]] (any, cant post elsewhere until round is closed): *" + flashtheprompt.text.lower() + "*\n")
except:
pass
femslash = "https://femslashficlets.dreamwidth.org/tag/challenges?style=light&tag=challenges" try:
femslashpage = requests.get(femslash) femslash = "https://femslashficlets.dreamwidth.org/tag/challenges?style=light&tag=challenges"
femslashsoup = BeautifulSoup(femslashpage.content, "html.parser") femslashpage = requests.get(femslash)
femslashprompts = femslashsoup.find_all("h3", string=lambda text: "challenge" in text.lower()) femslashsoup = BeautifulSoup(femslashpage.content, "html.parser")
femslashsubsoup = BeautifulSoup(str(femslashprompts[0]), "html.parser") femslashprompts = femslashsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
femslashurl = femslashsubsoup.find("a") femslashsubsoup = BeautifulSoup(str(femslashprompts[0]), "html.parser")
femslashprompt = (femslashurl["href"]) femslashurl = femslashsubsoup.find("a")
femslashpromptnew = (femslashurl["href"] + "?style=light") femslashprompt = (femslashurl["href"])
femslashpromptpage = requests.get(femslashpromptnew) femslashpromptnew = (femslashurl["href"] + "?style=light")
femslashpromptsoup = BeautifulSoup(femslashpromptpage.content, "html.parser") femslashpromptpage = requests.get(femslashpromptnew)
femslashprompttext = femslashpromptsoup.find(class_="entry-content") femslashpromptsoup = BeautifulSoup(femslashpromptpage.content, "html.parser")
femslashtheprompt = femslashprompttext.find("i") femslashprompttext = femslashpromptsoup.find(class_="entry-content")
if femslashtheprompt is not None: femslashtheprompt = femslashprompttext.find("i")
print("femslash-ficlets (1001000 words, F/F): \033[1m" + femslashtheprompt.text.lower() + "\033[0m (" + femslashprompt + ")\n") if femslashtheprompt is not None:
thefile.write("- [[" + femslashprompt + "][femslashficlets]] (100 words or a multiple of 100): *" + femslashtheprompt.text.lower() + "*\n") print("femslash-ficlets (1001000 words, F/F): \033[1m" + femslashtheprompt.text.lower() + "\033[0m (" + femslashprompt + ")\n")
thefile.write("- [[" + femslashprompt + "][femslashficlets]] (100 words or a multiple of 100): *" + femslashtheprompt.text.lower() + "*\n")
except:
pass
-with requests.Session() as s:
-    response = s.post(login_url , data)
-    fffc = "https://fffc.dreamwidth.org/tag/!challenges?style=light&tag=%21challenges"
-    fffcpage = s.get(fffc)
-    fffcsoup = BeautifulSoup(fffcpage.content, "html.parser")
-    if 18 > today > 9:
-        fffclittleprompts = fffcsoup.find_all("h3", string=lambda text: "little special" in text.lower())
-        fffclittlesubsoup = BeautifulSoup(str(fffclittleprompts[0]), "html.parser")
-        fffclittleurl = fffclittlesubsoup.find("a")
-        fffclittleprompt = (fffclittleurl["href"])
-        fffclittlepromptnew = (fffclittleurl["href"] + "?style=light")
-        fffclittlepromptpage = s.get(fffclittlepromptnew)
-        fffclittlepromptsoup = BeautifulSoup(fffclittlepromptpage.content, "html.parser")
-        fffclittleprompttext = fffclittlepromptsoup.find("h3")
-        print("fffc little special (at least 100 words): \033[1m" + fffclittleprompttext.text.lower() + "\033[0m (" + fffclittleprompt + ")\n")
-        thefile.write("- [[" + fffclittleprompt + "][fffc little special]] (at least 100 words): *" + fffclittleprompttext.text.lower() + "*\n")
-    fffcmadnessprompts = fffcsoup.find_all("h3", string=lambda text: "froday madness" in text.lower())
-    fffcmadnesssubsoup = BeautifulSoup(str(fffcmadnessprompts[0]), "html.parser")
-    fffcmadnessurl = fffcmadnesssubsoup.find("a")
-    fffcmadnessprompt = (fffcmadnessurl["href"])
-    fffcmadnesspromptnew = (fffcmadnessurl["href"] + "?style=light")
-    fffcmadnesspromptpage = s.get(fffcmadnesspromptnew)
-    fffcmadnesspromptsoup = BeautifulSoup(fffcmadnesspromptpage.content, "html.parser")
-    fffcmadnessprompttext = fffcmadnesspromptsoup.find(class_="entry-content")
-    fffcmadnesstheprompt = fffcmadnessprompttext.find("b")
-    print("fffc madness (at least 2000 words): \033[1m" + fffcmadnesstheprompt.text.lower() + "\033[0m (" + fffcmadnessprompt + ")\n")
-    thefile.write("- [[" + fffcmadnessprompt + "][fffc madness]] (at least 2000 words): *" + fffcmadnesstheprompt.text.lower() + "*\n")
-    fffcmonthlyprompts = fffcsoup.find_all("h3", string=re.compile(monthstring))
-    fffcmonthlysubsoup = BeautifulSoup(str(fffcmonthlyprompts[0]), "html.parser")
-    fffcmonthlyurl = fffcmonthlysubsoup.find("a")
-    fffcmonthlyprompt = (fffcmonthlyurl["href"])
-    fffcmonthlypromptnew = (fffcmonthlyurl["href"] + "?style=light")
-    fffcmonthlypromptpage = s.get(fffcmonthlypromptnew)
-    fffcmonthlypromptsoup = BeautifulSoup(fffcmonthlypromptpage.content, "html.parser")
-    fffcmonthlyprompttext = fffcmonthlypromptsoup.find("h3")
-    print("fffc monthly special (usually at least 500 words): \033[1m" + fffcmonthlyprompttext.text.lower() + "\033[0m (" + fffcmonthlyprompt + ")\n")
-    thefile.write("- [[" + fffcmonthlyprompt + "][fffc monthly special]] (usually at least 500 words): *" + fffcmonthlyprompttext.text.lower() + "*\n")
-    fffcregularprompts = fffcsoup.find_all("h3", string=lambda text: "regular challenge" in text.lower())
-    fffcregularsubsoup = BeautifulSoup(str(fffcregularprompts[0]), "html.parser")
-    fffcregularurl = fffcregularsubsoup.find("a")
-    fffcregularprompt = (fffcregularurl["href"])
-    fffcregularpromptnew = (fffcregularurl["href"] + "?style=light")
-    fffcregularpromptpage = s.get(fffcregularpromptnew)
-    fffcregularpromptsoup = BeautifulSoup(fffcregularpromptpage.content, "html.parser")
-    fffcregularprompttext = fffcregularpromptsoup.find(class_="entry-content")
-    fffcregulartheprompt = fffcregularprompttext.find("b")
-    print("fffc regular challenge (at least 100 words): \033[1m" + fffcregulartheprompt.text.lower() + "\033[0m (" + fffcregularprompt + ")\n")
-    thefile.write("- [[" + fffcregularprompt + "][fffc regular challenge]] (at least 100 words): *" + fffcregulartheprompt.text.lower() + "*\n")
+try:
+    with requests.Session() as s:
+        response = s.post(login_url , data)
+        fffc = "https://fffc.dreamwidth.org/tag/!challenges?style=light&tag=%21challenges"
+        fffcpage = s.get(fffc)
+        fffcsoup = BeautifulSoup(fffcpage.content, "html.parser")
+        if 18 > today > 9:
+            fffclittleprompts = fffcsoup.find_all("h3", string=lambda text: "little special" in text.lower())
+            fffclittlesubsoup = BeautifulSoup(str(fffclittleprompts[0]), "html.parser")
+            fffclittleurl = fffclittlesubsoup.find("a")
+            fffclittleprompt = (fffclittleurl["href"])
+            fffclittlepromptnew = (fffclittleurl["href"] + "?style=light")
+            fffclittlepromptpage = s.get(fffclittlepromptnew)
+            fffclittlepromptsoup = BeautifulSoup(fffclittlepromptpage.content, "html.parser")
+            fffclittleprompttext = fffclittlepromptsoup.find("h3")
+            print("fffc little special (at least 100 words): \033[1m" + fffclittleprompttext.text.lower() + "\033[0m (" + fffclittleprompt + ")\n")
+            thefile.write("- [[" + fffclittleprompt + "][fffc little special]] (at least 100 words): *" + fffclittleprompttext.text.lower() + "*\n")
+        fffcmadnessprompts = fffcsoup.find_all("h3", string=lambda text: "froday madness" in text.lower())
+        fffcmadnesssubsoup = BeautifulSoup(str(fffcmadnessprompts[0]), "html.parser")
+        fffcmadnessurl = fffcmadnesssubsoup.find("a")
+        fffcmadnessprompt = (fffcmadnessurl["href"])
+        fffcmadnesspromptnew = (fffcmadnessurl["href"] + "?style=light")
+        fffcmadnesspromptpage = s.get(fffcmadnesspromptnew)
+        fffcmadnesspromptsoup = BeautifulSoup(fffcmadnesspromptpage.content, "html.parser")
+        fffcmadnessprompttext = fffcmadnesspromptsoup.find(class_="entry-content")
+        fffcmadnesstheprompt = fffcmadnessprompttext.find("b")
+        print("fffc madness (at least 2000 words): \033[1m" + fffcmadnesstheprompt.text.lower() + "\033[0m (" + fffcmadnessprompt + ")\n")
+        thefile.write("- [[" + fffcmadnessprompt + "][fffc madness]] (at least 2000 words): *" + fffcmadnesstheprompt.text.lower() + "*\n")
+        fffcmonthlyprompts = fffcsoup.find_all("h3", string=re.compile(monthstring))
+        fffcmonthlysubsoup = BeautifulSoup(str(fffcmonthlyprompts[0]), "html.parser")
+        fffcmonthlyurl = fffcmonthlysubsoup.find("a")
+        fffcmonthlyprompt = (fffcmonthlyurl["href"])
+        fffcmonthlypromptnew = (fffcmonthlyurl["href"] + "?style=light")
+        fffcmonthlypromptpage = s.get(fffcmonthlypromptnew)
+        fffcmonthlypromptsoup = BeautifulSoup(fffcmonthlypromptpage.content, "html.parser")
+        fffcmonthlyprompttext = fffcmonthlypromptsoup.find("h3")
+        print("fffc monthly special (usually at least 500 words): \033[1m" + fffcmonthlyprompttext.text.lower() + "\033[0m (" + fffcmonthlyprompt + ")\n")
+        thefile.write("- [[" + fffcmonthlyprompt + "][fffc monthly special]] (usually at least 500 words): *" + fffcmonthlyprompttext.text.lower() + "*\n")
+        fffcregularprompts = fffcsoup.find_all("h3", string=lambda text: "regular challenge" in text.lower())
+        fffcregularsubsoup = BeautifulSoup(str(fffcregularprompts[0]), "html.parser")
+        fffcregularurl = fffcregularsubsoup.find("a")
+        fffcregularprompt = (fffcregularurl["href"])
+        fffcregularpromptnew = (fffcregularurl["href"] + "?style=light")
+        fffcregularpromptpage = s.get(fffcregularpromptnew)
+        fffcregularpromptsoup = BeautifulSoup(fffcregularpromptpage.content, "html.parser")
+        fffcregularprompttext = fffcregularpromptsoup.find(class_="entry-content")
+        fffcregulartheprompt = fffcregularprompttext.find("b")
+        print("fffc regular challenge (at least 100 words): \033[1m" + fffcregulartheprompt.text.lower() + "\033[0m (" + fffcregularprompt + ")\n")
+        thefile.write("- [[" + fffcregularprompt + "][fffc regular challenge]] (at least 100 words): *" + fffcregulartheprompt.text.lower() + "*\n")
+except:
+    pass

ficlet = "https://ficlet-zone.dreamwidth.org/tag/challenge+post?style=light&tag=challenge+post" try:
ficletpage = requests.get(ficlet) ficlet = "https://ficlet-zone.dreamwidth.org/tag/challenge+post?style=light&tag=challenge+post"
ficletsoup = BeautifulSoup(ficletpage.content, "html.parser") ficletpage = requests.get(ficlet)
ficletprompts = ficletsoup.find_all("h3", string=lambda text: "challenge" in text.lower()) ficletsoup = BeautifulSoup(ficletpage.content, "html.parser")
ficletsubsoup = BeautifulSoup(str(ficletprompts[0]), "html.parser") ficletprompts = ficletsoup.find_all("h3", string=lambda text: "challenge" in text.lower())
ficleturl = ficletsubsoup.find("a") ficletsubsoup = BeautifulSoup(str(ficletprompts[0]), "html.parser")
ficletprompt = (ficleturl["href"]) ficleturl = ficletsubsoup.find("a")
ficletpromptnew = (ficleturl["href"] + "?style=light") ficletprompt = (ficleturl["href"])
ficletpromptpage = requests.get(ficletpromptnew) ficletpromptnew = (ficleturl["href"] + "?style=light")
ficletpromptsoup = BeautifulSoup(ficletpromptpage.content, "html.parser") ficletpromptpage = requests.get(ficletpromptnew)
ficletprompttext = ficletpromptsoup.find(class_="entry-content") ficletpromptsoup = BeautifulSoup(ficletpromptpage.content, "html.parser")
ficlettheprompt = ficletprompttext.find("a") ficletprompttext = ficletpromptsoup.find(class_="entry-content")
print("ficlet-zone (any): \033[1m" + ficlettheprompt.text.lower() + "\033[0m (" + ficletprompt + ")\n") ficlettheprompt = ficletprompttext.find("a")
thefile.write("- [[" + ficletprompt + "][ficlet-zone]] (any): *" + ficlettheprompt.text.lower() + "*\n") print("ficlet-zone (any): \033[1m" + ficlettheprompt.text.lower() + "\033[0m (" + ficletprompt + ")\n")
thefile.write("- [[" + ficletprompt + "][ficlet-zone]] (any): *" + ficlettheprompt.text.lower() + "*\n")
except:
pass
 # first calculate the hour of the month …
-hourselapsed = (today - 1) * 24
-hourstoday = int(datetime.now().strftime("%H"))
-currenthour = (hourselapsed + hourstoday)
-with requests.Session() as s:
-    response = s.post(login_url , data)
-    hourly = "https://hourlyprompts.dreamwidth.org/?style=light"
-    hourlypage = s.get(hourly)
-    hourlysoup = BeautifulSoup(hourlypage.content, "html.parser")
-    hourlyprompts = hourlysoup.find_all("h3", string=re.compile(monthstring))
-    hourlysubsoup = BeautifulSoup(str(hourlyprompts[0]), "html.parser")
-    hourlyurl = hourlysubsoup.find("a")
-    hourlyprompt = (hourlyurl["href"])
-    hourlypromptnew = (hourlyurl["href"] + "?style=light")
-    hourlypromptpage = s.get(hourlypromptnew)
-    hourlypromptsoup = BeautifulSoup(hourlypromptpage.content, "html.parser")
-    hourlyprompttext = hourlypromptsoup.find(class_="entry-content")
-    searchstring = r"<br/>" + re.escape(str(currenthour)) + r"\. .*?<br/>"
-    hourlypromptmedian = re.findall(searchstring, str(hourlyprompttext))
-    hourlypromptthishour = str(hourlypromptmedian[0])[5:-5]
-    print("hourlyprompts (any): \033[1m" + hourlypromptthishour.lower() + "\033[0m (" + hourlyprompt + ")\n")
-    thefile.write("- [[" + hourlyprompt + "][hourlyprompts]] (any): *" + hourlypromptthishour.lower() + "*\n")
+try:
+    hourselapsed = (today - 1) * 24
+    hourstoday = int(datetime.now().strftime("%H"))
+    currenthour = (hourselapsed + hourstoday)
+    with requests.Session() as s:
+        response = s.post(login_url , data)
+        hourly = "https://hourlyprompts.dreamwidth.org/?style=light"
+        hourlypage = s.get(hourly)
+        hourlysoup = BeautifulSoup(hourlypage.content, "html.parser")
+        hourlyprompts = hourlysoup.find_all("h3", string=re.compile(monthstring))
+        hourlysubsoup = BeautifulSoup(str(hourlyprompts[0]), "html.parser")
+        hourlyurl = hourlysubsoup.find("a")
+        hourlyprompt = (hourlyurl["href"])
+        hourlypromptnew = (hourlyurl["href"] + "?style=light")
+        hourlypromptpage = s.get(hourlypromptnew)
+        hourlypromptsoup = BeautifulSoup(hourlypromptpage.content, "html.parser")
+        hourlyprompttext = hourlypromptsoup.find(class_="entry-content")
+        searchstring = r"<br/>" + re.escape(str(currenthour)) + r"\. .*?<br/>"
+        hourlypromptmedian = re.findall(searchstring, str(hourlyprompttext))
+        hourlypromptthishour = str(hourlypromptmedian[0])[5:-5]
+        print("hourlyprompts (any): \033[1m" + hourlypromptthishour.lower() + "\033[0m (" + hourlyprompt + ")\n")
+        thefile.write("- [[" + hourlyprompt + "][hourlyprompts]] (any): *" + hourlypromptthishour.lower() + "*\n")
+except:
+    pass

-if 30 > today > 21:
-    ssbingo = "https://sweetandshort.dreamwidth.org/tag/challenge:+bingo?style=light&tag=challenge:+bingo"
-    ssbingopage = requests.get(ssbingo)
-    ssbingosoup = BeautifulSoup(ssbingopage.content, "html.parser")
-    ssbingoprompts = ssbingosoup.find_all("h3")
-    ssbingosubsoup = BeautifulSoup(str(ssbingoprompts[0]), "html.parser")
-    ssbingourl = ssbingosubsoup.find("a")
-    ssbingoprompt = (ssbingourl["href"])
-    ssbingopromptnew = (ssbingourl["href"] + "?style=light")
-    ssbingopromptpage = requests.get(ssbingopromptnew)
-    ssbingopromptsoup = BeautifulSoup(ssbingopromptpage.content, "html.parser")
-    ssbingoprompttext = ssbingopromptsoup.find(class_="entry-content")
-    ssbingotheprompt = ssbingoprompttext.find_all("td")
-    ssbingoclean = []
-    for prompt in ssbingotheprompt:
-        newprompt = re.sub("<.*?>","",str(prompt))
-        ssbingoclean.append(newprompt)
-    ssbingofinal = "; ".join(ssbingoclean).lower()
-    print("sweet and short bingo (up to 300 words for two prompts, up to 600 words for four prompts): \033[1m" + ssbingofinal + "\033[0m (" + ssbingoprompt + ")\n")
-    thefile.write("- [[" + ssbingoprompt + "][sweet and short bingo]] (up to 300 words for two prompts, up to 600 words for four prompts): *" + ssbingofinal + "*\n")
+try:
+    if 30 > today > 21:
+        ssbingo = "https://sweetandshort.dreamwidth.org/tag/challenge:+bingo?style=light&tag=challenge:+bingo"
+        ssbingopage = requests.get(ssbingo)
+        ssbingosoup = BeautifulSoup(ssbingopage.content, "html.parser")
+        ssbingoprompts = ssbingosoup.find_all("h3")
+        ssbingosubsoup = BeautifulSoup(str(ssbingoprompts[0]), "html.parser")
+        ssbingourl = ssbingosubsoup.find("a")
+        ssbingoprompt = (ssbingourl["href"])
+        ssbingopromptnew = (ssbingourl["href"] + "?style=light")
+        ssbingopromptpage = requests.get(ssbingopromptnew)
+        ssbingopromptsoup = BeautifulSoup(ssbingopromptpage.content, "html.parser")
+        ssbingoprompttext = ssbingopromptsoup.find(class_="entry-content")
+        ssbingotheprompt = ssbingoprompttext.find_all("td")
+        ssbingoclean = []
+        for prompt in ssbingotheprompt:
+            newprompt = re.sub("<.*?>","",str(prompt))
+            ssbingoclean.append(newprompt)
+        ssbingofinal = "; ".join(ssbingoclean).lower()
+        print("sweet and short bingo (up to 300 words for two prompts, up to 600 words for four prompts): \033[1m" + ssbingofinal + "\033[0m (" + ssbingoprompt + ")\n")
+        thefile.write("- [[" + ssbingoprompt + "][sweet and short bingo]] (up to 300 words for two prompts, up to 600 words for four prompts): *" + ssbingofinal + "*\n")
+except:
+    pass

-if 16 > today > 7:
-    ssquicky = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+comment+quicky?mode=and&style=light&tag=%21new+challenge,challenge:+comment+quicky"
-    ssquickypage = requests.get(ssquicky)
-    ssquickysoup = BeautifulSoup(ssquickypage.content, "html.parser")
-    ssquickyprompts = ssquickysoup.find_all("h3")
-    ssquickysubsoup = BeautifulSoup(str(ssquickyprompts[0]), "html.parser")
-    ssquickyurl = ssquickysubsoup.find("a")
-    ssquickyprompt = (ssquickyurl["href"])
-    # deliberately not using style=light here so we can get at the comment contents
-    ssquickypromptnew = (ssquickyurl["href"])
-    ssquickypromptpage = requests.get(ssquickypromptnew)
-    ssquickypromptsoup = BeautifulSoup(ssquickypromptpage.content, "html.parser")
-    promptcatch = ".*New Prompts Here"
-    # ssquickytheprompt = ssquickypromptsoup.find_all("h4",string = re.compile(promptcatch))
-    ssquickytheprompt = ssquickypromptsoup.find_all(class_="comment")
-    ssquickycomments = []
-    for comment in ssquickytheprompt:
-        if re.search("New Prompts Here",str(comment)):
-            commenttext = re.findall(r"<div class=\"comment-content\".*?</div>",str(comment))
-            commentprompt = re.sub("<.*?>","",str(commenttext))
-            ssquickycomments.append(str(commentprompt)[2:-2])
-    ssquickycprompt = "; ".join(ssquickycomments)
-    print("sweet and short comment quicky (up to 99 words): \033[1m" + ssquickycprompt.lower() + "\033[0m (" + ssquickyprompt + ")\n")
-    thefile.write("- [[" + ssquickyprompt + "][sweet and short comment quicky]] (up to 99 words): *" + ssquickycprompt.lower() + "*\n")
+try:
+    if 16 > today > 7:
+        ssquicky = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+comment+quicky?mode=and&style=light&tag=%21new+challenge,challenge:+comment+quicky"
+        ssquickypage = requests.get(ssquicky)
+        ssquickysoup = BeautifulSoup(ssquickypage.content, "html.parser")
+        ssquickyprompts = ssquickysoup.find_all("h3")
+        ssquickysubsoup = BeautifulSoup(str(ssquickyprompts[0]), "html.parser")
+        ssquickyurl = ssquickysubsoup.find("a")
+        ssquickyprompt = (ssquickyurl["href"])
+        # deliberately not using style=light here so we can get at the comment contents
+        ssquickypromptnew = (ssquickyurl["href"])
+        ssquickypromptpage = requests.get(ssquickypromptnew)
+        ssquickypromptsoup = BeautifulSoup(ssquickypromptpage.content, "html.parser")
+        promptcatch = ".*New Prompts Here"
+        # ssquickytheprompt = ssquickypromptsoup.find_all("h4",string = re.compile(promptcatch))
+        ssquickytheprompt = ssquickypromptsoup.find_all(class_="comment")
+        ssquickycomments = []
+        for comment in ssquickytheprompt:
+            if re.search("New Prompts Here",str(comment)):
+                commenttext = re.findall(r"<div class=\"comment-content\".*?</div>",str(comment))
+                commentprompt = re.sub("<.*?>","",str(commenttext))
+                ssquickycomments.append(str(commentprompt)[2:-2])
+        ssquickycprompt = "; ".join(ssquickycomments)
+        print("sweet and short comment quicky (up to 99 words): \033[1m" + ssquickycprompt.lower() + "\033[0m (" + ssquickyprompt + ")\n")
+        thefile.write("- [[" + ssquickyprompt + "][sweet and short comment quicky]] (up to 99 words): *" + ssquickycprompt.lower() + "*\n")
+except:
+    pass

ssmonthly = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+10+out+of+20?mode=and&style=light&tag=%21new+challenge,challenge:+10+out+of+20" try:
ssmonthlypage = requests.get(ssmonthly) ssmonthly = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+10+out+of+20?mode=and&style=light&tag=%21new+challenge,challenge:+10+out+of+20"
ssmonthlysoup = BeautifulSoup(ssmonthlypage.content, "html.parser") ssmonthlypage = requests.get(ssmonthly)
ssmonthlyprompts = ssmonthlysoup.find_all("h3") ssmonthlysoup = BeautifulSoup(ssmonthlypage.content, "html.parser")
ssmonthlysubsoup = BeautifulSoup(str(ssmonthlyprompts[0]), "html.parser") ssmonthlyprompts = ssmonthlysoup.find_all("h3")
ssmonthlyurl = ssmonthlysubsoup.find("a") ssmonthlysubsoup = BeautifulSoup(str(ssmonthlyprompts[0]), "html.parser")
ssmonthlyprompt = (ssmonthlyurl["href"]) ssmonthlyurl = ssmonthlysubsoup.find("a")
ssmonthlypromptnew = (ssmonthlyurl["href"] + "?style=light") ssmonthlyprompt = (ssmonthlyurl["href"])
ssmonthlypromptpage = requests.get(ssmonthlypromptnew) ssmonthlypromptnew = (ssmonthlyurl["href"] + "?style=light")
ssmonthlypromptsoup = BeautifulSoup(ssmonthlypromptpage.content, "html.parser") ssmonthlypromptpage = requests.get(ssmonthlypromptnew)
ssmonthlyprompttext = ssmonthlypromptsoup.find(class_="entry-content") ssmonthlypromptsoup = BeautifulSoup(ssmonthlypromptpage.content, "html.parser")
ssmonthlypromptmedian = re.findall(r"<a name=\"cutid1\">.*", str(ssmonthlyprompttext)) ssmonthlyprompttext = ssmonthlypromptsoup.find(class_="entry-content")
ssmonthlypromptstripone = re.sub("<.*?>","",str(ssmonthlypromptmedian)) ssmonthlypromptmedian = re.findall(r"<a name=\"cutid1\">.*", str(ssmonthlyprompttext))
ssmonthlypromptstriptwo = re.sub("([a-z])- ","\\1; ",str(ssmonthlypromptstripone)) ssmonthlypromptstripone = re.sub("<.*?>","",str(ssmonthlypromptmedian))
ssmonthlypromptstripthree = re.sub("- ","",str(ssmonthlypromptstriptwo)) ssmonthlypromptstriptwo = re.sub("([a-z])- ","\\1; ",str(ssmonthlypromptstripone))
ssmonthlypromptfinal = str(ssmonthlypromptstripthree)[2:-2] ssmonthlypromptstripthree = re.sub("- ","",str(ssmonthlypromptstriptwo))
print("sweet and short monthly prompts (up to 300 words [09 prompts], up to 900 words [1019 prompts], any [20 prompts]): \033[1m" + ssmonthlypromptfinal + "\033[0m (" + ssmonthlyprompt + ")\n") ssmonthlypromptfinal = str(ssmonthlypromptstripthree)[2:-2]
thefile.write("- [[" + ssmonthlyprompt + "][sweet and short monthly prompts]] (up to 300 words [09 prompts], up to 900 words [1019 prompts], any [20 prompts]): *" + ssmonthlypromptfinal + "*\n") print("sweet and short monthly prompts (up to 300 words [09 prompts], up to 900 words [1019 prompts], any [20 prompts]): \033[1m" + ssmonthlypromptfinal + "\033[0m (" + ssmonthlyprompt + ")\n")
thefile.write("- [[" + ssmonthlyprompt + "][sweet and short monthly prompts]] (up to 300 words [09 prompts], up to 900 words [1019 prompts], any [20 prompts]): *" + ssmonthlypromptfinal + "*\n")
except:
pass
-if today > 14:
-    sspicture = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+picture+prompt+fun?mode=and&style=light&tag=%21new+challenge,challenge:+picture+prompt+fun"
-    sspicturepage = requests.get(sspicture)
-    sspicturesoup = BeautifulSoup(sspicturepage.content, "html.parser")
-    monthstring = ".*" + month + ".*"
-    sspictureprompts = sspicturesoup.find_all("h3", string=re.compile(monthstring))
-    sspicturesubsoup = BeautifulSoup(str(sspictureprompts[0]), "html.parser")
-    sspictureurl = sspicturesubsoup.find("a")
-    sspictureprompt = (sspictureurl["href"])
-    sspicturepromptnew = (sspictureurl["href"] + "?style=light")
-    sspicturepromptpage = requests.get(sspicturepromptnew)
-    sspicturepromptsoup = BeautifulSoup(sspicturepromptpage.content, "html.parser")
-    sspictureprompttext = sspicturepromptsoup.find("h3")
-    print("sweet and short picture prompts (up to 300 words): \033[1m" + sspictureprompttext.text.lower() + "\033[0m (" + sspictureprompt + ")\n")
-    thefile.write("- [[" + sspictureprompt + "][sweet and short picture prompts]] (up to 300 words): *" + sspictureprompttext.text.lower() + "*\n")
+try:
+    if today > 14:
+        sspicture = "https://sweetandshort.dreamwidth.org/tag/!new+challenge,challenge:+picture+prompt+fun?mode=and&style=light&tag=%21new+challenge,challenge:+picture+prompt+fun"
+        sspicturepage = requests.get(sspicture)
+        sspicturesoup = BeautifulSoup(sspicturepage.content, "html.parser")
+        monthstring = ".*" + month + ".*"
+        sspictureprompts = sspicturesoup.find_all("h3", string=re.compile(monthstring))
+        sspicturesubsoup = BeautifulSoup(str(sspictureprompts[0]), "html.parser")
+        sspictureurl = sspicturesubsoup.find("a")
+        sspictureprompt = (sspictureurl["href"])
+        sspicturepromptnew = (sspictureurl["href"] + "?style=light")
+        sspicturepromptpage = requests.get(sspicturepromptnew)
+        sspicturepromptsoup = BeautifulSoup(sspicturepromptpage.content, "html.parser")
+        sspictureprompttext = sspicturepromptsoup.find("h3")
+        print("sweet and short picture prompts (up to 300 words): \033[1m" + sspictureprompttext.text.lower() + "\033[0m (" + sspictureprompt + ")\n")
+        thefile.write("- [[" + sspictureprompt + "][sweet and short picture prompts]] (up to 300 words): *" + sspictureprompttext.text.lower() + "*\n")
+except:
+    pass

vocab = "https://vocab-drabbles.dreamwidth.org/?style=light&tag=challenge" try:
vocabpage = requests.get(vocab) vocab = "https://vocab-drabbles.dreamwidth.org/?style=light&tag=challenge"
vocabsoup = BeautifulSoup(vocabpage.content, "html.parser") vocabpage = requests.get(vocab)
vocabprompts = vocabsoup.find_all("h3") vocabsoup = BeautifulSoup(vocabpage.content, "html.parser")
vocabsubsoup = BeautifulSoup(str(vocabprompts[0]), "html.parser") vocabprompts = vocabsoup.find_all("h3")
vocaburl = vocabsubsoup.find("a") vocabsubsoup = BeautifulSoup(str(vocabprompts[0]), "html.parser")
vocabprompt = (vocaburl["href"]) vocaburl = vocabsubsoup.find("a")
vocabpromptnew = (vocaburl["href"] + "?style=light") vocabprompt = (vocaburl["href"])
vocabpromptpage = requests.get(vocabpromptnew) vocabpromptnew = (vocaburl["href"] + "?style=light")
vocabpromptsoup = BeautifulSoup(vocabpromptpage.content, "html.parser") vocabpromptpage = requests.get(vocabpromptnew)
vocabprompttext = vocabpromptsoup.find(class_="entry-content") vocabpromptsoup = BeautifulSoup(vocabpromptpage.content, "html.parser")
vocabtheprompt = vocabprompttext.find("strong") vocabprompttext = vocabpromptsoup.find(class_="entry-content")
print("vocab-drabbles (50500 words): \033[1m" + vocabtheprompt.text.lower() + "\033[0m (" + vocabprompt + ")\n") vocabtheprompt = vocabprompttext.find("strong")
thefile.write("- [[" + vocabprompt + "][vocab-drabbles]] (50500 words): *" + vocabtheprompt.text.lower() + "*\n") print("vocab-drabbles (50500 words): \033[1m" + vocabtheprompt.text.lower() + "\033[0m (" + vocabprompt + ")\n")
thefile.write("- [[" + vocabprompt + "][vocab-drabbles]] (50500 words): *" + vocabtheprompt.text.lower() + "*\n")
except:
pass
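
The hourlyprompts section above is the one place the script needs arithmetic rather than a tag: that community numbers its prompts by hour of the month, so the current index is derived from the day and hour before searchstring greps it out of the entry text. A worked check of the calculation, with an invented date:

# e.g. the 5th of the month at 14:00 local time
today = 5
hourstoday = 14
hourselapsed = (today - 1) * 24          # four full days already elapsed: 96 hours
currenthour = hourselapsed + hourstoday  # 96 + 14 = 110, so prompt number 110 is pulled
print(currenthour)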
