Skip to content

Commit

Permalink
Update to 1.3.0
Browse files Browse the repository at this point in the history
  • Loading branch information
mathgeniuszach committed Mar 20, 2021
1 parent 39763f1 commit 02e3619
Show file tree
Hide file tree
Showing 6 changed files with 90 additions and 14 deletions.
5 changes: 3 additions & 2 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,9 @@ __pycache__
# Don't include build and dist data and files
fpcurator.spec
build
build-all.py
build.bat
.venv.bat
_build.bat
_finalize.py
dist

# Don't include python venv data
Expand Down
8 changes: 4 additions & 4 deletions fpcurator.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@
<p>The Download URLs tool downloads and formats a list of files from a list of urls into a collection of organized folders inside the directory specified by "Output Folder". It works in a similar way to cURLsDownloader, but is powered by fpclib. Put all the urls you want to download into the textbox and press "Download".<br>
&nbsp;<br>Here are a list of options:
<ul>
<li><b>Delete "web.archive.org"</b> - When checked, the downloader will put all urls downloaded from the web archive back into their original domains.</li>
<li><b>Rename "web.archive.org"</b> - When checked, the downloader will put all urls downloaded from the web archive back into their original domains.</li>
<li><b>Keep URLVars</b> - When checked, the downloader will append url vars present on links being downloaded to the end of the html file. This is only necessary when you have two links to the same webpage that generate different html due to the url vars.</li>
<li><b>Clear Done URLs</b> - When checked, the downloader will clear any urls in the list when they are downloaded. Errored urls will remain in the list.</li>
<li><b>Notify When Done</b> - When checked, the downloader will show a message box when it is done downloading.</li>
Expand Down Expand Up @@ -154,8 +154,8 @@
'p1.verylowmetric': 'Has a very low similarity metric (<75%)'
}

TITLE = "fpcurator v1.2.0"
ABOUT = "Created by Zach K - v1.2.0"
TITLE = "fpcurator v1.3.0"
ABOUT = "Created by Zach K - v1.3.0"

SITES_FOLDER = "sites"

Expand Down Expand Up @@ -647,7 +647,7 @@ def __init__(self):
self.replace_https = tk.BooleanVar()
self.replace_https.set(True)

original = tk.Checkbutton(cframe, bg="white", text='Delete "web.archive.org"', var=self.original)
original = tk.Checkbutton(cframe, bg="white", text='Rename "web.archive.org"', var=self.original)
original.pack(side="left")
keep_vars = tk.Checkbutton(cframe, bg="white", text="Keep URLVars", var=self.keep_vars)
keep_vars.pack(side="left", padx=5)
Expand Down
19 changes: 14 additions & 5 deletions sites/AddictingGames.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@
}

HTML_EMBED = """<body>
<iframe width="100%" height="100%" src="%s"></iframe>
<iframe width="100%%" height="100%%" src="%s"></iframe>
</body>
"""

Expand All @@ -36,6 +36,10 @@ class AddictingGames(fpclib.Curation):
def parse(self, soup):
self.title = soup.find("h1").text

# Get Logo
try: self.logo = fpclib.normalize(soup.find("meta", property="og:image")["content"], keep_prot=True)
except: pass

# Get Developer and set Publisher
try: self.dev = soup.select_one(".author-span > strong").text
except: pass
Expand All @@ -50,7 +54,10 @@ def parse(self, soup):
except: pass

# Get Description
self.desc = "\n\n".join(i.text for i in soup.select(".instru-blk > h5, .instru-blk > p")).strip()
desc = "\n\n".join(i.text for i in soup.select(".instru-blk > h5, .instru-blk > p")).strip()
if desc.endswith("Game Reviews"):
desc = desc[:-12].strip()
self.desc

# Get Launch Command
data = DATA_PARSER.search(soup.select(".node-game > script")[1].string)
Expand All @@ -68,8 +75,8 @@ def parse(self, soup):
# This is an HTML5 game
self.platform = "HTML5"
self.app = fpclib.BASILISK
self.if_url = fpclib.normalize(if_url, keep_vars=True)
self.if_file = fpclib.normalize(if_url)
self.if_url = fpclib.normalize(url, keep_vars=True)
self.if_file = fpclib.normalize(url)
self.cmd = fpclib.normalize(self.src)
elif data[1] == "markup":
# Markup games are special
Expand Down Expand Up @@ -97,7 +104,9 @@ def get_files(self):
# Replace all references to https with http
fpclib.replace(self.if_file[7:], "https:", "http:")
# Create html file for game
fpclib.write(self.cmd[7:], HTML_EMBED % self.if_file)
f = self.cmd[7:]
if f[-1] == "/": f += "index.html"
fpclib.write(f, HTML_EMBED % self.if_file)
else:
# Flash games are downloaded normally
super().get_files()
Expand Down
62 changes: 62 additions & 0 deletions sites/FreeArcade.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
# Free Arcade definition. Only supports Flash and Java.
from __main__ import fpclib
from __main__ import re, json
import traceback

# Domain this definition handles (matched against curation urls).
regex = 'freearcade.com'

# Raw strings so `\(` is a literal escaped paren, not an invalid string
# escape (non-raw "\(" is deprecated and will become a SyntaxError).
# SWF captures the swf url from: SWFObject('<url>' ...
SWF = re.compile(r"SWFObject\('(.*?)'")
# APPLET captures code, name, width, height from:
# AppletObject('<code>', '<name>', '<w>', '<h>' ...
APPLET = re.compile(r"AppletObject\('(.*?)', '(.*?)', '(.*?)', '(.*?)'")

class FreeArcade(fpclib.Curation):
    """Curation definition for games hosted on freearcade.com.

    Supports Flash games (launched directly from their swf url) and Java
    games (launched through a generated html file containing the original
    <applet> tag).
    """

    def parse(self, soup):
        """Extract title, logo, publisher, description, platform and launch
        command from a parsed freearcade.com game page.

        Raises whatever error occurred during parsing after printing its
        traceback, so the curator can mark the url as errored.
        """
        try:
            self.title = soup.select_one("h2 > span").text

            # Get logo; thumbnails appear to be keyed by the title with
            # spaces removed — TODO confirm this holds for all games.
            self.logo = "http://assets.freearcade.com/thumbnails/" + self.title.replace(" ", "") + "-sm.gif"
            # Set publisher
            self.pub = "FreeArcade"
            # Get description: main game blurb plus the sidebar text.
            self.desc = soup.select_one(".game > p").text.replace("\t", "") + \
                "\n\n" + \
                soup.select_one(".sidebox > p").text.replace("\t", "")

            # Get platform and launch command from the inline embed script.
            data = soup.select_one("#gamecontent > script").string

            swf = SWF.search(data)
            if swf:
                # Flash game: the swf url is the launch command.
                self.platform = "Flash"
                self.app = fpclib.FLASH
                self.cmd = swf[1]
            else:
                # Java game: rebuild the <applet> tag for get_files().
                # If APPLET does not match either, applet is None and the
                # subscript below raises, which is caught and re-raised.
                applet = APPLET.search(data)
                self.platform = "Java"
                self.app = fpclib.JAVA
                self.cmd = fpclib.normalize(self.src)
                # Save applet html and its code attribute for get_files()
                self.applet = '<applet code="%s" name="%s" width="%s" height="%s"></applet>' % (applet[1], applet[2], applet[3], applet[4])
                self.code = applet[1]
        except Exception:
            # Print the traceback for easier debugging, then bare-raise so
            # the original exception and traceback propagate unchanged.
            traceback.print_exc()
            raise


    def get_files(self):
        """Download this game's files.

        Java games get a generated applet html file plus the applet's code
        file (downloaded from the same directory as the page); everything
        else is downloaded by the base class.
        """
        if self.platform == "Java":
            # Strip the "http://" prefix to get a local file path.
            cmd = self.cmd[7:]
            # Create applet html
            fpclib.write(cmd, self.applet)
            # Download the applet code file next to the page's directory.
            fpclib.download_all((cmd[:cmd.rfind("/")+1] + self.code,))
        else:
            super().get_files()

    def save_image(self, url, file_name):
        """Download the logo, ignoring failures.

        Best-effort by design: some logos cannot be gotten, and a missing
        logo should not fail the whole curation. Narrowed from a bare
        ``except:`` so KeyboardInterrupt/SystemExit still propagate.
        """
        try:
            fpclib.download_image(url, name=file_name)
        except Exception:
            pass
4 changes: 3 additions & 1 deletion sites/Kongregate.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,9 @@ def get_files(self):
# Replace all references to https with http
fpclib.replace(self.if_file[7:], "https:", "http:")
# Create file to embed swf
fpclib.write(self.cmd[7:], HTML_EMBED % (self.title, self.size[1], self.size[2], self.if_file))
f = self.cmd[7:]
if f[-1] == "/": f += "index.html"
fpclib.write(f, HTML_EMBED % (self.title, self.size[1], self.size[2], self.if_file))
else:
# Flash games are downloaded normally
super().get_files()
Expand Down
6 changes: 4 additions & 2 deletions sites/Miniclip.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
regex = 'miniclip.com'

HTML_EMBED = """<body>
<iframe width="100%" height="100%" src="%s"></iframe>
<iframe width="100%%" height="100%%" src="%s"></iframe>
</body>
"""

Expand Down Expand Up @@ -45,7 +45,9 @@ def get_files(self):
# Replace all references to https with http
fpclib.replace(self.if_url[7:], "https:", "http:")
# Create file to embed swf
fpclib.write(self.cmd[7:], HTML_EMBED % self.if_url))
f = self.cmd[7:]
if f[-1] == "/": f += "index.html"
fpclib.write(f, HTML_EMBED % self.if_url)

def save_image(self, url, file_name):
# Surround save image with a try catch loop as some logos cannot be gotten.
Expand Down

0 comments on commit 02e3619

Please sign in to comment.