diff --git a/.gitignore b/.gitignore
index af29458..13aea44 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,9 @@ __pycache__
# Don't include build and dist data and files
fpcurator.spec
build
-build-all.py
-build.bat
+.venv.bat
+_build.bat
+_finalize.py
dist
# Don't include python venv data
diff --git a/fpcurator.py b/fpcurator.py
index c169a51..162bba8 100644
--- a/fpcurator.py
+++ b/fpcurator.py
@@ -70,7 +70,7 @@
The Download URLs tool downloads and formats a list of files from a list of urls into a collection of organized folders inside the directory specified by "Output Folder". It works in a similar way to cURLsDownloader, but is powered by fpclib. Put all the urls you want to download into the textbox and press "Download".
Here are a list of options:
- - Delete "web.archive.org" - When checked, the downloader will put all urls downloaded from the web archive back into their original domains.
+ - Rename "web.archive.org" - When checked, the downloader will put all urls downloaded from the web archive back into their original domains.
- Keep URLVars - When checked, the downloader will append url vars present on links being downloaded to the end of the html file. This is only necessary when you have two links to the same webpage that generate different html due to the url vars.
- Clear Done URLs - When checked, the downloader will clear any urls in the list when they are downloaded. Errored urls will remain in the list.
- Notify When Done - When checked, the downloader will show a message box when it is done downloading.
@@ -154,8 +154,8 @@
'p1.verylowmetric': 'Has a very low similarity metric (<75%)'
}
-TITLE = "fpcurator v1.2.0"
-ABOUT = "Created by Zach K - v1.2.0"
+TITLE = "fpcurator v1.3.0"
+ABOUT = "Created by Zach K - v1.3.0"
SITES_FOLDER = "sites"
@@ -647,7 +647,7 @@ def __init__(self):
self.replace_https = tk.BooleanVar()
self.replace_https.set(True)
- original = tk.Checkbutton(cframe, bg="white", text='Delete "web.archive.org"', var=self.original)
+ original = tk.Checkbutton(cframe, bg="white", text='Rename "web.archive.org"', var=self.original)
original.pack(side="left")
keep_vars = tk.Checkbutton(cframe, bg="white", text="Keep URLVars", var=self.keep_vars)
keep_vars.pack(side="left", padx=5)
diff --git a/sites/AddictingGames.py b/sites/AddictingGames.py
index bfeffcb..8273293 100644
--- a/sites/AddictingGames.py
+++ b/sites/AddictingGames.py
@@ -25,7 +25,7 @@
}
HTML_EMBED = """
-
+<iframe src="%s"></iframe>
"""
@@ -36,6 +36,10 @@ class AddictingGames(fpclib.Curation):
def parse(self, soup):
self.title = soup.find("h1").text
+ # Get Logo
+ try: self.logo = fpclib.normalize(soup.find("meta", property="og:image")["content"], keep_prot=True)
+ except: pass
+
# Get Developer and set Publisher
try: self.dev = soup.select_one(".author-span > strong").text
except: pass
@@ -50,7 +54,10 @@ def parse(self, soup):
except: pass
# Get Description
- self.desc = "\n\n".join(i.text for i in soup.select(".instru-blk > h5, .instru-blk > p")).strip()
+ desc = "\n\n".join(i.text for i in soup.select(".instru-blk > h5, .instru-blk > p")).strip()
+ if desc.endswith("Game Reviews"):
+ desc = desc[:-12].strip()
+ self.desc = desc
# Get Launch Command
data = DATA_PARSER.search(soup.select(".node-game > script")[1].string)
@@ -68,8 +75,8 @@ def parse(self, soup):
# This is an HTML5 game
self.platform = "HTML5"
self.app = fpclib.BASILISK
- self.if_url = fpclib.normalize(if_url, keep_vars=True)
- self.if_file = fpclib.normalize(if_url)
+ self.if_url = fpclib.normalize(url, keep_vars=True)
+ self.if_file = fpclib.normalize(url)
self.cmd = fpclib.normalize(self.src)
elif data[1] == "markup":
# Markup games are special
@@ -97,7 +104,9 @@ def get_files(self):
# Replace all references to https with http
fpclib.replace(self.if_file[7:], "https:", "http:")
# Create html file for game
- fpclib.write(self.cmd[7:], HTML_EMBED % self.if_file)
+ f = self.cmd[7:]
+ if f[-1] == "/": f += "index.html"
+ fpclib.write(f, HTML_EMBED % self.if_file)
else:
# Flash games are downloaded normally
super().get_files()
diff --git a/sites/FreeArcade.py b/sites/FreeArcade.py
new file mode 100644
index 0000000..dcd2e15
--- /dev/null
+++ b/sites/FreeArcade.py
@@ -0,0 +1,62 @@
+# Free Arcade definition. Only supports Flash and Java.
+from __main__ import fpclib
+from __main__ import re, json
+import traceback
+
+regex = 'freearcade.com'
+
+SWF = re.compile(r"SWFObject\('(.*?)'")
+APPLET = re.compile(r"AppletObject\('(.*?)', '(.*?)', '(.*?)', '(.*?)'")
+
+class FreeArcade(fpclib.Curation):
+ def parse(self, soup):
+ try:
+ self.title = soup.select_one("h2 > span").text
+
+ # Get logo
+ self.logo = "http://assets.freearcade.com/thumbnails/" + self.title.replace(" ", "") + "-sm.gif"
+ # Set publisher
+ self.pub = "FreeArcade"
+ # Get description
+ self.desc = soup.select_one(".game > p").text.replace("\t", "") + \
+ "\n\n" + \
+ soup.select_one(".sidebox > p").text.replace("\t", "")
+
+ # Get platform and launch command
+ data = soup.select_one("#gamecontent > script").string
+
+ swf = SWF.search(data)
+ if swf:
+ # Flash game
+ self.platform = "Flash"
+ self.app = fpclib.FLASH
+ self.cmd = swf[1]
+ else:
+ applet = APPLET.search(data)
+ # Java game
+ self.platform = "Java"
+ self.app = fpclib.JAVA
+ self.cmd = fpclib.normalize(self.src)
+ # Save applet
+ self.applet = str('<applet code="%s" archive="%s" width="%s" height="%s"></applet>' % (applet[1], applet[2], applet[3], applet[4]))  # NOTE(review): applet markup was garbled in the patch; confirm tag/attribute order against AppletObject('code', ...) call sites
+ self.code = applet[1]
+ except Exception as e:
+ traceback.print_exc()
+ raise e
+
+
+ def get_files(self):
+ if self.platform == "Java":
+ cmd = self.cmd[7:]
+ # Create applet html
+ fpclib.write(cmd, self.applet)
+ # Download applet code
+ fpclib.download_all((cmd[:cmd.rfind("/")+1] + self.code,))
+ else:
+ super().get_files()
+
+ def save_image(self, url, file_name):
+ # Surround save image with a try catch loop as some logos cannot be gotten.
+ try:
+ fpclib.download_image(url, name=file_name)
+ except: pass
\ No newline at end of file
diff --git a/sites/Kongregate.py b/sites/Kongregate.py
index 884c187..52d5052 100644
--- a/sites/Kongregate.py
+++ b/sites/Kongregate.py
@@ -97,7 +97,9 @@ def get_files(self):
# Replace all references to https with http
fpclib.replace(self.if_file[7:], "https:", "http:")
# Create file to embed swf
- fpclib.write(self.cmd[7:], HTML_EMBED % (self.title, self.size[1], self.size[2], self.if_file))
+ f = self.cmd[7:]
+ if f[-1] == "/": f += "index.html"
+ fpclib.write(f, HTML_EMBED % (self.title, self.size[1], self.size[2], self.if_file))
else:
# Flash games are downloaded normally
super().get_files()
diff --git a/sites/Miniclip.py b/sites/Miniclip.py
index fd85588..e3665c6 100644
--- a/sites/Miniclip.py
+++ b/sites/Miniclip.py
@@ -5,7 +5,7 @@
regex = 'miniclip.com'
HTML_EMBED = """
-
+<embed src="%s">
"""
@@ -45,7 +45,9 @@ def get_files(self):
# Replace all references to https with http
fpclib.replace(self.if_url[7:], "https:", "http:")
# Create file to embed swf
- fpclib.write(self.cmd[7:], HTML_EMBED % self.if_url))
+ f = self.cmd[7:]
+ if f[-1] == "/": f += "index.html"
+ fpclib.write(f, HTML_EMBED % self.if_url)
def save_image(self, url, file_name):
# Surround save image with a try catch loop as some logos cannot be gotten.