Initial Upload
author     davyd <davyd>    Sat, 7 Feb 2004 06:57:46 +0000 (06:57 +0000)
committer  davyd <davyd>    Sat, 7 Feb 2004 06:57:46 +0000 (06:57 +0000)
XMLWriter.py
feedlist [new file with mode: 0644]
update-planet [new file with mode: 0755]

diff --git a/XMLWriter.py b/XMLWriter.py
index 1311fb6..267789c 100644
--- a/XMLWriter.py
+++ b/XMLWriter.py
@@ -60,7 +60,8 @@ class Planet:
                                if lastItem == None:
                                        break
                                # this checks to see if it's a new day
-                               if time.localtime(lastItem.itemDate) != lastDate:
+                               if time.localtime(lastItem.itemDate)[6] != lastDate:
+                                       lastDate        = time.localtime(lastItem.itemDate)[6]
                                        workingDate     = PlanetDate(lastItem.itemDate)
                                        self.dates.append(workingDate)
                                # append the item to the current date
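
A note on the hunk above: index 6 of the time.struct_time tuple is tm_wday, the day of the week, so this "new day" check is really keyed on weekday, and two items falling on the same weekday in different weeks would not start a new date heading. A minimal sketch of the fields involved, with a hypothetical calendar-date key shown only as an alternative, not as part of this commit:

import time

t = time.localtime(time.time())
# struct_time fields by index:
#   t[2] -> tm_mday (day of month)
#   t[6] -> tm_wday (weekday, Monday == 0)  <- the field compared in the hunk above
#   t[7] -> tm_yday (day of year)
# hypothetical stricter key: the full calendar date, which cannot repeat across weeks
day_key = (t[0], t[1], t[2])    # (tm_year, tm_mon, tm_mday)
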
diff --git a/feedlist b/feedlist
new file mode 100644
index 0000000..20658ca
--- /dev/null
+++ b/feedlist
@@ -0,0 +1,9 @@
+# feedlist
+#
+# read in by update-planet, to generate planet.html
+# name                 url
+#
+Davyd Madeley  http://www.livejournal.com/users/davyd/data/rss
+Ian McKellar   http://ian.mckellar.org/wp-rss2.php
+Grahame Bowland        http://www.livejournal.com/users/grahame/data/rss
+Adam Wright            http://www.livejournal.com/users/hipikat/data/rss
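
For reference, feedlist lines are tab-separated, with the feed name in the first column and the URL in the last, and lines starting with # treated as comments. A minimal sketch of reading the file, mirroring what update-planet below does; the guard for blank lines is an addition here for illustration, not something the script has:

feeds = []
for line in open('feedlist').readlines():
    line = line.strip()
    if not line or line.startswith('#'):
        continue                        # skip blank lines and comment lines
    fields = line.split('\t')
    name, url = fields[0], fields[-1]   # name first, URL last
    feeds.append((name, url))
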
diff --git a/update-planet b/update-planet
new file mode 100755
index 0000000..c6f04dd
--- /dev/null
+++ b/update-planet
@@ -0,0 +1,40 @@
+#!/usr/bin/python
+#
+# update-planet
+#
+# Downloads feeds from the URLs specified and generates the XHTML files.
+#
+# (c) 2004, Davyd Madeley <[email protected]>
+#
+
+import sys, urllib2, codecs
+import XMLParse, XMLWriter
+
+# step 1: read in the config and download the feeds
+feeds  = []
+for feed in open('feedlist').readlines():
+       if feed.strip()[0] != '#':
+               storage         = feed.strip().split('\t')
+               name, feed      = storage[0], storage[-1]
+               sys.stdout.write('Downloading feed "%s" from %s... ' % (name, feed))
+               try:
+                       # XXX: might want to consider some good caching code in here
+                       feeds.append((name, feed, urllib2.urlopen(feed).read()))
+                       sys.stdout.write('done.\n')
+               except:
+                       sys.stdout.write('failed.\n')
+
+# step 2: process each feed
+blogs  = []
+for feed in feeds:
+       xml     = XMLParse.XMLParse(feed[2]).parse()
+       for blog in xml:
+               blog.blogTitle  = feed[0]
+       blogs   += xml
+
+# step 3: write feed to disk
+try:
+       codecs.open('planet.html', 'wb', 'utf-8').write(XMLWriter.XMLWriter(XMLWriter.XHTMLWriter, blogs).write())
+except:
+       sys.stderr.write('DEBUG: update-planet: could not write planet.html, aborting\n')
+       raise
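
The XXX comment in step 1 asks for caching. A minimal sketch of one possible approach using a conditional GET with urllib2; the fetch_feed helper, the cache directory, and the If-Modified-Since handling are assumptions for illustration only, not part of this commit:

import os, time, urllib2

CACHE_DIR = 'cache'    # hypothetical location for cached feed bodies

def fetch_feed(name, url):
    # Fetch a feed, reusing the cached copy when the server answers 304.
    path = os.path.join(CACHE_DIR, name.replace(' ', '_'))
    request = urllib2.Request(url)
    if os.path.exists(path):
        mtime = os.path.getmtime(path)
        request.add_header('If-Modified-Since',
                           time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime(mtime)))
    try:
        data = urllib2.urlopen(request).read()
    except urllib2.HTTPError, e:
        if e.code == 304:
            return open(path, 'rb').read()    # not modified: serve from cache
        raise
    if not os.path.isdir(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    open(path, 'wb').write(data)
    return data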
