#!/usr/bin/env python
"""Build planet.html by aggregating the feeds listed in 'feedlist'.

Pipeline:
  1. read the feed list and fetch each feed's content via the on-disk cache
  2. parse each feed into a blog object and write the cache back to disk
  3. render all blogs to planet.html as XHTML
"""
import sys
import codecs

import XMLParse2 as XMLParse
import XMLWriter
import CacheHandler

# step 1: read in the config and check each object from cache
cache = CacheHandler.CacheHandler()
feeds = []

with open('feedlist') as feedlist:
    for line in feedlist:
        line = line.strip()
        # skip blank lines and '#' comments (a blank line previously
        # crashed on line[0])
        if not line or line.startswith('#'):
            continue
        # tab-separated: first field is the display name, last is the URL
        fields = line.split('\t')
        name, url = fields[0], fields[-1]
        feeds.append((name, url, cache.getBlog(name, url)))

# step 2: process each feed
blogs = []
for name, url, cache_obj in feeds:
    # XMLParse2 takes two parameters, a URL and a CacheObject
    blog = XMLParse.XMLParse(url, cache_obj).parse()
    blog.blogTitle = name
    blog.feedURL = url
    blogs.append(blog)
    # write the cache back down to disk
    cache.storeBlog(blog)

# step 3: write feed to disk
try:
    out = codecs.open('planet.html', 'wb', 'utf-8')
    try:
        out.write(XMLWriter.XMLWriter(XMLWriter.XHTMLWriter, blogs).write())
    finally:
        out.close()
except Exception:
    sys.stderr.write('DEBUG: update-planet: could not write planet.html, aborting\n')
    raise