#!/usr/bin/env python
# update-planet
# Downloads feeds from the URLs specified and generates the XHTML files.
import sys, codecs
import XMLParse2 as XMLParse, XMLWriter, CacheHandler
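# XMLParse2, XMLWriter and CacheHandler are local modules that ship with
# this script; sys and codecs come from the standard library.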

# step 1: read in the config and check each object from cache
cache = CacheHandler.CacheHandler()
feeds = []
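# 'feedlist' is a tab-separated file: blog name in the first column, feed
# URL in the last; lines beginning with '#' are comments, e.g.
#
#   Example Blog<TAB>http://example.com/feed.xml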
for feed in open('feedlist').readlines():
    # skip blank lines as well as comment lines
    if feed.strip() and not feed.strip().startswith('#'):
        storage = feed.strip().split('\t')
        name, feed = storage[0], storage[-1]
        feeds.append((name, feed, cache.getBlog(name, feed)))
        ## XXX: might want to consider some good caching code in here
        # feeds.append((name, feed, urllib2.urlopen(feed).read()))
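        # each feeds entry is a (name, URL, CacheObject) tuple; the
        # CacheObject is handed to XMLParse2 in step 2, presumably so
        # unchanged feeds need not be refetched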

# step 2: process each feed
blogs = []
for feed in feeds:
    # XMLParse2 takes two parameters, a URL and a CacheObject
    blog = XMLParse.XMLParse(feed[1], feed[2]).parse()
    blog.blogTitle = feed[0]
    blog.feedURL = feed[1]
    blogs.append(blog)
    # write the cache back down to disk
    # (storeBlog() is an assumed name for CacheHandler's store method)
    cache.storeBlog(blog)

# step 3: write feed to disk
try:
    codecs.open('planet.html', 'wb', 'utf-8').write(
        XMLWriter.XMLWriter(XMLWriter.XHTMLWriter, blogs).write())
except Exception:
    sys.stderr.write('DEBUG: update-planet: could not write planet.html, aborting\n')
    sys.exit(1)
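# run from the directory containing 'feedlist'; planet.html is written to
# that same directory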