# Downloads feeds from the URLs specified and generates the XHTML files.
import sys, codecs
import XMLParse2 as XMLParse, XMLWriter, CacheHandler
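# Roles inferred from usage below: XMLParse2 fetches and parses one feed
# into a Blog object, CacheHandler caches feed data between runs, and
# XMLWriter renders a list of Blog objects as XHTML.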
# step 1: read in the config and check each object from cache
cache = CacheHandler.CacheHandler()
feeds = []
for feed in open('feedlist').readlines():
    # skip blank lines and '#' comment lines
    if feed.strip() and feed.strip()[0] != '#':
        storage = feed.strip().split('\t')
        name, feed = storage[0], storage[-1]
        try:
            feeds.append((name, feed, cache.getBlog(name, feed)))
        except:
            sys.stderr.write('DEBUG: update-planet: something went wrong retrieving feed\n')
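# Example 'feedlist' entry, inferred from the parsing above (fields are
# tab-separated; only the first and last fields are used, so any middle
# columns are ignored; '<TAB>' stands for a literal tab character):
#
#   Example Blog<TAB>http://example.org/feed/atom.xml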
# step 2: process each feed
blogs = []
for feed in feeds:
    # XMLParse2 takes two parameters, a URL and a CacheObject
    blog = XMLParse.XMLParse(feed[1], feed[2]).parse()
    if blog:
        blog.blogTitle = feed[0]
        blog.feedURL = feed[1]
        blogs.append(blog)
        # write the cache back down to disk
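        # (storeBlog() is an assumption: a counterpart to getBlog() above;
        # this excerpt does not show CacheHandler's actual write API)
        cache.storeBlog(blog)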
# step 3: write feed to disk
try:
    codecs.open('planet.html', 'wb', 'utf-8').write(
        XMLWriter.XMLWriter(XMLWriter.XHTMLWriter, blogs).write())
except:
    sys.stderr.write('DEBUG: update-planet: could not write planet.html, aborting\n')
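# Usage sketch (assumptions: the script is saved as 'update-planet', the
# name it uses in its own debug messages, and 'feedlist' sits in the
# working directory):
#
#   $ python update-planet
#
# Each run overwrites planet.html with the freshly generated XHTML.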