# Downloads feeds from the URLs specified and generates the XHTML files.
#
# Reads 'feedlist' (one feed per line: "<name>\t...\t<url>", '#' comments
# and blank lines ignored), downloads each feed, parses it with XMLParse,
# and writes the combined result to planet.html via XMLWriter.
# NOTE: Python 2 script (urllib2).

import sys, urllib2, codecs
import XMLParse, XMLWriter

# step 1: read in the config and download the feeds
feeds = []
for feed in open('feedlist').readlines():
    line = feed.strip()
    # skip blank lines too -- line[0] on '' would raise IndexError
    if line and line[0] != '#':
        storage = line.split('\t')
        # first column is the display name, last column is the URL;
        # intermediate columns (if any) are ignored
        name, feed = storage[0], storage[-1]
        sys.stdout.write('Downloading feed "%s" from %s... ' % (name, feed))
        # XXX: might want to consider some good caching code in here
        try:
            feeds.append((name, feed, urllib2.urlopen(feed).read()))
            sys.stdout.write('done.\n')
        except Exception:
            # best-effort: a dead feed shouldn't kill the whole run
            sys.stdout.write('failed.\n')

# step 2: process each feed
blogs = []
for feed in feeds:
    # feed is (name, url, raw_xml); parse the raw XML payload
    xml = XMLParse.XMLParse(feed[2]).parse()
    # NOTE(review): assumes parse() returns a single blog object -- confirm
    # against the XMLParse API
    blog = xml
    blog.blogTitle = feed[0]
    blogs.append(blog)

# step 3: write feed to disk
try:
    codecs.open('planet.html', 'wb', 'utf-8').write(XMLWriter.XMLWriter(XMLWriter.XHTMLWriter, blogs).write())
except Exception:
    sys.stderr.write('DEBUG: update-planet: could not write planet.html, aborting\n')