# Parse arbitrary XML news streams into an object type
# understandable by Planet UCC.
# Now uses feedparser, which handles nine RSS variants as well as Atom.
13 sys.path.insert(0, 'extra')
34 def __init__(self, URL, blogObject):
36 self.blogObject = blogObject
39 "Return a single Blog object"
41 if self.blogObject and self.blogObject.cache:
42 sys.stdout.write('Downloading feed %s...' % self.feedURL)
44 data = feedparser.parse(self.feedURL, self.blogObject.cache.etag, self.blogObject.cache.date)
45 sys.stdout.write('done.\n')
47 sys.stdout.write('failed.\n')
49 # check to see what we got returned
50 if data['items'] == [] and data['channel'] == {}:
51 sys.stdout.write('Feed %s is upto date.\n' % self.feedURL)
52 return self.blogObject
54 sys.stdout.write('Downloading feed from %s (no cache)...' % self.feedURL)
56 data = feedparser.parse(self.feedURL)
57 sys.stdout.write('done.\n')
59 sys.stdout.write('failed.\n')
63 cache = CacheHandler.CacheObject()
64 cache.etag = data['etag']
65 cache.date = data['modified']
69 # parse the return of data into a blog
70 if data['channel'].has_key('title'):
71 item.blogTitle = data['channel']['title']
73 item.blogTitle = '(Unknown)'
74 if data['channel'].has_key('link'):
75 item.blogURL = data['channel']['link']
77 item.blogURL = self.feedURL
78 for entry in data['items']:
80 if entry.has_key('title'):
81 blogItem.itemTitle = entry['title']
83 blogItem.itemTitle = '(Untitled)'
84 if entry.has_key('link'):
85 blogItem.itemURL = entry['link']
87 blogItem.itemURL = item.blogURL
88 if entry.has_key('date_parsed'):
89 blogItem.itemDate = time.mktime(entry['date_parsed']) + 28800
92 if entry.has_key('description'):
93 blogItem.contents = entry['description']
95 blogItem.contents = '(entry could not be retrieved)'
96 item.items.append(blogItem)