# THE SOFTWARE.
import sys
-import re
import urlparse
import os
+
class InvalidICS(Exception):
    '''Raised when input does not look like an ICS calendar at all.'''
    pass


class notJoined(Exception):
    '''Presumably raised when content lines have not been unfolded yet.
    NOTE(review): not raised anywhere in the visible code -- confirm use.'''
    pass


class IncompleteICS(InvalidICS):
    '''Raised when a calendar is missing its closing END:VCALENDAR.'''
    pass
-icalEntry = re.compile('^[A-Z\-]+:.*')
def lineJoiner(oldcal):
    '''Unfold an ICS calendar into one logical content line per element.

    oldcal -- calendar text as a single CRLF-delimited string, or a list of
              its lines (which is joined back into a string first).

    Returns a list of unfolded lines (RFC 5545 folds continuation lines
    with CRLF followed by a space or horizontal tab; those are removed).

    Raises InvalidICS if the text does not open with BEGIN:VCALENDAR and
    IncompleteICS if it does not finish with END:VCALENDAR.
    '''
    # Bug fix: normalise list input *before* the sanity checks -- the
    # original sliced the list itself, so valid list input always raised.
    if isinstance(oldcal, list):
        oldcal = '\r\n'.join(oldcal)

    if oldcal[0:15] != 'BEGIN:VCALENDAR':
        raise InvalidICS("Does not appear to be a valid ICS file")

    # Bug fix: the original tested oldcal[-15:-1], which drops the final
    # character and misreported files ending exactly in 'END:VCALENDAR'
    # (no trailing CRLF) as incomplete.
    if not oldcal.rstrip().endswith('END:VCALENDAR'):
        raise IncompleteICS("File appears to be incomplete")

    # Remove RFC 5545 line folds (CRLF + space or CRLF + tab).
    oldcal = oldcal.replace('\r\n ', '').replace('\r\n\t', '')
    return oldcal.strip().split('\r\n')
def lineFolder(oldcal, length=75):
    # NOTE(review): the function body appears to be elided in this (diff)
    # view -- `cal` is never assigned in the visible lines.  Presumably this
    # re-folds lines longer than `length` characters per RFC 5545; confirm
    # against the full file before editing.
    return cal
+
def splitFields(cal):
    '''Takes a list of lines in a calendar file and returns a list of
    (key, value) tuples, splitting each line on its first ':' only so
    values may themselves contain colons.

    Raises InvalidICS for any line with no ':' separator.
    '''
    ical = [tuple(line.split(':', 1)) for line in cal]

    # Check that we got 2 items on every line.
    for line in ical:
        if len(line) != 2:
            # Bug fix: format the tuple itself -- the original's
            # '%s' % (line) raised TypeError for multi-element tuples
            # instead of producing the error message.
            raise InvalidICS("Didn't find a content key on: %s" % (line,))

    return ical
+
+
def joinFields(ical):
    '''Inverse of splitFields: takes a list of (key, value) tuples that make
    up a calendar file and returns the corresponding 'KEY:VALUE' lines.'''
    lines = []
    for pair in ical:
        lines.append(':'.join(pair))
    return lines
+
+
def getContent(url='', stdin=False):
    '''Generic content retriever, DO NOT use this function in a CGI script as
    it can read from the local disk (which you probably don't want it to).

    Returns the raw text from an http(s) URL, from stdin, or from a local
    file path; any other URL scheme is handed to urllib2.  On I/O failure
    the error is written to stderr and the process exits with status 1.
    '''

    # Special case, if this is a HTTP url, return the data from it using
    # the HTTP functions which attempt to play a bit nicer.
    parsedURL = urlparse.urlparse(url)
    if 'http' in parsedURL[0]:
        return getHTTPContent(url)

    if stdin:
        return sys.stdin.read()

    # No URL scheme at all: treat the argument as a local file path.
    if not parsedURL[0]:
        try:
            # Bug fix: 'with' closes the handle -- the original leaked it.
            with open(os.path.abspath(url), 'r') as f:
                return f.read()
        except (IOError, OSError) as e:
            sys.stderr.write('%s\n' % e)
            sys.exit(1)

    # If we've survived, use python's generic URL opening library to handle
    # whatever scheme remains (ftp:// etc.).
    import urllib2
    try:
        res = urllib2.urlopen(url)
        try:
            return res.read()
        finally:
            # Bug fix: close the response even if read() fails.
            res.close()
    except (urllib2.URLError, OSError) as e:
        sys.stderr.write('%s\n' % e)
        sys.exit(1)
+
+
def getHTTPContent(url='', cache='.httplib2-cache'):
    '''This function attempts to play nice when retrieving content from HTTP
    services.  It's what you should use in a CGI script.  Uses httplib2
    (with an on-disk cache directory) when available, falling back to
    urllib2 otherwise; on failure it writes the error to stderr and exits.

    NOTE(review): the original docstring claimed the first 20 bytes are
    sniffed to confirm this is an ICS file, but no such check is
    implemented here -- confirm whether that is still intended.
    '''

    try:
        import httplib2
    except ImportError:
        import urllib2

    if not url:
        return ''

    if 'httplib2' in sys.modules:
        # Bug fix: honour the caller-supplied cache directory -- the
        # original hard-coded '.httplib2-cache' and ignored the parameter.
        try:
            h = httplib2.Http(cache)
        except OSError:
            h = httplib2.Http()
        try:
            # request() returns (response, content); we only want the body.
            return h.request(url)[1]
        except ValueError as e:
            sys.stderr.write('%s\n' % e)
            sys.exit(1)

    # Bug fix: the original executed `return content` even when httplib2 was
    # unavailable, raising NameError instead of reaching this fallback.
    try:
        return urllib2.urlopen(url).read()
    except (urllib2.URLError, OSError) as e:
        sys.stderr.write('%s\n' % e)
        sys.exit(1)

    return ''
+
+
def generateRules():
    '''Attempts to load a series of rules into a list.

    Returns every callable attribute of the optional `parserrules` module,
    or an empty list when that module cannot be imported.
    '''
    try:
        import parserrules
    except ImportError:
        return []

    members = (getattr(parserrules, name) for name in dir(parserrules))
    return [member for member in members if callable(member)]
+
+
def applyRules(ical, rules=None, verbose=False):
    '''Runs a series of rules on the lines in ical and mangles its output.

    ical    -- list of (key, value) tuples as produced by splitFields()
    rules   -- callables invoked as rule(key, value); each may return a new
               (key, value) pair, a falsy value other than None to drop the
               line, or anything else to leave the line unchanged
    verbose -- when True, print each rule's docstring before applying it

    Returns the transformed list of (key, value) tuples.
    '''
    # Bug fix: avoid the shared mutable default argument.
    if rules is None:
        rules = []

    for rule in rules:
        output = []
        if rule.__doc__ and verbose:
            print(rule.__doc__)
        for line in ical:
            try:
                out = rule(line[0], line[1])
            except TypeError as e:
                # Rule could not handle this line; keep it untouched.
                output.append(line)
                print(e)
                continue

            # Drop lines that are boolean False (but keep None -- that
            # means "no opinion", handled below).
            if not out and out is not None:
                continue

            # If the rule returned a 2-item tuple or list, accept it;
            # otherwise pay no attention to the man behind the curtain.
            # (Bug fix: the original `a or b and c` precedence accepted
            # tuples of any length, contradicting this intent.)
            try:
                if (tuple(out) == out or list(out) == out) and len(out) == 2:
                    output.append(tuple(out))
                else:
                    output.append(line)
            except TypeError:
                # out is not iterable (e.g. None): keep the original line.
                output.append(line)

        ical = output

    return ical
+
# NOTE(review): this region is diff residue.  The '-' lines below are the
# old inline URL-fetching logic, superseded by the '+' lines that delegate
# to getContent()/lineJoiner()/splitFields()/applyRules()/lineFolder().
# The body of the `if __name__ == '__main__':` branch (where `options` is
# presumably defined, e.g. by optparse) is elided in this view -- confirm
# against the full file before editing.
if __name__ == '__main__':
else:
	url = ''
- # Work out what url parsers we're going to need based on what the user
- # gave us on the command line - we do like files after all
- parsedURL = urlparse.urlparse(url)
- http = 'http' in parsedURL[0]
-
- if not parsedURL[0]: u = False
- else: u = True
-
- if not options.stdin and http:
- try:
- import httplib2
- except ImportError:
- import urllib2
-
- # Try and play nice with HTTP servers unless something goes wrong. We don't
- # really care about this cache (A lot of ics files seem to be generated with
- # php which hates caching with a passion).
- h = False
- if 'httplib2' in sys.modules:
- try: h = httplib2.Http('.httplib2-cache')
- except OSError: h = httplib2.Http()
-
- # Load urllib2 if this is not a stdin
- if not options.stdin and (not http or not 'httplib2' in sys.modules):
- import urllib2
-
- try:
- content = u and (h and h.request(url)[1] or urllib2.urlopen(url).read())
- except (ValueError, urllib2.URLError), e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
-
- if not u and not options.stdin:
- try: content = open(os.path.abspath(url),'r').read()
- except (IOError, OSError), e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
-
- if options.stdin:
- content = sys.stdin.read()
+ content = getContent(url, options.stdin)
+ cal = lineJoiner(content)
+ ical = applyRules(splitFields(cal), generateRules())
+ output = lineFolder(joinFields(ical))
+ print output