#!/usr/bin/python
#
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
import sys
import urlparse
import os
-
-
-class InvalidICS(Exception): pass
-class notJoined(Exception): pass
-class IncompleteICS(InvalidICS): pass
-
-
-def lineJoiner(oldcal):
- '''Takes a string containing a calendar and returns an array of its lines'''
-
- if not oldcal[0:15] == 'BEGIN:VCALENDAR':
- raise InvalidICS, "Does not appear to be a valid ICS file"
-
- if not 'END:VCALENDAR' in oldcal[-15:-1]:
- raise IncompleteICS, "File appears to be incomplete"
-
- if list(oldcal) == oldcal:
- oldcal = '\r\n'.join(oldcal)
-
- oldcal = oldcal.replace('\r\n ', '').replace('\r\n\t','')
- return oldcal.strip().split('\r\n')
-
-
-def lineFolder(oldcal, length=75):
- '''Folds content lines to a specified length, returns a list'''
-
- if length > 75:
- sys.stderr.write('WARN: lines > 75 octets are not RFC compliant\n')
-
- cal = []
- sl = length - 1
-
- for line in oldcal:
- # Line fits inside length, do nothing
- if len(line.rstrip()) <= length:
- cal.append(line)
- else:
- brokenline = [line[0:length] + '\r\n']
- ll = length
- while ll < len(line.rstrip('\r\n')) + 1:
- brokenline.append(' ' + line[ll:sl+ll].rstrip('\r\n') + '\r\n')
- ll += sl
- cal += brokenline
-
- return cal
+import vobject
+from cgi import parse_header
def getContent(url='',stdin=False):
it can read from the local disk (which you probably don't want it to).
'''
+ encoding = '' # If we don't populate this, the script will assume UTF-8
+
# Special case, if this is a HTTP url, return the data from it using
# the HTTP functions which attempt to play a bit nicer.
parsedURL = urlparse.urlparse(url)
if stdin:
content = sys.stdin.read()
- return content
+ return (content, encoding)
- if not parsedURL[0]:
- try: content = open(os.path.abspath(url),'r').read()
- except (IOError, OSError), e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
- return content
+ if not parsedURL[0]: url = 'file://' + os.path.abspath(url)
# If we've survived, use python's generic URL opening library to handle it
import urllib2
try:
res = urllib2.urlopen(url)
content = res.read()
+ ct = res.info().getplist()
res.close()
- except (urllib2.URLError, ValueError), e:
+ except (urllib2.URLError, OSError), e:
sys.stderr.write('%s\n'%e)
sys.exit(1)
- return content
+
+ for param in ct:
+ if 'charset' in param:
+ encoding = param.split('=')[1]
+ break
+
+ return (content, encoding)
def getHTTPContent(url='',cache='.httplib2-cache'):
'''This function attempts to play nice when retrieving content from HTTP
- services. It's what you should use in a CGI script. It will (by default)
- slurp the first 20 bytes of the file and check that we are indeed looking
- at an ICS file before going for broke.'''
+ services. It's what you should use in a CGI script.'''
try:
import httplib2
except ImportError:
import urllib2
- if not url: return ''
+ if not url: return ('','')
+
+ if not 'http' in urlparse.urlparse(url)[0]: return ('','')
if 'httplib2' in sys.modules:
try: h = httplib2.Http('.httplib2-cache')
except OSError: h = httplib2.Http()
else: h = False
- try:
- if h: content = h.request(url)[1]
- return content
- except ValueError, e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
+ if h:
+ try:
+ req = h.request(url)
+ except ValueError, e:
+ sys.stderr.write('%s\n'%e)
+ sys.exit(1)
+ resp, content = req
+ if 'content-type' in resp:
+ ct = 'Content-Type: %s'%req[0]['content-type']
+ ct = parse_header(ct)
+ if 'charset' in ct[1]: encoding = ct[1]['charset']
+ else: encoding = ''
+ else:
+ ct = ''
+ encoding = ''
+
+ else:
+ try:
+ req = urllib2.urlopen(url)
+ except urllib2.URLError, e:
+ sys.stderr.write('%s\n'%e)
+ sys.exit(1)
+
+    content = req.read()
+    ct = req.info().getplist()
+    encoding = ''  # default when the server sends no charset parameter
+    for param in ct:
+        if 'charset' in param:
+            encoding = param.split('=')[1]
+            break
+
+ return (content, encoding)
+
+
+def generateRules():
+ '''Attempts to load a series of rules into a list'''
try:
- content = urllib2.urlopen(url).read()
- return content
- except urllib2.URLError, e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
+ import parserrules
+ except ImportError:
+ return []
+
+ rules = [getattr(parserrules, rule) for rule in dir(parserrules) if callable(getattr(parserrules, rule))]
+ return rules
+
+
+def applyRules(cal, rules=[], verbose=False):
+ 'Runs a series of rules on the lines in ical and mangles its output'
- return ''
+    for rule in rules:
+        if verbose:
+            sys.stderr.write('Applying rule: %s\n' % rule.__name__)
+        cal = rule(cal)
+
+    return cal
+
+def writeOutput(cal, outfile=''):
+ '''Takes a list of lines and outputs to the specified file'''
+
+ if not cal:
+        sys.stderr.write('Refusing to write out an empty file\n')
+ sys.exit(0)
+
+ if not outfile:
+ out = sys.stdout
+ else:
+ try:
+ out = open(outfile, 'w')
+ except (IOError, OSError), e:
+ sys.stderr.write('%s\n'%e)
+ sys.exit(1)
+
+ cal.serialize(out)
+
+ if not out == sys.stdout:
+ out.close()
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser('usage: %prog [options] url')
parser.add_option('-s', '--stdin', action='store_true', dest='stdin',
default=False, help='Take a calendar from standard input')
+ parser.add_option('-v', '--verbose', action='store_true', dest='verbose',
+ default=False, help='Be verbose when rules are being applied')
parser.add_option('-o', '--output', dest='outfile', default='',
help='Specify output file (defaults to standard output)')
+ parser.add_option('-m','--encoding', dest='encoding', default='',
+                      help='Specify a different character encoding '
+                      '(ignored if the remote server also specifies one)')
(options, args) = parser.parse_args()
else:
url = ''
- content = getContent(url, options.stdin)
- cal = lineJoiner(content)
- print cal
+ (content, encoding) = getContent(url, options.stdin)
+ encoding = encoding or options.encoding or 'utf-8'
+
+ cal = vobject.readOne(unicode(content, encoding))
+ cal = applyRules(cal, generateRules(), options.verbose)
+
+ writeOutput(cal, options.outfile)