import os
import sys
import urlparse

from cgi import parse_header
class InvalidICS(Exception):
    """Raised when input text does not look like a valid ICS calendar."""
def lineJoiner(oldcal, encoding='utf-8'):
    '''Decode a raw calendar byte string and unfold its folded lines.

    oldcal   -- the calendar file contents as an (encoded) byte string
    encoding -- character encoding to decode with (default 'utf-8')

    Returns a list of unfolded content lines.

    Raises InvalidICS when the decoded text does not start with
    BEGIN:VCALENDAR, and re-raises UnicodeDecodeError when the data cannot
    be decoded even after unfolding (after writing 'Malformed File' to
    stderr).
    '''
    try:
        # .decode(encoding) is equivalent to the old unicode(oldcal,
        # encoding) on a Python 2 str, and also works on Python 3 bytes.
        oldcal = oldcal.decode(encoding)
        # RFC 5545 folds long lines as CRLF + one space/tab; undo the fold.
        oldcal = oldcal.replace('\r\n ', '').replace('\r\n\t', '')
    except UnicodeDecodeError:
        # Probably a file folded in the middle of a multi-byte character:
        # unfold the raw bytes first, then retry the decode.
        oldcal = oldcal.replace(b'\r\n ', b'').replace(b'\r\n\t', b'')
        try:
            oldcal = oldcal.decode(encoding)
        except UnicodeDecodeError:
            sys.stderr.write('Malformed File')
            raise
    if not oldcal[0:15] == 'BEGIN:VCALENDAR':
        raise InvalidICS("Does not appear to be a valid ICS file")
    if list(oldcal) == oldcal:
        # NOTE(review): a list argument cannot survive the decode above, so
        # this branch looks unreachable -- kept for compatibility; confirm
        # against callers before removing.
        oldcal = '\r\n'.join(oldcal)
    return oldcal.split('\r\n')
def lineFolder(oldcal, length=75):
    '''Split calendar content lines into (key, value) pairs.

    oldcal -- list of already-unfolded content lines
    length -- currently unused; presumably the fold width for future
              re-folding support (TODO confirm)

    Returns a list of (key, value) tuples, one per non-blank line.
    Lines with an empty key are silently dropped.

    Raises InvalidICS on a non-blank line with no ':' separator.
    '''
    ical = []
    # BUG FIX: the loop previously iterated over an undefined name `cal`;
    # the parameter is `oldcal`.
    for line in [tuple(x.split(':', 1)) for x in oldcal]:
        if not len(line) == 2 and line[0]:
            # A non-blank line that could not be split into key:value.
            raise InvalidICS('Unusual content line: %s' % line)
        elif line[0]:
            ical.append(line)
    return ical
content = sys.stdin.read()
return (content, encoding)
- if not parsedURL[0]:
- try: content = open(os.path.abspath(url),'r').read()
- except (IOError, OSError), e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
- return (content, encoding)
+ if not parsedURL[0]: url = 'file://' + os.path.abspath(url)
# If we've survived, use python's generic URL opening library to handle it
import urllib2
try:
res = urllib2.urlopen(url)
content = res.read()
+ ct = res.info().getplist()
res.close()
except (urllib2.URLError, OSError), e:
sys.stderr.write('%s\n'%e)
sys.exit(1)
+
+ for param in ct:
+ if 'charset' in param:
+ encoding = param.split('=')[1]
+ break
+
return (content, encoding)
except ImportError:
import urllib2
- if not url: return ''
+ if not url: return ('','')
- encoding = '' # If we don't populate this, the script will assume UTF-8
+ if not 'http' in urlparse.urlparse(url)[0]: return ('','')
if 'httplib2' in sys.modules:
try: h = httplib2.Http('.httplib2-cache')
except OSError: h = httplib2.Http()
else: h = False
- try:
- if h:
+ if h:
+ try:
req = h.request(url)
- content = req[1]
- if 'content-type' in req[0]:
- for ct in req[0]['content-type'].split(';'):
- ct = ct.lower()
- print ct
- if 'charset' in ct:
- encoding = ct.split('=')[1]
- return (content, encoding)
- except ValueError, e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
+ except ValueError, e:
+ sys.stderr.write('%s\n'%e)
+ sys.exit(1)
- try:
- content = urllib2.urlopen(url).read()
- return (content, encoding)
- except (urllib2.URLError, OSError), e:
- sys.stderr.write('%s\n'%e)
- sys.exit(1)
+ resp, content = req
+ if 'content-type' in resp:
+ ct = 'Content-Type: %s'%req[0]['content-type']
+ ct = parse_header(ct)
+ if 'charset' in ct[1]: encoding = ct[1]['charset']
+ else: encoding = ''
+ else:
+ ct = ''
+ encoding = ''
- return ('', '')
+ else:
+ try:
+ req = urllib2.urlopen(url)
+ except urllib2.URLError, e:
+ sys.stderr.write('%s\n'%e)
+ sys.exit(1)
+
+ content = req.read()
+ ct = req.info().getplist()
+ for param in ct:
+ if 'charset' in param:
+ encoding = param.split('=')[1]
+ break
+
+ return (content, encoding)
def generateRules():