X-Git-Url: https://git.ucc.asn.au/?a=blobdiff_plain;f=icalparse.py;h=d52ac7e0f476558db2cb4855ef27b8afe6f4a8b2;hb=9047310ae385b64077673170348d386eaec570b8;hp=395283fd427107a521ffa77135e8143aa81477be;hpb=c567bf8333d6cb3ab29aabf5ccba0e712c3b2005;p=frenchie%2Ficalparse.git diff --git a/icalparse.py b/icalparse.py index 395283f..d52ac7e 100755 --- a/icalparse.py +++ b/icalparse.py @@ -23,13 +23,24 @@ import sys import urlparse import os +from cgi import parse_header class InvalidICS(Exception): pass class IncompleteICS(InvalidICS): pass -def lineJoiner(oldcal): +def lineJoiner(oldcal, encoding='utf-8'): '''Takes a string containing a calendar and returns an array of its lines''' + try: + oldcal = unicode(oldcal, encoding) + oldcal = oldcal.replace('\r\n ', '').replace('\r\n\t','') + except UnicodeDecodeError: + # This is probably a file with badly folded lines + oldcal = oldcal.replace('\r\n ', '').replace('\r\n\t','') + try: oldcal = unicode(oldcal, encoding) + except UnicodeDecodeError: + sys.stderr.write('Malformed File') + raise if not oldcal[0:15] == 'BEGIN:VCALENDAR': raise InvalidICS, "Does not appear to be a valid ICS file" @@ -40,8 +51,7 @@ def lineJoiner(oldcal): if list(oldcal) == oldcal: oldcal = '\r\n'.join(oldcal) - oldcal = oldcal.replace('\r\n ', '').replace('\r\n\t','') - return [unicode(x, 'utf-8') for x in oldcal.strip().split('\r\n')] + return oldcal.split('\r\n') def lineFolder(oldcal, length=75): @@ -64,24 +74,14 @@ def lineFolder(oldcal, length=75): ll = length foldedline = [] while uline: - # This algorithm prevents slicing multi-byte chars in half - - # Convert up to length octets to unicode, dropping any - # half characters ufold = unicode(line[0:ll], 'utf-8', 'ignore') fold = ufold.encode('utf-8') - - # Remove what we've converted from the line uline = uline.replace(ufold,u'',1) line = uline.encode('utf-8') - - # And add the fold to the list foldedline.append(fold) # Subsequent lines are shorter as they include a space ll = length - 1 - - # Finally, add the fold 'marker' to the line cal.append('\r\n '.join(foldedline)) return cal @@ -91,12 +91,14 @@ def splitFields(cal): '''Takes a list of lines in a calendar file and returns a list of tuples as (key, value) pairs''' - ical = [tuple(x.split(':',1)) for x in cal] + ical = [] # Check that we got 2 items on every line - for line in ical: - if not len(line) == 2: - raise InvalidICS, "Didn't find a content key on: %s"%(line) + for line in [tuple(x.split(':',1)) for x in cal]: + if not len(line) == 2 and line[0]: + raise InvalidICS, 'Unusual content line: %s'%line + elif line[0]: + ical.append(line) return ical @@ -113,6 +115,8 @@ def getContent(url='',stdin=False): it can read from the local disk (which you probably don't want it to). ''' + encoding = '' # If we don't populate this, the script will assume UTF-8 + # Special case, if this is a HTTP url, return the data from it using # the HTTP functions which attempt to play a bit nicer. 
parsedURL = urlparse.urlparse(url) @@ -120,25 +124,27 @@ def getContent(url='',stdin=False): if stdin: content = sys.stdin.read() - return content + return (content, encoding) - if not parsedURL[0]: - try: content = open(os.path.abspath(url),'r').read() - except (IOError, OSError), e: - sys.stderr.write('%s\n'%e) - sys.exit(1) - return content + if not parsedURL[0]: url = 'file://' + os.path.abspath(url) # If we've survived, use python's generic URL opening library to handle it import urllib2 try: res = urllib2.urlopen(url) content = res.read() + ct = res.info().getplist() res.close() except (urllib2.URLError, OSError), e: sys.stderr.write('%s\n'%e) sys.exit(1) - return content + + for param in ct: + if 'charset' in param: + encoding = param.split('=')[1] + break + + return (content, encoding) def getHTTPContent(url='',cache='.httplib2-cache'): @@ -150,28 +156,47 @@ def getHTTPContent(url='',cache='.httplib2-cache'): except ImportError: import urllib2 - if not url: return '' + if not url: return ('','') + + if not 'http' in urlparse.urlparse(url)[0]: return ('','') if 'httplib2' in sys.modules: try: h = httplib2.Http('.httplib2-cache') except OSError: h = httplib2.Http() else: h = False - try: - if h: content = h.request(url)[1] - return content - except ValueError, e: - sys.stderr.write('%s\n'%e) - sys.exit(1) + if h: + try: + req = h.request(url) + except ValueError, e: + sys.stderr.write('%s\n'%e) + sys.exit(1) - try: - content = urllib2.urlopen(url).read() - return content - except (urllib2.URLError, OSError), e: - sys.stderr.write('%s\n'%e) - sys.exit(1) + resp, content = req + if 'content-type' in resp: + ct = 'Content-Type: %s'%req[0]['content-type'] + ct = parse_header(ct) + if 'charset' in ct[1]: encoding = ct[1]['charset'] + else: encoding = '' + else: + ct = '' + encoding = '' + + else: + try: + req = urllib2.urlopen(url) + except urllib2.URLError, e: + sys.stderr.write('%s\n'%e) + sys.exit(1) + + content = req.read() + ct = req.info().getplist() + for param in ct: + if 'charset' in param: + encoding = param.split('=')[1] + break - return '' + return (content, encoding) def generateRules(): @@ -254,6 +279,9 @@ if __name__ == '__main__': default=False, help='Be verbose when rules are being applied') parser.add_option('-o', '--output', dest='outfile', default='', help='Specify output file (defaults to standard output)') + parser.add_option('-m','--encoding', dest='encoding', default='', + help='Specify a different character encoding' + '(ignored if the remote server also specifies one)') (options, args) = parser.parse_args() @@ -265,8 +293,9 @@ if __name__ == '__main__': else: url = '' - content = getContent(url, options.stdin) - cal = lineJoiner(content) + (content, encoding) = getContent(url, options.stdin) + encoding = encoding or options.encoding or 'utf-8' + cal = lineJoiner(content, encoding) ical = applyRules(splitFields(cal), generateRules(), options.verbose) output = lineFolder(joinFields(ical)) writeOutput(output, options.outfile)
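
Below is a minimal, self-contained sketch (not part of the patch) of the two ideas this diff introduces: pulling the charset parameter out of an HTTP Content-Type header with cgi.parse_header, as getHTTPContent now does, and decoding the calendar text with that encoding before unfolding continuation lines, mirroring the normal path of the new lineJoiner. It follows the Python 2 idioms used throughout icalparse.py; the header value and the calendar snippet are invented for illustration.

    from cgi import parse_header

    # Hypothetical Content-Type value a server might send with a calendar.
    header = 'text/calendar; charset=ISO-8859-1'
    mimetype, params = parse_header(header)
    # Fall back to UTF-8 when no charset is given, as the script does.
    encoding = params.get('charset', '') or 'utf-8'

    # A folded ICS fragment containing a non-ASCII byte
    # (0xe9 is 'e' with an acute accent in ISO-8859-1).
    folded = 'BEGIN:VCALENDAR\r\nSUMMARY:Caf\xe9 meet\r\n ing\r\nEND:VCALENDAR'

    # Decode first, then strip the CRLF-plus-whitespace fold markers.
    text = unicode(folded, encoding)
    unfolded = text.replace('\r\n ', '').replace('\r\n\t', '')
    print unfolded.split('\r\n')
    # [u'BEGIN:VCALENDAR', u'SUMMARY:Caf\xe9 meeting', u'END:VCALENDAR']

Had the bytes not decoded cleanly, the patched lineJoiner instead unfolds the raw string first and retries the decode, raising only if that second attempt also fails.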