Parse Adobe's channel ID PDF into product plists for aamporter
#!/usr/bin/python
################################################################################
#
# cc_channel_parser.py by Pepijn Bruienne
# Copyright (c) 2014 - The Regents of the University of Michigan
#
# *** Requirements: pdfminer is required to run this tool. ***
# *** Run 'pip install pdfminer' or 'easy_install pdfminer' before use. ***
#
# A tool to parse Adobe's official Channel IDs document into product plist
# files for use with "aamporter" by Tim Sutton. This is useful for creating
# product plists for products the user has no installer source for but still
# wishes to cache and deploy updates for.
#
# This tool takes the path to the PDF and an optional language code (US, GB)
# to filter out localized updates the user doesn't want aamporter to
# download and cache.
#
# The output is a number of .plist files, written to the current working
# directory, named for each CC product listed and containing the matching
# channels per product. The "munki_update_for" key is set to the product name
# from the PDF; it is up to the user to change this key as needed.
#
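# For illustration, the contents of a generated plist look roughly like the
# XML below (the product name and channel ID are made-up placeholders, not
# values taken from Adobe's PDF):
#
#   <dict>
#       <key>channels</key>
#       <array>
#           <string>PhotoshopCC-14.0</string>
#       </array>
#       <key>munki_update_for</key>
#       <string>Adobe Photoshop CC</string>
#   </dict>
#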
# Includes code from layout_scanner.py by Denis Papathanasiou:
# https://github.com/dpapathanasiou/pdfminer-layout-scanner
################################################################################
############# Start code included from layout_scanner.py #############
import re
import plistlib
import sys
import os
from binascii import b2a_hex
###
### pdf-miner requirements
###
try:
    from pdfminer.pdfparser import PDFParser
    from pdfminer.pdfdocument import PDFDocument, PDFNoOutlines
    from pdfminer.pdfpage import PDFPage
    from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
    from pdfminer.converter import PDFPageAggregator
    from pdfminer.layout import LAParams, LTTextBox, LTTextLine, LTFigure, LTImage, LTChar
except Exception, e:
    print('The "pdfminer" module is not installed, please install it first.')
    print e
    sys.exit(-1)
def with_pdf (pdf_doc, fn, pdf_pwd, *args):
    """Open the pdf document, and apply the function, returning the results"""
    result = None
    try:
        # open the pdf file
        fp = open(pdf_doc, 'rb')
        # create a parser object associated with the file object
        parser = PDFParser(fp)
        # create a PDFDocument object that stores the document structure
        doc = PDFDocument(parser)
        # connect the parser and document objects
        parser.set_document(doc)
        # supply the password for initialization
        # doc.initialize(pdf_pwd)
        if doc.is_extractable:
            # apply the function and return the result
            result = fn(doc, *args)
        # close the pdf file
        fp.close()
    except IOError:
        # the file doesn't exist or similar problem
        pass
    return result
###
### Extracting Images
###
def write_file (folder, filename, filedata, flags='w'):
    """Write the file data to the folder and filename combination
    (flags: 'w' for write text, 'wb' for write binary, use 'a' instead of 'w' for append)"""
    result = False
    if os.path.isdir(folder):
        try:
            file_obj = open(os.path.join(folder, filename), flags)
            file_obj.write(filedata)
            file_obj.close()
            result = True
        except IOError:
            pass
    return result
def determine_image_type (stream_first_4_bytes):
    """Find out the image file type based on the magic number comparison of the first 4 (or 2) bytes"""
    file_type = None
    bytes_as_hex = b2a_hex(stream_first_4_bytes)
    if bytes_as_hex.startswith('ffd8'):
        file_type = '.jpeg'
    elif bytes_as_hex == '89504e47':
        file_type = '.png'
    elif bytes_as_hex == '47494638':
        file_type = '.gif'
    elif bytes_as_hex.startswith('424d'):
        file_type = '.bmp'
    return file_type
def save_image (lt_image, page_number, images_folder):
    """Try to save the image data from this LTImage object, and return the file name, if successful"""
    result = None
    if lt_image.stream:
        file_stream = lt_image.stream.get_rawdata()
        if file_stream:
            file_ext = determine_image_type(file_stream[0:4])
            if file_ext:
                file_name = ''.join([str(page_number), '_', lt_image.name, file_ext])
                if write_file(images_folder, file_name, file_stream, flags='wb'):
                    result = file_name
    return result
###
### Extracting Text
###
def to_bytestring (s, enc='utf-8'):
    """Convert the given unicode string to a bytestring, using the standard encoding,
    unless it's already a bytestring"""
    if s:
        if isinstance(s, str):
            return s
        else:
            return s.encode(enc)
def update_page_text_hash (h, lt_obj, pct=0.2):
    """Use the bbox x0,x1 values within pct% to produce lists of associated text within the hash"""
    x0 = lt_obj.bbox[0]
    x1 = lt_obj.bbox[2]
    key_found = False
    for k, v in h.items():
        hash_x0 = k[0]
        if x0 >= (hash_x0 * (1.0-pct)) and (hash_x0 * (1.0+pct)) >= x0:
            hash_x1 = k[1]
            if x1 >= (hash_x1 * (1.0-pct)) and (hash_x1 * (1.0+pct)) >= x1:
                # the text inside this LT* object was positioned at the same
                # width as a prior series of text, so it belongs together
                key_found = True
                v.append(to_bytestring(lt_obj.get_text()))
                h[k] = v
    if not key_found:
        # the text, based on width, is a new series,
        # so it gets its own series (entry in the hash)
        h[(x0,x1)] = [to_bytestring(lt_obj.get_text())]
    return h
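# Worked example with made-up coordinates: if h already has a column keyed
# (100.0, 300.0) and the next line's bbox gives x0=95, x1=310, then with
# pct=0.2 both values fall inside 20% of the stored key (80-120 and 240-360),
# so the new text is appended to that column instead of starting a new one.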
def parse_lt_objs (lt_objs, page_number, images_folder, text=[]):
    """Iterate through the list of LT* objects and capture the text or image data contained in each"""
    text_content = []
    page_text = {} # k=(x0, x1) of the bbox, v=list of text strings within that bbox width (physical column)
    for lt_obj in lt_objs:
        if isinstance(lt_obj, LTTextBox) or isinstance(lt_obj, LTTextLine):
            # text, so arrange it logically based on its column width
            page_text = update_page_text_hash(page_text, lt_obj)
        elif isinstance(lt_obj, LTImage):
            pass
        elif isinstance(lt_obj, LTFigure):
            # LTFigure objects are containers for other LT* objects, so recurse through the children
            text_content.append(parse_lt_objs(lt_obj, page_number, images_folder, text_content))
    for k, v in sorted([(key,value) for (key,value) in page_text.items()]):
        # sort the page_text hash by the keys (x0,x1 values of the bbox),
        # which produces a top-down, left-to-right sequence of related columns
        text_content.append(''.join(v))
    return '\n'.join(text_content)
###
### Processing Pages
###
def _parse_pages (doc, images_folder):
    """With an open PDFDocument object, get the pages and parse each one
    [this is a higher-order function to be passed to with_pdf()]"""
    rsrcmgr = PDFResourceManager()
    laparams = LAParams()
    device = PDFPageAggregator(rsrcmgr, laparams=laparams)
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    text_content = []
    for i, page in enumerate(PDFPage.create_pages(doc)):
        interpreter.process_page(page)
        # receive the LTPage object for this page
        layout = device.get_result()
        # layout is an LTPage object which may contain child objects like LTTextBox, LTFigure, LTImage, etc.
        text_content.append(parse_lt_objs(layout, (i+1), images_folder))
    return text_content
def get_pages (pdf_doc, pdf_pwd='', images_folder='/tmp'):
    """Process each of the pages in this pdf file and return a list of strings representing the text found in each page"""
    return with_pdf(pdf_doc, _parse_pages, pdf_pwd, *tuple([images_folder]))
############# End code included from layout_scanner.py #############
if len(sys.argv) > 1:
    adobepdf = sys.argv[1]
else:
    print('No path to PDF given, stopping.')
    sys.exit(-1)

try:
    if len(sys.argv[2]) == 2:
        language = sys.argv[2]
    else:
        print('Incorrect language code provided, stopping.')
        sys.exit(-1)
except IndexError:
    # no language code given, so include updates for all languages
    language = 'all'
try:
    pages = get_pages(adobepdf)
except Exception, e:
    print e
    sys.exit(-1)
# Locate the page range that covers the Mac OS product and channel ID listings
for i, page in enumerate(pages):
    if "Mac-OS" in page:
        startmacpage = i + 1
    elif "List of products and channel ids for Adobe Creative Suite" in page and i != 0:
        endmacpage = i
productlist = {}
try:
    for page in pages[startmacpage:endmacpage]:
        # entries are separated by blank lines and look like
        # 'Product name - Channel1, Channel2,'
        parsed = page.split(' \n \n')
        for i in parsed[1:len(parsed)-1]:
            ulist = re.sub('\n', '', i).split(' - ')
            if len(ulist) == 2:
                lastprod = ulist[0].strip()
                # print('Processing %s' % lastprod)
                updates = re.sub(',\ ?$', '', ulist[1])
                productlist[lastprod] = updates
            elif len(ulist) == 1:
                # continuation of the previous product's channel list, so
                # append it to the entry we already have
                updates_cont = re.sub(',\ ?$', '', ulist[0])
                productlist[lastprod] += ', ' + updates_cont
except Exception, e:
    print('%s is not the expected PDF from Adobe.' % adobepdf)
    print('Try again with the PDF from this link: https://forums.adobe.com/docs/DOC-2434')
    sys.exit(-1)
for channel, updates in productlist.iteritems():
    plist_dict = {'munki_update_for': channel}
    updates = updates.split(', ')
    channels = []
    if language != 'all':
        haslcode = re.compile('_[A-Z][A-Z]|-[a-z][a-z][A-Z][A-Z]')
        lcode = re.compile(language)
        for item in updates:
            if haslcode.search(item):
                print(' Localized item, checking for %s' % language)
                if lcode.search(item):
                    print(' + %s is localized for %s, adding' % (item, language))
                    channels.append(item)
                else:
                    print(' - %s is not localized for %s, skipping' % (item, language))
            else:
                channels.append(item)
    else:
        print('No localization filtering requested for %s, adding all.' % channel)
        channels = updates
    plist_dict['channels'] = channels
    print('Writing plist for %s...' % channel)
    plist_out = re.sub('\ ', '_', channel) + '.plist'
    plistlib.writePlist(plist_dict, plist_out)

print('\nDone parsing %s - product plists are in the current working directory.' % adobepdf)
@bruienne (Author) commented:
Examples:

Build product plists for all products and languages:

$ ./cc_channel_parser.py /path/to/AdobeApplicationManagerEnterpriseEdition_ChannelIds.pdf

Build product plists for all products but only list US English localized files:

$ ./cc_channel_parser.py /path/to/AdobeApplicationManagerEnterpriseEdition_ChannelIds.pdf US

Build product plists for all products but only list Swedish localized files:

$ ./cc_channel_parser.py /path/to/AdobeApplicationManagerEnterpriseEdition_ChannelIds.pdf SE
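
To verify a generated plist, read it back with plistlib (the file name here is
an illustrative placeholder following the script's space-to-underscore naming):

$ python -c "import plistlib; print plistlib.readPlist('Adobe_Photoshop_CC.plist')"

This prints something like:

{'channels': ['PhotoshopCC-14.0'], 'munki_update_for': 'Adobe Photoshop CC'}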
