tools v5.0

Introduction of alfcrypto library for speed
Reorganisation of archive into plugins, apps, other
Apprentice Alf 2012-03-06 18:24:28 +00:00
parent 882edb6c69
commit 07e532f59c
112 changed files with 11472 additions and 5177 deletions


@@ -4,28 +4,62 @@ from __future__ import with_statement
from calibre.customize import FileTypePlugin
from calibre.gui2 import is_ok_to_use_qt
from calibre.utils.config import config_dir
from calibre.constants import iswindows, isosx
# from calibre.ptempfile import PersistentTemporaryDirectory
from calibre_plugins.k4mobidedrm import kgenpids
from calibre_plugins.k4mobidedrm import topazextract
from calibre_plugins.k4mobidedrm import mobidedrm
import sys
import os
import re
from zipfile import ZipFile
class K4DeDRM(FileTypePlugin):
name = 'K4PC, K4Mac, Kindle Mobi and Topaz DeDRM' # Name of the plugin
description = 'Removes DRM from Mobipocket, Kindle/Mobi, Kindle/Topaz and Kindle/Print Replica files. Provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.'
supported_platforms = ['osx', 'windows', 'linux'] # Platforms this plugin will run on
author = 'DiapDealer, SomeUpdates' # The author of this plugin
version = (0, 4, 1) # The version number of this plugin
file_types = set(['prc','mobi','azw','azw1','azw4','tpz']) # The file types that this plugin will be applied to
on_import = True # Run this plugin during the import
priority = 210 # run this plugin before mobidedrm, k4pcdedrm, k4dedrm
minimum_calibre_version = (0, 7, 55)
def initialize(self):
"""
Dynamic modules can't be imported/loaded from a zipfile... so this routine
runs whenever the plugin gets initialized. This will extract the appropriate
library for the target OS and copy it to the 'alfcrypto' subdirectory of
calibre's configuration directory. That 'alfcrypto' directory is then
inserted into the syspath (as the very first entry) in the run function
so the CDLL stuff will work in the alfcrypto.py script.
"""
if iswindows:
names = ['alfcrypto.dll','alfcrypto64.dll']
elif isosx:
names = ['libalfcrypto.dylib']
else:
names = ['libalfcrypto32.so','libalfcrypto64.so']
lib_dict = self.load_resources(names)
self.alfdir = os.path.join(config_dir, 'alfcrypto')
if not os.path.exists(self.alfdir):
os.mkdir(self.alfdir)
for entry, data in lib_dict.items():
file_path = os.path.join(self.alfdir, entry)
with open(file_path,'wb') as f:
f.write(data)
def run(self, path_to_ebook):
# add the alfcrypto directory to sys.path so alfcrypto.py
# will be able to locate the custom lib(s) for CDLL import.
sys.path.insert(0, self.alfdir)
# Had to move these imports here so the custom libs can be
# extracted to the appropriate places before these routines
# look for them.
from calibre_plugins.k4mobidedrm import kgenpids
from calibre_plugins.k4mobidedrm import topazextract
from calibre_plugins.k4mobidedrm import mobidedrm
plug_ver = '.'.join(str(self.version).strip('()').replace(' ', '').split(','))
k4 = True
if sys.platform.startswith('linux'):
@@ -45,7 +79,7 @@ class K4DeDRM(FileTypePlugin):
serials.append(customvalue)
else:
print "%s is not a valid Kindle serial number or PID." % str(customvalue)
# Load any kindle info files (*.info) included in Calibre's config directory.
try:
# Find Calibre's configuration directory.
@@ -77,7 +111,7 @@ class K4DeDRM(FileTypePlugin):
title = mb.getBookTitle()
md1, md2 = mb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
try:
mb.processBook(pidlst)
@@ -94,11 +128,11 @@ class K4DeDRM(FileTypePlugin):
except topazextract.TpzDRMError, e:
#if you reached here then no luck, raise an exception
if is_ok_to_use_qt():
from PyQt4.Qt import QMessageBox
d = QMessageBox(QMessageBox.Warning, "K4MobiDeDRM v%s Plugin" % plug_ver, "Error: " + str(e) + "... %s\n" % path_to_ebook)
d.show()
d.raise_()
d.exec_()
raise Exception("K4MobiDeDRM plugin v%s Error: %s" % (plug_ver, str(e)))
print "Success!"
@@ -117,3 +151,11 @@ class K4DeDRM(FileTypePlugin):
def customization_help(self, gui=False):
return 'Enter 10 character PIDs and/or Kindle serial numbers, use a comma (no spaces) to separate each PID or SerialNumber from the next.'
def load_resources(self, names):
ans = {}
with ZipFile(self.plugin_path, 'r') as zf:
for candidate in zf.namelist():
if candidate in names:
ans[candidate] = zf.read(candidate)
return ans

File diff suppressed because it is too large


@@ -1,250 +1,290 @@
#! /usr/bin/env python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
import sys, os
import hmac
from struct import pack
import hashlib
# interface to needed routines libalfcrypto
def _load_libalfcrypto():
import ctypes
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
#! /usr/bin/python
import sys
import csv
import os
import getopt
from struct import pack
from struct import unpack
class PParser(object):
def __init__(self, gd, flatxml, meta_array):
self.gd = gd
self.flatdoc = flatxml.split('\n')
self.docSize = len(self.flatdoc)
self.temp = []
self.ph = -1
self.pw = -1
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
for p in startpos:
(name, argres) = self.lineinDoc(p)
self.ph = max(self.ph, int(argres))
startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
for p in startpos:
(name, argres) = self.lineinDoc(p)
self.pw = max(self.pw, int(argres))
if self.ph <= 0:
self.ph = int(meta_array.get('pageHeight', '11000'))
if self.pw <= 0:
self.pw = int(meta_array.get('pageWidth', '8500'))
res = []
startpos = self.posinDoc('info.glyph.x')
for p in startpos:
argres = self.getDataatPos('info.glyph.x', p)
res.extend(argres)
self.gx = res
pointer_size = ctypes.sizeof(ctypes.c_voidp)
name_of_lib = None
if sys.platform.startswith('darwin'):
name_of_lib = 'libalfcrypto.dylib'
elif sys.platform.startswith('win'):
if pointer_size == 4:
name_of_lib = 'alfcrypto.dll'
res = []
startpos = self.posinDoc('info.glyph.y')
for p in startpos:
argres = self.getDataatPos('info.glyph.y', p)
res.extend(argres)
self.gy = res
res = []
startpos = self.posinDoc('info.glyph.glyphID')
for p in startpos:
argres = self.getDataatPos('info.glyph.glyphID', p)
res.extend(argres)
self.gid = res
# return tag at line pos in document
def lineinDoc(self, pos) :
if (pos >= 0) and (pos < self.docSize) :
item = self.flatdoc[pos]
if item.find('=') >= 0:
(name, argres) = item.split('=',1)
else :
name = item
argres = ''
return name, argres
# find tag in doc if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) :
result = None
if end == -1 :
end = self.docSize
else:
end = min(self.docSize, end)
else:
name_of_lib = 'alfcrypto64.dll'
foundat = -1
for j in xrange(pos, end):
item = self.flatdoc[j]
if item.find('=') >= 0:
(name, argres) = item.split('=',1)
else :
name = item
argres = ''
if name.endswith(tagpath) :
result = argres
foundat = j
break
return foundat, result
# return list of start positions for the tagpath
def posinDoc(self, tagpath):
startpos = []
pos = 0
res = ""
while res != None :
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
if res != None :
startpos.append(foundpos)
pos = foundpos + 1
return startpos
def getData(self, path):
result = None
cnt = len(self.flatdoc)
for j in xrange(cnt):
item = self.flatdoc[j]
if item.find('=') >= 0:
(name, argt) = item.split('=')
argres = argt.split('|')
else:
name = item
argres = []
if (name.endswith(path)):
result = argres
break
if (len(argres) > 0) :
for j in xrange(0,len(argres)):
argres[j] = int(argres[j])
return result
def getDataatPos(self, path, pos):
result = None
item = self.flatdoc[pos]
if item.find('=') >= 0:
(name, argt) = item.split('=')
argres = argt.split('|')
else:
name = item
argres = []
if (len(argres) > 0) :
for j in xrange(0,len(argres)):
argres[j] = int(argres[j])
if (name.endswith(path)):
result = argres
return result
def getDataTemp(self, path):
result = None
cnt = len(self.temp)
for j in xrange(cnt):
item = self.temp[j]
if item.find('=') >= 0:
(name, argt) = item.split('=')
argres = argt.split('|')
else:
name = item
argres = []
if (name.endswith(path)):
result = argres
self.temp.pop(j)
break
if (len(argres) > 0) :
for j in xrange(0,len(argres)):
argres[j] = int(argres[j])
return result
def getImages(self):
result = []
self.temp = self.flatdoc
while (self.getDataTemp('img') != None):
h = self.getDataTemp('img.h')[0]
w = self.getDataTemp('img.w')[0]
x = self.getDataTemp('img.x')[0]
y = self.getDataTemp('img.y')[0]
src = self.getDataTemp('img.src')[0]
result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
return result
def getGlyphs(self):
result = []
if (self.gid != None) and (len(self.gid) > 0):
glyphs = []
for j in set(self.gid):
glyphs.append(j)
glyphs.sort()
for gid in glyphs:
id='id="gl%d"' % gid
path = self.gd.lookup(id)
if path:
result.append(id + ' ' + path)
return result
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
ml = ''
pp = PParser(gdict, flat_xml, meta_array)
ml += '<?xml version="1.0" standalone="no"?>\n'
if (raw):
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n'
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1)
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors'])
else:
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n'
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n'
else:
if pointer_size == 4:
name_of_lib = 'libalfcrypto32.so'
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors'])
ml += '<script><![CDATA[\n'
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n'
ml += 'var dpi=%d;\n' % scaledpi
if (previd) :
ml += 'var prevpage="page%04d.xhtml";\n' % (previd)
if (nextid) :
ml += 'var nextpage="page%04d.xhtml";\n' % (nextid)
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph)
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n'
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n'
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n'
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n'
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n'
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n'
ml += 'window.onload=setsize;\n'
ml += ']]></script>\n'
ml += '</head>\n'
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n'
ml += '<div style="white-space:nowrap;">\n'
if previd == None:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
else:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
if (pp.gid != None):
ml += '<defs>\n'
gdefs = pp.getGlyphs()
for j in xrange(0,len(gdefs)):
ml += gdefs[j]
ml += '</defs>\n'
img = pp.getImages()
if (img != None):
for j in xrange(0,len(img)):
ml += img[j]
if (pp.gid != None):
for j in xrange(0,len(pp.gid)):
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
xpos = "%d" % (pp.pw // 3)
ypos = "%d" % (pp.ph // 3)
ml += '<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n'
if (raw) :
ml += '</svg>'
else :
ml += '</svg></a>\n'
if nextid == None:
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
else :
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n'
ml += '</div>\n'
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n'
ml += '</body>\n'
ml += '</html>\n'
return ml
name_of_lib = 'libalfcrypto64.so'
libalfcrypto = sys.path[0] + os.sep + name_of_lib
if not os.path.isfile(libalfcrypto):
raise Exception('libalfcrypto not found')
libalfcrypto = CDLL(libalfcrypto)
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
def F(restype, name, argtypes):
func = getattr(libalfcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
# aes cbc decryption
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# };
#
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
#
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key,
# unsigned char *ivec, const int enc);
AES_MAXNR = 14
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
# Pukall 1 Cipher
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
# unsigned char *dest, unsigned int len, int decryption);
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
# Topaz Encryption
# typedef struct _TpzCtx {
# unsigned int v[2];
# } TpzCtx;
#
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
class TPZ_CTX(Structure):
_fields_ = [('v', c_long * 2)]
TPZ_CTX_p = POINTER(TPZ_CTX)
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
class AES_CBC(object):
def __init__(self):
self._blocksize = 0
self._keyctx = None
self._iv = 0
def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey)
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
raise Exception('AES CBC improper key used')
return
keyctx = self._keyctx = AES_KEY()
self._iv = iv
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0:
raise Exception('Failed to initialize AES CBC key')
def decrypt(self, data):
out = create_string_buffer(len(data))
mutable_iv = create_string_buffer(self._iv, len(self._iv))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
if rv == 0:
raise Exception('AES CBC decryption failed')
return out.raw
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
self.key = key
out = create_string_buffer(len(src))
de = 0
if decryption:
de = 1
rv = PC1(key, len(key), src, out, len(src), de)
return out.raw
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
tpz_ctx = self._ctx = TPZ_CTX()
topazCryptoInit(tpz_ctx, key, len(key))
return tpz_ctx
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
out = create_string_buffer(len(data))
topazCryptoDecrypt(ctx, data, out, len(data))
return out.raw
print "Using Library AlfCrypto DLL/DYLIB/SO"
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_python_alfcrypto():
import aescbc
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
sum1 = 0;
sum2 = 0;
keyXorVal = 0;
if len(key)!=16:
print "Bad key length!"
return None
wkey = []
for i in xrange(8):
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
dst = ""
for i in xrange(len(src)):
temp1 = 0;
byteXorVal = 0;
for j in xrange(8):
temp1 ^= wkey[j]
sum2 = (sum2+j)*20021 + sum1
sum1 = (temp1*346)&0xFFFF
sum2 = (sum2+sum1)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF
byteXorVal ^= temp1 ^ sum2
curByte = ord(src[i])
if not decryption:
keyXorVal = curByte * 257;
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
if decryption:
keyXorVal = curByte * 257;
for j in xrange(8):
wkey[j] ^= keyXorVal;
dst+=chr(curByte)
return dst
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
ctx1 = 0x0CAFFE19E
for keyChar in key:
keyByte = ord(keyChar)
ctx2 = ctx1
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
self._ctx = [ctx1, ctx2]
return [ctx1,ctx2]
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
ctx1 = ctx[0]
ctx2 = ctx[1]
plainText = ""
for dataChar in data:
dataByte = ord(dataChar)
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx2 = ctx1
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
plainText += chr(m)
return plainText
class AES_CBC(object):
def __init__(self):
self._key = None
self._iv = None
self.aes = None
def set_decrypt_key(self, userkey, iv):
self._key = userkey
self._iv = iv
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
def decrypt(self, data):
iv = self._iv
cleartext = self.aes.decrypt(iv + data)
return cleartext
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_crypto():
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
for loader in cryptolist:
try:
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
break
except (ImportError, Exception):
pass
return AES_CBC, Pukall_Cipher, Topaz_Cipher
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
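# Usage sketch (illustrative placeholder values): whichever loader succeeded
# above, the three classes share the same call pattern, so callers do not need
# to know whether the native library or the pure-python fallback was loaded.
def _alfcrypto_usage_example():
    aes = AES_CBC()
    aes.set_decrypt_key('0123456789abcdef', '\x00' * 16)  # 16/24/32 byte key, 16 byte IV
    block = aes.decrypt('\x00' * 16)                      # data length must be a multiple of 16

    pc1 = Pukall_Cipher()
    record = pc1.PC1('0123456789abcdef', '\x10' * 8, decryption=True)

    tpz = Topaz_Cipher()
    tpz.ctx_init('placeholder key')
    payload = tpz.decrypt('placeholder data')
    return block, record, payload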
class KeyIVGen(object):
# this only exists in openssl so we will use pure python implementation instead
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
def pbkdf2(self, passwd, salt, iter, keylen):
def xorstr( a, b ):
if len(a) != len(b):
raise Exception("xorstr(): lengths differ")
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
def prf( h, data ):
hm = h.copy()
hm.update( data )
return hm.digest()
def pbkdf2_F( h, salt, itercount, blocknum ):
U = prf( h, salt + pack('>i',blocknum ) )
T = U
for i in range(2, itercount+1):
U = prf( h, U )
T = xorstr( T, U )
return T
sha = hashlib.sha1
digest_size = sha().digest_size
# l - number of output blocks to produce
l = keylen / digest_size
if keylen % digest_size != 0:
l += 1
h = hmac.new( passwd, None, sha )
T = ""
for i in range(1, l+1):
T += pbkdf2_F( h, salt, iter, i )
return T[0: keylen]
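# Usage sketch (illustrative inputs): derive 48 bytes with the pure-python
# PBKDF2 above and split them into an AES key and IV, mirroring how the
# CryptUnprotectData-style helpers elsewhere in these tools consume key_iv.
def _keyivgen_example():
    key_iv = KeyIVGen().pbkdf2('example passphrase', 'example salt', 0x3e8, 0x30)
    key = key_iv[0:32]   # 32 byte AES key
    iv = key_iv[32:48]   # 16 byte IV
    return key, iv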

File diff suppressed because it is too large


@@ -0,0 +1,726 @@
# standalone set of Mac OSX specific routines needed for KindleBooks
from __future__ import with_statement
import sys
import os
import os.path
import re
import copy
import subprocess
from struct import pack, unpack, unpack_from
class DrmException(Exception):
pass
# interface to needed routines in openssl's libcrypto
def _load_crypto_libcrypto():
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
Structure, c_ulong, create_string_buffer, addressof, string_at, cast
from ctypes.util import find_library
libcrypto = find_library('crypto')
if libcrypto is None:
raise DrmException('libcrypto not found')
libcrypto = CDLL(libcrypto)
# From OpenSSL's crypto aes header
#
# AES_ENCRYPT 1
# AES_DECRYPT 0
# AES_MAXNR 14 (in bytes)
# AES_BLOCK_SIZE 16 (in bytes)
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
# note: the ivec string, and output buffer are both mutable
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
AES_MAXNR = 14
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
# From OpenSSL's Crypto evp/p5_crpt2.c
#
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# int keylen, unsigned char *out);
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
class LibCrypto(object):
def __init__(self):
self._blocksize = 0
self._keyctx = None
self._iv = 0
def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey)
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
raise DrmException('AES improper key used')
return
keyctx = self._keyctx = AES_KEY()
self._iv = iv
self._userkey = userkey
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0:
raise DrmException('Failed to initialize AES key')
def decrypt(self, data):
out = create_string_buffer(len(data))
mutable_iv = create_string_buffer(self._iv, len(self._iv))
keyctx = self._keyctx
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
if rv == 0:
raise DrmException('AES decryption failed')
return out.raw
def keyivgen(self, passwd, salt, iter, keylen):
saltlen = len(salt)
passlen = len(passwd)
out = create_string_buffer(keylen)
rv = PKCS5_PBKDF2_HMAC_SHA1(passwd, passlen, salt, saltlen, iter, keylen, out)
return out.raw
return LibCrypto
def _load_crypto():
LibCrypto = None
try:
LibCrypto = _load_crypto_libcrypto()
except (ImportError, DrmException):
pass
return LibCrypto
LibCrypto = _load_crypto()
#
# Utility Routines
#
# crypto digestroutines
import hashlib
def MD5(message):
ctx = hashlib.md5()
ctx.update(message)
return ctx.digest()
def SHA1(message):
ctx = hashlib.sha1()
ctx.update(message)
return ctx.digest()
def SHA256(message):
ctx = hashlib.sha256()
ctx.update(message)
return ctx.digest()
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
# For kinf approach of K4Mac 1.6.X or later
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# For Mac they seem to re-use charMap2 here
charMap5 = charMap2
# new in K4M 1.9.X
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
def encode(data, map):
result = ""
for char in data:
value = ord(char)
Q = (value ^ 0x80) // len(map)
R = value % len(map)
result += map[Q]
result += map[R]
return result
# Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map):
return encode(MD5(data),map)
# Decode the string in data with the characters in map. Returns the decoded bytes
def decode(data,map):
result = ""
for i in range (0,len(data)-1,2):
high = map.find(data[i])
low = map.find(data[i+1])
if (high == -1) or (low == -1) :
break
value = (((high * len(map)) ^ 0x80) & 0xFF) + low
result += pack("B",value)
return result
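# Round-trip sanity check (illustrative): encode() expands each byte into two
# map characters after flipping its high bit, and decode() reverses it, so a
# short ASCII string survives the trip through the character maps used here.
def _codec_example():
    sample = 'kindle.account.tokens'
    assert decode(encode(sample, charMap2), charMap2) == sample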
# For K4M 1.6.X and later
# generate table of prime number less than or equal to int n
def primes(n):
if n==2: return [2]
elif n<2: return []
s=range(3,n+1,2)
mroot = n ** 0.5
half=(n+1)/2-1
i=0
m=3
while m <= mroot:
if s[i]:
j=(m*m-3)/2
s[j]=0
while j<half:
s[j]=0
j+=m
i=i+1
m=2*i+3
return [2]+[x for x in s if x]
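# Worked example (illustrative) of how this prime table is used further down:
# for a 100 character encoded record the realignment offset works out as
#   noffset = 100 - primes(100 / 3)[-1]   # primes(33)[-1] == 31, so noffset == 69
# and the first 69 characters of the record are moved to its end before it is
# decoded with the character map.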
# uses a sub process to get the Hard Drive Serial Number using ioreg
# returns with the serial number of drive whose BSD Name is "disk0"
def GetVolumeSerialNumber():
sernum = os.getenv('MYSERIALNUMBER')
if sernum != None:
return sernum
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p.communicate()
reslst = out1.split('\n')
cnt = len(reslst)
bsdname = None
sernum = None
foundIt = False
for j in xrange(cnt):
resline = reslst[j]
pp = resline.find('"Serial Number" = "')
if pp >= 0:
sernum = resline[pp+19:-1]
sernum = sernum.strip()
bb = resline.find('"BSD Name" = "')
if bb >= 0:
bsdname = resline[bb+14:-1]
bsdname = bsdname.strip()
if (bsdname == 'disk0') and (sernum != None):
foundIt = True
break
if not foundIt:
sernum = ''
return sernum
def GetUserHomeAppSupKindleDirParitionName():
home = os.getenv('HOME')
dpath = home + '/Library/Application Support/Kindle'
cmdline = '/sbin/mount'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p.communicate()
reslst = out1.split('\n')
cnt = len(reslst)
disk = ''
foundIt = False
for j in xrange(cnt):
resline = reslst[j]
if resline.startswith('/dev'):
(devpart, mpath) = resline.split(' on ')
dpart = devpart[5:]
pp = mpath.find('(')
if pp >= 0:
mpath = mpath[:pp-1]
if dpath.startswith(mpath):
disk = dpart
return disk
# uses a sub process to get the UUID of the specified disk partition using ioreg
def GetDiskPartitionUUID(diskpart):
uuidnum = os.getenv('MYUUIDNUMBER')
if uuidnum != None:
return uuidnum
cmdline = '/usr/sbin/ioreg -l -S -w 0 -r -c AppleAHCIDiskDriver'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p.communicate()
reslst = out1.split('\n')
cnt = len(reslst)
bsdname = None
uuidnum = None
foundIt = False
nest = 0
uuidnest = -1
partnest = -2
for j in xrange(cnt):
resline = reslst[j]
if resline.find('{') >= 0:
nest += 1
if resline.find('}') >= 0:
nest -= 1
pp = resline.find('"UUID" = "')
if pp >= 0:
uuidnum = resline[pp+10:-1]
uuidnum = uuidnum.strip()
uuidnest = nest
if partnest == uuidnest and uuidnest > 0:
foundIt = True
break
bb = resline.find('"BSD Name" = "')
if bb >= 0:
bsdname = resline[bb+14:-1]
bsdname = bsdname.strip()
if (bsdname == diskpart):
partnest = nest
else :
partnest = -2
if partnest == uuidnest and partnest > 0:
foundIt = True
break
if nest == 0:
partnest = -2
uuidnest = -1
uuidnum = None
bsdname = None
if not foundIt:
uuidnum = ''
return uuidnum
def GetMACAddressMunged():
macnum = os.getenv('MYMACNUM')
if macnum != None:
return macnum
cmdline = '/sbin/ifconfig en0'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p.communicate()
reslst = out1.split('\n')
cnt = len(reslst)
macnum = None
foundIt = False
for j in xrange(cnt):
resline = reslst[j]
pp = resline.find('ether ')
if pp >= 0:
macnum = resline[pp+6:-1]
macnum = macnum.strip()
# print "original mac", macnum
# now munge it up the way Kindle app does
# by xoring it with 0xa5 and swapping elements 3 and 4
maclst = macnum.split(':')
n = len(maclst)
if n != 6:
foundIt = False
break
for i in range(6):
maclst[i] = int('0x' + maclst[i], 0)
mlst = [0x00, 0x00, 0x00, 0x00, 0x00, 0x00]
mlst[5] = maclst[5] ^ 0xa5
mlst[4] = maclst[3] ^ 0xa5
mlst[3] = maclst[4] ^ 0xa5
mlst[2] = maclst[2] ^ 0xa5
mlst[1] = maclst[1] ^ 0xa5
mlst[0] = maclst[0] ^ 0xa5
macnum = "%0.2x%0.2x%0.2x%0.2x%0.2x%0.2x" % (mlst[0], mlst[1], mlst[2], mlst[3], mlst[4], mlst[5])
foundIt = True
break
if not foundIt:
macnum = ''
return macnum
# uses unix env to get username instead of using sysctlbyname
def GetUserName():
username = os.getenv('USER')
return username
def isNewInstall():
home = os.getenv('HOME')
# soccer game fan anyone
dpath = home + '/Library/Application Support/Kindle/storage/.pes2011'
# print dpath, os.path.exists(dpath)
if os.path.exists(dpath):
return True
return False
def GetIDString():
# K4Mac now has an extensive set of ids strings it uses
# in encoding pids and in creating unique passwords
# for use in its own version of CryptUnprotectDataV2
# BUT Amazon has now become nasty enough to detect when its app
# is being run under a debugger and actually changes code paths
# including which one of these strings is chosen, all to try
# to prevent reverse engineering
# Sad really ... they will only hurt their own sales ...
# true book lovers really want to keep their books forever
# and move them to their devices and DRM prevents that so they
# will just buy from someplace else that they can remove
# the DRM from
# Amazon should know by now that true book lovers are not like
# penniless kids that pirate music, we do not pirate books
if isNewInstall():
mungedmac = GetMACAddressMunged()
if len(mungedmac) > 7:
return mungedmac
sernum = GetVolumeSerialNumber()
if len(sernum) > 7:
return sernum
diskpart = GetUserHomeAppSupKindleDirParitionName()
uuidnum = GetDiskPartitionUUID(diskpart)
if len(uuidnum) > 7:
return uuidnum
mungedmac = GetMACAddressMunged()
if len(mungedmac) > 7:
return mungedmac
return '9999999999'
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
class CryptUnprotectData(object):
def __init__(self):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
self.crp = LibCrypto()
iter = 0x3e8
keylen = 0x80
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.6.0
class CryptUnprotectDataV2(object):
def __init__(self):
sp = GetUserName() + ':&%:' + GetIDString()
passwdData = encode(SHA256(sp),charMap5)
# salt generation as per the code
salt = 0x0512981d * 2 * 1 * 1
salt = str(salt) + GetUserName()
salt = encode(salt,charMap5)
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext
# unprotect the new header blob in .kinf2011
# used in Kindle for Mac Version >= 1.9.0
def UnprotectHeaderData(encryptedData):
passwdData = 'header_key_data'
salt = 'HEADER.2011'
iter = 0x80
keylen = 0x100
crp = LibCrypto()
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32]
iv = key_iv[32:48]
crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData)
return cleartext
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.9.0
class CryptUnprotectDataV3(object):
def __init__(self, entropy):
sp = GetUserName() + '+@#$%+' + GetIDString()
passwdData = encode(SHA256(sp),charMap2)
salt = entropy
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap2)
return cleartext
# Locate the .kindle-info files
def getKindleInfoFiles(kInfoFiles):
# first search for current .kindle-info files
home = os.getenv('HOME')
cmdline = 'find "' + home + '/Library/Application Support" -name ".kindle-info"'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate()
reslst = out1.split('\n')
kinfopath = 'NONE'
found = False
for resline in reslst:
if os.path.isfile(resline):
kInfoFiles.append(resline)
found = True
# add any .rainier*-kinf files
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate()
reslst = out1.split('\n')
for resline in reslst:
if os.path.isfile(resline):
kInfoFiles.append(resline)
found = True
# add any .kinf2011 files
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate()
reslst = out1.split('\n')
for resline in reslst:
if os.path.isfile(resline):
kInfoFiles.append(resline)
found = True
if not found:
print('No kindle-info files have been found.')
return kInfoFiles
# determine type of kindle info provided and return a
# database of keynames and values
def getDBfromFile(kInfoFile):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
DB = {}
cnt = 0
infoReader = open(kInfoFile, 'r')
hdr = infoReader.read(1)
data = infoReader.read()
if data.find('[') != -1 :
# older style kindle-info file
cud = CryptUnprotectData()
items = data.split('[')
for item in items:
if item != '':
keyhash, rawdata = item.split(':')
keyname = "unknown"
for name in names:
if encodeHash(name,charMap2) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
encryptedValue = decode(rawdata,charMap2)
cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
if hdr == '/':
# else newer style .kinf file used by K4Mac >= 1.6.0
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split
data = data[:-1]
items = data.split('/')
cud = CryptUnprotectDataV2()
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual
# CryptProtectData Blob that represents that keys contents
# "entropy" not used for K4Mac only K4PC
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,charMap5) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5
encdata = "".join(edlst)
contlen = len(encdata)
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5)
cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
# the latest .kinf2011 version for K4M 1.9.1
# put back the hdr char, it is needed
data = hdr + data
data = data[:-1]
items = data.split('/')
# the headerblob is the encrypted information needed to build the entropy string
headerblob = items.pop(0)
encryptedValue = decode(headerblob, charMap1)
cleartext = UnprotectHeaderData(encryptedValue)
# now extract the pieces in the same way
# this version is different from K4PC in that it scales the build number by multiplying by 735
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
for m in re.finditer(pattern, cleartext):
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
cud = CryptUnprotectDataV3(entropy)
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
keyname = "unknown"
# unlike K4PC the keyhash is not used in generating entropy
# entropy = SHA1(keyhash) + added_entropy
# entropy = added_entropy
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,testMap8) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the testMap8 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using testMap8 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the testMap8 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by testMap8
encdata = "".join(edlst)
contlen = len(encdata)
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using testMap8 to get the CryptProtect Data
encryptedValue = decode(encdata,testMap8)
cleartext = cud.decrypt(encryptedValue)
# print keyname
# print cleartext
DB[keyname] = cleartext
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
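# Usage sketch (illustrative): collect the Kindle info files present on this
# Mac and report which key names each one yields.
def _kindle_info_example():
    for infoFile in getKindleInfoFiles([]):
        DB = getDBfromFile(infoFile)
        if DB is not None:
            print infoFile, sorted(DB.keys())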


@@ -1,50 +1,134 @@
#! /usr/bin/python
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# For use with Topaz Scripts Version 2.6
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
import sys
sys.stdout=Unbuffered(sys.stdout)
import csv
import sys
import os
import getopt
from struct import pack
from struct import unpack
class TpzDRMError(Exception):
pass
class DocParser(object):
def __init__(self, flatxml, fontsize, ph, pw):
# local support routines
if 'calibre' in sys.modules:
inCalibre = True
else:
inCalibre = False
if inCalibre :
from calibre_plugins.k4mobidedrm import convert2xml
from calibre_plugins.k4mobidedrm import flatxml2html
from calibre_plugins.k4mobidedrm import flatxml2svg
from calibre_plugins.k4mobidedrm import stylexml2css
else :
import convert2xml
import flatxml2html
import flatxml2svg
import stylexml2css
# global switch
buildXML = False
# Get a 7 bit encoded number from a file
def readEncodedNumber(file):
flag = False
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
if data == 0xFF:
flag = True
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
if data >= 0x80:
datax = (data & 0x7F)
while data >= 0x80 :
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
datax = (datax <<7) + (data & 0x7F)
data = datax
if flag:
data = -data
return data
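# Worked example (illustrative): values are stored seven bits per byte with the
# high bit set on every byte except the last, and a leading 0xFF marks a
# negative number. So the bytes 0x81 0x05 decode to (1 << 7) + 5 = 133, and the
# bytes 0xFF 0x81 0x05 decode to -133.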
# Get a length prefixed string from the file
def lengthPrefixString(data):
return encodeNumber(len(data))+data
def readString(file):
stringLength = readEncodedNumber(file)
if (stringLength == None):
return None
sv = file.read(stringLength)
if (len(sv) != stringLength):
return ""
return unpack(str(stringLength)+"s",sv)[0]
def getMetaArray(metaFile):
# parse the meta file
result = {}
fo = file(metaFile,'rb')
size = readEncodedNumber(fo)
for i in xrange(size):
tag = readString(fo)
value = readString(fo)
result[tag] = value
# print tag, value
fo.close()
return result
# dictionary of all text strings by index value
class Dictionary(object):
def __init__(self, dictFile):
self.filename = dictFile
self.size = 0
self.fo = file(dictFile,'rb')
self.stable = []
self.size = readEncodedNumber(self.fo)
for i in xrange(self.size):
self.stable.append(self.escapestr(readString(self.fo)))
self.pos = 0
def escapestr(self, str):
str = str.replace('&','&amp;')
str = str.replace('<','&lt;')
str = str.replace('>','&gt;')
str = str.replace('=','&#61;')
return str
def lookup(self,val):
if ((val >= 0) and (val < self.size)) :
self.pos = val
return self.stable[self.pos]
else:
print "Error - %d outside of string table limits" % val
raise TpzDRMError('outside of string table limits')
# sys.exit(-1)
def getSize(self):
return self.size
def getPos(self):
return self.pos
class PageDimParser(object):
def __init__(self, flatxml):
self.flatdoc = flatxml.split('\n')
self.fontsize = int(fontsize)
self.ph = int(ph) * 1.0
self.pw = int(pw) * 1.0
stags = {
'paragraph' : 'p',
'graphic' : '.graphic'
}
attr_val_map = {
'hang' : 'text-indent: ',
'indent' : 'text-indent: ',
'line-space' : 'line-height: ',
'margin-bottom' : 'margin-bottom: ',
'margin-left' : 'margin-left: ',
'margin-right' : 'margin-right: ',
'margin-top' : 'margin-top: ',
'space-after' : 'padding-bottom: ',
}
attr_str_map = {
'align-center' : 'text-align: center; margin-left: auto; margin-right: auto;',
'align-left' : 'text-align: left;',
'align-right' : 'text-align: right;',
'align-justify' : 'text-align: justify;',
'display-inline' : 'display: inline;',
'pos-left' : 'text-align: left;',
'pos-right' : 'text-align: right;',
'pos-center' : 'text-align: center; margin-left: auto; margin-right: auto;',
}
# find tag if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) :
result = None
@@ -58,198 +142,568 @@ class DocParser(object):
for j in xrange(pos, end):
item = docList[j]
if item.find('=') >= 0:
(name, argres) = item.split('=')
else :
name = item
argres = ''
if name.endswith(tagpath) :
result = argres
foundat = j
break
return foundat, result
# return list of start positions for the tagpath
def posinDoc(self, tagpath):
startpos = []
pos = 0
res = ""
while res != None :
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
if res != None :
startpos.append(foundpos)
pos = foundpos + 1
return startpos
# returns a vector of integers for the tagpath
def getData(self, tagpath, pos, end):
argres=[]
(foundat, argt) = self.findinDoc(tagpath, pos, end)
if (argt != None) and (len(argt) > 0) :
argList = argt.split('|')
argres = [ int(strval) for strval in argList]
return argres
def process(self):
(pos, sph) = self.findinDoc('page.h',0,-1)
(pos, spw) = self.findinDoc('page.w',0,-1)
if (sph == None): sph = '-1'
if (spw == None): spw = '-1'
return sph, spw
def getPageDim(flatxml):
classlst = ''
csspage = '.cl-center { text-align: center; margin-left: auto; margin-right: auto; }\n'
csspage += '.cl-right { text-align: right; }\n'
csspage += '.cl-left { text-align: left; }\n'
csspage += '.cl-justify { text-align: justify; }\n'
# generate a list of each <style> starting point in the stylesheet
styleList= self.posinDoc('book.stylesheet.style')
stylecnt = len(styleList)
styleList.append(-1)
# process each style converting what you can
for j in xrange(stylecnt):
start = styleList[j]
end = styleList[j+1]
(pos, tag) = self.findinDoc('style._tag',start,end)
if tag == None :
(pos, tag) = self.findinDoc('style.type',start,end)
# Is this something we know how to convert to css
if tag in self.stags :
# get the style class
(pos, sclass) = self.findinDoc('style.class',start,end)
if sclass != None:
sclass = sclass.replace(' ','-')
sclass = '.cl-' + sclass.lower()
else :
sclass = ''
# check for any "after class" specifiers
(pos, aftclass) = self.findinDoc('style._after_class',start,end)
if aftclass != None:
aftclass = aftclass.replace(' ','-')
aftclass = '.cl-' + aftclass.lower()
else :
aftclass = ''
cssargs = {}
while True :
(pos1, attr) = self.findinDoc('style.rule.attr', start, end)
(pos2, val) = self.findinDoc('style.rule.value', start, end)
if attr == None : break
if (attr == 'display') or (attr == 'pos') or (attr == 'align'):
# handle text based attributess
attr = attr + '-' + val
if attr in self.attr_str_map :
cssargs[attr] = (self.attr_str_map[attr], '')
else :
# handle value based attributes
if attr in self.attr_val_map :
name = self.attr_val_map[attr]
if attr in ('margin-bottom', 'margin-top', 'space-after') :
scale = self.ph
elif attr in ('margin-right', 'indent', 'margin-left', 'hang') :
scale = self.pw
elif attr == 'line-space':
scale = self.fontsize * 2.0
if not ((attr == 'hang') and (int(val) == 0)) :
pv = float(val)/scale
cssargs[attr] = (self.attr_val_map[attr], pv)
keep = True
start = max(pos1, pos2) + 1
# disable all of the after class tags until I figure out how to handle them
if aftclass != "" : keep = False
if keep :
# make sure line-space does not go below 100% or above 300% since
# it can be wacky in some styles
if 'line-space' in cssargs:
seg = cssargs['line-space'][0]
val = cssargs['line-space'][1]
if val < 1.0: val = 1.0
if val > 3.0: val = 3.0
del cssargs['line-space']
cssargs['line-space'] = (self.attr_val_map['line-space'], val)
# handle modifications for css style hanging indents
if 'hang' in cssargs:
hseg = cssargs['hang'][0]
hval = cssargs['hang'][1]
del cssargs['hang']
cssargs['hang'] = (self.attr_val_map['hang'], -hval)
mval = 0
mseg = 'margin-left: '
mval = hval
if 'margin-left' in cssargs:
mseg = cssargs['margin-left'][0]
mval = cssargs['margin-left'][1]
if mval < 0: mval = 0
mval = hval + mval
cssargs['margin-left'] = (mseg, mval)
if 'indent' in cssargs:
del cssargs['indent']
cssline = sclass + ' { '
for key in iter(cssargs):
mseg = cssargs[key][0]
mval = cssargs[key][1]
if mval == '':
cssline += mseg + ' '
else :
aseg = mseg + '%.1f%%;' % (mval * 100.0)
cssline += aseg + ' '
cssline += '}'
if sclass != '' :
classlst += sclass + '\n'
# handle special case of paragraph class used inside chapter heading
# and non-chapter headings
if sclass != '' :
ctype = sclass[4:7]
if ctype == 'ch1' :
csspage += 'h1' + cssline + '\n'
if ctype == 'ch2' :
csspage += 'h2' + cssline + '\n'
if ctype == 'ch3' :
csspage += 'h3' + cssline + '\n'
if ctype == 'h1-' :
csspage += 'h4' + cssline + '\n'
if ctype == 'h2-' :
csspage += 'h5' + cssline + '\n'
if ctype == 'h3_' :
csspage += 'h6' + cssline + '\n'
if cssline != ' { }':
csspage += self.stags[tag] + cssline + '\n'
return csspage, classlst
def convert2CSS(flatxml, fontsize, ph, pw):
print ' ', 'Using font size:',fontsize
print ' ', 'Using page height:', ph
print ' ', 'Using page width:', pw
# create a document parser
dp = DocParser(flatxml, fontsize, ph, pw)
csspage = dp.process()
return csspage
# create a document parser
dp = PageDimParser(flatxml)
(ph, pw) = dp.process()
return ph, pw
class GParser(object):
def __init__(self, flatxml):
self.flatdoc = flatxml.split('\n')
self.dpi = 1440
self.gh = self.getData('info.glyph.h')
self.gw = self.getData('info.glyph.w')
self.guse = self.getData('info.glyph.use')
if self.guse :
self.count = len(self.guse)
else :
self.count = 0
self.gvtx = self.getData('info.glyph.vtx')
self.glen = self.getData('info.glyph.len')
self.gdpi = self.getData('info.glyph.dpi')
self.vx = self.getData('info.vtx.x')
self.vy = self.getData('info.vtx.y')
self.vlen = self.getData('info.len.n')
if self.vlen :
self.glen.append(len(self.vlen))
elif self.glen:
self.glen.append(0)
if self.vx :
self.gvtx.append(len(self.vx))
elif self.gvtx :
self.gvtx.append(0)
def getData(self, path):
result = None
cnt = len(self.flatdoc)
for j in xrange(cnt):
item = self.flatdoc[j]
if item.find('=') >= 0:
(name, argt) = item.split('=')
argres = argt.split('|')
else:
name = item
argres = []
if (name == path):
result = argres
break
if (len(argres) > 0) :
for j in xrange(0,len(argres)):
argres[j] = int(argres[j])
return result
def getGlyphDim(self, gly):
if self.gdpi[gly] == 0:
return 0, 0
maxh = (self.gh[gly] * self.dpi) / self.gdpi[gly]
maxw = (self.gw[gly] * self.dpi) / self.gdpi[gly]
return maxh, maxw
def getPath(self, gly):
path = ''
if (gly < 0) or (gly >= self.count):
return path
tx = self.vx[self.gvtx[gly]:self.gvtx[gly+1]]
ty = self.vy[self.gvtx[gly]:self.gvtx[gly+1]]
p = 0
for k in xrange(self.glen[gly], self.glen[gly+1]):
if (p == 0):
zx = tx[0:self.vlen[k]+1]
zy = ty[0:self.vlen[k]+1]
else:
zx = tx[self.vlen[k-1]+1:self.vlen[k]+1]
zy = ty[self.vlen[k-1]+1:self.vlen[k]+1]
p += 1
j = 0
while ( j < len(zx) ):
if (j == 0):
# Start Position.
path += 'M %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly])
elif (j <= len(zx)-3):
# Cubic Bezier Curve
path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[j+2] * self.dpi / self.gdpi[gly], zy[j+2] * self.dpi / self.gdpi[gly])
j += 2
elif (j == len(zx)-2):
# Cubic Bezier Curve to Start Position
path += 'C %d %d %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[j+1] * self.dpi / self.gdpi[gly], zy[j+1] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
j += 1
elif (j == len(zx)-1):
# Quadratic Bezier Curve to Start Position
path += 'Q %d %d %d %d ' % (zx[j] * self.dpi / self.gdpi[gly], zy[j] * self.dpi / self.gdpi[gly], zx[0] * self.dpi / self.gdpi[gly], zy[0] * self.dpi / self.gdpi[gly])
j += 1
path += 'z'
return path
def getpageIDMap(flatxml):
dp = DocParser(flatxml, 0, 0, 0)
pageidnumbers = dp.getData('info.original.pid', 0, -1)
return pageidnumbers
# dictionary of all text strings by index value
class GlyphDict(object):
def __init__(self):
self.gdict = {}
def lookup(self, id):
# id='id="gl%d"' % val
if id in self.gdict:
return self.gdict[id]
return None
def addGlyph(self, val, path):
id='id="gl%d"' % val
self.gdict[id] = path
def generateBook(bookDir, raw, fixedimage):
# sanity check Topaz file extraction
if not os.path.exists(bookDir) :
print "Can not find directory with unencrypted book"
return 1
dictFile = os.path.join(bookDir,'dict0000.dat')
if not os.path.exists(dictFile) :
print "Can not find dict0000.dat file"
return 1
pageDir = os.path.join(bookDir,'page')
if not os.path.exists(pageDir) :
print "Can not find page directory in unencrypted book"
return 1
imgDir = os.path.join(bookDir,'img')
if not os.path.exists(imgDir) :
print "Can not find image directory in unencrypted book"
return 1
glyphsDir = os.path.join(bookDir,'glyphs')
if not os.path.exists(glyphsDir) :
print "Can not find glyphs directory in unencrypted book"
return 1
metaFile = os.path.join(bookDir,'metadata0000.dat')
if not os.path.exists(metaFile) :
print "Can not find metadata0000.dat in unencrypted book"
return 1
svgDir = os.path.join(bookDir,'svg')
if not os.path.exists(svgDir) :
os.makedirs(svgDir)
if buildXML:
xmlDir = os.path.join(bookDir,'xml')
if not os.path.exists(xmlDir) :
os.makedirs(xmlDir)
otherFile = os.path.join(bookDir,'other0000.dat')
if not os.path.exists(otherFile) :
print "Can not find other0000.dat in unencrypted book"
return 1
print "Updating to color images if available"
spath = os.path.join(bookDir,'color_img')
dpath = os.path.join(bookDir,'img')
filenames = os.listdir(spath)
filenames = sorted(filenames)
for filename in filenames:
imgname = filename.replace('color','img')
sfile = os.path.join(spath,filename)
dfile = os.path.join(dpath,imgname)
imgdata = file(sfile,'rb').read()
file(dfile,'wb').write(imgdata)
print "Creating cover.jpg"
isCover = False
cpath = os.path.join(bookDir,'img')
cpath = os.path.join(cpath,'img0000.jpg')
if os.path.isfile(cpath):
cover = file(cpath, 'rb').read()
cpath = os.path.join(bookDir,'cover.jpg')
file(cpath, 'wb').write(cover)
isCover = True
print 'Processing Dictionary'
dict = Dictionary(dictFile)
print 'Processing Meta Data and creating OPF'
meta_array = getMetaArray(metaFile)
# replace special chars in title and authors like & < >
title = meta_array.get('Title','No Title Provided')
title = title.replace('&','&amp;')
title = title.replace('<','&lt;')
title = title.replace('>','&gt;')
meta_array['Title'] = title
authors = meta_array.get('Authors','No Authors Provided')
authors = authors.replace('&','&amp;')
authors = authors.replace('<','&lt;')
authors = authors.replace('>','&gt;')
meta_array['Authors'] = authors
if buildXML:
xname = os.path.join(xmlDir, 'metadata.xml')
mlst = []
for key in meta_array:
mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
metastr = "".join(mlst)
mlst = None
file(xname, 'wb').write(metastr)
print 'Processing StyleSheet'
# get some scaling info from metadata to use while processing styles
fontsize = '135'
if 'fontSize' in meta_array:
fontsize = meta_array['fontSize']
# also get the size of a normal text page
spage = '1'
if 'firstTextPage' in meta_array:
spage = meta_array['firstTextPage']
pnum = int(spage)
# get page height and width from first text page for use in stylesheet scaling
pname = 'page%04d.dat' % (pnum + 1)
fname = os.path.join(pageDir,pname)
flat_xml = convert2xml.fromData(dict, fname)
(ph, pw) = getPageDim(flat_xml)
if (ph == '-1') or (ph == '0') : ph = '11000'
if (pw == '-1') or (pw == '0') : pw = '8500'
meta_array['pageHeight'] = ph
meta_array['pageWidth'] = pw
if 'fontSize' not in meta_array.keys():
meta_array['fontSize'] = fontsize
# process other.dat for css info and for map of page files to svg images
# this map is needed because some pages actually are made up of multiple
# pageXXXX.xml files
xname = os.path.join(bookDir, 'style.css')
flat_xml = convert2xml.fromData(dict, otherFile)
# extract info.original.pid to get original page information
pageIDMap = {}
pageidnums = stylexml2css.getpageIDMap(flat_xml)
if len(pageidnums) == 0:
filenames = os.listdir(pageDir)
numfiles = len(filenames)
for k in range(numfiles):
pageidnums.append(k)
# create a map from page ids to list of page file nums to process for that page
for i in range(len(pageidnums)):
id = pageidnums[i]
if id in pageIDMap.keys():
pageIDMap[id].append(i)
else:
pageIDMap[id] = [i]
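
    # Worked example (added for clarity, not in the original file): if the
    # book reports pageidnums = [0, 1, 1, 2], the loop above produces
    # pageIDMap == {0: [0], 1: [1, 2], 2: [3]}, so page id 1 is later
    # assembled from both page0001.dat and page0002.dat.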
# now get the css info
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
file(xname, 'wb').write(cssstr)
if buildXML:
xname = os.path.join(xmlDir, 'other0000.xml')
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
print 'Processing Glyphs'
gd = GlyphDict()
filenames = os.listdir(glyphsDir)
filenames = sorted(filenames)
glyfname = os.path.join(svgDir,'glyphs.svg')
glyfile = open(glyfname, 'w')
glyfile.write('<?xml version="1.0" standalone="no"?>\n')
glyfile.write('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
glyfile.write('<svg width="512" height="512" viewBox="0 0 511 511" xmlns="http://www.w3.org/2000/svg" version="1.1">\n')
glyfile.write('<title>Glyphs for %s</title>\n' % meta_array['Title'])
glyfile.write('<defs>\n')
counter = 0
for filename in filenames:
# print ' ', filename
print '.',
fname = os.path.join(glyphsDir,filename)
flat_xml = convert2xml.fromData(dict, fname)
if buildXML:
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
gp = GParser(flat_xml)
for i in xrange(0, gp.count):
path = gp.getPath(i)
maxh, maxw = gp.getGlyphDim(i)
fullpath = '<path id="gl%d" d="%s" fill="black" /><!-- width=%d height=%d -->\n' % (counter * 256 + i, path, maxw, maxh)
glyfile.write(fullpath)
gd.addGlyph(counter * 256 + i, fullpath)
counter += 1
glyfile.write('</defs>\n')
glyfile.write('</svg>\n')
glyfile.close()
print " "
# start up the html
# also build up tocentries while processing html
htmlFileName = "book.html"
hlst = []
hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
hlst.append('<head>\n')
hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array:
hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array:
hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
hlst.append('</head>\n<body>\n')
print 'Processing Pages'
# Books are at 1440 DPI. This is rendering at twice that size for
# readability when rendering to the screen.
scaledpi = 1440.0
filenames = os.listdir(pageDir)
filenames = sorted(filenames)
numfiles = len(filenames)
xmllst = []
elst = []
for filename in filenames:
# print ' ', filename
print ".",
fname = os.path.join(pageDir,filename)
flat_xml = convert2xml.fromData(dict, fname)
# keep flat_xml for later svg processing
xmllst.append(flat_xml)
if buildXML:
xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
# first get the html
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
elst.append(tocinfo)
hlst.append(pagehtml)
# finish up the html string and output it
hlst.append('</body>\n</html>\n')
htmlstr = "".join(hlst)
hlst = None
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
print " "
print 'Extracting Table of Contents from Amazon OCR'
# first create a table of contents file for the svg images
tlst = []
tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
tlst.append('<head>\n')
tlst.append('<title>' + meta_array['Title'] + '</title>\n')
tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array:
tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array:
tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
tlst.append('</head>\n')
tlst.append('<body>\n')
tlst.append('<h2>Table of Contents</h2>\n')
start = pageidnums[0]
if (raw):
startname = 'page%04d.svg' % start
else:
startname = 'page%04d.xhtml' % start
tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
# build up a table of contents for the svg xhtml output
tocentries = "".join(elst)
elst = None
toclst = tocentries.split('\n')
toclst.pop()
for entry in toclst:
print entry
title, pagenum = entry.split('|')
id = pageidnums[int(pagenum)]
if (raw):
fname = 'page%04d.svg' % id
else:
fname = 'page%04d.xhtml' % id
tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
tlst.append('</body>\n')
tlst.append('</html>\n')
tochtml = "".join(tlst)
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
# now create index_svg.xhtml that points to all required files
slst = []
slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
slst.append('<head>\n')
slst.append('<title>' + meta_array['Title'] + '</title>\n')
slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array:
slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array:
slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
slst.append('</head>\n')
slst.append('<body>\n')
print "Building svg images of each book page"
slst.append('<h2>List of Pages</h2>\n')
slst.append('<div>\n')
idlst = sorted(pageIDMap.keys())
numids = len(idlst)
cnt = len(idlst)
previd = None
for j in range(cnt):
pageid = idlst[j]
if j < cnt - 1:
nextid = idlst[j+1]
else:
nextid = None
print '.',
pagelst = pageIDMap[pageid]
flst = []
for page in pagelst:
flst.append(xmllst[page])
flat_svg = "".join(flst)
flst=None
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
if (raw) :
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
else :
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
previd = pageid
pfile.write(svgxml)
pfile.close()
counter += 1
slst.append('</div>\n')
slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
slst.append('</body>\n</html>\n')
svgindex = "".join(slst)
slst = None
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
print " "
# build the opf file
opfname = os.path.join(bookDir, 'book.opf')
olst = []
olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
# adding metadata
olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
if 'GUID' in meta_array:
olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
if 'ASIN' in meta_array:
olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
if 'oASIN' in meta_array:
olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
olst.append(' <dc:language>en</dc:language>\n')
olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
if isCover:
olst.append(' <meta name="cover" content="bookcover"/>\n')
olst.append(' </metadata>\n')
olst.append('<manifest>\n')
olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
# adding image files to manifest
filenames = os.listdir(imgDir)
filenames = sorted(filenames)
for filename in filenames:
imgname, imgext = os.path.splitext(filename)
if imgext == '.jpg':
imgext = 'jpeg'
if imgext == '.svg':
imgext = 'svg+xml'
olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
if isCover:
olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
olst.append('</manifest>\n')
# adding spine
olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
if isCover:
olst.append(' <guide>\n')
olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
olst.append(' </guide>\n')
olst.append('</package>\n')
opfstr = "".join(olst)
olst = None
file(opfname, 'wb').write(opfstr)
print 'Processing Complete'
return 0
def usage():
print "genbook.py generates a book from the extract Topaz Files"
print "Usage:"
print " genbook.py [-r] [-h [--fixed-image] <bookDir> "
print " "
print "Options:"
print " -h : help - print this usage message"
print " -r : generate raw svg files (not wrapped in xhtml)"
print " --fixed-image : genearate any Fixed Area as an svg image in the html"
print " "
def main(argv):
bookDir = ''
if len(argv) == 0:
argv = sys.argv
try:
opts, args = getopt.getopt(argv[1:], "rh:",["fixed-image"])
except getopt.GetoptError, err:
print str(err)
usage()
return 1
if len(opts) == 0 and len(args) == 0 :
usage()
return 1
raw = 0
fixedimage = True
for o, a in opts:
if o =="-h":
usage()
return 0
if o =="-r":
raw = 1
if o =="--fixed-image":
fixedimage = True
bookDir = args[0]
rv = generateBook(bookDir, raw, fixedimage)
return rv
if __name__ == '__main__':
sys.exit(main(''))

View file

@@ -1,5 +1,24 @@
#!/usr/bin/env python
from __future__ import with_statement
# engine to remove drm from Kindle for Mac and Kindle for PC books
# for personal use for archiving and converting your ebooks
# PLEASE DO NOT PIRATE EBOOKS!
# We want all authors and publishers, and eBook stores to live
# long and prosperous lives but at the same time we just want to
# be able to read OUR books on whatever device we want and to keep
# readable for a long, long time
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
# and many many others
__version__ = '4.0'
class Unbuffered:
    def __init__(self, stream):
        self.stream = stream
@@ -10,460 +29,184 @@ class Unbuffered:
        return getattr(self.stream, attr)
import sys
import os, csv, getopt
import string
import re
import traceback
buildXML = False
class DrmException(Exception):
pass
if 'calibre' in sys.modules:
    inCalibre = True
else:
    inCalibre = False
import os, csv, getopt
import zlib, zipfile, tempfile, shutil
from struct import pack
from struct import unpack
class TpzDRMError(Exception):
pass
# local support routines
if inCalibre:
    from calibre_plugins.k4mobidedrm import mobidedrm
    from calibre_plugins.k4mobidedrm import topazextract
    from calibre_plugins.k4mobidedrm import kgenpids
    from calibre_plugins.k4mobidedrm import genbook
else:
    import mobidedrm
    import topazextract
    import kgenpids
    import genbook
# recursive zip creation support routine
def zipUpDir(myzip, tdir, localname):
    currentdir = tdir
    if localname != "":
        currentdir = os.path.join(currentdir,localname)
    list = os.listdir(currentdir)
    for file in list:
        afilename = file
        localfilePath = os.path.join(localname, afilename)
        realfilePath = os.path.join(currentdir,file)
        if os.path.isfile(realfilePath):
            myzip.write(realfilePath, localfilePath)
        elif os.path.isdir(realfilePath):
            zipUpDir(myzip, tdir, localfilePath)

# cleanup bytestring filenames
# borrowed from calibre from calibre/src/calibre/__init__.py
# added in removal of non-printing chars
# and removal of . at start
# convert spaces to underscores
def cleanup_name(name):
    _filename_sanitize = re.compile(r'[\xae\0\\|\?\*<":>\+/]')
    substitute='_'
    one = ''.join(char for char in name if char in string.printable)
    one = _filename_sanitize.sub(substitute, one)
    one = re.sub(r'\s', ' ', one).strip()
    one = re.sub(r'^\.+$', '_', one)
    one = one.replace('..', substitute)
    # Windows doesn't like path components that end with a period
    if one.endswith('.'):
        one = one[:-1]+substitute
    # Mac and Unix don't like file names that begin with a full stop
    if len(one) > 0 and one[0] == '.':
        one = substitute+one[1:]
    one = one.replace(' ','_')
    return one
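
# Hedged example of the sanitiser above (the expected output simply follows
# the regexes as written): characters Windows rejects and spaces both become
# underscores.
assert cleanup_name('A Book: Part?1') == 'A_Book__Part_1'
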
# def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
# Utility routines global buildXML
#
# Get a 7 bit encoded number from file # handle the obvious cases at the beginning
def bookReadEncodedNumber(fo): if not os.path.isfile(infile):
flag = False print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
data = ord(fo.read(1)) return 1
if data == 0xFF:
flag = True
data = ord(fo.read(1))
if data >= 0x80:
datax = (data & 0x7F)
while data >= 0x80 :
data = ord(fo.read(1))
datax = (datax <<7) + (data & 0x7F)
data = datax
if flag:
data = -data
return data
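
# Illustrative decode (not from the original source), using an in-memory
# stream: 0x81 0x05 is the 7-bit group encoding of (0x01 << 7) + 0x05 = 133,
# and a leading 0xFF byte marks the value as negative.
from StringIO import StringIO
assert bookReadEncodedNumber(StringIO('\x81\x05')) == 133
assert bookReadEncodedNumber(StringIO('\xff\x81\x05')) == -133
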
# Get a length prefixed string from file
def bookReadString(fo):
stringLength = bookReadEncodedNumber(fo)
return unpack(str(stringLength)+"s",fo.read(stringLength))[0]
# mobi = True
# crypto routines magic3 = file(infile,'rb').read(3)
# if magic3 == 'TPZ':
mobi = False
# Context initialisation for the Topaz Crypto bookname = os.path.splitext(os.path.basename(infile))[0]
def topazCryptoInit(key):
ctx1 = 0x0CAFFE19E
for keyChar in key:
keyByte = ord(keyChar)
ctx2 = ctx1
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
return [ctx1,ctx2]
# decrypt data with the context prepared by topazCryptoInit()
def topazCryptoDecrypt(data, ctx):
ctx1 = ctx[0]
ctx2 = ctx[1]
plainText = ""
for dataChar in data:
dataByte = ord(dataChar)
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx2 = ctx1
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
plainText += chr(m)
return plainText
# Decrypt data with the PID if mobi:
def decryptRecord(data,PID): mb = mobidedrm.MobiBook(infile)
ctx = topazCryptoInit(PID) else:
return topazCryptoDecrypt(data, ctx) mb = topazextract.TopazBook(infile)
# Try to decrypt a dkey record (contains the bookPID) title = mb.getBookTitle()
def decryptDkeyRecord(data,PID): print "Processing Book: ", title
record = decryptRecord(data,PID) filenametitle = cleanup_name(title)
fields = unpack("3sB8sB8s3s",record) outfilename = bookname
if fields[0] != "PID" or fields[5] != "pid" : if len(outfilename)<=8 or len(filenametitle)<=8:
raise TpzDRMError("Didn't find PID magic numbers in record") outfilename = outfilename + "_" + filenametitle
elif fields[1] != 8 or fields[3] != 8 : elif outfilename[:8] != filenametitle[:8]:
raise TpzDRMError("Record didn't contain correct length fields") outfilename = outfilename[:8] + "_" + filenametitle
elif fields[2] != PID :
raise TpzDRMError("Record didn't contain PID")
return fields[4]
# Decrypt all dkey records (contain the book PID) # avoid excessively long file names
def decryptDkeyRecords(data,PID): if len(outfilename)>150:
nbKeyRecords = ord(data[0]) outfilename = outfilename[:150]
records = []
data = data[1:]
for i in range (0,nbKeyRecords):
length = ord(data[0])
try:
key = decryptDkeyRecord(data[1:length+1],PID)
records.append(key)
except TpzDRMError:
pass
data = data[1+length:]
if len(records) == 0:
raise TpzDRMError("BookKey Not Found")
return records
# build pid list
md1, md2 = mb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
class TopazBook: try:
def __init__(self, filename): mb.processBook(pidlst)
self.fo = file(filename, 'rb')
self.outdir = tempfile.mkdtemp()
# self.outdir = 'rawdat'
self.bookPayloadOffset = 0
self.bookHeaderRecords = {}
self.bookMetadata = {}
self.bookKey = None
magic = unpack("4s",self.fo.read(4))[0]
if magic != 'TPZ0':
raise TpzDRMError("Parse Error : Invalid Header, not a Topaz file")
self.parseTopazHeaders()
self.parseMetadata()
def parseTopazHeaders(self): except mobidedrm.DrmException, e:
def bookReadHeaderRecordData(): print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
# Read and return the data of one header record at the current book file position return 1
# [[offset,decompressedLength,compressedLength],...] except topazextract.TpzDRMError, e:
nbValues = bookReadEncodedNumber(self.fo) print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
values = [] return 1
for i in range (0,nbValues): except Exception, e:
values.append([bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo),bookReadEncodedNumber(self.fo)]) print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: " + str(e) + "\nDRM Removal Failed.\n"
return values return 1
def parseTopazHeaderRecord():
# Read and parse one header record at the current book file position and return the associated data
# [[offset,decompressedLength,compressedLength],...]
if ord(self.fo.read(1)) != 0x63:
raise TpzDRMError("Parse Error : Invalid Header")
tag = bookReadString(self.fo)
record = bookReadHeaderRecordData()
return [tag,record]
nbRecords = bookReadEncodedNumber(self.fo)
for i in range (0,nbRecords):
result = parseTopazHeaderRecord()
# print result[0], result[1]
self.bookHeaderRecords[result[0]] = result[1]
if ord(self.fo.read(1)) != 0x64 :
raise TpzDRMError("Parse Error : Invalid Header")
self.bookPayloadOffset = self.fo.tell()
def parseMetadata(self): if mobi:
# Parse the metadata record from the book payload and return a list of [key,values] if mb.getPrintReplica():
self.fo.seek(self.bookPayloadOffset + self.bookHeaderRecords["metadata"][0][0]) outfile = os.path.join(outdir, outfilename + '_nodrm' + '.azw4')
tag = bookReadString(self.fo)
if tag != "metadata" :
raise TpzDRMError("Parse Error : Record Names Don't Match")
flags = ord(self.fo.read(1))
nbRecords = ord(self.fo.read(1))
# print nbRecords
for i in range (0,nbRecords) :
keyval = bookReadString(self.fo)
content = bookReadString(self.fo)
# print keyval
# print content
self.bookMetadata[keyval] = content
return self.bookMetadata
def getPIDMetaInfo(self):
keysRecord = self.bookMetadata.get('keys','')
keysRecordRecord = ''
if keysRecord != '':
keylst = keysRecord.split(',')
for keyval in keylst:
keysRecordRecord += self.bookMetadata.get(keyval,'')
return keysRecord, keysRecordRecord
def getBookTitle(self):
title = ''
if 'Title' in self.bookMetadata:
title = self.bookMetadata['Title']
return title
def setBookKey(self, key):
self.bookKey = key
def getBookPayloadRecord(self, name, index):
# Get a record in the book payload, given its name and index.
# decrypted and decompressed if necessary
encrypted = False
compressed = False
try:
recordOffset = self.bookHeaderRecords[name][index][0]
except:
raise TpzDRMError("Parse Error : Invalid Record, record not found")
self.fo.seek(self.bookPayloadOffset + recordOffset)
tag = bookReadString(self.fo)
if tag != name :
raise TpzDRMError("Parse Error : Invalid Record, record name doesn't match")
recordIndex = bookReadEncodedNumber(self.fo)
if recordIndex < 0 :
encrypted = True
recordIndex = -recordIndex -1
if recordIndex != index :
raise TpzDRMError("Parse Error : Invalid Record, index doesn't match")
if (self.bookHeaderRecords[name][index][2] > 0):
compressed = True
record = self.fo.read(self.bookHeaderRecords[name][index][2])
else: else:
record = self.fo.read(self.bookHeaderRecords[name][index][1]) outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
mb.getMobiFile(outfile)
return 0
if encrypted: # topaz:
if self.bookKey: print " Creating NoDRM HTMLZ Archive"
ctx = topazCryptoInit(self.bookKey) zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
record = topazCryptoDecrypt(record,ctx) mb.getHTMLZip(zipname)
else :
raise TpzDRMError("Error: Attempt to decrypt without bookKey")
if compressed: print " Creating SVG ZIP Archive"
record = zlib.decompress(record) zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
mb.getSVGZip(zipname)
return record if buildXML:
print " Creating XML ZIP Archive"
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
mb.getXMLZip(zipname)
def processBook(self, pidlst): # remove internal temporary directory of Topaz pieces
raw = 0 mb.cleanup()
fixedimage=True
try:
keydata = self.getBookPayloadRecord('dkey', 0)
except TpzDRMError, e:
print "no dkey record found, book may not be encrypted"
print "attempting to extrct files without a book key"
self.createBookDirectory()
self.extractFiles()
print "Successfully Extracted Topaz contents"
rv = genbook.generateBook(self.outdir, raw, fixedimage)
if rv == 0:
print "\nBook Successfully generated"
return rv
# try each pid to decode the file
bookKey = None
for pid in pidlst:
# use 8 digit pids here
pid = pid[0:8]
print "\nTrying: ", pid
bookKeys = []
data = keydata
try:
bookKeys+=decryptDkeyRecords(data,pid)
except TpzDRMError, e:
pass
else:
bookKey = bookKeys[0]
print "Book Key Found!"
break
if not bookKey: return 0
raise TpzDRMError('Decryption Unsuccessful; No valid pid found')
self.setBookKey(bookKey)
self.createBookDirectory()
self.extractFiles()
print "Successfully Extracted Topaz contents"
rv = genbook.generateBook(self.outdir, raw, fixedimage)
if rv == 0:
print "\nBook Successfully generated"
return rv
def createBookDirectory(self):
outdir = self.outdir
# create output directory structure
if not os.path.exists(outdir):
os.makedirs(outdir)
destdir = os.path.join(outdir,'img')
if not os.path.exists(destdir):
os.makedirs(destdir)
destdir = os.path.join(outdir,'color_img')
if not os.path.exists(destdir):
os.makedirs(destdir)
destdir = os.path.join(outdir,'page')
if not os.path.exists(destdir):
os.makedirs(destdir)
destdir = os.path.join(outdir,'glyphs')
if not os.path.exists(destdir):
os.makedirs(destdir)
def extractFiles(self):
outdir = self.outdir
for headerRecord in self.bookHeaderRecords:
name = headerRecord
if name != "dkey" :
ext = '.dat'
if name == 'img' : ext = '.jpg'
if name == 'color' : ext = '.jpg'
print "\nProcessing Section: %s " % name
for index in range (0,len(self.bookHeaderRecords[name])) :
fnum = "%04d" % index
fname = name + fnum + ext
destdir = outdir
if name == 'img':
destdir = os.path.join(outdir,'img')
if name == 'color':
destdir = os.path.join(outdir,'color_img')
if name == 'page':
destdir = os.path.join(outdir,'page')
if name == 'glyphs':
destdir = os.path.join(outdir,'glyphs')
outputFile = os.path.join(destdir,fname)
print ".",
record = self.getBookPayloadRecord(name,index)
if record != '':
file(outputFile, 'wb').write(record)
print " "
def getHTMLZip(self, zipname):
htmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
htmlzip.write(os.path.join(self.outdir,'book.html'),'book.html')
htmlzip.write(os.path.join(self.outdir,'book.opf'),'book.opf')
if os.path.isfile(os.path.join(self.outdir,'cover.jpg')):
htmlzip.write(os.path.join(self.outdir,'cover.jpg'),'cover.jpg')
htmlzip.write(os.path.join(self.outdir,'style.css'),'style.css')
zipUpDir(htmlzip, self.outdir, 'img')
htmlzip.close()
def getSVGZip(self, zipname):
svgzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
svgzip.write(os.path.join(self.outdir,'index_svg.xhtml'),'index_svg.xhtml')
zipUpDir(svgzip, self.outdir, 'svg')
zipUpDir(svgzip, self.outdir, 'img')
svgzip.close()
def getXMLZip(self, zipname):
xmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
targetdir = os.path.join(self.outdir,'xml')
zipUpDir(xmlzip, targetdir, '')
zipUpDir(xmlzip, self.outdir, 'img')
xmlzip.close()
def cleanup(self):
if os.path.isdir(self.outdir):
pass
# shutil.rmtree(self.outdir, True)
def usage(progname): def usage(progname):
print "Removes DRM protection from Topaz ebooks and extract the contents" print "Removes DRM protection from K4PC/M, Kindle, Mobi and Topaz ebooks"
print "Usage:" print "Usage:"
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
#
# Main # Main
#
def main(argv=sys.argv): def main(argv=sys.argv):
progname = os.path.basename(argv[0]) progname = os.path.basename(argv[0])
k4 = False k4 = False
pids = []
serials = []
kInfoFiles = [] kInfoFiles = []
serials = []
pids = []
print ('K4MobiDeDrm v%(__version__)s '
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc.' % globals())
try: try:
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:") opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
except getopt.GetoptError, err: except getopt.GetoptError, err:
print str(err) print str(err)
usage(progname) usage(progname)
return 1 sys.exit(2)
if len(args)<2: if len(args)<2:
usage(progname) usage(progname)
return 1 sys.exit(2)
for o, a in opts: for o, a in opts:
if o == "-k": if o == "-k":
if a == None : if a == None :
print "Invalid parameter for -k" raise DrmException("Invalid parameter for -k")
return 1
kInfoFiles.append(a) kInfoFiles.append(a)
if o == "-p": if o == "-p":
if a == None : if a == None :
print "Invalid parameter for -p" raise DrmException("Invalid parameter for -p")
return 1
pids = a.split(',') pids = a.split(',')
if o == "-s": if o == "-s":
if a == None : if a == None :
print "Invalid parameter for -s" raise DrmException("Invalid parameter for -s")
return 1
serials = a.split(',') serials = a.split(',')
k4 = True
# try with built in Kindle Info files
k4 = True
if sys.platform.startswith('linux'):
k4 = False
kInfoFiles = None
infile = args[0] infile = args[0]
outdir = args[1] outdir = args[1]
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
if not os.path.isfile(infile):
print "Input File Does Not Exist"
return 1
bookname = os.path.splitext(os.path.basename(infile))[0]
tb = TopazBook(infile)
title = tb.getBookTitle()
print "Processing Book: ", title
keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
try:
print "Decrypting Book"
tb.processBook(pidlst)
print " Creating HTML ZIP Archive"
zipname = os.path.join(outdir, bookname + '_nodrm' + '.htmlz')
tb.getHTMLZip(zipname)
print " Creating SVG ZIP Archive"
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
tb.getSVGZip(zipname)
print " Creating XML ZIP Archive"
zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
tb.getXMLZip(zipname)
# removing internal temporary directory of pieces
tb.cleanup()
except TpzDRMError, e:
print str(e)
# tb.cleanup()
return 1
except Exception, e:
print str(e)
# tb.cleanup
return 1
return 0
if __name__ == '__main__':
    sys.stdout=Unbuffered(sys.stdout)
    sys.exit(main())

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -1,444 +1,249 @@
#!/usr/bin/python #! /usr/bin/python
# # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
#
# Changelog
# 0.01 - Initial version
# 0.02 - Huffdic compressed books were not properly decrypted
# 0.03 - Wasn't checking MOBI header length
# 0.04 - Wasn't sanity checking size of data record
# 0.05 - It seems that the extra data flags take two bytes not four
# 0.06 - And that low bit does mean something after all :-)
# 0.07 - The extra data flags aren't present in MOBI header < 0xE8 in size
# 0.08 - ...and also not in Mobi header version < 6
# 0.09 - ...but they are there with Mobi header version 6, header size 0xE4!
# 0.10 - Outputs unencrypted files as-is, so that when run as a Calibre
# import filter it works when importing unencrypted files.
# Also now handles encrypted files that don't need a specific PID.
# 0.11 - use autoflushed stdout and proper return values
# 0.12 - Fix for problems with metadata import as Calibre plugin, report errors
# 0.13 - Formatting fixes: retabbed file, removed trailing whitespace
# and extra blank lines, converted CR/LF pairs at ends of each line,
# and other cosmetic fixes.
# 0.14 - Working out when the extra data flags are present has been problematic
# Versions 7 through 9 have tried to tweak the conditions, but have been
# only partially successful. Closer examination of lots of sample
# files reveals that a confusion has arisen because trailing data entries
# are not encrypted, but it turns out that the multibyte entries
# in utf8 file are encrypted. (Although neither kind gets compressed.)
# This knowledge leads to a simplification of the test for the
# trailing data byte flags - version 5 and higher AND header size >= 0xE4.
# 0.15 - Now outputs 'heartbeat', and is also quicker for long files.
# 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility.
# 0.17 - added modifications to support its use as an imported python module
# both inside calibre and also in other places (ie K4DeDRM tools)
# 0.17a- disabled the standalone plugin feature since a plugin can not import
# a plugin
# 0.18 - It seems that multibyte entries aren't encrypted in a v7 file...
# Removed the disabled Calibre plug-in code
# Permit use of 8-digit PIDs
# 0.19 - It seems that multibyte entries aren't encrypted in a v6 file either.
# 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
# 0.21 - Added support for multiple pids
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
# 0.23 - fixed problem with older files with no EXTH section
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
# 0.28 - slight additional changes to metadata token generation (None -> '')
# 0.29 - It seems that the ideas about when multibyte trailing characters were
# included in the encryption were wrong. They are for DOC compressed
# files, but they are not for HUFF/CDIC compress files!
# 0.30 - Modified interface slightly to work better with new calibre plugin style
# 0.31 - The multibyte encryption info is true for version 7 files too.
# 0.32 - Added support for "Print Replica" Kindle ebooks
__version__ = '0.32'
import sys import sys
import csv
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout=Unbuffered(sys.stdout)
import os import os
import struct import getopt
import binascii from struct import pack
from struct import unpack
class DrmException(Exception):
pass
# class PParser(object):
# MobiBook Utility Routines def __init__(self, gd, flatxml, meta_array):
# self.gd = gd
self.flatdoc = flatxml.split('\n')
self.docSize = len(self.flatdoc)
self.temp = []
# Implementation of Pukall Cipher 1 self.ph = -1
def PC1(key, src, decryption=True): self.pw = -1
sum1 = 0; startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
sum2 = 0; for p in startpos:
keyXorVal = 0; (name, argres) = self.lineinDoc(p)
if len(key)!=16: self.ph = max(self.ph, int(argres))
print "Bad key length!" startpos = self.posinDoc('page.w') or self.posinDoc('book.w')
return None for p in startpos:
wkey = [] (name, argres) = self.lineinDoc(p)
for i in xrange(8): self.pw = max(self.pw, int(argres))
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
dst = ""
for i in xrange(len(src)):
temp1 = 0;
byteXorVal = 0;
for j in xrange(8):
temp1 ^= wkey[j]
sum2 = (sum2+j)*20021 + sum1
sum1 = (temp1*346)&0xFFFF
sum2 = (sum2+sum1)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF
byteXorVal ^= temp1 ^ sum2
curByte = ord(src[i])
if not decryption:
keyXorVal = curByte * 257;
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
if decryption:
keyXorVal = curByte * 257;
for j in xrange(8):
wkey[j] ^= keyXorVal;
dst+=chr(curByte)
return dst
def checksumPid(s): if self.ph <= 0:
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789" self.ph = int(meta_array.get('pageHeight', '11000'))
crc = (~binascii.crc32(s,-1))&0xFFFFFFFF if self.pw <= 0:
crc = crc ^ (crc >> 16) self.pw = int(meta_array.get('pageWidth', '8500'))
res = s
l = len(letters)
for i in (0,1):
b = crc & 0xff
pos = (b // l) ^ (b % l)
res += letters[pos%l]
crc >>= 8
return res
def getSizeOfTrailingDataEntries(ptr, size, flags): res = []
def getSizeOfTrailingDataEntry(ptr, size): startpos = self.posinDoc('info.glyph.x')
bitpos, result = 0, 0 for p in startpos:
if size <= 0: argres = self.getDataatPos('info.glyph.x', p)
return result res.extend(argres)
while True: self.gx = res
v = ord(ptr[size-1])
result |= (v & 0x7F) << bitpos res = []
bitpos += 7 startpos = self.posinDoc('info.glyph.y')
size -= 1 for p in startpos:
if (v & 0x80) != 0 or (bitpos >= 28) or (size == 0): argres = self.getDataatPos('info.glyph.y', p)
return result res.extend(argres)
num = 0 self.gy = res
testflags = flags >> 1
while testflags: res = []
if testflags & 1: startpos = self.posinDoc('info.glyph.glyphID')
num += getSizeOfTrailingDataEntry(ptr, size - num) for p in startpos:
testflags >>= 1 argres = self.getDataatPos('info.glyph.glyphID', p)
# Check the low bit to see if there's multibyte data present. res.extend(argres)
# if multibyte data is included in the encryped data, we'll self.gid = res
# have already cleared this flag.
if flags & 1:
num += (ord(ptr[size - num - 1]) & 0x3) + 1
return num
# return tag at line pos in document
def lineinDoc(self, pos) :
if (pos >= 0) and (pos < self.docSize) :
item = self.flatdoc[pos]
if item.find('=') >= 0:
(name, argres) = item.split('=',1)
else :
name = item
argres = ''
return name, argres
class MobiBook: # find tag in doc if within pos to end inclusive
def loadSection(self, section): def findinDoc(self, tagpath, pos, end) :
if (section + 1 == self.num_sections): result = None
endoff = len(self.data_file) if end == -1 :
end = self.docSize
else: else:
endoff = self.sections[section + 1][0] end = min(self.docSize, end)
off = self.sections[section][0] foundat = -1
return self.data_file[off:endoff] for j in xrange(pos, end):
item = self.flatdoc[j]
def __init__(self, infile): if item.find('=') >= 0:
print ('MobiDeDrm v%(__version__)s. ' (name, argres) = item.split('=',1)
'Copyright 2008-2011 The Dark Reverser et al.' % globals()) else :
name = item
# initial sanity check on file argres = ''
self.data_file = file(infile, 'rb').read() if name.endswith(tagpath) :
self.mobi_data = '' result = argres
self.header = self.data_file[0:78] foundat = j
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
raise DrmException("invalid file format")
self.magic = self.header[0x3C:0x3C+8]
self.crypto_type = -1
# build up section offset and flag info
self.num_sections, = struct.unpack('>H', self.header[76:78])
self.sections = []
for i in xrange(self.num_sections):
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data_file[78+i*8:78+i*8+8])
flags, val = a1, a2<<16|a3<<8|a4
self.sections.append( (offset, flags, val) )
# parse information from section 0
self.sect = self.loadSection(0)
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])
if self.magic == 'TEXtREAd':
print "Book has format: ", self.magic
self.extra_data_flags = 0
self.mobi_length = 0
self.mobi_version = -1
self.meta_array = {}
return
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
self.extra_data_flags = 0
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
print "Extra Data Flags = %d" % self.extra_data_flags
if (self.compression != 17480):
# multibyte utf8 data is included in the encryption for PalmDoc compression
# so clear that byte so that we leave it to be decrypted.
self.extra_data_flags &= 0xFFFE
# if exth region exists parse it for metadata array
self.meta_array = {}
try:
exth_flag, = struct.unpack('>L', self.sect[0x80:0x84])
exth = 'NONE'
if exth_flag & 0x40:
exth = self.sect[16 + self.mobi_length:]
if (len(exth) >= 4) and (exth[:4] == 'EXTH'):
nitems, = struct.unpack('>I', exth[8:12])
pos = 12
for i in xrange(nitems):
type, size = struct.unpack('>II', exth[pos: pos + 8])
content = exth[pos + 8: pos + size]
self.meta_array[type] = content
# reset the text to speech flag and clipping limit, if present
if type == 401 and size == 9:
# set clipping limit to 100%
self.patchSection(0, "\144", 16 + self.mobi_length + pos + 8)
elif type == 404 and size == 9:
# make sure text to speech is enabled
self.patchSection(0, "\0", 16 + self.mobi_length + pos + 8)
# print type, size, content, content.encode('hex')
pos += size
except:
self.meta_array = {}
pass
self.print_replica = False
def getBookTitle(self):
codec_map = {
1252 : 'windows-1252',
65001 : 'utf-8',
}
title = ''
if 503 in self.meta_array:
title = self.meta_array[503]
else :
toff, tlen = struct.unpack('>II', self.sect[0x54:0x5c])
tend = toff + tlen
title = self.sect[toff:tend]
if title == '':
title = self.header[:32]
title = title.split("\0")[0]
codec = 'windows-1252'
if self.mobi_codepage in codec_map.keys():
codec = codec_map[self.mobi_codepage]
return unicode(title, codec).encode('utf-8')
def getPIDMetaInfo(self):
rec209 = ''
token = ''
if 209 in self.meta_array:
rec209 = self.meta_array[209]
data = rec209
# The 209 data comes in five byte groups. Interpret the last four bytes
# of each group as a big endian unsigned integer to get a key value
# if that key exists in the meta_array, append its contents to the token
for i in xrange(0,len(data),5):
val, = struct.unpack('>I',data[i+1:i+5])
sval = self.meta_array.get(val,'')
token += sval
return rec209, token
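
    # Worked sketch (added for clarity, not in the original file): a rec209
    # value of '\x00\x00\x00\x00\xd7' is a single five-byte group whose last
    # four bytes unpack ('>I') to 215, so the loop above appends
    # self.meta_array.get(215, '') to the token used for PID generation.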
def patch(self, off, new):
self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]
def patchSection(self, section, new, in_off = 0):
if (section + 1 == self.num_sections):
endoff = len(self.data_file)
else:
endoff = self.sections[section + 1][0]
off = self.sections[section][0]
assert off + in_off + len(new) <= endoff
self.patch(off + in_off, new)
def parseDRM(self, data, count, pidlist):
found_key = None
keyvec1 = "\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96"
for pid in pidlist:
bigpid = pid.ljust(16,'\0')
temp_key = PC1(keyvec1, bigpid, False)
temp_key_sum = sum(map(ord,temp_key)) & 0xff
found_key = None
for i in xrange(count):
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
if cksum == temp_key_sum:
cookie = PC1(temp_key, cookie)
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
if verification == ver and (flags & 0x1F) == 1:
found_key = finalkey
break
if found_key != None:
break break
if not found_key: return foundat, result
# Then try the default encoding that doesn't require a PID
pid = "00000000"
temp_key = keyvec1
temp_key_sum = sum(map(ord,temp_key)) & 0xff
for i in xrange(count):
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
if cksum == temp_key_sum:
cookie = PC1(temp_key, cookie)
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
if verification == ver:
found_key = finalkey
break
return [found_key,pid]
def getMobiFile(self, outpath): # return list of start positions for the tagpath
file(outpath,'wb').write(self.mobi_data) def posinDoc(self, tagpath):
startpos = []
def getPrintReplica(self): pos = 0
return self.print_replica res = ""
while res != None :
(foundpos, res) = self.findinDoc(tagpath, pos, -1)
if res != None :
startpos.append(foundpos)
pos = foundpos + 1
return startpos
def processBook(self, pidlist): def getData(self, path):
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2]) result = None
print 'Crypto Type is: ', crypto_type cnt = len(self.flatdoc)
self.crypto_type = crypto_type for j in xrange(cnt):
if crypto_type == 0: item = self.flatdoc[j]
print "This book is not encrypted." if item.find('=') >= 0:
# we must still check for Print Replica (name, argt) = item.split('=')
self.print_replica = (self.loadSection(1)[0:4] == '%MOP') argres = argt.split('|')
self.mobi_data = self.data_file
return
if crypto_type != 2 and crypto_type != 1:
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
if 406 in self.meta_array:
data406 = self.meta_array[406]
val406, = struct.unpack('>Q',data406)
if val406 != 0:
raise DrmException("Cannot decode library or rented ebooks.")
goodpids = []
for pid in pidlist:
if len(pid)==10:
if checksumPid(pid[0:-2]) != pid:
print "Warning: PID " + pid + " has incorrect checksum, should have been "+checksumPid(pid[0:-2])
goodpids.append(pid[0:-2])
elif len(pid)==8:
goodpids.append(pid)
if self.crypto_type == 1:
t1_keyvec = "QDCVEPMU675RUBSZ"
if self.magic == 'TEXtREAd':
bookkey_data = self.sect[0x0E:0x0E+16]
elif self.mobi_version < 0:
bookkey_data = self.sect[0x90:0x90+16]
else: else:
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32] name = item
pid = "00000000" argres = []
found_key = PC1(t1_keyvec, bookkey_data) if (name.endswith(path)):
else : result = argres
# calculate the keys break
drm_ptr, drm_count, drm_size, drm_flags = struct.unpack('>LLLL', self.sect[0xA8:0xA8+16]) if (len(argres) > 0) :
if drm_count == 0: for j in xrange(0,len(argres)):
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.") argres[j] = int(argres[j])
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids) return result
if not found_key:
raise DrmException("No key found. Most likely the correct PID has not been given.") def getDataatPos(self, path, pos):
# kill the drm keys result = None
self.patchSection(0, "\0" * drm_size, drm_ptr) item = self.flatdoc[pos]
# kill the drm pointers if item.find('=') >= 0:
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8) (name, argt) = item.split('=')
argres = argt.split('|')
if pid=="00000000":
print "File has default encryption, no specific PID."
else: else:
print "File is encoded with PID "+checksumPid(pid)+"." name = item
argres = []
if (len(argres) > 0) :
for j in xrange(0,len(argres)):
argres[j] = int(argres[j])
if (name.endswith(path)):
result = argres
return result
# clear the crypto type def getDataTemp(self, path):
self.patchSection(0, "\0" * 2, 0xC) result = None
cnt = len(self.temp)
for j in xrange(cnt):
item = self.temp[j]
if item.find('=') >= 0:
(name, argt) = item.split('=')
argres = argt.split('|')
else:
name = item
argres = []
if (name.endswith(path)):
result = argres
self.temp.pop(j)
break
if (len(argres) > 0) :
for j in xrange(0,len(argres)):
argres[j] = int(argres[j])
return result
# decrypt sections def getImages(self):
print "Decrypting. Please wait . . .", result = []
self.mobi_data = self.data_file[:self.sections[1][0]] self.temp = self.flatdoc
for i in xrange(1, self.records+1): while (self.getDataTemp('img') != None):
data = self.loadSection(i) h = self.getDataTemp('img.h')[0]
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags) w = self.getDataTemp('img.w')[0]
if i%100 == 0: x = self.getDataTemp('img.x')[0]
print ".", y = self.getDataTemp('img.y')[0]
# print "record %d, extra_size %d" %(i,extra_size) src = self.getDataTemp('img.src')[0]
decoded_data = PC1(found_key, data[0:len(data) - extra_size]) result.append('<image xlink:href="../img/img%04d.jpg" x="%d" y="%d" width="%d" height="%d" />\n' % (src, x, y, w, h))
if i==1: return result
self.print_replica = (decoded_data[0:4] == '%MOP')
self.mobi_data += decoded_data
if extra_size > 0:
self.mobi_data += data[-extra_size:]
if self.num_sections > self.records+1:
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
print "done"
return
def getUnencryptedBook(infile,pid): def getGlyphs(self):
if not os.path.isfile(infile): result = []
raise DrmException('Input File Not Found') if (self.gid != None) and (len(self.gid) > 0):
book = MobiBook(infile) glyphs = []
book.processBook([pid]) for j in set(self.gid):
return book.mobi_data glyphs.append(j)
glyphs.sort()
def getUnencryptedBookWithList(infile,pidlist): for gid in glyphs:
if not os.path.isfile(infile): id='id="gl%d"' % gid
raise DrmException('Input File Not Found') path = self.gd.lookup(id)
book = MobiBook(infile) if path:
book.processBook(pidlist) result.append(id + ' ' + path)
return book.mobi_data return result
def main(argv=sys.argv): def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
print ('MobiDeDrm v%(__version__)s. ' mlst = []
'Copyright 2008-2011 The Dark Reverser et al.' % globals()) pp = PParser(gdict, flat_xml, meta_array)
if len(argv)<3 or len(argv)>4: mlst.append('<?xml version="1.0" standalone="no"?>\n')
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks" if (raw):
print "Usage:" mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0] mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
return 1 mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
else: else:
infile = argv[1] mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
outfile = argv[2] mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
if len(argv) is 4: mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
pidlist = argv[3].split(',') mlst.append('<script><![CDATA[\n')
mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
mlst.append('var dpi=%d;\n' % scaledpi)
if (previd) :
mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
if (nextid) :
mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
mlst.append('window.onload=setsize;\n')
mlst.append(']]></script>\n')
mlst.append('</head>\n')
mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
mlst.append('<div style="white-space:nowrap;">\n')
if previd == None:
mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
else: else:
pidlist = {} mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
try:
stripped_file = getUnencryptedBookWithList(infile, pidlist)
file(outfile, 'wb').write(stripped_file)
except DrmException, e:
print "Error: %s" % e
return 1
return 0
mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
if __name__ == "__main__": if (pp.gid != None):
sys.exit(main()) mlst.append('<defs>\n')
gdefs = pp.getGlyphs()
for j in xrange(0,len(gdefs)):
mlst.append(gdefs[j])
mlst.append('</defs>\n')
img = pp.getImages()
if (img != None):
for j in xrange(0,len(img)):
mlst.append(img[j])
if (pp.gid != None):
for j in xrange(0,len(pp.gid)):
mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
xpos = "%d" % (pp.pw // 3)
ypos = "%d" % (pp.ph // 3)
mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
if (raw) :
mlst.append('</svg>')
else :
mlst.append('</svg></a>\n')
if nextid == None:
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
else :
mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
mlst.append('</div>\n')
mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
mlst.append('</body>\n')
mlst.append('</html>\n')
return "".join(mlst)

View file

@@ -24,17 +24,17 @@
 <key>CFBundleExecutable</key>
 <string>droplet</string>
 <key>CFBundleGetInfoString</key>
-<string>DeDRM 3.1, Written 2010-2011 by Apprentice Alf and others.</string>
+<string>DeDRM 5.0, Written 2010-2012 by Apprentice Alf and others.</string>
 <key>CFBundleIconFile</key>
 <string>droplet</string>
 <key>CFBundleInfoDictionaryVersion</key>
 <string>6.0</string>
 <key>CFBundleName</key>
-<string>DeDRM 3.1</string>
+<string>DeDRM 5.0</string>
 <key>CFBundlePackageType</key>
 <string>APPL</string>
 <key>CFBundleShortVersionString</key>
-<string>3.1</string>
+<string>5.0</string>
 <key>CFBundleSignature</key>
 <string>dplt</string>
 <key>LSMinimumSystemVersion</key>
@@ -52,7 +52,7 @@
 <key>positionOfDivider</key>
 <real>460</real>
 <key>savedFrame</key>
-<string>39 106 1316 746 0 0 1440 878 </string>
+<string>-2 132 1316 746 0 0 1440 878 </string>
 <key>selectedTabView</key>
 <string>event log</string>
 </dict>

View file

@@ -0,0 +1,568 @@
#! /usr/bin/env python
"""
Routines for doing AES CBC in one file
Modified by some_updates to extract
and combine only those parts needed for AES CBC
into one simple to add python file
Original Version
Copyright (c) 2002 by Paul A. Lambert
Under:
CryptoPy Artistic License Version 1.0
See the wonderful pure python package cryptopy-1.2.5
and read its LICENSE.txt for complete license details.
"""
class CryptoError(Exception):
""" Base class for crypto exceptions """
def __init__(self,errorMessage='Error!'):
self.message = errorMessage
def __str__(self):
return self.message
class InitCryptoError(CryptoError):
""" Crypto errors during algorithm initialization """
class BadKeySizeError(InitCryptoError):
""" Bad key size error """
class EncryptError(CryptoError):
""" Error in encryption processing """
class DecryptError(CryptoError):
""" Error in decryption processing """
class DecryptNotBlockAlignedError(DecryptError):
""" Error in decryption processing """
def xorS(a,b):
""" XOR two strings """
assert len(a)==len(b)
x = []
for i in range(len(a)):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x)
def xor(a,b):
""" XOR two strings """
x = []
for i in range(min(len(a),len(b))):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x)
"""
Base 'BlockCipher' and Pad classes for cipher instances.
BlockCipher supports automatic padding and type conversion. The BlockCipher
class was written to make the actual algorithm code more readable and
not for performance.
"""
class BlockCipher:
""" Block ciphers """
def __init__(self):
self.reset()
def reset(self):
self.resetEncrypt()
self.resetDecrypt()
def resetEncrypt(self):
self.encryptBlockCount = 0
self.bytesToEncrypt = ''
def resetDecrypt(self):
self.decryptBlockCount = 0
self.bytesToDecrypt = ''
def encrypt(self, plainText, more = None):
""" Encrypt a string and return a binary string """
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
cipherText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
self.encryptBlockCount += 1
cipherText += ctBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
else:
self.bytesToEncrypt = ''
if more == None: # no more data expected from caller
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
if len(finalBytes) > 0:
ctBlock = self.encryptBlock(finalBytes)
self.encryptBlockCount += 1
cipherText += ctBlock
self.resetEncrypt()
return cipherText
def decrypt(self, cipherText, more = None):
""" Decrypt a string and return a string """
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
if more == None: # no more calls to decrypt, should have all the data
if numExtraBytes != 0:
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
# hold back some bytes in case last decrypt has zero len
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
numBlocks -= 1
numExtraBytes = self.blockSize
plainText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
self.decryptBlockCount += 1
plainText += ptBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
self.bytesToDecrypt = self.bytesToDecrypt[-numExtraBytes:]
else:
self.bytesToDecrypt = ''
if more == None: # last decrypt remove padding
plainText = self.padding.removePad(plainText, self.blockSize)
self.resetDecrypt()
return plainText
class Pad:
def __init__(self):
pass # eventually could put in calculation of min and max size extension
class padWithPadLen(Pad):
""" Pad a binary string with the length of the padding """
def addPad(self, extraBytes, blockSize):
""" Add padding to a binary string to make it an even multiple
of the block size """
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
padLength = blockSize - numExtraBytes
return extraBytes + padLength*chr(padLength)
def removePad(self, paddedBinaryString, blockSize):
""" Remove padding from a binary string """
if not(0<len(paddedBinaryString)):
raise DecryptNotBlockAlignedError, 'Expected More Data'
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
class noPadding(Pad):
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
def addPad(self, extraBytes, blockSize):
""" Add no padding """
return extraBytes
def removePad(self, paddedBinaryString, blockSize):
""" Remove no padding """
return paddedBinaryString
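# Illustrative sketch (not part of the original cryptopy code): padWithPadLen
# appends N bytes each of value N so the data becomes block aligned, and
# removePad strips them again by reading the last byte, e.g.
#   p = padWithPadLen()
#   padded = p.addPad('abc', 8)             # 'abc' + chr(5)*5, now 8 bytes long
#   assert p.removePad(padded, 8) == 'abc'
# noPadding() leaves the data untouched, so the caller must supply block-aligned input.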
"""
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
"""
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,28 or 32 bytes'
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
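# Example, read straight from the table above: AES-128 uses Nb = 4 and Nk = 4,
# so NrTable[4][4] gives 10 rounds; AES-192 (Nk = 6) gives 12 and AES-256 (Nk = 8) gives 14.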
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
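# Small illustrative sketch of the effect of ShiftRows (only the Nb and state
# attributes are needed, which is all the functions above touch):
#   class _Dummy(object): pass
#   s = _Dummy(); s.Nb = 4
#   s.state = [[col] * 4 for col in range(4)]   # state[col][row] == col
#   ShiftRows(s)
#   # each row r is now rotated left by shiftOffset[4][r]:
#   #   s.state[col][r] == (col + shiftOffset[4][r]) % 4
#   InvShiftRows(s)                             # restores the original state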
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
This is the opposite operation of Mixcolumn """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
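# Example of GF(2^8) multiplication through the log/antilog tables above; the
# first assertion is the worked example from FIPS-197 ({57} x {83} = {c1}), and
# multiplying by {02} is a single left shift while the high bit is clear:
#   assert mul(0x57, 0x83) == 0xC1
#   assert mul(0x02, 0x57) == 0xAE
#   assert mul(0x00, 0x57) == 0    # zero short-circuits before any table lookup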
"""
AES Encryption Algorithm
The AES algorithm is just Rijndael algorithm restricted to the default
blockSize of 128 bits.
"""
class AES(Rijndael):
""" The AES algorithm is the Rijndael block cipher restricted to block
sizes of 128 bits and key sizes of 128, 192 or 256 bits
"""
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
""" Initialize AES, keySize is in bytes """
if not (keySize == 16 or keySize == 24 or keySize == 32) :
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
self.name = 'AES'
"""
CBC mode of encryption for block ciphers.
This algorithm mode wraps any BlockCipher to make a
Cipher Block Chaining mode.
"""
from random import Random # should change to crypto.random!!!
class CBC(BlockCipher):
""" The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
algorithms. The initialization (IV) is automatic if set to None. Padding
is also automatic based on the Pad class used to initialize the algorithm
"""
def __init__(self, blockCipherInstance, padding = padWithPadLen()):
""" CBC algorithms are created by initializing with a BlockCipher instance """
self.baseCipher = blockCipherInstance
self.name = self.baseCipher.name + '_CBC'
self.blockSize = self.baseCipher.blockSize
self.keySize = self.baseCipher.keySize
self.padding = padding
self.baseCipher.padding = noPadding() # baseCipher should NOT pad!!
self.r = Random() # for IV generation, currently uses
# mediocre standard distro version <----------------
import time
newSeed = time.ctime()+str(self.r) # seed with instance location
self.r.seed(newSeed) # to make unique
self.reset()
def setKey(self, key):
self.baseCipher.setKey(key)
# Overload to reset both CBC state and the wrapped baseCipher
def resetEncrypt(self):
BlockCipher.resetEncrypt(self) # reset CBC encrypt state (super class)
self.baseCipher.resetEncrypt() # reset base cipher encrypt state
def resetDecrypt(self):
BlockCipher.resetDecrypt(self) # reset CBC state (super class)
self.baseCipher.resetDecrypt() # reset base cipher decrypt state
def encrypt(self, plainText, iv=None, more=None):
""" CBC encryption - overloads baseCipher to allow optional explicit IV
when iv=None, iv is auto generated!
"""
if self.encryptBlockCount == 0:
self.iv = iv
else:
assert(iv==None), 'IV used only on first call to encrypt'
return BlockCipher.encrypt(self,plainText, more=more)
def decrypt(self, cipherText, iv=None, more=None):
""" CBC decryption - overloads baseCipher to allow optional explicit IV
when iv=None, iv is auto generated!
"""
if self.decryptBlockCount == 0:
self.iv = iv
else:
assert(iv==None), 'IV used only on first call to decrypt'
return BlockCipher.decrypt(self, cipherText, more=more)
def encryptBlock(self, plainTextBlock):
""" CBC block encryption, IV is set with 'encrypt' """
auto_IV = ''
if self.encryptBlockCount == 0:
if self.iv == None:
# generate IV and use
self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
self.prior_encr_CT_block = self.iv
auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic
else: # application provided IV
assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
self.prior_encr_CT_block = self.iv
""" encrypt the prior CT XORed with the PT """
ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
self.prior_encr_CT_block = ct
return auto_IV+ct
def decryptBlock(self, encryptedBlock):
""" Decrypt a single block """
if self.decryptBlockCount == 0: # first call, process IV
if self.iv == None: # auto decrypt IV?
self.prior_CT_block = encryptedBlock
return ''
else:
assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
self.prior_CT_block = self.iv
dct = self.baseCipher.decryptBlock(encryptedBlock)
""" XOR the prior decrypted CT with the prior CT """
dct_XOR_priorCT = xor( self.prior_CT_block, dct )
self.prior_CT_block = encryptedBlock
return dct_XOR_priorCT
"""
AES_CBC Encryption Algorithm
"""
class AES_CBC(CBC):
""" AES encryption in CBC feedback mode """
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
self.name = 'AES_CBC'
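# Minimal usage sketch with a made-up key and IV (illustrative only): the
# default padWithPadLen padding means a 16-byte plaintext encrypts to 32 bytes
# of ciphertext and decrypts back to the original string.
#   key = '0123456789abcdef'            # 16 bytes -> AES-128
#   iv = chr(0) * 16                    # explicit IV; pass iv=None to have one generated
#   ct = AES_CBC(key).encrypt('A secret message', iv=iv)
#   assert AES_CBC(key).decrypt(ct, iv=iv) == 'A secret message'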

@ -0,0 +1,290 @@
#! /usr/bin/env python
import sys, os
import hmac
from struct import pack
import hashlib
# interface to needed routines libalfcrypto
def _load_libalfcrypto():
import ctypes
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
pointer_size = ctypes.sizeof(ctypes.c_voidp)
name_of_lib = None
if sys.platform.startswith('darwin'):
name_of_lib = 'libalfcrypto.dylib'
elif sys.platform.startswith('win'):
if pointer_size == 4:
name_of_lib = 'alfcrypto.dll'
else:
name_of_lib = 'alfcrypto64.dll'
else:
if pointer_size == 4:
name_of_lib = 'libalfcrypto32.so'
else:
name_of_lib = 'libalfcrypto64.so'
libalfcrypto = sys.path[0] + os.sep + name_of_lib
if not os.path.isfile(libalfcrypto):
raise Exception('libalfcrypto not found')
libalfcrypto = CDLL(libalfcrypto)
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
def F(restype, name, argtypes):
func = getattr(libalfcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
# aes cbc decryption
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
#
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
#
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key,
# unsigned char *ivec, const int enc);
AES_MAXNR = 14
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
# Pukall 1 Cipher
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
# unsigned char *dest, unsigned int len, int decryption);
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
# Topaz Encryption
# typedef struct _TpzCtx {
# unsigned int v[2];
# } TpzCtx;
#
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
class TPZ_CTX(Structure):
_fields_ = [('v', c_long * 2)]
TPZ_CTX_p = POINTER(TPZ_CTX)
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
class AES_CBC(object):
def __init__(self):
self._blocksize = 0
self._keyctx = None
self._iv = 0
def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey)
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
raise Exception('AES CBC improper key used')
return
keyctx = self._keyctx = AES_KEY()
self._iv = iv
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0:
raise Exception('Failed to initialize AES CBC key')
def decrypt(self, data):
out = create_string_buffer(len(data))
mutable_iv = create_string_buffer(self._iv, len(self._iv))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
if rv == 0:
raise Exception('AES CBC decryption failed')
return out.raw
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
self.key = key
out = create_string_buffer(len(src))
de = 0
if decryption:
de = 1
rv = PC1(key, len(key), src, out, len(src), de)
return out.raw
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
tpz_ctx = self._ctx = TPZ_CTX()
topazCryptoInit(tpz_ctx, key, len(key))
return tpz_ctx
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
out = create_string_buffer(len(data))
topazCryptoDecrypt(ctx, data, out, len(data))
return out.raw
print "Using Library AlfCrypto DLL/DYLIB/SO"
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_python_alfcrypto():
import aescbc
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
sum1 = 0;
sum2 = 0;
keyXorVal = 0;
if len(key)!=16:
print "Bad key length!"
return None
wkey = []
for i in xrange(8):
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
dst = ""
for i in xrange(len(src)):
temp1 = 0;
byteXorVal = 0;
for j in xrange(8):
temp1 ^= wkey[j]
sum2 = (sum2+j)*20021 + sum1
sum1 = (temp1*346)&0xFFFF
sum2 = (sum2+sum1)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF
byteXorVal ^= temp1 ^ sum2
curByte = ord(src[i])
if not decryption:
keyXorVal = curByte * 257;
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
if decryption:
keyXorVal = curByte * 257;
for j in xrange(8):
wkey[j] ^= keyXorVal;
dst+=chr(curByte)
return dst
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
ctx1 = 0x0CAFFE19E
for keyChar in key:
keyByte = ord(keyChar)
ctx2 = ctx1
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
self._ctx = [ctx1, ctx2]
return [ctx1,ctx2]
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
ctx1 = ctx[0]
ctx2 = ctx[1]
plainText = ""
for dataChar in data:
dataByte = ord(dataChar)
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx2 = ctx1
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
plainText += chr(m)
return plainText
class AES_CBC(object):
def __init__(self):
self._key = None
self._iv = None
self.aes = None
def set_decrypt_key(self, userkey, iv):
self._key = userkey
self._iv = iv
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
def decrypt(self, data):
iv = self._iv
cleartext = self.aes.decrypt(iv + data)
return cleartext
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_crypto():
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
for loader in cryptolist:
try:
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
break
except (ImportError, Exception):
pass
return AES_CBC, Pukall_Cipher, Topaz_Cipher
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
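# Illustrative sketch of the shared interface returned by _load_crypto(); the
# compiled library and the pure-python fallback expose the same three classes
# (the key and data variables below are made up):
#   aes = AES_CBC()
#   aes.set_decrypt_key(aes_key, aes_iv)          # 16/24/32 byte key, then the IV
#   plaintext = aes.decrypt(ciphertext)           # ciphertext must be block aligned
#   tpz = Topaz_Cipher()
#   tpz.ctx_init(record_key)
#   cleartext = tpz.decrypt(encrypted_record)
#   pc1 = Pukall_Cipher()
#   decrypted = pc1.PC1(pid_key, data, decryption=True)   # the pure-python PC1 needs a 16-byte key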
class KeyIVGen(object):
# this only exists in openssl so we will use pure python implementation instead
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
def pbkdf2(self, passwd, salt, iter, keylen):
def xorstr( a, b ):
if len(a) != len(b):
raise Exception("xorstr(): lengths differ")
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
def prf( h, data ):
hm = h.copy()
hm.update( data )
return hm.digest()
def pbkdf2_F( h, salt, itercount, blocknum ):
U = prf( h, salt + pack('>i',blocknum ) )
T = U
for i in range(2, itercount+1):
U = prf( h, U )
T = xorstr( T, U )
return T
sha = hashlib.sha1
digest_size = sha().digest_size
# l - number of output blocks to produce
l = keylen / digest_size
if keylen % digest_size != 0:
l += 1
h = hmac.new( passwd, None, sha )
T = ""
for i in range(1, l+1):
T += pbkdf2_F( h, salt, iter, i )
return T[0: keylen]
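# Usage sketch with a made-up passphrase and salt: this is standard PBKDF2 with
# HMAC-SHA1, so the output should match other PBKDF2-HMAC-SHA1 implementations
# given the same inputs.
#   derived = KeyIVGen().pbkdf2('my passphrase', 'some salt', 1000, 32)
#   key, iv = derived[0:16], derived[16:32]   # e.g. split into an AES key and IV
#   assert len(derived) == 32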

@ -23,7 +23,7 @@ from struct import unpack
class TpzDRMError(Exception):
pass
# Get a 7 bit encoded number from string. The most
# significant byte comes first and has the high bit (8th) set
def readEncodedNumber(file):
@ -32,57 +32,57 @@ def readEncodedNumber(file):
if (len(c) == 0):
return None
data = ord(c)
if data == 0xFF:
flag = True
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
if data >= 0x80:
datax = (data & 0x7F)
while data >= 0x80 :
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
datax = (datax <<7) + (data & 0x7F)
data = datax
if flag:
data = -data
return data
# returns a binary string that encodes a number into 7 bits
# most significant byte first which has the high bit set
def encodeNumber(number):
result = ""
negative = False
flag = 0
if number < 0 :
number = -number + 1
negative = True
while True:
byte = number & 0x7F
number = number >> 7
byte += flag
result += chr(byte)
flag = 0x80
if number == 0 :
if (byte == 0xFF and negative == False) :
result += chr(0x80)
break
if negative:
result += chr(0xFF)
return result[::-1]
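# Worked example (illustrative, assuming the full definitions above): 300 is
# 0b100101100, which splits into the 7-bit groups 0000010 and 0101100; every
# byte except the last has its high bit set, most significant group first:
#   encodeNumber(300) == chr(0x82) + chr(0x2C)
#   readEncodedNumber(StringIO.StringIO(chr(0x82) + chr(0x2C))) == 300   # needs import StringIO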
# create / read a length prefixed string from the file
@ -97,9 +97,9 @@ def readString(file):
sv = file.read(stringLength)
if (len(sv) != stringLength):
return ""
return unpack(str(stringLength)+"s",sv)[0]
# convert a binary string generated by encodeNumber (7 bit encoded number)
# to the value you would find inside the page*.dat files to be processed
@ -265,6 +265,8 @@ class PageParser(object):
'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
'word_semantic' : (1, 'snippets', 1, 1),
@ -284,6 +286,8 @@ class PageParser(object):
'_span.gridSize' : (1, 'scalar_number', 0, 0),
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
'span' : (1, 'snippets', 1, 0),
'span.firstWord' : (1, 'scalar_number', 0, 0),
@ -291,6 +295,8 @@ class PageParser(object):
'span.gridSize' : (1, 'scalar_number', 0, 0),
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
'extratokens' : (1, 'snippets', 1, 0),
'extratokens.type' : (1, 'scalar_text', 0, 0),
@ -376,14 +382,14 @@ class PageParser(object):
for j in xrange(i+1, cnt) :
result += '.' + self.tagpath[j]
return result
# list of absolute command byte values that indicate
# various types of loop mechanisms typically used to generate vectors
cmd_list = (0x76, 0x76)
# peek at and return 1 byte that is ahead by i bytes
def peek(self, aheadi):
c = self.fo.read(aheadi)
if (len(c) == 0):
@ -416,7 +422,7 @@ class PageParser(object):
return result
# process the next tag token, recursively handling subtags,
# arguments, and commands
def procToken(self, token):
@ -438,7 +444,7 @@ class PageParser(object):
if known_token :
# handle subtags if present
subtagres = []
if (splcase == 1):
# this type of tag uses an escape marker 0x74 to indicate subtag count
@ -447,7 +453,7 @@ class PageParser(object):
subtags = 1
num_args = 0
if (subtags == 1):
ntags = readEncodedNumber(self.fo)
if self.debug : print 'subtags: ' + token + ' has ' + str(ntags)
for j in xrange(ntags):
@ -478,7 +484,7 @@ class PageParser(object):
return result
# all tokens that need to be processed should be in the hash
# table; if one is not, it may indicate a problem, either a new token
# or an out of sync condition
else:
result = []
@ -530,7 +536,7 @@ class PageParser(object):
# dispatches loop command bytes with various modes
# The 0x76 style loops are used to build vectors
# This was all derived by trial and error and
# new loop types may exist that are not handled here
# since they did not appear in the test cases
@ -549,7 +555,7 @@ class PageParser(object):
return result
# add full tag path to injected snippets
def updateName(self, tag, prefix):
name = tag[0]
@ -577,7 +583,7 @@ class PageParser(object):
argtype = tag[2]
argList = tag[3]
nsubtagList = []
if len(argList) > 0 :
for j in argList:
asnip = self.snippetList[j]
aso, atag = self.injectSnippets(asnip)
@ -633,7 +639,7 @@ class PageParser(object):
return result
# flatten tag
def flattenTag(self, node):
name = node[0]
subtagList = node[1]
@ -712,7 +718,7 @@ class PageParser(object):
first_token = None
v = self.getNext()
if (v == None):
break
if (v == 0x72):
@ -723,7 +729,7 @@ class PageParser(object):
self.doc.append(tag)
else:
if self.debug:
print "Main Loop: Unknown value: %x" % v
if (v == 0):
if (self.peek(1) == 0x5f):
skip = self.fo.read(1)
@ -776,7 +782,7 @@ def usage():
#
# Main
#
def main(argv):
dictFile = ""
@ -797,11 +803,11 @@ def main(argv):
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
if len(opts) == 0 and len(args) == 0 :
usage()
sys.exit(2)
for o, a in opts:
if o =="-d":
debug=True

@ -29,18 +29,17 @@ def usage(progname):
def cli_main(argv=sys.argv):
progname = os.path.basename(argv[0])
if len(argv)<2:
usage(progname)
sys.exit(2)
keypath = argv[1]
with open(keypath, 'rb') as f:
keyder = f.read()
print keyder.encode('base64')
return 0
if __name__ == '__main__':
sys.exit(cli_main())

@ -16,7 +16,7 @@
# Custom version 0.03 - no change to eReader support, only usability changes
# - start of pep-8 indentation (spaces not tab), fix trailing blanks
# - version variable, only one place to change
# - added main routine, now callable as a library/module,
# means tools can add optional support for ereader2html
# - outdir is no longer a mandatory parameter (defaults based on input name if missing)
# - time taken output to stdout
@ -59,8 +59,8 @@
# 0.18 - on Windows try PyCrypto first and OpenSSL next
# 0.19 - Modify the interface to allow use of import
# 0.20 - modify to allow use inside new interface for calibre plugins
# 0.21 - Support eReader (drm) version 11.
# - Don't reject dictionary format.
# - Ignore sidebars for dictionaries (different format?)
__version__='0.21'
@ -178,7 +178,7 @@ def sanitizeFileName(s):
def fixKey(key):
def fixByte(b):
return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
return "".join([chr(fixByte(ord(a))) for a in key])
def deXOR(text, sp, table):
r=''
@ -212,7 +212,7 @@ class EreaderProcessor(object):
for i in xrange(len(data)):
j = (j + shuf) % len(data)
r[j] = data[i]
assert len("".join(r)) == len(data)
return "".join(r)
r = unshuff(input[0:-8], cookie_shuf)
@ -314,7 +314,7 @@ class EreaderProcessor(object):
# offname = deXOR(chaps, j, self.xortable)
# offset = struct.unpack('>L', offname[0:4])[0]
# name = offname[4:].strip('\0')
# cv += '%d|%s\n' % (offset, name)
# return cv
# def getLinkNamePMLOffsetData(self):
@ -326,7 +326,7 @@ class EreaderProcessor(object):
# offname = deXOR(links, j, self.xortable)
# offset = struct.unpack('>L', offname[0:4])[0]
# name = offname[4:].strip('\0')
# lv += '%d|%s\n' % (offset, name)
# return lv
# def getExpandedTextSizesData(self):
@ -354,7 +354,7 @@ class EreaderProcessor(object):
for i in xrange(self.num_text_pages):
logging.debug('get page %d', i)
r += zlib.decompress(des.decrypt(self.section_reader(1 + i)))
# now handle footnotes pages
if self.num_footnote_pages > 0:
r += '\n'
@ -399,12 +399,12 @@ class EreaderProcessor(object):
return r
def cleanPML(pml):
# Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
pml2 = pml
for k in xrange(128,256):
badChar = chr(k)
pml2 = pml2.replace(badChar, '\\a%03d' % k)
return pml2
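# Example (illustrative): every byte in the 0x80-0xFF range is rewritten as a
# PML \aNNN escape, so 'caf' + chr(0xE9) becomes 'caf' followed by the five
# characters \a233 (0xE9 == 233).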
def convertEreaderToPml(infile, name, cc, outdir):
if not os.path.exists(outdir):
@ -435,7 +435,7 @@ def convertEreaderToPml(infile, name, cc, outdir):
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
def decryptBook(infile, outdir, name, cc, make_pmlz):
if make_pmlz :
# ignore specified outdir, use tempdir instead
@ -468,7 +468,7 @@ def decryptBook(infile, outdir, name, cc, make_pmlz):
shutil.rmtree(outdir, True)
print 'output is %s' % zipname
else :
print 'output in %s' % outdir
print "done"
except ValueError, e:
print "Error: %s" % e
@ -505,7 +505,7 @@ def main(argv=None):
return 0
elif o == "--make-pmlz":
make_pmlz = True
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
if len(args)!=3 and len(args)!=4:
@ -524,4 +524,3 @@ def main(argv=None):
if __name__ == "__main__":
sys.stdout=Unbuffered(sys.stdout)
sys.exit(main())

@ -68,7 +68,7 @@ class DocParser(object):
ys = []
gdefs = []
# get path definitions, positions, dimensions for each glyph
# that makes up the image, and find min x and min y to reposition origin
minx = -1
miny = -1
@ -79,7 +79,7 @@ class DocParser(object):
xs.append(gxList[j])
if minx == -1: minx = gxList[j]
else : minx = min(minx, gxList[j])
ys.append(gyList[j])
if miny == -1: miny = gyList[j]
else : miny = min(miny, gyList[j])
@ -124,12 +124,12 @@ class DocParser(object):
item = self.docList[pos]
if item.find('=') >= 0:
(name, argres) = item.split('=',1)
else :
name = item
argres = ''
return name, argres
# find tag in doc if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) :
result = None
@ -142,10 +142,10 @@ class DocParser(object):
item = self.docList[j]
if item.find('=') >= 0:
(name, argres) = item.split('=',1)
else :
name = item
argres = ''
if name.endswith(tagpath) :
result = argres
foundat = j
break
@ -182,13 +182,13 @@ class DocParser(object):
# class names are an issue given topaz may start them with numerals (not allowed),
# use a mix of cases (which causes problems for some browsers), and actually
# attach numbers after "_reclustered*" to the end to deal with classes that inherit
# from a base class (but then not actually provide all of these _reclustered
# classes in the stylesheet!)
# so we clean this up by lowercasing, prepending 'cl-', and getting any baseclass
# that exists in the stylesheet first, and then adding this specific class
# after
# also some class names have spaces in them so need to convert to dashes
if nclass != None :
nclass = nclass.replace(' ','-')
@ -211,7 +211,7 @@ class DocParser(object):
return nclass
# develop a sorted description of the starting positions of
# groups and regions on the page, as well as the page type
def PageDescription(self):
@ -267,7 +267,7 @@ class DocParser(object):
result = []
# paragraph
(pos, pclass) = self.findinDoc('paragraph.class',start,end)
pclass = self.getClass(pclass)
@ -281,17 +281,22 @@ class DocParser(object):
if (sfirst != None) and (slast != None) :
first = int(sfirst)
last = int(slast)
makeImage = (regtype == 'vertical') or (regtype == 'table')
makeImage = makeImage or (extraglyphs != None)
if self.fixedimage:
makeImage = makeImage or (regtype == 'fixed')
if (pclass != None):
makeImage = makeImage or (pclass.find('.inverted') >= 0)
if self.fixedimage :
makeImage = makeImage or (pclass.find('cl-f-') >= 0)
# before creating an image make sure glyph info exists
gidList = self.getData('info.glyph.glyphID',0,-1)
makeImage = makeImage & (len(gidList) > 0)
if not makeImage :
# standard all word paragraph
for wordnum in xrange(first, last):
@ -332,10 +337,10 @@ class DocParser(object):
result.append(('svg', num))
return pclass, result
# this type of paragraph may be made up of multiple spans, inline
# word monograms (images), and words with semantic meaning,
# plus glyphs used to form starting letter of first word
# need to parse this type line by line
line = start + 1
word_class = ''
@ -344,7 +349,7 @@ class DocParser(object):
if end == -1 :
end = self.docSize
# seems some xml has last* coming before first* so we have to
# handle any order
sp_first = -1
sp_last = -1
@ -382,10 +387,10 @@ class DocParser(object):
ws_last = int(argres)
elif name.endswith('word.class'):
(cname, space) = argres.split('-',1)
if space == '' : space = '0'
if (cname == 'spaceafter') and (int(space) > 0) :
word_class = 'sa'
elif name.endswith('word.img.src'):
result.append(('img' + word_class, int(argres)))
@ -416,11 +421,11 @@ class DocParser(object):
result.append(('ocr', wordnum))
ws_first = -1
ws_last = -1
line += 1
return pclass, result
def buildParagraph(self, pclass, pdesc, type, regtype) :
parares = ''
@ -433,7 +438,7 @@ class DocParser(object):
br_lb = (regtype == 'fixed') or (regtype == 'chapterheading') or (regtype == 'vertical')
handle_links = len(self.link_id) > 0
if (type == 'full') or (type == 'begin') :
parares += '<p' + classres + '>'
@ -462,7 +467,7 @@ class DocParser(object):
if linktype == 'external' :
linkhref = self.link_href[link-1]
linkhtml = '<a href="%s">' % linkhref
else :
if len(self.link_page) >= link :
ptarget = self.link_page[link-1] - 1
linkhtml = '<a href="#page%04d">' % ptarget
@ -509,7 +514,7 @@ class DocParser(object):
elif wtype == 'svg' :
sep = ''
parares += '<img src="img/' + self.id + '_%04d.svg" alt="" />' % num
parares += sep
if len(sep) > 0 : parares = parares[0:-1]
@ -551,7 +556,7 @@ class DocParser(object):
title = ''
alt_title = ''
linkpage = ''
else :
if len(self.link_page) >= link :
ptarget = self.link_page[link-1] - 1
linkpage = '%04d' % ptarget
@ -584,7 +589,7 @@ class DocParser(object):
# walk the document tree collecting the information needed
# to build an html page using the ocrText
@ -602,8 +607,8 @@ class DocParser(object):
# determine if first paragraph is continued from previous page
(pos, self.parastems_stemid) = self.findinDoc('info.paraStems.stemID',0,-1)
first_para_continued = (self.parastems_stemid != None)
# determine if last paragraph is continued onto the next page
(pos, self.paracont_stemid) = self.findinDoc('info.paraCont.stemID',0,-1)
last_para_continued = (self.paracont_stemid != None)
@ -631,24 +636,24 @@ class DocParser(object):
# get a description of the starting points of the regions
# and groups on the page
(pagetype, pageDesc) = self.PageDescription()
regcnt = len(pageDesc) - 1
anchorSet = False
breakSet = False
inGroup = False
# process each region on the page and convert what you can to html
for j in xrange(regcnt):
(etype, start) = pageDesc[j]
(ntype, end) = pageDesc[j+1]
# set anchor for link target on this page
if not anchorSet and not first_para_continued:
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="'
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n'
anchorSet = True
@ -660,7 +665,7 @@ class DocParser(object):
gcstr = ' class="' + grptype + '"'
htmlpage += '<div' + gcstr + '>'
inGroup = True
elif (etype == 'grpend'):
if inGroup:
htmlpage += '</div>\n'
@ -676,7 +681,7 @@ class DocParser(object):
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc)
else:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc)
elif regtype == 'chapterheading' :
(pclass, pdesc) = self.getParaDescription(start,end, regtype)
if not breakSet:

@ -15,7 +15,7 @@ class PParser(object):
self.flatdoc = flatxml.split('\n')
self.docSize = len(self.flatdoc)
self.temp = []
self.ph = -1
self.pw = -1
startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
@ -26,7 +26,7 @@ class PParser(object):
for p in startpos:
(name, argres) = self.lineinDoc(p)
self.pw = max(self.pw, int(argres))
if self.ph <= 0:
self.ph = int(meta_array.get('pageHeight', '11000'))
if self.pw <= 0:
@ -215,9 +215,9 @@ def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n'
else:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n'
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph)
if (pp.gid != None):
ml += '<defs>\n'
gdefs = pp.getGlyphs()
for j in xrange(0,len(gdefs)):
@ -227,7 +227,7 @@ def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array
if (img != None):
for j in xrange(0,len(img)):
ml += img[j]
if (pp.gid != None):
for j in xrange(0,len(pp.gid)):
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j])
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
@ -247,4 +247,3 @@ def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array
ml += '</body>\n'
ml += '</html>\n'
return ml

@ -46,27 +46,27 @@ def readEncodedNumber(file):
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
if data == 0xFF:
flag = True
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
if data >= 0x80:
datax = (data & 0x7F)
while data >= 0x80 :
c = file.read(1)
if (len(c) == 0):
return None
data = ord(c)
datax = (datax <<7) + (data & 0x7F)
data = datax
if flag:
data = -data
return data
# Get a length prefixed string from the file
def lengthPrefixString(data):
return encodeNumber(len(data))+data
@ -77,7 +77,7 @@ def readString(file):
sv = file.read(stringLength)
if (len(sv) != stringLength):
return ""
return unpack(str(stringLength)+"s",sv)[0]
def getMetaArray(metaFile):
# parse the meta file
@ -141,10 +141,10 @@ class PageDimParser(object):
item = docList[j]
if item.find('=') >= 0:
(name, argres) = item.split('=')
else :
name = item
argres = ''
if name.endswith(tagpath) :
result = argres
foundat = j
break
@ -336,7 +336,7 @@ def generateBook(bookDir, raw, fixedimage):
print 'Processing Meta Data and creating OPF'
meta_array = getMetaArray(metaFile)
# replace special chars in title and authors like & < >
title = meta_array.get('Title','No Title Provided')
title = title.replace('&','&amp;')
title = title.replace('<','&lt;')
@ -451,7 +451,7 @@ def generateBook(bookDir, raw, fixedimage):
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n'
htmlstr += '<head>\n'
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n'
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n'
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n'
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n'
if 'ASIN' in meta_array:
@ -463,7 +463,7 @@ def generateBook(bookDir, raw, fixedimage):
print 'Processing Pages'
# Books are at 1440 DPI. This is rendering at twice that size for
# readability when rendering to the screen.
scaledpi = 1440.0
filenames = os.listdir(pageDir)
@ -486,13 +486,13 @@ def generateBook(bookDir, raw, fixedimage):
# first get the html
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
tocentries += tocinfo
htmlstr += pagehtml
# finish up the html string and output it
htmlstr += '</body>\n</html>\n'
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
print " "
print 'Extracting Table of Contents from Amazon OCR'
@ -663,7 +663,7 @@ def main(argv):
if len(opts) == 0 and len(args) == 0 :
usage()
return 1
raw = 0
fixedimage = True

View file

@@ -14,7 +14,7 @@ from __future__ import with_statement
# 2 - Added OS X support by using OpenSSL when available
# 3 - screen out improper key lengths to prevent segfaults on Linux
# 3.1 - Allow Windows versions of libcrypto to be found
# 3.2 - add support for encoding to 'utf-8' when building up list of files to cecrypt from encryption.xml
# 3.3 - On Windows try PyCrypto first and OpenSSL next
# 3.4 - Modify interace to allow use with import
@@ -50,7 +50,7 @@ def _load_crypto_libcrypto():
libcrypto = CDLL(libcrypto)
AES_MAXNR = 14
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
@@ -58,13 +58,13 @@ def _load_crypto_libcrypto():
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int])
@@ -73,7 +73,7 @@ def _load_crypto_libcrypto():
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int])
class AES(object):
def __init__(self, userkey):
self._blocksize = len(userkey)
@@ -84,7 +84,7 @@ def _load_crypto_libcrypto():
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
if rv < 0:
raise IGNOBLEError('Failed to initialize AES key')
def decrypt(self, data):
out = create_string_buffer(len(data))
iv = ("\x00" * self._blocksize)
@@ -122,7 +122,7 @@ def _load_crypto():
AES = _load_crypto()
"""
Decrypt Barnes & Noble ADEPT encrypted EPUB books.

View file

@@ -53,7 +53,7 @@ def _load_crypto_libcrypto():
libcrypto = CDLL(libcrypto)
AES_MAXNR = 14
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
@@ -61,28 +61,28 @@ def _load_crypto_libcrypto():
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key',
[c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int])
class AES(object):
def __init__(self, userkey, iv):
self._blocksize = len(userkey)
self._iv = iv
key = self._key = AES_KEY()
rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key)
if rv < 0:
raise IGNOBLEError('Failed to initialize AES Encrypt key')
def encrypt(self, data):
out = create_string_buffer(len(data))
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
if rv == 0:

View file

@@ -67,25 +67,25 @@ def _load_crypto_libcrypto():
RSA_NO_PADDING = 3
AES_MAXNR = 14
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
class RSA(Structure):
pass
RSA_p = POINTER(RSA)
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey',
[RSA_p, c_char_pp, c_long])
RSA_size = F(c_int, 'RSA_size', [RSA_p])
@@ -97,7 +97,7 @@ def _load_crypto_libcrypto():
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int])
class RSA(object):
def __init__(self, der):
buf = create_string_buffer(der)
@@ -105,7 +105,7 @@ def _load_crypto_libcrypto():
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
if rsa is None:
raise ADEPTError('Error parsing ADEPT user key DER')
def decrypt(self, from_):
rsa = self._rsa
to = create_string_buffer(RSA_size(rsa))
@@ -114,7 +114,7 @@ def _load_crypto_libcrypto():
if dlen < 0:
raise ADEPTError('RSA decryption failed')
return to[:dlen]
def __del__(self):
if self._rsa is not None:
RSA_free(self._rsa)
@@ -130,7 +130,7 @@ def _load_crypto_libcrypto():
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
if rv < 0:
raise ADEPTError('Failed to initialize AES key')
def decrypt(self, data):
out = create_string_buffer(len(data))
iv = ("\x00" * self._blocksize)
@@ -148,13 +148,13 @@ def _load_crypto_pycrypto():
# ASN.1 parsing code from tlslite
class ASN1Error(Exception):
pass
class ASN1Parser(object):
class Parser(object):
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise ASN1Error("Error decoding ASN.1")
@@ -164,22 +164,22 @@ def _load_crypto_pycrypto():
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
@@ -189,19 +189,19 @@ def _load_crypto_pycrypto():
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise ASN1Error("Error decoding ASN.1")
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
@@ -209,13 +209,13 @@ def _load_crypto_pycrypto():
return True
else:
raise ASN1Error("Error decoding ASN.1")
def __init__(self, bytes):
p = self.Parser(bytes)
p.get(1)
self.length = self._getASN1Length(p)
self.value = p.getFixBytes(self.length)
def getChild(self, which):
p = self.Parser(self.value)
for x in range(which+1):
@@ -224,7 +224,7 @@ def _load_crypto_pycrypto():
length = self._getASN1Length(p)
p.getFixBytes(length)
return ASN1Parser(p.bytes[markIndex:p.index])
def _getASN1Length(self, p):
firstLength = p.get(1)
if firstLength<=127:
@@ -252,7 +252,7 @@ def _load_crypto_pycrypto():
for byte in bytes:
total = (total << 8) + byte
return total
def decrypt(self, data):
return self._rsa.decrypt(data)

View file

@@ -76,13 +76,13 @@ if sys.platform.startswith('win'):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
[c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
@@ -427,8 +427,8 @@ def extractKeyfile(keypath):
print "Key generation Error: " + str(e)
return 1
except Exception, e:
print "General Error: " + str(e)
return 1
if not success:
return 1
return 0

View file

@@ -4,7 +4,7 @@
from __future__ import with_statement
# To run this program install Python 2.6 from http://www.python.org/download/
# and OpenSSL (already installed on Mac OS X and Linux) OR
# PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# (make sure to install the version for Python 2.6). Save this script file as
# ineptpdf.pyw and double-click on it to run it.
@@ -83,7 +83,7 @@ def _load_crypto_libcrypto():
AES_MAXNR = 14
RSA_NO_PADDING = 3
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
@@ -98,13 +98,13 @@ def _load_crypto_libcrypto():
class RSA(Structure):
pass
RSA_p = POINTER(RSA)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
@@ -125,7 +125,7 @@ def _load_crypto_libcrypto():
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
if rsa is None:
raise ADEPTError('Error parsing ADEPT user key DER')
def decrypt(self, from_):
rsa = self._rsa
to = create_string_buffer(RSA_size(rsa))
@@ -134,7 +134,7 @@ def _load_crypto_libcrypto():
if dlen < 0:
raise ADEPTError('RSA decryption failed')
return to[1:dlen]
def __del__(self):
if self._rsa is not None:
RSA_free(self._rsa)
@@ -196,13 +196,13 @@ def _load_crypto_pycrypto():
# ASN.1 parsing code from tlslite
class ASN1Error(Exception):
pass
class ASN1Parser(object):
class Parser(object):
def __init__(self, bytes):
self.bytes = bytes
self.index = 0
def get(self, length):
if self.index + length > len(self.bytes):
raise ASN1Error("Error decoding ASN.1")
@@ -212,22 +212,22 @@ def _load_crypto_pycrypto():
x |= self.bytes[self.index]
self.index += 1
return x
def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes
return bytes
def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList):
l = [0] * lengthList
for x in range(lengthList):
l[x] = self.get(length)
return l
def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength)
if lengthList % length != 0:
@@ -237,19 +237,19 @@ def _load_crypto_pycrypto():
for x in range(lengthList):
l[x] = self.get(length)
return l
def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index
def setLengthCheck(self, length):
self.lengthCheck = length
self.indexCheck = self.index
def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck:
raise ASN1Error("Error decoding ASN.1")
def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck:
return False
@@ -257,13 +257,13 @@ def _load_crypto_pycrypto():
return True
else:
raise ASN1Error("Error decoding ASN.1")
def __init__(self, bytes):
p = self.Parser(bytes)
p.get(1)
self.length = self._getASN1Length(p)
self.value = p.getFixBytes(self.length)
def getChild(self, which):
p = self.Parser(self.value)
for x in range(which+1):
@@ -272,7 +272,7 @@ def _load_crypto_pycrypto():
length = self._getASN1Length(p)
p.getFixBytes(length)
return ASN1Parser(p.bytes[markIndex:p.index])
def _getASN1Length(self, p):
firstLength = p.get(1)
if firstLength<=127:
@@ -315,7 +315,7 @@ def _load_crypto_pycrypto():
for byte in bytes:
total = (total << 8) + byte
return total
def decrypt(self, data):
return self._rsa.decrypt(data)
@@ -410,7 +410,7 @@ class PSLiteral(PSObject):
def __init__(self, name):
self.name = name
return
def __repr__(self):
name = []
for char in self.name:
@@ -429,22 +429,22 @@ class PSKeyword(PSObject):
def __init__(self, name):
self.name = name
return
def __repr__(self):
return self.name
# PSSymbolTable
class PSSymbolTable(object):
'''
Symbol table that stores PSLiteral or PSKeyword.
'''
def __init__(self, classe):
self.dic = {}
self.classe = classe
return
def intern(self, name):
if name in self.dic:
lit = self.dic[name]
@@ -514,11 +514,11 @@ class PSBaseParser(object):
def flush(self):
return
def close(self):
self.flush()
return
def tell(self):
return self.bufpos+self.charpos
@@ -554,7 +554,7 @@ class PSBaseParser(object):
raise PSEOF('Unexpected EOF')
self.charpos = 0
return
def parse_main(self, s, i):
m = NONSPC.search(s, i)
if not m:
@@ -589,11 +589,11 @@ class PSBaseParser(object):
return (self.parse_wclose, j+1)
self.add_token(KWD(c))
return (self.parse_main, j+1)
def add_token(self, obj):
self.tokens.append((self.tokenstart, obj))
return
def parse_comment(self, s, i):
m = EOL.search(s, i)
if not m:
@@ -604,7 +604,7 @@ class PSBaseParser(object):
# We ignore comments.
#self.tokens.append(self.token)
return (self.parse_main, j)
def parse_literal(self, s, i):
m = END_LITERAL.search(s, i)
if not m:
@@ -618,7 +618,7 @@ class PSBaseParser(object):
return (self.parse_literal_hex, j+1)
self.add_token(LIT(self.token))
return (self.parse_main, j)
def parse_literal_hex(self, s, i):
c = s[i]
if HEX.match(c) and len(self.hex) < 2:
@@ -653,7 +653,7 @@ class PSBaseParser(object):
self.token += s[i:j]
self.add_token(float(self.token))
return (self.parse_main, j)
def parse_keyword(self, s, i):
m = END_KEYWORD.search(s, i)
if not m:
@@ -801,7 +801,7 @@ class PSStackParser(PSBaseParser):
PSBaseParser.__init__(self, fp)
self.reset()
return
def reset(self):
self.context = []
self.curtype = None
@@ -842,10 +842,10 @@ class PSStackParser(PSBaseParser):
def do_keyword(self, pos, token):
return
def nextobject(self, direct=False):
'''
Yields a list of objects: keywords, literals, strings,
numbers, arrays and dictionaries. Arrays and dictionaries
are represented as Python sequence and dictionaries.
'''
@@ -914,7 +914,7 @@ class PDFNotImplementedError(PSException): pass
## PDFObjRef
##
class PDFObjRef(PDFObject):
def __init__(self, doc, objid, genno):
if objid == 0:
if STRICT:
@@ -1029,25 +1029,25 @@ def stream_value(x):
# ascii85decode(data)
def ascii85decode(data):
n = b = 0
out = ''
for c in data:
if '!' <= c and c <= 'u':
n += 1
b = b*85+(ord(c)-33)
if n == 5:
out += struct.pack('>L',b)
n = b = 0
elif c == 'z':
assert n == 0
out += '\0\0\0\0'
elif c == '~':
if n:
for _ in range(5-n):
b = b*85+84
out += struct.pack('>L',b)[:n-1]
break
return out
## PDFStream type
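ascii85decode packs each run of five characters in the range '!'..'u' into one 32-bit big-endian word (base 85, digit = ord(c) - 33), expands 'z' to four zero bytes, and flushes any partial group when it hits '~'. A quick check against the classic test vector, shown purely as an editor's illustration:

ascii85decode('9jqo^')   # -> 'Man '  (24*85**4 + 73*85**3 + 80*85**2 + 78*85 + 61 == 0x4D616E20)
ascii85decode('z~')      # -> '\x00\x00\x00\x00'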
@@ -1064,7 +1064,7 @@ class PDFStream(PDFObject):
else:
if eol in ('\r', '\n', '\r\n'):
rawdata = rawdata[:length]
self.dic = dic
self.rawdata = rawdata
self.decipher = decipher
@@ -1078,7 +1078,7 @@ class PDFStream(PDFObject):
self.objid = objid
self.genno = genno
return
def __repr__(self):
if self.rawdata:
return '<PDFStream(%r): raw=%d, %r>' % \
@@ -1162,7 +1162,7 @@ class PDFStream(PDFObject):
data = self.decipher(self.objid, self.genno, data)
return data
## PDF Exceptions
##
class PDFSyntaxError(PDFException): pass
@@ -1227,7 +1227,7 @@ class PDFXRef(object):
self.offsets[objid] = (int(genno), int(pos))
self.load_trailer(parser)
return
KEYWORD_TRAILER = PSKeywordTable.intern('trailer')
def load_trailer(self, parser):
try:
@@ -1268,7 +1268,7 @@ class PDFXRefStream(object):
for first, size in self.index:
for objid in xrange(first, first + size):
yield objid
def load(self, parser, debug=0):
(_,objid) = parser.nexttoken() # ignored
(_,genno) = parser.nexttoken() # ignored
@@ -1286,7 +1286,7 @@ class PDFXRefStream(object):
self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.dic
return
def getpos(self, objid):
offset = 0
for first, size in self.index:
@@ -1337,7 +1337,7 @@ class PDFDocument(object):
self.parser = parser
# The document is set to be temporarily ready during collecting
# all the basic information about the document, e.g.
# the header, the encryption information, and the access rights
# for the document.
self.ready = True
# Retrieve the information of each header that was appended
@@ -1413,7 +1413,7 @@ class PDFDocument(object):
length = int_value(param.get('Length', 0)) / 8
edcdata = str_value(param.get('EDCData')).decode('base64')
pdrllic = str_value(param.get('PDRLLic')).decode('base64')
pdrlpol = str_value(param.get('PDRLPol')).decode('base64')
edclist = []
for pair in edcdata.split('\n'):
edclist.append(pair)
@@ -1433,9 +1433,9 @@ class PDFDocument(object):
raise ADEPTError('Could not decrypt PDRLPol, aborting ...')
else:
cutter = -1 * ord(pdrlpol[-1])
pdrlpol = pdrlpol[:cutter]
return plaintext[:16]
PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \
'\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
# experimental aes pw support
@@ -1455,14 +1455,14 @@ class PDFDocument(object):
EncMetadata = str_value(param['EncryptMetadata'])
except:
EncMetadata = 'True'
self.is_printable = bool(P & 4)
self.is_modifiable = bool(P & 8)
self.is_extractable = bool(P & 16)
self.is_annotationable = bool(P & 32)
self.is_formsenabled = bool(P & 256)
self.is_textextractable = bool(P & 512)
self.is_assemblable = bool(P & 1024)
self.is_formprintable = bool(P & 2048)
# Algorithm 3.2
password = (password+self.PASSWORD_PADDING)[:32] # 1
hash = hashlib.md5(password) # 2
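The flags above decode the PDF /P permissions value bit by bit, one capability per attribute. As a quick illustration with an arbitrarily chosen value that only allows printing, copying and text extraction (not part of the commit):

P = 4 | 16 | 512     # printing, extraction and text extraction only
bool(P & 4)          # True  -> is_printable
bool(P & 8)          # False -> is_modifiable
bool(P & 512)        # True  -> is_textextractable
bool(P & 2048)       # False -> is_formprintable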
@@ -1537,10 +1537,10 @@ class PDFDocument(object):
if length > 0:
if len(bookkey) == length:
if ebx_V == 3:
V = 3
else:
V = 2
elif len(bookkey) == length + 1:
V = ord(bookkey[0])
bookkey = bookkey[1:]
else:
@@ -1554,7 +1554,7 @@ class PDFDocument(object):
print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
print "bookkey[0] is %d" % ord(bookkey[0])
if ebx_V == 3:
V = 3
else:
V = 2
self.decrypt_key = bookkey
@@ -1571,7 +1571,7 @@ class PDFDocument(object):
hash = hashlib.md5(key)
key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)]
return key
def genkey_v3(self, objid, genno):
objid = struct.pack('<L', objid ^ 0x3569ac)
genno = struct.pack('<L', genno ^ 0xca96)
@@ -1611,14 +1611,14 @@ class PDFDocument(object):
#print cutter
plaintext = plaintext[:cutter]
return plaintext
def decrypt_rc4(self, objid, genno, data):
key = self.genkey(objid, genno)
return ARC4.new(key).decrypt(data)
KEYWORD_OBJ = PSKeywordTable.intern('obj')
def getobj(self, objid):
if not self.ready:
raise PDFException('PDFDocument not initialized')
@@ -1688,7 +1688,7 @@ class PDFDocument(object):
## if x:
## objid1 = x[-2]
## genno = x[-1]
##
if kwd is not self.KEYWORD_OBJ:
raise PDFSyntaxError(
'Invalid object spec: offset=%r' % index)
@@ -1700,7 +1700,7 @@ class PDFDocument(object):
self.objs[objid] = obj
return obj
class PDFObjStmRef(object):
maxindex = 0
def __init__(self, objid, stmid, index):
@@ -1710,7 +1710,7 @@ class PDFObjStmRef(object):
if index > PDFObjStmRef.maxindex:
PDFObjStmRef.maxindex = index
## PDFParser
##
class PDFParser(PSStackParser):
@@ -1736,7 +1736,7 @@ class PDFParser(PSStackParser):
if token is self.KEYWORD_ENDOBJ:
self.add_results(*self.pop(4))
return
if token is self.KEYWORD_R:
# reference to indirect object
try:
@@ -1747,7 +1747,7 @@ class PDFParser(PSStackParser):
except PSSyntaxError:
pass
return
if token is self.KEYWORD_STREAM:
# stream object
((_,dic),) = self.pop(1)
@@ -1787,7 +1787,7 @@ class PDFParser(PSStackParser):
obj = PDFStream(dic, data, self.doc.decipher)
self.push((pos, obj))
return
# others
self.push((pos, token))
return
@@ -1823,7 +1823,7 @@ class PDFParser(PSStackParser):
xref.load(self)
else:
if token is not self.KEYWORD_XREF:
raise PDFNoValidXRef('xref not found: pos=%d, token=%r' %
(pos, token))
self.nextline()
xref = PDFXRef()
@@ -1838,7 +1838,7 @@ class PDFParser(PSStackParser):
pos = int_value(trailer['Prev'])
self.read_xref_from(pos, xrefs)
return
# read xref tables and trailers
def read_xref(self):
xrefs = []
@@ -1957,7 +1957,7 @@ class PDFSerializer(object):
self.write("%010d 00000 n \n" % xrefs[objid][0])
else:
self.write("%010d %05d f \n" % (0, 65535))
self.write('trailer\n')
self.serialize_object(trailer)
self.write('\nstartxref\n%d\n%%%%EOF' % startxref)
@@ -1977,7 +1977,7 @@ class PDFSerializer(object):
while maxindex >= power:
fl3 += 1
power *= 256
index = []
first = None
prev = None
@@ -2004,14 +2004,14 @@ class PDFSerializer(object):
# we force all generation numbers to be 0
# f3 = objref[1]
f3 = 0
data.append(struct.pack('>B', f1))
data.append(struct.pack('>L', f2)[-fl2:])
data.append(struct.pack('>L', f3)[-fl3:])
index.extend((first, prev - first + 1))
data = zlib.compress(''.join(data))
dic = {'Type': LITERAL_XREF, 'Size': prev + 1, 'Index': index,
'W': [1, fl2, fl3], 'Length': len(data),
'Filter': LITERALS_FLATE_DECODE[0],
'Root': trailer['Root'],}
if 'Info' in trailer:
@@ -2033,9 +2033,9 @@ class PDFSerializer(object):
string = string.replace(')', r'\)')
# get rid of ciando id
regularexp = re.compile(r'http://www.ciando.com/index.cfm/intRefererID/\d{5}')
if regularexp.match(string): return ('http://www.ciando.com')
return string
def serialize_object(self, obj):
if isinstance(obj, dict):
# Correct malformed Mac OS resource forks for Stanza
@@ -2059,21 +2059,21 @@ class PDFSerializer(object):
elif isinstance(obj, bool):
if self.last.isalnum():
self.write(' ')
self.write(str(obj).lower())
elif isinstance(obj, (int, long, float)):
if self.last.isalnum():
self.write(' ')
self.write(str(obj))
elif isinstance(obj, PDFObjRef):
if self.last.isalnum():
self.write(' ')
self.write('%d %d R' % (obj.objid, 0))
elif isinstance(obj, PDFStream):
### If we don't generate cross ref streams the object streams
### are no longer useful, as we have extracted all objects from
### them. Therefore leave them out from the output.
if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm:
self.write('(deleted)')
else:
data = obj.get_decdata()
self.serialize_object(obj.dic)
@@ -2085,7 +2085,7 @@ class PDFSerializer(object):
if data[0].isalnum() and self.last.isalnum():
self.write(' ')
self.write(data)
def serialize_indirect(self, objid, obj):
self.write('%d 0 obj' % (objid,))
self.serialize_object(obj)
@@ -2097,7 +2097,7 @@ class PDFSerializer(object):
class DecryptionDialog(Tkinter.Frame):
def __init__(self, root):
Tkinter.Frame.__init__(self, root, border=5)
ltext='Select file for decryption\n'
self.status = Tkinter.Label(self, text=ltext)
self.status.pack(fill=Tkconstants.X, expand=1)
body = Tkinter.Frame(self)
@@ -2123,7 +2123,7 @@ class DecryptionDialog(Tkinter.Frame):
button.grid(row=2, column=2)
buttons = Tkinter.Frame(self)
buttons.pack()
botton = Tkinter.Button(
buttons, text="Decrypt", width=10, command=self.decrypt)
@@ -2132,7 +2132,7 @@ class DecryptionDialog(Tkinter.Frame):
button = Tkinter.Button(
buttons, text="Quit", width=10, command=self.quit)
button.pack(side=Tkconstants.RIGHT)
def get_keypath(self):
keypath = tkFileDialog.askopenfilename(

View file

@@ -1,9 +1,9 @@
# engine to remove drm from Kindle for Mac books
# for personal use for archiving and converting your ebooks
# PLEASE DO NOT PIRATE!
# We want all authors and Publishers, and eBook stores to live long and prosperous lives
#
# it borrows heavily from works by CMBDTC, IHeartCabbages, skindle,
# unswindle, DiapDealer, some_updates and many many others
from __future__ import with_statement
@@ -75,20 +75,20 @@ def _load_crypto_libcrypto():
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
class LibCrypto(object):
def __init__(self):
self._blocksize = 0
@@ -168,7 +168,7 @@ def GetVolumeSerialNumber():
sernum = '9999999999'
return sernum
# uses unix env to get username instead of using sysctlbyname
def GetUserName():
username = os.getenv('USER')
return username
@@ -183,7 +183,7 @@ global kindleDatabase
# Various character maps used to decrypt books. Probably supposed to act as obfuscation
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
charMap3 = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
charMap4 = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
@@ -197,7 +197,7 @@ def encode(data, map):
result += map[Q]
result += map[R]
return result
# Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map):
return encode(MD5(data),map)
@@ -254,7 +254,7 @@ def getKindleInfoValueForHash(hashedKey):
encryptedValue = decode(kindleDatabase[hashedKey],charMap2)
cleartext = CryptUnprotectData(encryptedValue)
return decode(cleartext, charMap1)
# Get a record from the Kindle.info file for the string in "key" (plaintext). Return the decoded and decrypted record
def getKindleInfoValueForKey(key):
return getKindleInfoValueForHash(encodeHash(key,charMap2))
@@ -265,10 +265,10 @@ def findNameForHash(hash):
result = ""
for name in names:
if hash == encodeHash(name, charMap2):
result = name
break
return result
# Print all the records from the kindle.info file (option -i)
def printKindleInfo():
for record in kindleDatabase:
@@ -284,7 +284,7 @@ def printKindleInfo():
#
# PID generation routines
#
# Returns two bit at offset from a bit field
def getTwoBitsFromBitField(bitField,offset):
byteNumber = offset // 4
@@ -293,10 +293,10 @@ def getTwoBitsFromBitField(bitField,offset):
# Returns the six bits at offset from a bit field
def getSixBitsFromBitField(bitField,offset):
offset *= 3
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
return value
# 8 bits to six bits encoding from hash to generate PID string
def encodePID(hash):
global charMap3
@@ -304,29 +304,29 @@ def encodePID(hash):
for position in range (0,8):
PID += charMap3[getSixBitsFromBitField(hash,position)]
return PID
#
# Main
#
def main(argv=sys.argv):
global kindleDatabase
kindleDatabase = None
#
# Read the encrypted database
#
try:
kindleDatabase = parseKindleInfo()
except Exception, message:
print(message)
if kindleDatabase != None :
printKindleInfo()
return 0
if __name__ == '__main__':
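encodePID above turns the first 48 bits of the supplied hash into eight 6-bit indexes into charMap3. Assuming getTwoBitsFromBitField walks the digest most-significant bits first (its body is cut off by the hunk above), the routine is equivalent to this compact editor's sketch, shown for illustration only:

def encode_pid_sketch(digest):
    # digest: a binary hash string; uses its first 6 bytes = 48 bits = 8 groups of 6 bits
    n = 0
    for ch in digest[:6]:
        n = (n << 8) | ord(ch)
    return ''.join(charMap3[(n >> (42 - 6*i)) & 0x3F] for i in range(8))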

View file

@@ -5,19 +5,19 @@ from __future__ import with_statement
# engine to remove drm from Kindle for Mac and Kindle for PC books
# for personal use for archiving and converting your ebooks
# PLEASE DO NOT PIRATE EBOOKS!
# We want all authors and publishers, and eBook stores to live
# long and prosperous lives but at the same time we just want to
# be able to read OUR books on whatever device we want and to keep
# readable for a long, long time
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
# and many many others
__version__ = '3.9' __version__ = '4.0'
class Unbuffered:
def __init__(self, stream):
@@ -50,7 +50,7 @@ else:
import mobidedrm
import topazextract
import kgenpids
# cleanup bytestring filenames
# borrowed from calibre from calibre/src/calibre/__init__.py
@@ -100,14 +100,14 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
outfilename = outfilename + "_" + filenametitle
elif outfilename[:8] != filenametitle[:8]:
outfilename = outfilename[:8] + "_" + filenametitle
# avoid excessively long file names
if len(outfilename)>150:
outfilename = outfilename[:150]
# build pid list
md1, md2 = mb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
try:
mb.processBook(pidlst)
@@ -128,9 +128,9 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
else:
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
mb.getMobiFile(outfile)
return 0
# topaz:
print " Creating NoDRM HTMLZ Archive"
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
mb.getHTMLZip(zipname)
@@ -156,7 +156,7 @@ def usage(progname):
#
# Main
#
def main(argv=sys.argv):
progname = os.path.basename(argv[0])
@@ -164,9 +164,9 @@ def main(argv=sys.argv):
kInfoFiles = []
serials = []
pids = []
print ('K4MobiDeDrm v%(__version__)s '
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
try:
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
@@ -177,7 +177,7 @@ def main(argv=sys.argv):
if len(args)<2:
usage(progname)
sys.exit(2)
for o, a in opts:
if o == "-k":
if a == None :
@@ -195,8 +195,8 @@ def main(argv=sys.argv):
# try with built in Kindle Info files
k4 = True
if sys.platform.startswith('linux'):
k4 = False
kInfoFiles = None
infile = args[0]
outdir = args[1]
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
@@ -205,4 +205,3 @@ def main(argv=sys.argv):
if __name__ == '__main__':
sys.stdout=Unbuffered(sys.stdout)
sys.exit(main())
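For reference, main() parses -k, -p and -s (kindle.info files, PIDs and serial numbers, judging by the option string and the variables initialised above) and hands the two positional arguments straight to decryptBook. An editor's sketch of an equivalent script-level call, with placeholder paths and serial number:

# k4 = True -> also look for installed Kindle for PC/Mac info files
decryptBook('mybook.azw', '/path/to/outdir', True, [], ['B001234567890123'], [])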

View file

@@ -5,7 +5,8 @@ from __future__ import with_statement
import sys
import os
import os.path
import re
import copy
import subprocess
from struct import pack, unpack, unpack_from
@@ -24,6 +25,25 @@ def _load_crypto_libcrypto():
raise DrmException('libcrypto not found')
libcrypto = CDLL(libcrypto)
# From OpenSSL's crypto aes header
#
# AES_ENCRYPT 1
# AES_DECRYPT 0
# AES_MAXNR 14 (in bytes)
# AES_BLOCK_SIZE 16 (in bytes)
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
# note: the ivec string, and output buffer are mutable
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
AES_MAXNR = 14
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
@@ -31,25 +51,31 @@ def _load_crypto_libcrypto():
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes):
func = getattr(libcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1', # From OpenSSL's Crypto evp/p5_crpt2.c
#
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# int keylen, unsigned char *out);
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
class LibCrypto(object):
def __init__(self):
self._blocksize = 0
self._keyctx = None
self.iv = 0 self._iv = 0
def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey)
@@ -57,14 +83,17 @@ def _load_crypto_libcrypto():
raise DrmException('AES improper key used')
return
keyctx = self._keyctx = AES_KEY()
self.iv = iv self._iv = iv
self._userkey = userkey
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0:
raise DrmException('Failed to initialize AES key')
def decrypt(self, data):
out = create_string_buffer(len(data))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0) mutable_iv = create_string_buffer(self._iv, len(self._iv))
keyctx = self._keyctx
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
if rv == 0: if rv == 0:
raise DrmException('AES decryption failed') raise DrmException('AES decryption failed')
return out.raw return out.raw
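The change above from self.iv to a per-call buffer matters because AES_cbc_encrypt() writes the running IV back into its ivec argument. A minimal, libcrypto-free sketch of the ctypes detail being relied on (the 16-byte IV value is made up; in the real class it comes from keyivgen()):

from ctypes import create_string_buffer

iv = '\x00' * 16                                   # hypothetical IV held by the Python object
mutable_iv = create_string_buffer(iv, len(iv))     # independent, writable copy for this one call
mutable_iv.raw = '\xff' * 16                       # simulate libcrypto overwriting the buffer in place
assert iv == '\x00' * 16                           # the stored IV is untouched for the next decrypt()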
@ -111,13 +140,17 @@ def SHA256(message):
# Various character maps used to decrypt books. Probably supposed to act as obfuscation # Various character maps used to decrypt books. Probably supposed to act as obfuscation
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M" charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM" charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
# For kinf approach of K4PC/K4Mac # For kinf approach of K4Mac 1.6.X or later
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE" # On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# For Mac they seem to re-use charMap2 here # For Mac they seem to re-use charMap2 here
charMap5 = charMap2 charMap5 = charMap2
# new in K4M 1.9.X
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
def encode(data, map): def encode(data, map):
result = "" result = ""
for char in data: for char in data:
@ -144,7 +177,7 @@ def decode(data,map):
result += pack("B",value) result += pack("B",value)
return result return result
# For .kinf approach of K4PC and now K4Mac # For K4M 1.6.X and later
# generate table of prime number less than or equal to int n # generate table of prime number less than or equal to int n
def primes(n): def primes(n):
if n==2: return [2] if n==2: return [2]
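As a quick illustration of the encode()/decode() pair above: each input byte becomes two characters of the chosen map, and decode() reverses the split, so a round trip through any power-of-two-length map returns the original bytes. A self-contained Python 2 sketch (decode() matches the lines shown above; the high-digit formula in encode() is reconstructed from the full source, so treat it as an assumption):

from struct import pack

charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"

def encode(data, map):
    # one byte becomes a "high" map character and a "low" map character
    result = ""
    for char in data:
        value = ord(char)
        Q = (value ^ 0x80) // len(map)
        R = value % len(map)
        result += map[Q]
        result += map[R]
    return result

def decode(data, map):
    # undo the split, two characters at a time
    result = ""
    for i in range(0, len(data), 2):
        high = map.find(data[i])
        low = map.find(data[i + 1])
        value = (((high * len(map)) ^ 0x80) & 0xFF) + low
        result += pack("B", value)
    return result

sample = "kindle.account.tokens"
assert decode(encode(sample, charMap2), charMap2) == sample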
@ -271,7 +304,7 @@ def GetDiskPartitionUUID(diskpart):
if not foundIt: if not foundIt:
uuidnum = '' uuidnum = ''
return uuidnum return uuidnum
def GetMACAddressMunged(): def GetMACAddressMunged():
macnum = os.getenv('MYMACNUM') macnum = os.getenv('MYMACNUM')
if macnum != None: if macnum != None:
@ -315,33 +348,11 @@ def GetMACAddressMunged():
return macnum return macnum
# uses unix env to get username instead of using sysctlbyname # uses unix env to get username instead of using sysctlbyname
def GetUserName(): def GetUserName():
username = os.getenv('USER') username = os.getenv('USER')
return username return username
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
def CryptUnprotectData(encryptedData):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
iter = 0x3e8
keylen = 0x80
crp = LibCrypto()
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32]
iv = key_iv[32:48]
crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
def isNewInstall(): def isNewInstall():
home = os.getenv('HOME') home = os.getenv('HOME')
# soccer game fan anyone # soccer game fan anyone
@ -350,7 +361,7 @@ def isNewInstall():
if os.path.exists(dpath): if os.path.exists(dpath):
return True return True
return False return False
def GetIDString(): def GetIDString():
# K4Mac now has an extensive set of ids strings it uses # K4Mac now has an extensive set of ids strings it uses
@ -359,13 +370,13 @@ def GetIDString():
# BUT Amazon has now become nasty enough to detect when its app # BUT Amazon has now become nasty enough to detect when its app
# is being run under a debugger and actually changes code paths # is being run under a debugger and actually changes code paths
# including which one of these strings is chosen, all to try # including which one of these strings is chosen, all to try
# to prevent reverse engineering # to prevent reverse engineering
# Sad really ... they will only hurt their own sales ... # Sad really ... they will only hurt their own sales ...
# true book lovers really want to keep their books forever # true book lovers really want to keep their books forever
# and move them to their devices and DRM prevents that so they # and move them to their devices and DRM prevents that so they
# will just buy from someplace else that they can remove # will just buy from someplace else that they can remove
# the DRM from # the DRM from
# Amazon should know by now that true book lovers are not like # Amazon should know by now that true book lovers are not like
@ -388,27 +399,91 @@ def GetIDString():
return '9999999999' return '9999999999'
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
class CryptUnprotectData(object):
def __init__(self):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
self.crp = LibCrypto()
iter = 0x3e8
keylen = 0x80
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
# implements a Pseudo Mac Version of the Windows built-in Crypto routine # implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.6.0 # used for Kindle for Mac Versions >= 1.6.0
def CryptUnprotectDataV2(encryptedData): class CryptUnprotectDataV2(object):
sp = GetUserName() + ':&%:' + GetIDString() def __init__(self):
passwdData = encode(SHA256(sp),charMap5) sp = GetUserName() + ':&%:' + GetIDString()
# salt generation as per the code passwdData = encode(SHA256(sp),charMap5)
salt = 0x0512981d * 2 * 1 * 1 # salt generation as per the code
salt = str(salt) + GetUserName() salt = 0x0512981d * 2 * 1 * 1
salt = encode(salt,charMap5) salt = str(salt) + GetUserName()
salt = encode(salt,charMap5)
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext
# unprotect the new header blob in .kinf2011
# used in Kindle for Mac Version >= 1.9.0
def UnprotectHeaderData(encryptedData):
passwdData = 'header_key_data'
salt = 'HEADER.2011'
iter = 0x80
keylen = 0x100
crp = LibCrypto() crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = crp.keyivgen(passwdData, salt, iter, keylen) key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32] key = key_iv[0:32]
iv = key_iv[32:48] iv = key_iv[32:48]
crp.set_decrypt_key(key,iv) crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData) cleartext = crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext return cleartext
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.9.0
class CryptUnprotectDataV3(object):
def __init__(self, entropy):
sp = GetUserName() + '+@#$%+' + GetIDString()
passwdData = encode(SHA256(sp),charMap2)
salt = entropy
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap2)
return cleartext
# Locate the .kindle-info files # Locate the .kindle-info files
def getKindleInfoFiles(kInfoFiles): def getKindleInfoFiles(kInfoFiles):
# first search for current .kindle-info files # first search for current .kindle-info files
@ -424,12 +499,22 @@ def getKindleInfoFiles(kInfoFiles):
if os.path.isfile(resline): if os.path.isfile(resline):
kInfoFiles.append(resline) kInfoFiles.append(resline)
found = True found = True
# add any .kinf files # add any .rainier*-kinf files
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"' cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
cmdline = cmdline.encode(sys.getfilesystemencoding()) cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate() out1, out2 = p1.communicate()
reslst = out1.split('\n') reslst = out1.split('\n')
for resline in reslst:
if os.path.isfile(resline):
kInfoFiles.append(resline)
found = True
# add any .kinf2011 files
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate()
reslst = out1.split('\n')
for resline in reslst: for resline in reslst:
if os.path.isfile(resline): if os.path.isfile(resline):
kInfoFiles.append(resline) kInfoFiles.append(resline)
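The two search blocks above differ only in the -name pattern handed to find; purely for readability, a compact equivalent of the repeated shell-out (this is a sketch, not what the plugin actually ships):

import os
import subprocess
import sys

def find_kinf(home, pattern):
    # same `find` invocation as above, returning only paths that exist as files
    cmdline = 'find "' + home + '/Library/Application Support" -name "' + pattern + '"'
    cmdline = cmdline.encode(sys.getfilesystemencoding())
    p1 = subprocess.Popen(cmdline, shell=True, stdin=None,
                          stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
    out1, out2 = p1.communicate()
    return [line for line in out1.split('\n') if os.path.isfile(line)]

# hypothetical use covering both patterns handled above
# for pattern in ('.rainier*-kinf', '.kinf2011'):
#     kInfoFiles.extend(find_kinf(home, pattern))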
@ -438,7 +523,7 @@ def getKindleInfoFiles(kInfoFiles):
print('No kindle-info files have been found.') print('No kindle-info files have been found.')
return kInfoFiles return kInfoFiles
# determine type of kindle info provided and return a # determine type of kindle info provided and return a
# database of keynames and values # database of keynames and values
def getDBfromFile(kInfoFile): def getDBfromFile(kInfoFile):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"] names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
@ -449,7 +534,9 @@ def getDBfromFile(kInfoFile):
data = infoReader.read() data = infoReader.read()
if data.find('[') != -1 : if data.find('[') != -1 :
# older style kindle-info file # older style kindle-info file
cud = CryptUnprotectData()
items = data.split('[') items = data.split('[')
for item in items: for item in items:
if item != '': if item != '':
@ -462,87 +549,175 @@ def getDBfromFile(kInfoFile):
if keyname == "unknown": if keyname == "unknown":
keyname = keyhash keyname = keyhash
encryptedValue = decode(rawdata,charMap2) encryptedValue = decode(rawdata,charMap2)
cleartext = CryptUnprotectData(encryptedValue) cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
if cnt == 0: if cnt == 0:
DB = None DB = None
return DB return DB
# else newer style .kinf file used by K4Mac >= 1.6.0 if hdr == '/':
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split # else newer style .kinf file used by K4Mac >= 1.6.0
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split
data = data[:-1]
items = data.split('/')
cud = CryptUnprotectDataV2()
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual
# CryptProtectData Blob that represents that keys contents
# "entropy" not used for K4Mac only K4PC
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,charMap5) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5
encdata = "".join(edlst)
contlen = len(encdata)
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5)
cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
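The least obvious step above is the prime-based realignment of each record's contents. Here is a small, self-contained sketch of just that transformation; the primes_upto() helper and the 45-character sample string are stand-ins, not the plugin's own primes() or real data:

def primes_upto(n):
    # naive trial division is enough for a demo; the plugin uses a sieve
    return [p for p in range(2, n + 1) if all(p % d for d in range(2, int(p ** 0.5) + 1))]

encdata = "".join(chr(ord('a') + i % 26) for i in range(45))   # hypothetical encoded record
contlen = len(encdata)
# offset = length minus the largest prime <= one third of the length
noffset = contlen - primes_upto(contlen // 3)[-1]               # 45 - 13 = 32
realigned = encdata[noffset:] + encdata[:noffset]               # move the first noffset chars to the end
assert len(realigned) == contlen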
# the latest .kinf2011 version for K4M 1.9.1
# put back the hdr char, it is needed
data = hdr + data
data = data[:-1] data = data[:-1]
items = data.split('/') items = data.split('/')
# the headerblob is the encrypted information needed to build the entropy string
headerblob = items.pop(0)
encryptedValue = decode(headerblob, charMap1)
cleartext = UnprotectHeaderData(encryptedValue)
# now extract the pieces in the same way
# this version is different from K4PC: it scales the build number by multiplying it by 735 (0x2df)
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
for m in re.finditer(pattern, cleartext):
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
cud = CryptUnprotectDataV3(entropy)
# loop through the item records until all are processed # loop through the item records until all are processed
while len(items) > 0: while len(items) > 0:
# get the first item record # get the first item record
item = items.pop(0) item = items.pop(0)
# the first 32 chars of the first record of a group # the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5 # is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32] keyhash = item[0:32]
keyname = "unknown" keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual # unlike K4PC the keyhash is not used in generating entropy
# CryptProtectData Blob that represents that keys contents # entropy = SHA1(keyhash) + added_entropy
# "entropy" not used for K4Mac only K4PC # entropy = added_entropy
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5 # the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation # has the ':' split char followed by the string representation
# of the number of records that follow # of the number of records that follow
# and make up the contents # and make up the contents
srcnt = decode(item[34:],charMap5) srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt) rcnt = int(srcnt)
# read and store in rcnt records of data # read and store in rcnt records of data
# that make up the contents value # that make up the contents value
edlst = [] edlst = []
for i in xrange(rcnt): for i in xrange(rcnt):
item = items.pop(0) item = items.pop(0)
edlst.append(item) edlst.append(item)
keyname = "unknown" keyname = "unknown"
for name in names: for name in names:
if encodeHash(name,charMap5) == keyhash: if encodeHash(name,testMap8) == keyhash:
keyname = name keyname = name
break break
if keyname == "unknown": if keyname == "unknown":
keyname = keyhash keyname = keyhash
# the charMap5 encoded contents data has had a length # the testMap8 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved # of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from # to the end to prevent decoding using testMap8 from
# working properly, and thereby preventing the ensuing # working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding. # CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be: # The offset into the testMap8 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3) # len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through) # (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5 # move first offsets chars to end to align for decode by testMap8
encdata = "".join(edlst) encdata = "".join(edlst)
contlen = len(encdata) contlen = len(encdata)
# now properly split and recombine # now properly split and recombine
# by moving noffset chars from the start of the # by moving noffset chars from the start of the
# string to the end of the string # string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1] noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset] pfx = encdata[0:noffset]
encdata = encdata[noffset:] encdata = encdata[noffset:]
encdata = encdata + pfx encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data # decode using testMap8 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5) encryptedValue = decode(encdata,testMap8)
cleartext = CryptUnprotectDataV2(encryptedValue) cleartext = cud.decrypt(encryptedValue)
# Debugging
# print keyname # print keyname
# print cleartext # print cleartext
# print cleartext.encode('hex')
# print
DB[keyname] = cleartext DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
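To make the entropy construction above concrete: the K4M 1.9.X path scales the build number by 0x2df (735) before appending the GUID, which is the detail that differs from the K4PC variant further down. A tiny sketch with placeholder header values:

# hypothetical fields as extracted by the regex above
build, guid = '1234', '{12345678-aaaa-bbbb-cccc-1234567890ab}'
entropy = str(int(build) * 0x2df) + guid        # 0x2df == 735, the Mac-specific scaling
assert entropy.startswith(str(1234 * 735))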

View file

@ -3,7 +3,7 @@
from __future__ import with_statement from __future__ import with_statement
import sys, os import sys, os, re
from struct import pack, unpack, unpack_from from struct import pack, unpack, unpack_from
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
@ -11,9 +11,7 @@ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
string_at, Structure, c_void_p, cast string_at, Structure, c_void_p, cast
import _winreg as winreg import _winreg as winreg
MAX_PATH = 255 MAX_PATH = 255
kernel32 = windll.kernel32 kernel32 = windll.kernel32
advapi32 = windll.advapi32 advapi32 = windll.advapi32
crypt32 = windll.crypt32 crypt32 = windll.crypt32
@ -33,9 +31,39 @@ def SHA1(message):
ctx.update(message) ctx.update(message)
return ctx.digest() return ctx.digest()
def SHA256(message):
ctx = hashlib.sha256()
ctx.update(message)
return ctx.digest()
# For K4PC 1.9.X
# need to use routines from openssl
# AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
# AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
# but the user may not have openssl installed or their version is a hacked one that was shipped
# with many ethernet cards that used software instead of hardware routines
# so using pure python implementations
from pbkdf2 import pbkdf2
import aescbc
def UnprotectHeaderData(encryptedData):
passwdData = 'header_key_data'
salt = 'HEADER.2011'
iter = 0x80
keylen = 0x100
key_iv = pbkdf2(passwdData, salt, iter, keylen)
key = key_iv[0:32]
iv = key_iv[32:48]
aes=aescbc.AES_CBC(key, aescbc.noPadding() ,32)
cleartext = aes.decrypt(iv + encryptedData)
return cleartext
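Only the first 48 bytes of the derived block are used above (32 for the key, 16 for the IV), and PBKDF2 output is prefix-stable, so the same key and IV can be reproduced with the standard library alone, assuming the bundled pbkdf2 module implements RFC 2898 as its header says. A cross-check sketch (hashlib.pbkdf2_hmac needs Python 2.7.8 or later):

import hashlib

# same password, salt and iteration count as UnprotectHeaderData() above
key_iv = hashlib.pbkdf2_hmac('sha1', 'header_key_data', 'HEADER.2011', 0x80, 48)
key, iv = key_iv[:32], key_iv[32:48]            # 32-byte AES key, 16-byte CBC IV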
# simple primes table (<= n) calculator # simple primes table (<= n) calculator
def primes(n): def primes(n):
if n==2: return [2] if n==2: return [2]
elif n<2: return [] elif n<2: return []
s=range(3,n+1,2) s=range(3,n+1,2)
@ -59,6 +87,10 @@ def primes(n):
# Probably supposed to act as obfuscation # Probably supposed to act as obfuscation
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_" charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE" charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# New maps in K4PC 1.9.0
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -73,7 +105,7 @@ def encode(data, map):
result += map[Q] result += map[Q]
result += map[R] result += map[R]
return result return result
# Hash the bytes in data and then encode the digest with the characters in map # Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map): def encodeHash(data,map):
return encode(MD5(data),map) return encode(MD5(data),map)
@ -165,7 +197,8 @@ def CryptUnprotectData():
outdata = DataBlob() outdata = DataBlob()
if not _CryptUnprotectData(byref(indata), None, byref(entropy), if not _CryptUnprotectData(byref(indata), None, byref(entropy),
None, None, flags, byref(outdata)): None, None, flags, byref(outdata)):
raise DrmException("Failed to Unprotect Data") # raise DrmException("Failed to Unprotect Data")
return 'failed'
return string_at(outdata.pbData, outdata.cbData) return string_at(outdata.pbData, outdata.cbData)
return CryptUnprotectData return CryptUnprotectData
CryptUnprotectData = CryptUnprotectData() CryptUnprotectData = CryptUnprotectData()
@ -198,10 +231,17 @@ def getKindleInfoFiles(kInfoFiles):
else: else:
kInfoFiles.append(kinfopath) kInfoFiles.append(kinfopath)
# now look for even newer (K4PC 1.9.0 and later) .kinf2011 file
kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
if not os.path.isfile(kinfopath):
print('No K4PC 1.9.X .kinf files have been found.')
else:
kInfoFiles.append(kinfopath)
return kInfoFiles return kInfoFiles
# determine type of kindle info provided and return a # determine type of kindle info provided and return a
# database of keynames and values # database of keynames and values
def getDBfromFile(kInfoFile): def getDBfromFile(kInfoFile):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"] names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
@ -232,12 +272,97 @@ def getDBfromFile(kInfoFile):
DB = None DB = None
return DB return DB
# else newer style .kinf file if hdr == '/':
# else rainier-2-1-1 .kinf file
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split
data = data[:-1]
items = data.split('/')
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
# the raw keyhash string is used to create entropy for the actual
# CryptProtectData Blob that represents that keys contents
entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,charMap5) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be:
# len(contents)-largest prime number <= int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5
encdata = "".join(edlst)
contlen = len(encdata)
noffset = contlen - primes(int(contlen/3))[-1]
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using Map5 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5)
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
# else newest .kinf2011 style .kinf file
# the .kinf file uses "/" to separate it into records # the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split # so remove the trailing "/" to make it easy to use split
data = data[:-1] # need to put back the first char read because it is part
# of the added entropy blob
data = hdr + data[:-1]
items = data.split('/') items = data.split('/')
# starts with and encoded and encrypted header blob
headerblob = items.pop(0)
encryptedValue = decode(headerblob, testMap1)
cleartext = UnprotectHeaderData(encryptedValue)
# now extract the pieces that form the added entropy
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
for m in re.finditer(pattern, cleartext):
added_entropy = m.group(2) + m.group(4)
# loop through the item records until all are processed # loop through the item records until all are processed
while len(items) > 0: while len(items) > 0:
@ -248,11 +373,11 @@ def getDBfromFile(kInfoFile):
# is the MD5 hash of the key name encoded by charMap5 # is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32] keyhash = item[0:32]
# the raw keyhash string is also used to create entropy for the actual # the sha1 of raw keyhash string is used to create entropy along
# CryptProtectData Blob that represents that keys contents # with the added entropy provided above from the headerblob
entropy = SHA1(keyhash) entropy = SHA1(keyhash) + added_entropy
# the remainder of the first record when decoded with charMap5 # the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation # has the ':' split char followed by the string representation
# of the number of records that follow # of the number of records that follow
# and make up the contents # and make up the contents
@ -266,43 +391,39 @@ def getDBfromFile(kInfoFile):
item = items.pop(0) item = items.pop(0)
edlst.append(item) edlst.append(item)
# key names now use the new testMap8 encoding
keyname = "unknown" keyname = "unknown"
for name in names: for name in names:
if encodeHash(name,charMap5) == keyhash: if encodeHash(name,testMap8) == keyhash:
keyname = name keyname = name
break break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length # the testMap8 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved # of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from # to the end to prevent decoding using testMap8 from
# working properly, and thereby preventing the ensuing # working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding. # CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be: # The offset into the testMap8 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3) # len(contents)-largest prime number <= int(len(content)/3)
# (in other words split "about" 2/3rds of the way through) # (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5 # move first offsets chars to end to align for decode by testMap8
# by moving noffset chars from the start of the
# string to the end of the string
encdata = "".join(edlst) encdata = "".join(edlst)
contlen = len(encdata) contlen = len(encdata)
noffset = contlen - primes(int(contlen/3))[-1] noffset = contlen - primes(int(contlen/3))[-1]
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
pfx = encdata[0:noffset] pfx = encdata[0:noffset]
encdata = encdata[noffset:] encdata = encdata[noffset:]
encdata = encdata + pfx encdata = encdata + pfx
# decode using Map5 to get the CryptProtect Data # decode using new testMap8 to get the original CryptProtect Data
encryptedValue = decode(encdata,charMap5) encryptedValue = decode(encdata,testMap8)
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1) cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
if cnt == 0: if cnt == 0:
DB = None DB = None
return DB return DB
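A compact, self-contained illustration of the header-blob parsing above; the cleartext string is a made-up example shaped like the real one, and in this K4PC path the per-record entropy is then SHA1(keyhash) plus the added entropy:

import re

cleartext = '[Version:1][Build:1234][Cksum:ABCD1234][Guid:{12345678-aaaa-bbbb-cccc-1234567890ab}]'
pattern = re.compile(r'\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]', re.IGNORECASE)
added_entropy = ''
for m in re.finditer(pattern, cleartext):
    added_entropy = m.group(2) + m.group(4)     # build number followed by the GUID
assert added_entropy == '1234{12345678-aaaa-bbbb-cccc-1234567890ab}'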

View file

@ -62,7 +62,7 @@ def encode(data, map):
result += map[Q] result += map[Q]
result += map[R] result += map[R]
return result return result
# Hash the bytes in data and then encode the digest with the characters in map # Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map): def encodeHash(data,map):
return encode(MD5(data),map) return encode(MD5(data),map)
@ -78,11 +78,11 @@ def decode(data,map):
value = (((high * len(map)) ^ 0x80) & 0xFF) + low value = (((high * len(map)) ^ 0x80) & 0xFF) + low
result += pack("B",value) result += pack("B",value)
return result return result
# #
# PID generation routines # PID generation routines
# #
# Returns two bit at offset from a bit field # Returns two bit at offset from a bit field
def getTwoBitsFromBitField(bitField,offset): def getTwoBitsFromBitField(bitField,offset):
byteNumber = offset // 4 byteNumber = offset // 4
@ -91,10 +91,10 @@ def getTwoBitsFromBitField(bitField,offset):
# Returns the six bits at offset from a bit field # Returns the six bits at offset from a bit field
def getSixBitsFromBitField(bitField,offset): def getSixBitsFromBitField(bitField,offset):
offset *= 3 offset *= 3
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2) value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
return value return value
# 8 bits to six bits encoding from hash to generate PID string # 8 bits to six bits encoding from hash to generate PID string
def encodePID(hash): def encodePID(hash):
global charMap3 global charMap3
@ -121,8 +121,8 @@ def generatePidEncryptionTable() :
def generatePidSeed(table,dsn) : def generatePidSeed(table,dsn) :
value = 0 value = 0
for counter in range (0,4) : for counter in range (0,4) :
index = (ord(dsn[counter]) ^ value) &0xFF index = (ord(dsn[counter]) ^ value) &0xFF
value = (value >> 8) ^ table[index] value = (value >> 8) ^ table[index]
return value return value
# Generate the device PID # Generate the device PID
@ -141,7 +141,7 @@ def generateDevicePID(table,dsn,nbRoll):
return pidAscii return pidAscii
def crc32(s): def crc32(s):
return (~binascii.crc32(s,-1))&0xFFFFFFFF return (~binascii.crc32(s,-1))&0xFFFFFFFF
# convert from 8 digit PID to 10 digit PID with checksum # convert from 8 digit PID to 10 digit PID with checksum
def checksumPid(s): def checksumPid(s):
@ -204,10 +204,10 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
print(message) print(message)
kindleDatabase = None kindleDatabase = None
pass pass
if kindleDatabase == None : if kindleDatabase == None :
return pidlst return pidlst
try: try:
# Get the Mazama Random number # Get the Mazama Random number
MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"] MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"]
@ -217,7 +217,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
except KeyError: except KeyError:
print "Keys not found in " + kInfoFile print "Keys not found in " + kInfoFile
return pidlst return pidlst
# Get the ID string used # Get the ID string used
encodedIDString = encodeHash(GetIDString(),charMap1) encodedIDString = encodeHash(GetIDString(),charMap1)
@ -226,7 +226,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
# concat, hash and encode to calculate the DSN # concat, hash and encode to calculate the DSN
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1) DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
# Compute the device PID (which, as far as I can tell, is used for nothing). # Compute the device PID (which, as far as I can tell, is used for nothing).
table = generatePidEncryptionTable() table = generatePidEncryptionTable()
devicePID = generateDevicePID(table,DSN,4) devicePID = generateDevicePID(table,DSN,4)
@ -258,7 +258,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
def getPidList(md1, md2, k4, pids, serials, kInfoFiles): def getPidList(md1, md2, k4, pids, serials, kInfoFiles):
pidlst = [] pidlst = []
if kInfoFiles is None: if kInfoFiles is None:
kInfoFiles = [] kInfoFiles = []
if k4: if k4:
kInfoFiles = getKindleInfoFiles(kInfoFiles) kInfoFiles = getKindleInfoFiles(kInfoFiles)
for infoFile in kInfoFiles: for infoFile in kInfoFiles:

View file

@ -27,8 +27,8 @@
# files reveals that a confusion has arisen because trailing data entries # files reveals that a confusion has arisen because trailing data entries
# are not encrypted, but it turns out that the multibyte entries # are not encrypted, but it turns out that the multibyte entries
# in utf8 file are encrypted. (Although neither kind gets compressed.) # in utf8 file are encrypted. (Although neither kind gets compressed.)
# This knowledge leads to a simplification of the test for the # This knowledge leads to a simplification of the test for the
# trailing data byte flags - version 5 and higher AND header size >= 0xE4. # trailing data byte flags - version 5 and higher AND header size >= 0xE4.
# 0.15 - Now outputs 'heartbeat', and is also quicker for long files. # 0.15 - Now outputs 'heartbeat', and is also quicker for long files.
# 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility. # 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility.
# 0.17 - added modifications to support its use as an imported python module # 0.17 - added modifications to support its use as an imported python module
@ -42,7 +42,7 @@
# 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file. # 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
# 0.21 - Added support for multiple pids # 0.21 - Added support for multiple pids
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface # 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
# 0.23 - fixed problem with older files with no EXTH section # 0.23 - fixed problem with older files with no EXTH section
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well # 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption # 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100% # 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
@ -54,8 +54,10 @@
# 0.30 - Modified interface slightly to work better with new calibre plugin style # 0.30 - Modified interface slightly to work better with new calibre plugin style
# 0.31 - The multibyte encryption info is true for version 7 files too. # 0.31 - The multibyte encryption info is true for version 7 files too.
# 0.32 - Added support for "Print Replica" Kindle ebooks # 0.32 - Added support for "Print Replica" Kindle ebooks
# 0.33 - Performance improvements for large files (concatenation)
# 0.34 - Performance improvements in decryption (libalfcrypto)
__version__ = '0.32' __version__ = '0.34'
import sys import sys
@ -72,6 +74,7 @@ sys.stdout=Unbuffered(sys.stdout)
import os import os
import struct import struct
import binascii import binascii
from alfcrypto import Pukall_Cipher
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -83,36 +86,37 @@ class DrmException(Exception):
# Implementation of Pukall Cipher 1 # Implementation of Pukall Cipher 1
def PC1(key, src, decryption=True): def PC1(key, src, decryption=True):
sum1 = 0; return Pukall_Cipher().PC1(key,src,decryption)
sum2 = 0; # sum1 = 0;
keyXorVal = 0; # sum2 = 0;
if len(key)!=16: # keyXorVal = 0;
print "Bad key length!" # if len(key)!=16:
return None # print "Bad key length!"
wkey = [] # return None
for i in xrange(8): # wkey = []
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1])) # for i in xrange(8):
dst = "" # wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
for i in xrange(len(src)): # dst = ""
temp1 = 0; # for i in xrange(len(src)):
byteXorVal = 0; # temp1 = 0;
for j in xrange(8): # byteXorVal = 0;
temp1 ^= wkey[j] # for j in xrange(8):
sum2 = (sum2+j)*20021 + sum1 # temp1 ^= wkey[j]
sum1 = (temp1*346)&0xFFFF # sum2 = (sum2+j)*20021 + sum1
sum2 = (sum2+sum1)&0xFFFF # sum1 = (temp1*346)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF # sum2 = (sum2+sum1)&0xFFFF
byteXorVal ^= temp1 ^ sum2 # temp1 = (temp1*20021+1)&0xFFFF
curByte = ord(src[i]) # byteXorVal ^= temp1 ^ sum2
if not decryption: # curByte = ord(src[i])
keyXorVal = curByte * 257; # if not decryption:
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF # keyXorVal = curByte * 257;
if decryption: # curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
keyXorVal = curByte * 257; # if decryption:
for j in xrange(8): # keyXorVal = curByte * 257;
wkey[j] ^= keyXorVal; # for j in xrange(8):
dst+=chr(curByte) # wkey[j] ^= keyXorVal;
return dst # dst+=chr(curByte)
# return dst
def checksumPid(s): def checksumPid(s):
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789" letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
@ -236,7 +240,7 @@ class MobiBook:
self.meta_array = {} self.meta_array = {}
pass pass
self.print_replica = False self.print_replica = False
def getBookTitle(self): def getBookTitle(self):
codec_map = { codec_map = {
1252 : 'windows-1252', 1252 : 'windows-1252',
@ -319,7 +323,7 @@ class MobiBook:
def getMobiFile(self, outpath): def getMobiFile(self, outpath):
file(outpath,'wb').write(self.mobi_data) file(outpath,'wb').write(self.mobi_data)
def getPrintReplica(self): def getPrintReplica(self):
return self.print_replica return self.print_replica
@ -355,9 +359,9 @@ class MobiBook:
if self.magic == 'TEXtREAd': if self.magic == 'TEXtREAd':
bookkey_data = self.sect[0x0E:0x0E+16] bookkey_data = self.sect[0x0E:0x0E+16]
elif self.mobi_version < 0: elif self.mobi_version < 0:
bookkey_data = self.sect[0x90:0x90+16] bookkey_data = self.sect[0x90:0x90+16]
else: else:
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32] bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
pid = "00000000" pid = "00000000"
found_key = PC1(t1_keyvec, bookkey_data) found_key = PC1(t1_keyvec, bookkey_data)
else : else :
@ -372,7 +376,7 @@ class MobiBook:
self.patchSection(0, "\0" * drm_size, drm_ptr) self.patchSection(0, "\0" * drm_size, drm_ptr)
# kill the drm pointers # kill the drm pointers
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8) self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8)
if pid=="00000000": if pid=="00000000":
print "File has default encryption, no specific PID." print "File has default encryption, no specific PID."
else: else:
@ -383,7 +387,8 @@ class MobiBook:
# decrypt sections # decrypt sections
print "Decrypting. Please wait . . .", print "Decrypting. Please wait . . .",
self.mobi_data = self.data_file[:self.sections[1][0]] mobidataList = []
mobidataList.append(self.data_file[:self.sections[1][0]])
for i in xrange(1, self.records+1): for i in xrange(1, self.records+1):
data = self.loadSection(i) data = self.loadSection(i)
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags) extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
@ -393,11 +398,12 @@ class MobiBook:
decoded_data = PC1(found_key, data[0:len(data) - extra_size]) decoded_data = PC1(found_key, data[0:len(data) - extra_size])
if i==1: if i==1:
self.print_replica = (decoded_data[0:4] == '%MOP') self.print_replica = (decoded_data[0:4] == '%MOP')
self.mobi_data += decoded_data mobidataList.append(decoded_data)
if extra_size > 0: if extra_size > 0:
self.mobi_data += data[-extra_size:] mobidataList.append(data[-extra_size:])
if self.num_sections > self.records+1: if self.num_sections > self.records+1:
self.mobi_data += self.data_file[self.sections[self.records+1][0]:] mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
self.mobi_data = "".join(mobidataList)
print "done" print "done"
return return
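The 0.33 change above swaps repeated string concatenation for a list that is joined once at the end, avoiding re-copying the whole output on every record. The pattern in isolation, with placeholder chunks:

chunks = []
for part in ('header', 'record-1', 'record-2'):  # placeholders for decrypted sections
    chunks.append(part)
mobi_data = "".join(chunks)                      # single join instead of growing an immutable string each pass
assert mobi_data == 'headerrecord-1record-2'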

View file

@ -0,0 +1,444 @@
#!/usr/bin/python
#
# This is a python script. You need a Python interpreter to run it.
# For example, ActiveState Python, which exists for windows.
#
# Changelog
# 0.01 - Initial version
# 0.02 - Huffdic compressed books were not properly decrypted
# 0.03 - Wasn't checking MOBI header length
# 0.04 - Wasn't sanity checking size of data record
# 0.05 - It seems that the extra data flags take two bytes not four
# 0.06 - And that low bit does mean something after all :-)
# 0.07 - The extra data flags aren't present in MOBI header < 0xE8 in size
# 0.08 - ...and also not in Mobi header version < 6
# 0.09 - ...but they are there with Mobi header version 6, header size 0xE4!
# 0.10 - Outputs unencrypted files as-is, so that when run as a Calibre
# import filter it works when importing unencrypted files.
# Also now handles encrypted files that don't need a specific PID.
# 0.11 - use autoflushed stdout and proper return values
# 0.12 - Fix for problems with metadata import as Calibre plugin, report errors
# 0.13 - Formatting fixes: retabbed file, removed trailing whitespace
# and extra blank lines, converted CR/LF pairs at ends of each line,
# and other cosmetic fixes.
# 0.14 - Working out when the extra data flags are present has been problematic
# Versions 7 through 9 have tried to tweak the conditions, but have been
# only partially successful. Closer examination of lots of sample
# files reveals that a confusion has arisen because trailing data entries
# are not encrypted, but it turns out that the multibyte entries
# in utf8 file are encrypted. (Although neither kind gets compressed.)
# This knowledge leads to a simplification of the test for the
# trailing data byte flags - version 5 and higher AND header size >= 0xE4.
# 0.15 - Now outputs 'heartbeat', and is also quicker for long files.
# 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility.
# 0.17 - added modifications to support its use as an imported python module
# both inside calibre and also in other places (ie K4DeDRM tools)
# 0.17a- disabled the standalone plugin feature since a plugin can not import
# a plugin
# 0.18 - It seems that multibyte entries aren't encrypted in a v7 file...
# Removed the disabled Calibre plug-in code
# Permit use of 8-digit PIDs
# 0.19 - It seems that multibyte entries aren't encrypted in a v6 file either.
# 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
# 0.21 - Added support for multiple pids
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
# 0.23 - fixed problem with older files with no EXTH section
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
# 0.27 - Correct pid metadata token generation to match that used by skindle (Thank You Bart!)
# 0.28 - slight additional changes to metadata token generation (None -> '')
# 0.29 - It seems that the ideas about when multibyte trailing characters were
# included in the encryption were wrong. They are for DOC compressed
# files, but they are not for HUFF/CDIC compress files!
# 0.30 - Modified interface slightly to work better with new calibre plugin style
# 0.31 - The multibyte encryption info is true for version 7 files too.
# 0.32 - Added support for "Print Replica" Kindle ebooks
__version__ = '0.32'
import sys
class Unbuffered:
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
sys.stdout=Unbuffered(sys.stdout)
import os
import struct
import binascii
class DrmException(Exception):
pass
#
# MobiBook Utility Routines
#
# Implementation of Pukall Cipher 1
def PC1(key, src, decryption=True):
sum1 = 0;
sum2 = 0;
keyXorVal = 0;
if len(key)!=16:
print "Bad key length!"
return None
wkey = []
for i in xrange(8):
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
dst = ""
for i in xrange(len(src)):
temp1 = 0;
byteXorVal = 0;
for j in xrange(8):
temp1 ^= wkey[j]
sum2 = (sum2+j)*20021 + sum1
sum1 = (temp1*346)&0xFFFF
sum2 = (sum2+sum1)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF
byteXorVal ^= temp1 ^ sum2
curByte = ord(src[i])
if not decryption:
keyXorVal = curByte * 257;
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
if decryption:
keyXorVal = curByte * 257;
for j in xrange(8):
wkey[j] ^= keyXorVal;
dst+=chr(curByte)
return dst
def checksumPid(s):
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
crc = (~binascii.crc32(s,-1))&0xFFFFFFFF
crc = crc ^ (crc >> 16)
res = s
l = len(letters)
for i in (0,1):
b = crc & 0xff
pos = (b // l) ^ (b % l)
res += letters[pos%l]
crc >>= 8
return res
def getSizeOfTrailingDataEntries(ptr, size, flags):
def getSizeOfTrailingDataEntry(ptr, size):
bitpos, result = 0, 0
if size <= 0:
return result
while True:
v = ord(ptr[size-1])
result |= (v & 0x7F) << bitpos
bitpos += 7
size -= 1
if (v & 0x80) != 0 or (bitpos >= 28) or (size == 0):
return result
num = 0
testflags = flags >> 1
while testflags:
if testflags & 1:
num += getSizeOfTrailingDataEntry(ptr, size - num)
testflags >>= 1
# Check the low bit to see if there's multibyte data present.
# if multibyte data is included in the encrypted data, we'll
# have already cleared this flag.
if flags & 1:
num += (ord(ptr[size - num - 1]) & 0x3) + 1
return num
class MobiBook:
def loadSection(self, section):
if (section + 1 == self.num_sections):
endoff = len(self.data_file)
else:
endoff = self.sections[section + 1][0]
off = self.sections[section][0]
return self.data_file[off:endoff]
def __init__(self, infile):
print ('MobiDeDrm v%(__version__)s. '
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
# initial sanity check on file
self.data_file = file(infile, 'rb').read()
self.mobi_data = ''
self.header = self.data_file[0:78]
if self.header[0x3C:0x3C+8] != 'BOOKMOBI' and self.header[0x3C:0x3C+8] != 'TEXtREAd':
raise DrmException("invalid file format")
self.magic = self.header[0x3C:0x3C+8]
self.crypto_type = -1
# build up section offset and flag info
self.num_sections, = struct.unpack('>H', self.header[76:78])
self.sections = []
for i in xrange(self.num_sections):
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data_file[78+i*8:78+i*8+8])
flags, val = a1, a2<<16|a3<<8|a4
self.sections.append( (offset, flags, val) )
# parse information from section 0
self.sect = self.loadSection(0)
self.records, = struct.unpack('>H', self.sect[0x8:0x8+2])
self.compression, = struct.unpack('>H', self.sect[0x0:0x0+2])
if self.magic == 'TEXtREAd':
print "Book has format: ", self.magic
self.extra_data_flags = 0
self.mobi_length = 0
self.mobi_version = -1
self.meta_array = {}
return
self.mobi_length, = struct.unpack('>L',self.sect[0x14:0x18])
self.mobi_codepage, = struct.unpack('>L',self.sect[0x1c:0x20])
self.mobi_version, = struct.unpack('>L',self.sect[0x68:0x6C])
print "MOBI header version = %d, length = %d" %(self.mobi_version, self.mobi_length)
self.extra_data_flags = 0
if (self.mobi_length >= 0xE4) and (self.mobi_version >= 5):
self.extra_data_flags, = struct.unpack('>H', self.sect[0xF2:0xF4])
print "Extra Data Flags = %d" % self.extra_data_flags
if (self.compression != 17480):
# multibyte utf8 data is included in the encryption for PalmDoc compression
# so clear that flag bit so that we leave it to be decrypted.
self.extra_data_flags &= 0xFFFE
# if exth region exists parse it for metadata array
self.meta_array = {}
try:
exth_flag, = struct.unpack('>L', self.sect[0x80:0x84])
exth = 'NONE'
if exth_flag & 0x40:
exth = self.sect[16 + self.mobi_length:]
if (len(exth) >= 4) and (exth[:4] == 'EXTH'):
nitems, = struct.unpack('>I', exth[8:12])
pos = 12
for i in xrange(nitems):
type, size = struct.unpack('>II', exth[pos: pos + 8])
content = exth[pos + 8: pos + size]
self.meta_array[type] = content
# reset the text to speech flag and clipping limit, if present
if type == 401 and size == 9:
# set clipping limit to 100%
self.patchSection(0, "\144", 16 + self.mobi_length + pos + 8)
elif type == 404 and size == 9:
# make sure text to speech is enabled
self.patchSection(0, "\0", 16 + self.mobi_length + pos + 8)
# print type, size, content, content.encode('hex')
pos += size
except:
self.meta_array = {}
pass
self.print_replica = False
def getBookTitle(self):
codec_map = {
1252 : 'windows-1252',
65001 : 'utf-8',
}
title = ''
if 503 in self.meta_array:
title = self.meta_array[503]
else :
toff, tlen = struct.unpack('>II', self.sect[0x54:0x5c])
tend = toff + tlen
title = self.sect[toff:tend]
if title == '':
title = self.header[:32]
title = title.split("\0")[0]
codec = 'windows-1252'
if self.mobi_codepage in codec_map.keys():
codec = codec_map[self.mobi_codepage]
return unicode(title, codec).encode('utf-8')
def getPIDMetaInfo(self):
rec209 = ''
token = ''
if 209 in self.meta_array:
rec209 = self.meta_array[209]
data = rec209
# The 209 data comes in five byte groups. Interpret the last four bytes
# of each group as a big endian unsigned integer to get a key value
# if that key exists in the meta_array, append its contents to the token
for i in xrange(0,len(data),5):
val, = struct.unpack('>I',data[i+1:i+5])
sval = self.meta_array.get(val,'')
token += sval
return rec209, token
def patch(self, off, new):
self.data_file = self.data_file[:off] + new + self.data_file[off+len(new):]
def patchSection(self, section, new, in_off = 0):
if (section + 1 == self.num_sections):
endoff = len(self.data_file)
else:
endoff = self.sections[section + 1][0]
off = self.sections[section][0]
assert off + in_off + len(new) <= endoff
self.patch(off + in_off, new)
def parseDRM(self, data, count, pidlist):
found_key = None
keyvec1 = "\x72\x38\x33\xB0\xB4\xF2\xE3\xCA\xDF\x09\x01\xD6\xE2\xE0\x3F\x96"
for pid in pidlist:
bigpid = pid.ljust(16,'\0')
temp_key = PC1(keyvec1, bigpid, False)
temp_key_sum = sum(map(ord,temp_key)) & 0xff
found_key = None
for i in xrange(count):
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
if cksum == temp_key_sum:
cookie = PC1(temp_key, cookie)
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
if verification == ver and (flags & 0x1F) == 1:
found_key = finalkey
break
if found_key != None:
break
if not found_key:
# Then try the default encoding that doesn't require a PID
pid = "00000000"
temp_key = keyvec1
temp_key_sum = sum(map(ord,temp_key)) & 0xff
for i in xrange(count):
verification, size, type, cksum, cookie = struct.unpack('>LLLBxxx32s', data[i*0x30:i*0x30+0x30])
if cksum == temp_key_sum:
cookie = PC1(temp_key, cookie)
ver,flags,finalkey,expiry,expiry2 = struct.unpack('>LL16sLL', cookie)
if verification == ver:
found_key = finalkey
break
return [found_key,pid]
def getMobiFile(self, outpath):
file(outpath,'wb').write(self.mobi_data)
def getPrintReplica(self):
return self.print_replica
def processBook(self, pidlist):
crypto_type, = struct.unpack('>H', self.sect[0xC:0xC+2])
print 'Crypto Type is: ', crypto_type
self.crypto_type = crypto_type
if crypto_type == 0:
print "This book is not encrypted."
# we must still check for Print Replica
self.print_replica = (self.loadSection(1)[0:4] == '%MOP')
self.mobi_data = self.data_file
return
if crypto_type != 2 and crypto_type != 1:
raise DrmException("Cannot decode unknown Mobipocket encryption type %d" % crypto_type)
if 406 in self.meta_array:
data406 = self.meta_array[406]
val406, = struct.unpack('>Q',data406)
if val406 != 0:
raise DrmException("Cannot decode library or rented ebooks.")
goodpids = []
for pid in pidlist:
if len(pid)==10:
if checksumPid(pid[0:-2]) != pid:
print "Warning: PID " + pid + " has incorrect checksum, should have been "+checksumPid(pid[0:-2])
goodpids.append(pid[0:-2])
elif len(pid)==8:
goodpids.append(pid)
if self.crypto_type == 1:
t1_keyvec = "QDCVEPMU675RUBSZ"
if self.magic == 'TEXtREAd':
bookkey_data = self.sect[0x0E:0x0E+16]
elif self.mobi_version < 0:
bookkey_data = self.sect[0x90:0x90+16]
else:
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
pid = "00000000"
found_key = PC1(t1_keyvec, bookkey_data)
else :
# calculate the keys
drm_ptr, drm_count, drm_size, drm_flags = struct.unpack('>LLLL', self.sect[0xA8:0xA8+16])
if drm_count == 0:
raise DrmException("Not yet initialised with PID. Must be opened with Mobipocket Reader first.")
found_key, pid = self.parseDRM(self.sect[drm_ptr:drm_ptr+drm_size], drm_count, goodpids)
if not found_key:
raise DrmException("No key found. Most likely the correct PID has not been given.")
# kill the drm keys
self.patchSection(0, "\0" * drm_size, drm_ptr)
# kill the drm pointers
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8)
if pid=="00000000":
print "File has default encryption, no specific PID."
else:
print "File is encoded with PID "+checksumPid(pid)+"."
# clear the crypto type
self.patchSection(0, "\0" * 2, 0xC)
# decrypt sections
print "Decrypting. Please wait . . .",
self.mobi_data = self.data_file[:self.sections[1][0]]
for i in xrange(1, self.records+1):
data = self.loadSection(i)
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
if i%100 == 0:
print ".",
# print "record %d, extra_size %d" %(i,extra_size)
decoded_data = PC1(found_key, data[0:len(data) - extra_size])
if i==1:
self.print_replica = (decoded_data[0:4] == '%MOP')
self.mobi_data += decoded_data
if extra_size > 0:
self.mobi_data += data[-extra_size:]
if self.num_sections > self.records+1:
self.mobi_data += self.data_file[self.sections[self.records+1][0]:]
print "done"
return
def getUnencryptedBook(infile,pid):
if not os.path.isfile(infile):
raise DrmException('Input File Not Found')
book = MobiBook(infile)
book.processBook([pid])
return book.mobi_data
def getUnencryptedBookWithList(infile,pidlist):
if not os.path.isfile(infile):
raise DrmException('Input File Not Found')
book = MobiBook(infile)
book.processBook(pidlist)
return book.mobi_data
def main(argv=sys.argv):
print ('MobiDeDrm v%(__version__)s. '
'Copyright 2008-2011 The Dark Reverser et al.' % globals())
if len(argv)<3 or len(argv)>4:
print "Removes protection from Kindle/Mobipocket and Kindle/Print Replica ebooks"
print "Usage:"
print " %s <infile> <outfile> [<Comma separated list of PIDs to try>]" % sys.argv[0]
return 1
else:
infile = argv[1]
outfile = argv[2]
if len(argv) == 4:
pidlist = argv[3].split(',')
else:
pidlist = {}
try:
stripped_file = getUnencryptedBookWithList(infile, pidlist)
file(outfile, 'wb').write(stripped_file)
except DrmException, e:
print "Error: %s" % e
return 1
return 0
if __name__ == "__main__":
sys.exit(main())
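For completeness, a sketch of driving the standalone module above as a library rather than from the command line; the file names and PID are placeholders, and it assumes the listing is saved as mobidedrm.py somewhere importable:

import mobidedrm

nodrm_data = mobidedrm.getUnencryptedBookWithList('book.azw', ['A1B2C3D4'])
open('book_nodrm.mobi', 'wb').write(nodrm_data)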

View file

@ -18,7 +18,7 @@ def load_libcrypto():
return None return None
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
# typedef struct DES_ks # typedef struct DES_ks
# { # {
# union # union
@ -30,7 +30,7 @@ def load_libcrypto():
# } ks[16]; # } ks[16];
# } DES_key_schedule; # } DES_key_schedule;
# just create a big enough place to hold everything # just create a big enough place to hold everything
# it will have alignment of structure so we should be okay (16 byte aligned?) # it will have alignment of structure so we should be okay (16 byte aligned?)
class DES_KEY_SCHEDULE(Structure): class DES_KEY_SCHEDULE(Structure):
_fields_ = [('DES_cblock1', c_char * 16), _fields_ = [('DES_cblock1', c_char * 16),
@ -61,7 +61,7 @@ def load_libcrypto():
DES_set_key = F(None, 'DES_set_key',[c_char_p, DES_KEY_SCHEDULE_p]) DES_set_key = F(None, 'DES_set_key',[c_char_p, DES_KEY_SCHEDULE_p])
DES_ecb_encrypt = F(None, 'DES_ecb_encrypt',[c_char_p, c_char_p, DES_KEY_SCHEDULE_p, c_int]) DES_ecb_encrypt = F(None, 'DES_ecb_encrypt',[c_char_p, c_char_p, DES_KEY_SCHEDULE_p, c_int])
class DES(object): class DES(object):
def __init__(self, key): def __init__(self, key):
if len(key) != 8 : if len(key) != 8 :
@ -87,4 +87,3 @@ def load_libcrypto():
return ''.join(result) return ''.join(result)
return DES return DES


@ -0,0 +1,68 @@
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
# for details. Basically, it derives a key from a password and salt.
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
# This code may be freely used and modified for any purpose.
# Revision history
# v0.1 October 2004 - Initial release
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
# v0.3 "" the correct digest_size rather than always 20
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
import sys
import hmac
from struct import pack
try:
# only in python 2.5
import hashlib
sha = hashlib.sha1
md5 = hashlib.md5
sha256 = hashlib.sha256
except ImportError: # pragma: NO COVERAGE
# fallback
import sha
import md5
# this is what you want to call.
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
try:
# depending whether the hashfn is from hashlib or sha/md5
digest_size = hashfn().digest_size
except TypeError: # pragma: NO COVERAGE
digest_size = hashfn.digest_size
# l - number of output blocks to produce
l = keylen / digest_size
if keylen % digest_size != 0:
l += 1
h = hmac.new( password, None, hashfn )
T = ""
for i in range(1, l+1):
T += pbkdf2_F( h, salt, itercount, i )
return T[0: keylen]
def xorstr( a, b ):
if len(a) != len(b):
raise ValueError("xorstr(): lengths differ")
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
def prf( h, data ):
hm = h.copy()
hm.update( data )
return hm.digest()
# Helper as per the spec. h is a hmac which has been created seeded with the
# password, it will be copy()ed and not modified.
def pbkdf2_F( h, salt, itercount, blocknum ):
U = prf( h, salt + pack('>i',blocknum ) )
T = U
for i in range(2, itercount+1):
U = prf( h, U )
T = xorstr( T, U )
return T
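# Quick usage sketch for the routine above; the passphrase, salt, iteration
# count and key length are arbitrary illustration values.
if __name__ == '__main__':
    key = pbkdf2('passphrase', '\x00\x01\x02\x03\x04\x05\x06\x07', 1000, 16)
    assert len(key) == 16
    print key.encode('hex')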


@ -6,6 +6,7 @@ import csv
import sys import sys
import os import os
import getopt import getopt
import re
from struct import pack from struct import pack
from struct import unpack from struct import unpack
@ -43,8 +44,8 @@ class DocParser(object):
'pos-right' : 'text-align: right;', 'pos-right' : 'text-align: right;',
'pos-center' : 'text-align: center; margin-left: auto; margin-right: auto;', 'pos-center' : 'text-align: center; margin-left: auto; margin-right: auto;',
} }
# find tag if within pos to end inclusive # find tag if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) : def findinDoc(self, tagpath, pos, end) :
result = None result = None
@ -59,10 +60,10 @@ class DocParser(object):
item = docList[j] item = docList[j]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=',1) (name, argres) = item.split('=',1)
else : else :
name = item name = item
argres = '' argres = ''
if name.endswith(tagpath) : if name.endswith(tagpath) :
result = argres result = argres
foundat = j foundat = j
break break
@ -82,12 +83,19 @@ class DocParser(object):
return startpos return startpos
# returns a vector of integers for the tagpath # returns a vector of integers for the tagpath
def getData(self, tagpath, pos, end): def getData(self, tagpath, pos, end, clean=False):
if clean:
digits_only = re.compile(r'''([0-9]+)''')
argres=[] argres=[]
(foundat, argt) = self.findinDoc(tagpath, pos, end) (foundat, argt) = self.findinDoc(tagpath, pos, end)
if (argt != None) and (len(argt) > 0) : if (argt != None) and (len(argt) > 0) :
argList = argt.split('|') argList = argt.split('|')
argres = [ int(strval) for strval in argList] for strval in argList:
if clean:
m = re.search(digits_only, strval)
if m != None:
strval = m.group()
argres.append(int(strval))
return argres return argres
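# Illustrative sketch of what the new clean flag does to one raw value before
# the int() conversion (the 'pid-0042' string is made up):
#   m = re.search(re.compile(r'''([0-9]+)'''), 'pid-0042')
#   int(m.group())   # -> 42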
def process(self): def process(self):
@ -112,7 +120,7 @@ class DocParser(object):
(pos, tag) = self.findinDoc('style._tag',start,end) (pos, tag) = self.findinDoc('style._tag',start,end)
if tag == None : if tag == None :
(pos, tag) = self.findinDoc('style.type',start,end) (pos, tag) = self.findinDoc('style.type',start,end)
# Is this something we know how to convert to css # Is this something we know how to convert to css
if tag in self.stags : if tag in self.stags :
@ -121,7 +129,7 @@ class DocParser(object):
if sclass != None: if sclass != None:
sclass = sclass.replace(' ','-') sclass = sclass.replace(' ','-')
sclass = '.cl-' + sclass.lower() sclass = '.cl-' + sclass.lower()
else : else :
sclass = '' sclass = ''
# check for any "after class" specifiers # check for any "after class" specifiers
@ -129,7 +137,7 @@ class DocParser(object):
if aftclass != None: if aftclass != None:
aftclass = aftclass.replace(' ','-') aftclass = aftclass.replace(' ','-')
aftclass = '.cl-' + aftclass.lower() aftclass = '.cl-' + aftclass.lower()
else : else :
aftclass = '' aftclass = ''
cssargs = {} cssargs = {}
@ -140,7 +148,7 @@ class DocParser(object):
(pos2, val) = self.findinDoc('style.rule.value', start, end) (pos2, val) = self.findinDoc('style.rule.value', start, end)
if attr == None : break if attr == None : break
if (attr == 'display') or (attr == 'pos') or (attr == 'align'): if (attr == 'display') or (attr == 'pos') or (attr == 'align'):
# handle text based attributes # handle text based attributes
attr = attr + '-' + val attr = attr + '-' + val
@ -168,7 +176,7 @@ class DocParser(object):
if aftclass != "" : keep = False if aftclass != "" : keep = False
if keep : if keep :
# make sure line-space does not go below 100% or above 300% since # make sure line-space does not go below 100% or above 300% since
# it can be wacky in some styles # it can be wacky in some styles
if 'line-space' in cssargs: if 'line-space' in cssargs:
seg = cssargs['line-space'][0] seg = cssargs['line-space'][0]
@ -178,7 +186,7 @@ class DocParser(object):
del cssargs['line-space'] del cssargs['line-space']
cssargs['line-space'] = (self.attr_val_map['line-space'], val) cssargs['line-space'] = (self.attr_val_map['line-space'], val)
# handle modifications for css style hanging indents # handle modifications for css style hanging indents
if 'hang' in cssargs: if 'hang' in cssargs:
hseg = cssargs['hang'][0] hseg = cssargs['hang'][0]
@ -211,7 +219,7 @@ class DocParser(object):
if sclass != '' : if sclass != '' :
classlst += sclass + '\n' classlst += sclass + '\n'
# handle special case of paragraph class used inside chapter heading # handle special case of paragraph class used inside chapter heading
# and non-chapter headings # and non-chapter headings
if sclass != '' : if sclass != '' :
@ -232,7 +240,7 @@ class DocParser(object):
if cssline != ' { }': if cssline != ' { }':
csspage += self.stags[tag] + cssline + '\n' csspage += self.stags[tag] + cssline + '\n'
return csspage, classlst return csspage, classlst
@ -251,5 +259,5 @@ def convert2CSS(flatxml, fontsize, ph, pw):
def getpageIDMap(flatxml): def getpageIDMap(flatxml):
dp = DocParser(flatxml, 0, 0, 0) dp = DocParser(flatxml, 0, 0, 0)
pageidnumbers = dp.getData('info.original.pid', 0, -1) pageidnumbers = dp.getData('info.original.pid', 0, -1, True)
return pageidnumbers return pageidnumbers


@ -52,7 +52,7 @@ class Process(object):
self.__stdout_thread = threading.Thread( self.__stdout_thread = threading.Thread(
name="stdout-thread", name="stdout-thread",
target=self.__reader, args=(self.__collected_outdata, target=self.__reader, args=(self.__collected_outdata,
self.__process.stdout)) self.__process.stdout))
self.__stdout_thread.setDaemon(True) self.__stdout_thread.setDaemon(True)
self.__stdout_thread.start() self.__stdout_thread.start()
@ -60,7 +60,7 @@ class Process(object):
self.__stderr_thread = threading.Thread( self.__stderr_thread = threading.Thread(
name="stderr-thread", name="stderr-thread",
target=self.__reader, args=(self.__collected_errdata, target=self.__reader, args=(self.__collected_errdata,
self.__process.stderr)) self.__process.stderr))
self.__stderr_thread.setDaemon(True) self.__stderr_thread.setDaemon(True)
self.__stderr_thread.start() self.__stderr_thread.start()
@ -146,4 +146,3 @@ class Process(object):
self.__quit = True self.__quit = True
self.__inputsem.release() self.__inputsem.release()
self.__lock.release() self.__lock.release()


@ -20,11 +20,12 @@ import os, csv, getopt
import zlib, zipfile, tempfile, shutil import zlib, zipfile, tempfile, shutil
from struct import pack from struct import pack
from struct import unpack from struct import unpack
from alfcrypto import Topaz_Cipher
class TpzDRMError(Exception): class TpzDRMError(Exception):
pass pass
# local support routines # local support routines
if inCalibre: if inCalibre:
from calibre_plugins.k4mobidedrm import kgenpids from calibre_plugins.k4mobidedrm import kgenpids
@ -58,22 +59,22 @@ def bookReadEncodedNumber(fo):
flag = False flag = False
data = ord(fo.read(1)) data = ord(fo.read(1))
if data == 0xFF: if data == 0xFF:
flag = True flag = True
data = ord(fo.read(1)) data = ord(fo.read(1))
if data >= 0x80: if data >= 0x80:
datax = (data & 0x7F) datax = (data & 0x7F)
while data >= 0x80 : while data >= 0x80 :
data = ord(fo.read(1)) data = ord(fo.read(1))
datax = (datax <<7) + (data & 0x7F) datax = (datax <<7) + (data & 0x7F)
data = datax data = datax
if flag: if flag:
data = -data data = -data
return data return data
# Get a length prefixed string from file # Get a length prefixed string from file
def bookReadString(fo): def bookReadString(fo):
stringLength = bookReadEncodedNumber(fo) stringLength = bookReadEncodedNumber(fo)
return unpack(str(stringLength)+"s",fo.read(stringLength))[0] return unpack(str(stringLength)+"s",fo.read(stringLength))[0]
# #
# crypto routines # crypto routines
@ -81,25 +82,28 @@ def bookReadString(fo):
# Context initialisation for the Topaz Crypto # Context initialisation for the Topaz Crypto
def topazCryptoInit(key): def topazCryptoInit(key):
ctx1 = 0x0CAFFE19E return Topaz_Cipher().ctx_init(key)
for keyChar in key:
keyByte = ord(keyChar) # ctx1 = 0x0CAFFE19E
ctx2 = ctx1 # for keyChar in key:
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF ) # keyByte = ord(keyChar)
return [ctx1,ctx2] # ctx2 = ctx1
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
# return [ctx1,ctx2]
# decrypt data with the context prepared by topazCryptoInit() # decrypt data with the context prepared by topazCryptoInit()
def topazCryptoDecrypt(data, ctx): def topazCryptoDecrypt(data, ctx):
ctx1 = ctx[0] return Topaz_Cipher().decrypt(data, ctx)
ctx2 = ctx[1] # ctx1 = ctx[0]
plainText = "" # ctx2 = ctx[1]
for dataChar in data: # plainText = ""
dataByte = ord(dataChar) # for dataChar in data:
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF # dataByte = ord(dataChar)
ctx2 = ctx1 # m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF) # ctx2 = ctx1
plainText += chr(m) # ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
return plainText # plainText += chr(m)
# return plainText
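# The commented-out code above is what alfcrypto's Topaz_Cipher now provides;
# a direct-use sketch (book_key and encrypted_record are placeholder names):
#   cipher = Topaz_Cipher()
#   ctx = cipher.ctx_init(book_key)
#   plaintext = cipher.decrypt(encrypted_record, ctx)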
# Decrypt data with the PID # Decrypt data with the PID
def decryptRecord(data,PID): def decryptRecord(data,PID):
@ -153,7 +157,7 @@ class TopazBook:
def parseTopazHeaders(self): def parseTopazHeaders(self):
def bookReadHeaderRecordData(): def bookReadHeaderRecordData():
# Read and return the data of one header record at the current book file position # Read and return the data of one header record at the current book file position
# [[offset,decompressedLength,compressedLength],...] # [[offset,decompressedLength,compressedLength],...]
nbValues = bookReadEncodedNumber(self.fo) nbValues = bookReadEncodedNumber(self.fo)
values = [] values = []
@ -213,11 +217,11 @@ class TopazBook:
self.bookKey = key self.bookKey = key
def getBookPayloadRecord(self, name, index): def getBookPayloadRecord(self, name, index):
# Get a record in the book payload, given its name and index. # Get a record in the book payload, given its name and index.
# decrypted and decompressed if necessary # decrypted and decompressed if necessary
encrypted = False encrypted = False
compressed = False compressed = False
try: try:
recordOffset = self.bookHeaderRecords[name][index][0] recordOffset = self.bookHeaderRecords[name][index][0]
except: except:
raise TpzDRMError("Parse Error : Invalid Record, record not found") raise TpzDRMError("Parse Error : Invalid Record, record not found")
@ -268,8 +272,8 @@ class TopazBook:
rv = genbook.generateBook(self.outdir, raw, fixedimage) rv = genbook.generateBook(self.outdir, raw, fixedimage)
if rv == 0: if rv == 0:
print "\nBook Successfully generated" print "\nBook Successfully generated"
return rv return rv
# try each pid to decode the file # try each pid to decode the file
bookKey = None bookKey = None
for pid in pidlst: for pid in pidlst:
@ -297,7 +301,7 @@ class TopazBook:
rv = genbook.generateBook(self.outdir, raw, fixedimage) rv = genbook.generateBook(self.outdir, raw, fixedimage)
if rv == 0: if rv == 0:
print "\nBook Successfully generated" print "\nBook Successfully generated"
return rv return rv
def createBookDirectory(self): def createBookDirectory(self):
outdir = self.outdir outdir = self.outdir
@ -378,7 +382,7 @@ def usage(progname):
print "Removes DRM protection from Topaz ebooks and extract the contents" print "Removes DRM protection from Topaz ebooks and extract the contents"
print "Usage:" print "Usage:"
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
# Main # Main
def main(argv=sys.argv): def main(argv=sys.argv):
@ -387,7 +391,7 @@ def main(argv=sys.argv):
pids = [] pids = []
serials = [] serials = []
kInfoFiles = [] kInfoFiles = []
try: try:
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:") opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
except getopt.GetoptError, err: except getopt.GetoptError, err:
@ -397,7 +401,7 @@ def main(argv=sys.argv):
if len(args)<2: if len(args)<2:
usage(progname) usage(progname)
return 1 return 1
for o, a in opts: for o, a in opts:
if o == "-k": if o == "-k":
if a == None : if a == None :
@ -429,7 +433,7 @@ def main(argv=sys.argv):
title = tb.getBookTitle() title = tb.getBookTitle()
print "Processing Book: ", title print "Processing Book: ", title
keysRecord, keysRecordRecord = tb.getPIDMetaInfo() keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles) pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
try: try:
print "Decrypting Book" print "Decrypting Book"
@ -461,9 +465,8 @@ def main(argv=sys.argv):
return 1 return 1
return 0 return 0
if __name__ == '__main__': if __name__ == '__main__':
sys.stdout=Unbuffered(sys.stdout) sys.stdout=Unbuffered(sys.stdout)
sys.exit(main()) sys.exit(main())


@ -30,8 +30,8 @@ class fixZip:
self.inzip = zipfile.ZipFile(zinput,'r') self.inzip = zipfile.ZipFile(zinput,'r')
self.outzip = zipfile.ZipFile(zoutput,'w') self.outzip = zipfile.ZipFile(zoutput,'w')
# open the input zip for reading only as a raw file # open the input zip for reading only as a raw file
self.bzf = file(zinput,'rb') self.bzf = file(zinput,'rb')
def getlocalname(self, zi): def getlocalname(self, zi):
local_header_offset = zi.header_offset local_header_offset = zi.header_offset
self.bzf.seek(local_header_offset + _FILENAME_LEN_OFFSET) self.bzf.seek(local_header_offset + _FILENAME_LEN_OFFSET)
@ -86,7 +86,7 @@ class fixZip:
return data return data
def fix(self): def fix(self):
# get the zipinfo for each member of the input archive # get the zipinfo for each member of the input archive
@ -103,7 +103,7 @@ class fixZip:
if zinfo.filename != "mimetype" or self.ztype == '.zip': if zinfo.filename != "mimetype" or self.ztype == '.zip':
data = None data = None
nzinfo = zinfo nzinfo = zinfo
try: try:
data = self.inzip.read(zinfo.filename) data = self.inzip.read(zinfo.filename)
except zipfile.BadZipfile or zipfile.error: except zipfile.BadZipfile or zipfile.error:
local_name = self.getlocalname(zinfo) local_name = self.getlocalname(zinfo)
@ -126,7 +126,7 @@ def usage():
inputzip is the source zipfile to fix inputzip is the source zipfile to fix
outputzip is the fixed zip archive outputzip is the fixed zip archive
""" """
def repairBook(infile, outfile): def repairBook(infile, outfile):
if not os.path.exists(infile): if not os.path.exists(infile):
@ -152,5 +152,3 @@ def main(argv=sys.argv):
if __name__ == '__main__' : if __name__ == '__main__' :
sys.exit(main()) sys.exit(main())

View file

@ -0,0 +1,48 @@
{\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360
{\fonttbl\f0\fswiss\fcharset0 Helvetica;}
{\colortbl;\red255\green255\blue255;}
\paperw11900\paperh16840\margl1440\margr1440\vieww10320\viewh9840\viewkind0
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
\f0\b\fs24 \cf0 ReadMe_DeDRM_X.X
\b0 \
\
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\qj\pardirnatural
\cf0 DeDRM_X.X is an AppleScript droplet that allows users to drag and drop ebooks or folders of ebooks onto the DeDRM droplet to have the DRM removed. It repackages all the "tools" DeDRM Python software in one easy-to-use program that remembers preferences and settings.\
\
It should work without manual configuration with Kindle for Mac ebooks and Adobe Adept epub and pdf ebooks.\
\
Removing the DRM from standalone Kindle ebooks, eReader pdb ebooks, Barnes and Noble epubs, and Mobipocket ebooks requires the user to double-click the DeDRM droplet and set some additional Preferences including:\
\
Kindle 16 digit Serial Number\
Barnes & Noble key files (bnepubkey.b64)\
eReader Social DRM: (Name:Last 8 digits of CC number)\
MobiPocket, Kindle for iPhone/iPad/iPodTouch 10 digit PID\
Location for DRM-free ebooks.\
\
Once these preferences have been set, the user can simply drag and drop ebooks onto the DeDRM droplet to remove the DRM.\
\
This program requires Mac OS X 10.5, 10.6 or 10.7 (Leopard, Snow Leopard or Lion) \
\pard\tx566\tx1133\tx1700\tx2267\tx2834\tx3401\tx3968\tx4535\tx5102\tx5669\tx6236\tx6803\ql\qnatural\pardirnatural
\cf0 \
\
\
\b Installation\
\b0 \
1. From tools_vX.X\\DeDRM_Applications\\, double click on DeDRM_X.X.zip to extract its contents. \
\
2. Move the resulting DeDRM X.X.app AppleScript droplet to wherever you keep your other applications. (Typically your Applications folder.)\
\
3. Optionally drag it into your dock, to make it easily available.\
\
\
\
\b Use\
\b0 \
1. To set the preferences, simply double-click the AppleScript droplet in your Applications folder or click on its icon in your dock, and follow the instructions in the dialogs.\
\
2. Drag & Drop DRMed ebooks or folders containing DRMed ebooks onto the Application, either in your Applications folder, or the icon in your dock.}


@ -18,6 +18,9 @@ from subasyncio import Process
import re import re
import simpleprefs import simpleprefs
__version__ = '5.0'
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -29,7 +32,7 @@ class MainApp(Tk):
self.apphome = apphome self.apphome = apphome
# preference settings # preference settings
# [dictionary key, file in preferences directory where info is stored] # [dictionary key, file in preferences directory where info is stored]
description = [ ['pids' , 'pidlist.txt' ], description = [ ['pids' , 'pidlist.txt' ],
['serials', 'seriallist.txt'], ['serials', 'seriallist.txt'],
['sdrms' , 'sdrmlist.txt' ], ['sdrms' , 'sdrmlist.txt' ],
['outdir' , 'outdir.txt' ]] ['outdir' , 'outdir.txt' ]]
@ -123,7 +126,7 @@ class PrefsDialog(Toplevel):
self.bnkpath.insert(0, path) self.bnkpath.insert(0, path)
button = Tkinter.Button(body, text="...", command=self.get_bnkpath) button = Tkinter.Button(body, text="...", command=self.get_bnkpath)
button.grid(row=1, column=2) button.grid(row=1, column=2)
Tkinter.Label(body, text='Additional kindle.info or .kinf file').grid(row=2, sticky=Tkconstants.E) Tkinter.Label(body, text='Additional kindle.info or .kinf file').grid(row=2, sticky=Tkconstants.E)
self.altinfopath = Tkinter.Entry(body, width=50) self.altinfopath = Tkinter.Entry(body, width=50)
self.altinfopath.grid(row=2, column=1, sticky=sticky) self.altinfopath.grid(row=2, column=1, sticky=sticky)
@ -180,7 +183,7 @@ class PrefsDialog(Toplevel):
self.bookpath.grid(row=9, column=1, sticky=sticky) self.bookpath.grid(row=9, column=1, sticky=sticky)
button = Tkinter.Button(body, text="...", command=self.get_bookpath) button = Tkinter.Button(body, text="...", command=self.get_bookpath)
button.grid(row=9, column=2) button.grid(row=9, column=2)
Tkinter.Label(body, font=("Helvetica", "10", "italic"), text='*To DeDRM multiple ebooks simultaneously, set your preferences and quit.\nThen drag and drop ebooks or folders onto the DeDRM_Drop_Target').grid(row=10, column=1, sticky=Tkconstants.E) Tkinter.Label(body, font=("Helvetica", "10", "italic"), text='*To DeDRM multiple ebooks simultaneously, set your preferences and quit.\nThen drag and drop ebooks or folders onto the DeDRM_Drop_Target').grid(row=10, column=1, sticky=Tkconstants.E)
Tkinter.Label(body, text='').grid(row=11, column=0, columnspan=2, sticky=Tkconstants.E) Tkinter.Label(body, text='').grid(row=11, column=0, columnspan=2, sticky=Tkconstants.E)
@ -365,7 +368,7 @@ class ConvDialog(Toplevel):
def conversion_done(self): def conversion_done(self):
self.hide() self.hide()
self.master.alldone() self.master.alldone()
def processBooks(self): def processBooks(self):
while self.running == 'inactive': while self.running == 'inactive':
rscpath = self.prefs_array['dir'] rscpath = self.prefs_array['dir']
@ -429,7 +432,7 @@ class ConvDialog(Toplevel):
# nothing to wait for so just return # nothing to wait for so just return
return return
poll = self.p2.wait('nowait') poll = self.p2.wait('nowait')
if poll != None: if poll != None:
self.bar.stop() self.bar.stop()
if poll == 0: if poll == 0:
msg = 'Success\n' msg = 'Success\n'
@ -451,7 +454,7 @@ class ConvDialog(Toplevel):
self.running = 'inactive' self.running = 'inactive'
self.after(50,self.processBooks) self.after(50,self.processBooks)
return return
# make sure we get invoked again by event loop after interval # make sure we get invoked again by event loop after interval
self.stext.after(self.interval,self.processPipe) self.stext.after(self.interval,self.processPipe)
return return
@ -481,7 +484,7 @@ def runit(apphome, ncmd, nparms):
if sys.platform.startswith('win'): if sys.platform.startswith('win'):
search_path = os.environ['PATH'] search_path = os.environ['PATH']
search_path = search_path.lower() search_path = search_path.lower()
if search_path.find('python') < 0: if search_path.find('python') < 0:
# if no python hope that win registry finds what is associated with py extension # if no python hope that win registry finds what is associated with py extension
cmdline = '"' + os.path.join(apphome, ncmd) + '" ' cmdline = '"' + os.path.join(apphome, ncmd) + '" '
cmdline += nparms cmdline += nparms
@ -585,4 +588,3 @@ def main(argv=sys.argv):
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())


@ -7,7 +7,7 @@ class ActivityBar(Tkinter.Frame):
def __init__(self, master, length=300, height=20, barwidth=15, interval=50, bg='white', fillcolor='orchid1',\ def __init__(self, master, length=300, height=20, barwidth=15, interval=50, bg='white', fillcolor='orchid1',\
bd=2, relief=Tkconstants.GROOVE, *args, **kw): bd=2, relief=Tkconstants.GROOVE, *args, **kw):
Tkinter.Frame.__init__(self, master, bg=bg, width=length, height=height, *args, **kw) Tkinter.Frame.__init__(self, master, bg=bg, width=length, height=height, *args, **kw)
self._master = master self._master = master
self._interval = interval self._interval = interval
self._maximum = length self._maximum = length
self._startx = 0 self._startx = 0
@ -24,7 +24,7 @@ class ActivityBar(Tkinter.Frame):
highlightthickness=0, relief=relief, bd=bd) highlightthickness=0, relief=relief, bd=bd)
self._canv.pack(fill='both', expand=1) self._canv.pack(fill='both', expand=1)
self._rect = self._canv.create_rectangle(0, 0, self._canv.winfo_reqwidth(), self._canv.winfo_reqheight(), fill=fillcolor, width=0) self._rect = self._canv.create_rectangle(0, 0, self._canv.winfo_reqwidth(), self._canv.winfo_reqheight(), fill=fillcolor, width=0)
self._set() self._set()
self.bind('<Configure>', self._update_coords) self.bind('<Configure>', self._update_coords)
self._running = False self._running = False
@ -64,7 +64,7 @@ class ActivityBar(Tkinter.Frame):
def stop(self): def stop(self):
self._running = False self._running = False
self._set() self._set()
def _step(self): def _step(self):
if self._running: if self._running:
stepsize = self._barwidth / 4 stepsize = self._barwidth / 4


@ -0,0 +1,568 @@
#! /usr/bin/env python
"""
Routines for doing AES CBC in one file
Modified by some_updates to extract
and combine only those parts needed for AES CBC
into one simple to add python file
Original Version
Copyright (c) 2002 by Paul A. Lambert
Under:
CryptoPy Artistic License Version 1.0
See the wonderful pure python package cryptopy-1.2.5
and read its LICENSE.txt for complete license details.
"""
class CryptoError(Exception):
""" Base class for crypto exceptions """
def __init__(self,errorMessage='Error!'):
self.message = errorMessage
def __str__(self):
return self.message
class InitCryptoError(CryptoError):
""" Crypto errors during algorithm initialization """
class BadKeySizeError(InitCryptoError):
""" Bad key size error """
class EncryptError(CryptoError):
""" Error in encryption processing """
class DecryptError(CryptoError):
""" Error in decryption processing """
class DecryptNotBlockAlignedError(DecryptError):
""" Error in decryption processing """
def xorS(a,b):
""" XOR two strings """
assert len(a)==len(b)
x = []
for i in range(len(a)):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x)
def xor(a,b):
""" XOR two strings """
x = []
for i in range(min(len(a),len(b))):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x)
"""
Base 'BlockCipher' and Pad classes for cipher instances.
BlockCipher supports automatic padding and type conversion. The BlockCipher
class was written to make the actual algorithm code more readable and
not for performance.
"""
class BlockCipher:
""" Block ciphers """
def __init__(self):
self.reset()
def reset(self):
self.resetEncrypt()
self.resetDecrypt()
def resetEncrypt(self):
self.encryptBlockCount = 0
self.bytesToEncrypt = ''
def resetDecrypt(self):
self.decryptBlockCount = 0
self.bytesToDecrypt = ''
def encrypt(self, plainText, more = None):
""" Encrypt a string and return a binary string """
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
cipherText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
self.encryptBlockCount += 1
cipherText += ctBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
else:
self.bytesToEncrypt = ''
if more == None: # no more data expected from caller
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
if len(finalBytes) > 0:
ctBlock = self.encryptBlock(finalBytes)
self.encryptBlockCount += 1
cipherText += ctBlock
self.resetEncrypt()
return cipherText
def decrypt(self, cipherText, more = None):
""" Decrypt a string and return a string """
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
if more == None: # no more calls to decrypt, should have all the data
if numExtraBytes != 0:
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
# hold back some bytes in case last decrypt has zero len
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
numBlocks -= 1
numExtraBytes = self.blockSize
plainText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
self.decryptBlockCount += 1
plainText += ptBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
self.bytesToDecrypt = self.bytesToDecrypt[-numExtraBytes:]
else:
self.bytesToDecrypt = ''
if more == None: # last decrypt remove padding
plainText = self.padding.removePad(plainText, self.blockSize)
self.resetDecrypt()
return plainText
class Pad:
def __init__(self):
pass # eventually could put in calculation of min and max size extension
class padWithPadLen(Pad):
""" Pad a binary string with the length of the padding """
def addPad(self, extraBytes, blockSize):
""" Add padding to a binary string to make it an even multiple
of the block size """
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
padLength = blockSize - numExtraBytes
return extraBytes + padLength*chr(padLength)
def removePad(self, paddedBinaryString, blockSize):
""" Remove padding from a binary string """
if not(0<len(paddedBinaryString)):
raise DecryptNotBlockAlignedError, 'Expected More Data'
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
class noPadding(Pad):
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
def addPad(self, extraBytes, blockSize):
""" Add no padding """
return extraBytes
def removePad(self, paddedBinaryString, blockSize):
""" Remove no padding """
return paddedBinaryString
"""
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
"""
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16, 20, 24, 28 or 32 bytes'
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16, 20, 24, 28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
This is the opposite operation of Mixcolumn """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
"""
AES Encryption Algorithm
The AES algorithm is just Rijndael algorithm restricted to the default
blockSize of 128 bits.
"""
class AES(Rijndael):
""" The AES algorithm is the Rijndael block cipher restricted to block
sizes of 128 bits and key sizes of 128, 192 or 256 bits
"""
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
""" Initialize AES, keySize is in bytes """
if not (keySize == 16 or keySize == 24 or keySize == 32) :
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
self.name = 'AES'
"""
CBC mode of encryption for block ciphers.
This algorithm mode wraps any BlockCipher to make a
Cipher Block Chaining mode.
"""
from random import Random # should change to crypto.random!!!
class CBC(BlockCipher):
""" The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
algorithms. The initialization (IV) is automatic if set to None. Padding
is also automatic based on the Pad class used to initialize the algorithm
"""
def __init__(self, blockCipherInstance, padding = padWithPadLen()):
""" CBC algorithms are created by initializing with a BlockCipher instance """
self.baseCipher = blockCipherInstance
self.name = self.baseCipher.name + '_CBC'
self.blockSize = self.baseCipher.blockSize
self.keySize = self.baseCipher.keySize
self.padding = padding
self.baseCipher.padding = noPadding() # baseCipher should NOT pad!!
self.r = Random() # for IV generation, currently uses
# mediocre standard distro version <----------------
import time
newSeed = time.ctime()+str(self.r) # seed with instance location
self.r.seed(newSeed) # to make unique
self.reset()
def setKey(self, key):
self.baseCipher.setKey(key)
# Overload to reset both CBC state and the wrapped baseCipher
def resetEncrypt(self):
BlockCipher.resetEncrypt(self) # reset CBC encrypt state (super class)
self.baseCipher.resetEncrypt() # reset base cipher encrypt state
def resetDecrypt(self):
BlockCipher.resetDecrypt(self) # reset CBC state (super class)
self.baseCipher.resetDecrypt() # reset base cipher decrypt state
def encrypt(self, plainText, iv=None, more=None):
""" CBC encryption - overloads baseCipher to allow optional explicit IV
when iv=None, iv is auto generated!
"""
if self.encryptBlockCount == 0:
self.iv = iv
else:
assert(iv==None), 'IV used only on first call to encrypt'
return BlockCipher.encrypt(self,plainText, more=more)
def decrypt(self, cipherText, iv=None, more=None):
""" CBC decryption - overloads baseCipher to allow optional explicit IV
when iv=None, iv is auto generated!
"""
if self.decryptBlockCount == 0:
self.iv = iv
else:
assert(iv==None), 'IV used only on first call to decrypt'
return BlockCipher.decrypt(self, cipherText, more=more)
def encryptBlock(self, plainTextBlock):
""" CBC block encryption, IV is set with 'encrypt' """
auto_IV = ''
if self.encryptBlockCount == 0:
if self.iv == None:
# generate IV and use
self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
self.prior_encr_CT_block = self.iv
auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic
else: # application provided IV
assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
self.prior_encr_CT_block = self.iv
""" encrypt the prior CT XORed with the PT """
ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
self.prior_encr_CT_block = ct
return auto_IV+ct
def decryptBlock(self, encryptedBlock):
""" Decrypt a single block """
if self.decryptBlockCount == 0: # first call, process IV
if self.iv == None: # auto decrypt IV?
self.prior_CT_block = encryptedBlock
return ''
else:
assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
self.prior_CT_block = self.iv
dct = self.baseCipher.decryptBlock(encryptedBlock)
""" XOR the prior decrypted CT with the prior CT """
dct_XOR_priorCT = xor( self.prior_CT_block, dct )
self.prior_CT_block = encryptedBlock
return dct_XOR_priorCT
"""
AES_CBC Encryption Algorithm
"""
class AES_CBC(CBC):
""" AES encryption in CBC feedback mode """
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
self.name = 'AES_CBC'
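# Minimal round-trip sketch for the classes above; the key and message are
# arbitrary test values, not anything used by the DRM-removal code.
if __name__ == '__main__':
    cipher = AES_CBC('0123456789abcdef', padWithPadLen(), 16)
    ct = cipher.encrypt('attack at dawn')       # IV is auto-generated and prepended
    assert cipher.decrypt(ct) == 'attack at dawn'
    print 'AES_CBC round trip ok'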


@ -0,0 +1,290 @@
#! /usr/bin/env python
import sys, os
import hmac
from struct import pack
import hashlib
# interface to needed routines libalfcrypto
def _load_libalfcrypto():
import ctypes
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
pointer_size = ctypes.sizeof(ctypes.c_voidp)
name_of_lib = None
if sys.platform.startswith('darwin'):
name_of_lib = 'libalfcrypto.dylib'
elif sys.platform.startswith('win'):
if pointer_size == 4:
name_of_lib = 'alfcrypto.dll'
else:
name_of_lib = 'alfcrypto64.dll'
else:
if pointer_size == 4:
name_of_lib = 'libalfcrypto32.so'
else:
name_of_lib = 'libalfcrypto64.so'
libalfcrypto = sys.path[0] + os.sep + name_of_lib
if not os.path.isfile(libalfcrypto):
raise Exception('libalfcrypto not found')
libalfcrypto = CDLL(libalfcrypto)
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
def F(restype, name, argtypes):
func = getattr(libalfcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
# aes cbc decryption
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
#
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
#
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key,
# unsigned char *ivec, const int enc);
AES_MAXNR = 14
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
# Pukall 1 Cipher
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
# unsigned char *dest, unsigned int len, int decryption);
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
# Topaz Encryption
# typedef struct _TpzCtx {
# unsigned int v[2];
# } TpzCtx;
#
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
class TPZ_CTX(Structure):
_fields_ = [('v', c_long * 2)]
TPZ_CTX_p = POINTER(TPZ_CTX)
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
class AES_CBC(object):
def __init__(self):
self._blocksize = 0
self._keyctx = None
self._iv = 0
def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey)
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
raise Exception('AES CBC improper key used')
return
keyctx = self._keyctx = AES_KEY()
self._iv = iv
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0:
raise Exception('Failed to initialize AES CBC key')
def decrypt(self, data):
out = create_string_buffer(len(data))
mutable_iv = create_string_buffer(self._iv, len(self._iv))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
if rv == 0:
raise Exception('AES CBC decryption failed')
return out.raw
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
self.key = key
out = create_string_buffer(len(src))
de = 0
if decryption:
de = 1
rv = PC1(key, len(key), src, out, len(src), de)
return out.raw
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
tpz_ctx = self._ctx = TPZ_CTX()
topazCryptoInit(tpz_ctx, key, len(key))
return tpz_ctx
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
out = create_string_buffer(len(data))
topazCryptoDecrypt(ctx, data, out, len(data))
return out.raw
print "Using Library AlfCrypto DLL/DYLIB/SO"
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_python_alfcrypto():
import aescbc
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
sum1 = 0;
sum2 = 0;
keyXorVal = 0;
if len(key)!=16:
print "Bad key length!"
return None
wkey = []
for i in xrange(8):
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
dst = ""
for i in xrange(len(src)):
temp1 = 0;
byteXorVal = 0;
for j in xrange(8):
temp1 ^= wkey[j]
sum2 = (sum2+j)*20021 + sum1
sum1 = (temp1*346)&0xFFFF
sum2 = (sum2+sum1)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF
byteXorVal ^= temp1 ^ sum2
curByte = ord(src[i])
if not decryption:
keyXorVal = curByte * 257;
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
if decryption:
keyXorVal = curByte * 257;
for j in xrange(8):
wkey[j] ^= keyXorVal;
dst+=chr(curByte)
return dst
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
ctx1 = 0x0CAFFE19E
for keyChar in key:
keyByte = ord(keyChar)
ctx2 = ctx1
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
self._ctx = [ctx1, ctx2]
return [ctx1,ctx2]
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
ctx1 = ctx[0]
ctx2 = ctx[1]
plainText = ""
for dataChar in data:
dataByte = ord(dataChar)
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx2 = ctx1
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
plainText += chr(m)
return plainText
class AES_CBC(object):
def __init__(self):
self._key = None
self._iv = None
self.aes = None
def set_decrypt_key(self, userkey, iv):
self._key = userkey
self._iv = iv
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
def decrypt(self, data):
iv = self._iv
cleartext = self.aes.decrypt(iv + data)
return cleartext
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_crypto():
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
for loader in cryptolist:
try:
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
break
except (ImportError, Exception):
pass
return AES_CBC, Pukall_Cipher, Topaz_Cipher
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
class KeyIVGen(object):
# this only exists in openssl so we will use pure python implementation instead
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
def pbkdf2(self, passwd, salt, iter, keylen):
def xorstr( a, b ):
if len(a) != len(b):
raise Exception("xorstr(): lengths differ")
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
def prf( h, data ):
hm = h.copy()
hm.update( data )
return hm.digest()
def pbkdf2_F( h, salt, itercount, blocknum ):
U = prf( h, salt + pack('>i',blocknum ) )
T = U
for i in range(2, itercount+1):
U = prf( h, U )
T = xorstr( T, U )
return T
sha = hashlib.sha1
digest_size = sha().digest_size
# l - number of output blocks to produce
l = keylen / digest_size
if keylen % digest_size != 0:
l += 1
h = hmac.new( passwd, None, sha )
T = ""
for i in range(1, l+1):
T += pbkdf2_F( h, salt, iter, i )
return T[0: keylen]
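# Smoke-test sketch for the interchangeable back ends above; whichever loader
# succeeded, the three classes expose the same interface. The key and data
# values here are arbitrary.
if __name__ == '__main__':
    topaz = Topaz_Cipher()
    ctx = topaz.ctx_init('0123456789abcdef')
    scrambled = topaz.decrypt('any sixteen byte', ctx)
    print 'Topaz cipher produced %d bytes' % len(scrambled)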


@ -23,7 +23,7 @@ from struct import unpack
class TpzDRMError(Exception): class TpzDRMError(Exception):
pass pass
# Get a 7 bit encoded number from string. The most # Get a 7 bit encoded number from string. The most
# significant byte comes first and has the high bit (8th) set # significant byte comes first and has the high bit (8th) set
def readEncodedNumber(file): def readEncodedNumber(file):
@ -32,57 +32,57 @@ def readEncodedNumber(file):
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data == 0xFF: if data == 0xFF:
flag = True flag = True
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data >= 0x80: if data >= 0x80:
datax = (data & 0x7F) datax = (data & 0x7F)
while data >= 0x80 : while data >= 0x80 :
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
datax = (datax <<7) + (data & 0x7F) datax = (datax <<7) + (data & 0x7F)
data = datax data = datax
if flag: if flag:
data = -data data = -data
return data return data
# returns a binary string that encodes a number into 7 bits # returns a binary string that encodes a number into 7 bits
# most significant byte first which has the high bit set # most significant byte first which has the high bit set
def encodeNumber(number): def encodeNumber(number):
result = "" result = ""
negative = False negative = False
flag = 0 flag = 0
if number < 0 : if number < 0 :
number = -number + 1 number = -number + 1
negative = True negative = True
while True: while True:
byte = number & 0x7F byte = number & 0x7F
number = number >> 7 number = number >> 7
byte += flag byte += flag
result += chr(byte) result += chr(byte)
flag = 0x80 flag = 0x80
if number == 0 : if number == 0 :
if (byte == 0xFF and negative == False) : if (byte == 0xFF and negative == False) :
result += chr(0x80) result += chr(0x80)
break break
if negative: if negative:
result += chr(0xFF) result += chr(0xFF)
return result[::-1] return result[::-1]
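# Worked example of the encoding above (the value 300 is arbitrary):
#   encodeNumber(300) == '\x82\x2c'   0x82 holds the upper bits with the high
#                                     (continuation) bit set, 0x2c the low 7 bits
#   readEncodedNumber on a file positioned at '\x82\x2c' returns 300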
# create / read a length prefixed string from the file # create / read a length prefixed string from the file
@ -97,9 +97,9 @@ def readString(file):
sv = file.read(stringLength) sv = file.read(stringLength)
if (len(sv) != stringLength): if (len(sv) != stringLength):
return "" return ""
return unpack(str(stringLength)+"s",sv)[0] return unpack(str(stringLength)+"s",sv)[0]
# convert a binary string generated by encodeNumber (7 bit encoded number) # convert a binary string generated by encodeNumber (7 bit encoded number)
# to the value you would find inside the page*.dat files to be processed # to the value you would find inside the page*.dat files to be processed
@ -265,6 +265,8 @@ class PageParser(object):
'paragraph.gridSize' : (1, 'scalar_number', 0, 0), 'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0), 'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0), 'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
'word_semantic' : (1, 'snippets', 1, 1), 'word_semantic' : (1, 'snippets', 1, 1),
@ -284,6 +286,8 @@ class PageParser(object):
'_span.gridSize' : (1, 'scalar_number', 0, 0), '_span.gridSize' : (1, 'scalar_number', 0, 0),
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0), '_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0), '_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
'span' : (1, 'snippets', 1, 0), 'span' : (1, 'snippets', 1, 0),
'span.firstWord' : (1, 'scalar_number', 0, 0), 'span.firstWord' : (1, 'scalar_number', 0, 0),
@ -291,6 +295,8 @@ class PageParser(object):
'span.gridSize' : (1, 'scalar_number', 0, 0), 'span.gridSize' : (1, 'scalar_number', 0, 0),
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0), 'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'span.gridTopCenter' : (1, 'scalar_number', 0, 0), 'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
'extratokens' : (1, 'snippets', 1, 0), 'extratokens' : (1, 'snippets', 1, 0),
'extratokens.type' : (1, 'scalar_text', 0, 0), 'extratokens.type' : (1, 'scalar_text', 0, 0),
@ -376,14 +382,14 @@ class PageParser(object):
for j in xrange(i+1, cnt) : for j in xrange(i+1, cnt) :
result += '.' + self.tagpath[j] result += '.' + self.tagpath[j]
return result return result
# list of absolute command byte values values that indicate # list of absolute command byte values values that indicate
# various types of loop meachanisms typically used to generate vectors # various types of loop meachanisms typically used to generate vectors
cmd_list = (0x76, 0x76) cmd_list = (0x76, 0x76)
# peek at and return 1 byte that is ahead by i bytes # peek at and return 1 byte that is ahead by i bytes
def peek(self, aheadi): def peek(self, aheadi):
c = self.fo.read(aheadi) c = self.fo.read(aheadi)
if (len(c) == 0): if (len(c) == 0):
@ -416,7 +422,7 @@ class PageParser(object):
return result return result
# process the next tag token, recursively handling subtags, # process the next tag token, recursively handling subtags,
# arguments, and commands # arguments, and commands
def procToken(self, token): def procToken(self, token):
@ -438,7 +444,7 @@ class PageParser(object):
if known_token : if known_token :
# handle subtags if present # handle subtags if present
subtagres = [] subtagres = []
if (splcase == 1): if (splcase == 1):
# this type of tag uses of escape marker 0x74 indicate subtag count # this type of tag uses of escape marker 0x74 indicate subtag count
@ -447,7 +453,7 @@ class PageParser(object):
subtags = 1 subtags = 1
num_args = 0 num_args = 0
if (subtags == 1): if (subtags == 1):
ntags = readEncodedNumber(self.fo) ntags = readEncodedNumber(self.fo)
if self.debug : print 'subtags: ' + token + ' has ' + str(ntags) if self.debug : print 'subtags: ' + token + ' has ' + str(ntags)
for j in xrange(ntags): for j in xrange(ntags):
@ -478,7 +484,7 @@ class PageParser(object):
return result return result
# all tokens that need to be processed should be in the hash # all tokens that need to be processed should be in the hash
# table if it may indicate a problem, either new token # table if it may indicate a problem, either new token
# or an out of sync condition # or an out of sync condition
else: else:
result = [] result = []
@ -530,7 +536,7 @@ class PageParser(object):
# dispatches loop commands bytes with various modes # dispatches loop commands bytes with various modes
# The 0x76 style loops are used to build vectors # The 0x76 style loops are used to build vectors
# This was all derived by trial and error and # This was all derived by trial and error and
# new loop types may exist that are not handled here # new loop types may exist that are not handled here
# since they did not appear in the test cases # since they did not appear in the test cases
@ -549,7 +555,7 @@ class PageParser(object):
return result return result
# add full tag path to injected snippets # add full tag path to injected snippets
def updateName(self, tag, prefix): def updateName(self, tag, prefix):
name = tag[0] name = tag[0]
@ -577,7 +583,7 @@ class PageParser(object):
argtype = tag[2] argtype = tag[2]
argList = tag[3] argList = tag[3]
nsubtagList = [] nsubtagList = []
if len(argList) > 0 : if len(argList) > 0 :
for j in argList: for j in argList:
asnip = self.snippetList[j] asnip = self.snippetList[j]
aso, atag = self.injectSnippets(asnip) aso, atag = self.injectSnippets(asnip)
@ -609,65 +615,70 @@ class PageParser(object):
nodename = fullpathname.pop() nodename = fullpathname.pop()
ilvl = len(fullpathname) ilvl = len(fullpathname)
indent = ' ' * (3 * ilvl) indent = ' ' * (3 * ilvl)
result = indent + '<' + nodename + '>' rlst = []
rlst.append(indent + '<' + nodename + '>')
if len(argList) > 0: if len(argList) > 0:
argres = '' alst = []
for j in argList: for j in argList:
if (argtype == 'text') or (argtype == 'scalar_text') : if (argtype == 'text') or (argtype == 'scalar_text') :
argres += j + '|' alst.append(j + '|')
else : else :
argres += str(j) + ',' alst.append(str(j) + ',')
argres = "".join(alst)
argres = argres[0:-1] argres = argres[0:-1]
if argtype == 'snippets' : if argtype == 'snippets' :
result += 'snippets:' + argres rlst.append('snippets:' + argres)
else : else :
result += argres rlst.append(argres)
if len(subtagList) > 0 : if len(subtagList) > 0 :
result += '\n' rlst.append('\n')
for j in subtagList: for j in subtagList:
if len(j) > 0 : if len(j) > 0 :
result += self.formatTag(j) rlst.append(self.formatTag(j))
result += indent + '</' + nodename + '>\n' rlst.append(indent + '</' + nodename + '>\n')
else: else:
result += '</' + nodename + '>\n' rlst.append('</' + nodename + '>\n')
return result return "".join(rlst)
# flatten tag # flatten tag
def flattenTag(self, node): def flattenTag(self, node):
name = node[0] name = node[0]
subtagList = node[1] subtagList = node[1]
argtype = node[2] argtype = node[2]
argList = node[3] argList = node[3]
result = name rlst = []
rlst.append(name)
if (len(argList) > 0): if (len(argList) > 0):
argres = '' alst = []
for j in argList: for j in argList:
if (argtype == 'text') or (argtype == 'scalar_text') : if (argtype == 'text') or (argtype == 'scalar_text') :
argres += j + '|' alst.append(j + '|')
else : else :
argres += str(j) + '|' alst.append(str(j) + '|')
argres = "".join(alst)
argres = argres[0:-1] argres = argres[0:-1]
if argtype == 'snippets' : if argtype == 'snippets' :
result += '.snippets=' + argres rlst.append('.snippets=' + argres)
else : else :
result += '=' + argres rlst.append('=' + argres)
result += '\n' rlst.append('\n')
for j in subtagList: for j in subtagList:
if len(j) > 0 : if len(j) > 0 :
result += self.flattenTag(j) rlst.append(self.flattenTag(j))
return result return "".join(rlst)
# reduce create xml output # reduce create xml output
def formatDoc(self, flat_xml): def formatDoc(self, flat_xml):
result = '' rlst = []
for j in self.doc : for j in self.doc :
if len(j) > 0: if len(j) > 0:
if flat_xml: if flat_xml:
result += self.flattenTag(j) rlst.append(self.flattenTag(j))
else: else:
result += self.formatTag(j) rlst.append(self.formatTag(j))
result = "".join(rlst)
if self.debug : print result if self.debug : print result
return result return result
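The change repeated through these hunks — collect fragments in a list (rlst, alst) and call "".join(...) once — is the usual CPython cure for quadratic string building, since repeated += re-copies the growing string on every iteration. A minimal illustration with a hypothetical `fragments` sequence:

# slow: each += copies the whole accumulated string again
out = ''
for frag in fragments:
    out += frag

# the pattern adopted above: append, then join once at the end
parts = []
for frag in fragments:
    parts.append(frag)
out = ''.join(parts)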
@ -712,7 +723,7 @@ class PageParser(object):
first_token = None first_token = None
v = self.getNext() v = self.getNext()
if (v == None): if (v == None):
break break
if (v == 0x72): if (v == 0x72):
@ -723,7 +734,7 @@ class PageParser(object):
self.doc.append(tag) self.doc.append(tag)
else: else:
if self.debug: if self.debug:
print "Main Loop: Unknown value: %x" % v print "Main Loop: Unknown value: %x" % v
if (v == 0): if (v == 0):
if (self.peek(1) == 0x5f): if (self.peek(1) == 0x5f):
skip = self.fo.read(1) skip = self.fo.read(1)
@ -776,7 +787,7 @@ def usage():
# #
# Main # Main
# #
def main(argv): def main(argv):
dictFile = "" dictFile = ""
@ -797,11 +808,11 @@ def main(argv):
print str(err) # will print something like "option -a not recognized" print str(err) # will print something like "option -a not recognized"
usage() usage()
sys.exit(2) sys.exit(2)
if len(opts) == 0 and len(args) == 0 : if len(opts) == 0 and len(args) == 0 :
usage() usage()
sys.exit(2) sys.exit(2)
for o, a in opts: for o, a in opts:
if o =="-d": if o =="-d":
debug=True debug=True


@ -86,4 +86,3 @@ def main(argv=sys.argv):
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())


@ -43,4 +43,3 @@ def main(argv=sys.argv):
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())


@ -25,11 +25,11 @@ def main(argv=sys.argv):
rscpath = args[2] rscpath = args[2]
errlog = '' errlog = ''
rv = 1 rv = 1
# determine a good name for the output file # determine a good name for the output file
name, ext = os.path.splitext(os.path.basename(infile)) name, ext = os.path.splitext(os.path.basename(infile))
outfile = os.path.join(outdir, name + '_nodrm.pdf') outfile = os.path.join(outdir, name + '_nodrm.pdf')
# try with any keyfiles (*.der) in the rscpath # try with any keyfiles (*.der) in the rscpath
files = os.listdir(rscpath) files = os.listdir(rscpath)
filefilter = re.compile("\.der$", re.IGNORECASE) filefilter = re.compile("\.der$", re.IGNORECASE)
@ -52,4 +52,3 @@ def main(argv=sys.argv):
if __name__ == "__main__": if __name__ == "__main__":
sys.exit(main()) sys.exit(main())


@ -16,7 +16,7 @@
# Custom version 0.03 - no change to eReader support, only usability changes # Custom version 0.03 - no change to eReader support, only usability changes
# - start of pep-8 indentation (spaces not tab), fix trailing blanks # - start of pep-8 indentation (spaces not tab), fix trailing blanks
# - version variable, only one place to change # - version variable, only one place to change
# - added main routine, now callable as a library/module, # - added main routine, now callable as a library/module,
# means tools can add optional support for ereader2html # means tools can add optional support for ereader2html
# - outdir is no longer a mandatory parameter (defaults based on input name if missing) # - outdir is no longer a mandatory parameter (defaults based on input name if missing)
# - time taken output to stdout # - time taken output to stdout
@ -59,8 +59,8 @@
# 0.18 - on Windows try PyCrypto first and OpenSSL next # 0.18 - on Windows try PyCrypto first and OpenSSL next
# 0.19 - Modify the interface to allow use of import # 0.19 - Modify the interface to allow use of import
# 0.20 - modify to allow use inside new interface for calibre plugins # 0.20 - modify to allow use inside new interface for calibre plugins
# 0.21 - Support eReader (drm) version 11. # 0.21 - Support eReader (drm) version 11.
# - Don't reject dictionary format. # - Don't reject dictionary format.
# - Ignore sidebars for dictionaries (different format?) # - Ignore sidebars for dictionaries (different format?)
__version__='0.21' __version__='0.21'
@ -178,7 +178,7 @@ def sanitizeFileName(s):
def fixKey(key): def fixKey(key):
def fixByte(b): def fixByte(b):
return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80) return b ^ ((b ^ (b<<1) ^ (b<<2) ^ (b<<3) ^ (b<<4) ^ (b<<5) ^ (b<<6) ^ (b<<7) ^ 0x80) & 0x80)
return "".join([chr(fixByte(ord(a))) for a in key]) return "".join([chr(fixByte(ord(a))) for a in key])
def deXOR(text, sp, table): def deXOR(text, sp, table):
r='' r=''
@ -212,7 +212,7 @@ class EreaderProcessor(object):
for i in xrange(len(data)): for i in xrange(len(data)):
j = (j + shuf) % len(data) j = (j + shuf) % len(data)
r[j] = data[i] r[j] = data[i]
assert len("".join(r)) == len(data) assert len("".join(r)) == len(data)
return "".join(r) return "".join(r)
r = unshuff(input[0:-8], cookie_shuf) r = unshuff(input[0:-8], cookie_shuf)
@ -314,7 +314,7 @@ class EreaderProcessor(object):
# offname = deXOR(chaps, j, self.xortable) # offname = deXOR(chaps, j, self.xortable)
# offset = struct.unpack('>L', offname[0:4])[0] # offset = struct.unpack('>L', offname[0:4])[0]
# name = offname[4:].strip('\0') # name = offname[4:].strip('\0')
# cv += '%d|%s\n' % (offset, name) # cv += '%d|%s\n' % (offset, name)
# return cv # return cv
# def getLinkNamePMLOffsetData(self): # def getLinkNamePMLOffsetData(self):
@ -326,7 +326,7 @@ class EreaderProcessor(object):
# offname = deXOR(links, j, self.xortable) # offname = deXOR(links, j, self.xortable)
# offset = struct.unpack('>L', offname[0:4])[0] # offset = struct.unpack('>L', offname[0:4])[0]
# name = offname[4:].strip('\0') # name = offname[4:].strip('\0')
# lv += '%d|%s\n' % (offset, name) # lv += '%d|%s\n' % (offset, name)
# return lv # return lv
# def getExpandedTextSizesData(self): # def getExpandedTextSizesData(self):
@ -354,7 +354,7 @@ class EreaderProcessor(object):
for i in xrange(self.num_text_pages): for i in xrange(self.num_text_pages):
logging.debug('get page %d', i) logging.debug('get page %d', i)
r += zlib.decompress(des.decrypt(self.section_reader(1 + i))) r += zlib.decompress(des.decrypt(self.section_reader(1 + i)))
# now handle footnotes pages # now handle footnotes pages
if self.num_footnote_pages > 0: if self.num_footnote_pages > 0:
r += '\n' r += '\n'
@ -399,12 +399,12 @@ class EreaderProcessor(object):
return r return r
def cleanPML(pml): def cleanPML(pml):
# Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255) # Convert special characters to proper PML code. High ASCII start at (\x80, \a128) and go up to (\xff, \a255)
pml2 = pml pml2 = pml
for k in xrange(128,256): for k in xrange(128,256):
badChar = chr(k) badChar = chr(k)
pml2 = pml2.replace(badChar, '\\a%03d' % k) pml2 = pml2.replace(badChar, '\\a%03d' % k)
return pml2 return pml2
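A one-line example of the mapping cleanPML performs (every high-ASCII byte becomes its \aNNN PML escape):

assert cleanPML('caf\xe9') == 'caf\\a233'    # 0xE9 == 233 -> \a233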
def convertEreaderToPml(infile, name, cc, outdir): def convertEreaderToPml(infile, name, cc, outdir):
if not os.path.exists(outdir): if not os.path.exists(outdir):
@ -435,7 +435,7 @@ def convertEreaderToPml(infile, name, cc, outdir):
# file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo) # file(os.path.join(outdir, 'bookinfo.txt'),'wb').write(bkinfo)
def decryptBook(infile, outdir, name, cc, make_pmlz): def decryptBook(infile, outdir, name, cc, make_pmlz):
if make_pmlz : if make_pmlz :
# ignore specified outdir, use tempdir instead # ignore specified outdir, use tempdir instead
@ -468,7 +468,7 @@ def decryptBook(infile, outdir, name, cc, make_pmlz):
shutil.rmtree(outdir, True) shutil.rmtree(outdir, True)
print 'output is %s' % zipname print 'output is %s' % zipname
else : else :
print 'output in %s' % outdir print 'output in %s' % outdir
print "done" print "done"
except ValueError, e: except ValueError, e:
print "Error: %s" % e print "Error: %s" % e
@ -505,7 +505,7 @@ def main(argv=None):
return 0 return 0
elif o == "--make-pmlz": elif o == "--make-pmlz":
make_pmlz = True make_pmlz = True
print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__ print "eRdr2Pml v%s. Copyright (c) 2009 The Dark Reverser" % __version__
if len(args)!=3 and len(args)!=4: if len(args)!=3 and len(args)!=4:
@ -524,4 +524,3 @@ def main(argv=None):
if __name__ == "__main__": if __name__ == "__main__":
sys.stdout=Unbuffered(sys.stdout) sys.stdout=Unbuffered(sys.stdout)
sys.exit(main()) sys.exit(main())


@ -68,7 +68,7 @@ class DocParser(object):
ys = [] ys = []
gdefs = [] gdefs = []
# get path defintions, positions, dimensions for each glyph # get path defintions, positions, dimensions for each glyph
# that makes up the image, and find min x and min y to reposition origin # that makes up the image, and find min x and min y to reposition origin
minx = -1 minx = -1
miny = -1 miny = -1
@ -79,7 +79,7 @@ class DocParser(object):
xs.append(gxList[j]) xs.append(gxList[j])
if minx == -1: minx = gxList[j] if minx == -1: minx = gxList[j]
else : minx = min(minx, gxList[j]) else : minx = min(minx, gxList[j])
ys.append(gyList[j]) ys.append(gyList[j])
if miny == -1: miny = gyList[j] if miny == -1: miny = gyList[j]
else : miny = min(miny, gyList[j]) else : miny = min(miny, gyList[j])
@ -124,12 +124,12 @@ class DocParser(object):
item = self.docList[pos] item = self.docList[pos]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=',1) (name, argres) = item.split('=',1)
else : else :
name = item name = item
argres = '' argres = ''
return name, argres return name, argres
# find tag in doc if within pos to end inclusive # find tag in doc if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) : def findinDoc(self, tagpath, pos, end) :
result = None result = None
@ -142,10 +142,10 @@ class DocParser(object):
item = self.docList[j] item = self.docList[j]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=',1) (name, argres) = item.split('=',1)
else : else :
name = item name = item
argres = '' argres = ''
if name.endswith(tagpath) : if name.endswith(tagpath) :
result = argres result = argres
foundat = j foundat = j
break break
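For orientation, the flattened document these lookups walk is a list of dotted tag.path=value strings; lineinDoc splits one entry, and findinDoc scans a range for the first path ending in the requested suffix. A tiny standalone sketch with made-up data (only the behaviour shown in the code above is assumed):

doclist = ['page.region.img.src=12', 'page.region.paragraph.class=cl-body']

def find_suffix(doclist, suffix):
    # mirrors findinDoc: return (index, value) of the first matching tag path
    for j, item in enumerate(doclist):
        name, _, argres = item.partition('=')
        if name.endswith(suffix):
            return j, argres
    return None, None

print find_suffix(doclist, 'img.src')        # (0, '12')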
@ -182,13 +182,13 @@ class DocParser(object):
# class names are an issue given topaz may start them with numerals (not allowed), # class names are an issue given topaz may start them with numerals (not allowed),
# use a mix of cases (which cause some browsers problems), and actually # use a mix of cases (which cause some browsers problems), and actually
# attach numbers after "_reclustered*" to the end to deal classeses that inherit # attach numbers after "_reclustered*" to the end to deal classeses that inherit
# from a base class (but then not actually provide all of these _reclustereed # from a base class (but then not actually provide all of these _reclustereed
# classes in the stylesheet! # classes in the stylesheet!
# so we clean this up by lowercasing, prepend 'cl-', and getting any baseclass # so we clean this up by lowercasing, prepend 'cl-', and getting any baseclass
# that exists in the stylesheet first, and then adding this specific class # that exists in the stylesheet first, and then adding this specific class
# after # after
# also some class names have spaces in them so need to convert to dashes # also some class names have spaces in them so need to convert to dashes
if nclass != None : if nclass != None :
nclass = nclass.replace(' ','-') nclass = nclass.replace(' ','-')
@ -211,7 +211,7 @@ class DocParser(object):
return nclass return nclass
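The clean-up those comments describe, sketched as a standalone helper (hypothetical and simplified — the real getClass also looks for an existing base class in the stylesheet before appending the specific one):

def normalize_class(nclass):
    # lowercase, spaces to dashes, and a 'cl-' prefix so the result is a legal,
    # browser-safe CSS class even when Topaz starts the name with a digit
    if nclass is None:
        return None
    nclass = nclass.replace(' ', '-').lower()
    if not nclass.startswith('cl-'):
        nclass = 'cl-' + nclass
    return nclass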
# develop a sorted description of the starting positions of # develop a sorted description of the starting positions of
# groups and regions on the page, as well as the page type # groups and regions on the page, as well as the page type
def PageDescription(self): def PageDescription(self):
@ -267,7 +267,7 @@ class DocParser(object):
result = [] result = []
# paragraph # paragraph
(pos, pclass) = self.findinDoc('paragraph.class',start,end) (pos, pclass) = self.findinDoc('paragraph.class',start,end)
pclass = self.getClass(pclass) pclass = self.getClass(pclass)
@ -281,17 +281,22 @@ class DocParser(object):
if (sfirst != None) and (slast != None) : if (sfirst != None) and (slast != None) :
first = int(sfirst) first = int(sfirst)
last = int(slast) last = int(slast)
makeImage = (regtype == 'vertical') or (regtype == 'table') makeImage = (regtype == 'vertical') or (regtype == 'table')
makeImage = makeImage or (extraglyphs != None) makeImage = makeImage or (extraglyphs != None)
if self.fixedimage: if self.fixedimage:
makeImage = makeImage or (regtype == 'fixed') makeImage = makeImage or (regtype == 'fixed')
if (pclass != None): if (pclass != None):
makeImage = makeImage or (pclass.find('.inverted') >= 0) makeImage = makeImage or (pclass.find('.inverted') >= 0)
if self.fixedimage : if self.fixedimage :
makeImage = makeImage or (pclass.find('cl-f-') >= 0) makeImage = makeImage or (pclass.find('cl-f-') >= 0)
# before creating an image make sure glyph info exists
gidList = self.getData('info.glyph.glyphID',0,-1)
makeImage = makeImage & (len(gidList) > 0)
if not makeImage : if not makeImage :
# standard all word paragraph # standard all word paragraph
for wordnum in xrange(first, last): for wordnum in xrange(first, last):
@ -332,10 +337,10 @@ class DocParser(object):
result.append(('svg', num)) result.append(('svg', num))
return pclass, result return pclass, result
# this type of paragraph may be made up of multiple spans, inline # this type of paragraph may be made up of multiple spans, inline
# word monograms (images), and words with semantic meaning, # word monograms (images), and words with semantic meaning,
# plus glyphs used to form starting letter of first word # plus glyphs used to form starting letter of first word
# need to parse this type line by line # need to parse this type line by line
line = start + 1 line = start + 1
word_class = '' word_class = ''
@ -344,7 +349,7 @@ class DocParser(object):
if end == -1 : if end == -1 :
end = self.docSize end = self.docSize
# seems some xml has last* coming before first* so we have to # seems some xml has last* coming before first* so we have to
# handle any order # handle any order
sp_first = -1 sp_first = -1
sp_last = -1 sp_last = -1
@ -382,10 +387,10 @@ class DocParser(object):
ws_last = int(argres) ws_last = int(argres)
elif name.endswith('word.class'): elif name.endswith('word.class'):
(cname, space) = argres.split('-',1) (cname, space) = argres.split('-',1)
if space == '' : space = '0' if space == '' : space = '0'
if (cname == 'spaceafter') and (int(space) > 0) : if (cname == 'spaceafter') and (int(space) > 0) :
word_class = 'sa' word_class = 'sa'
elif name.endswith('word.img.src'): elif name.endswith('word.img.src'):
result.append(('img' + word_class, int(argres))) result.append(('img' + word_class, int(argres)))
@ -416,11 +421,11 @@ class DocParser(object):
result.append(('ocr', wordnum)) result.append(('ocr', wordnum))
ws_first = -1 ws_first = -1
ws_last = -1 ws_last = -1
line += 1 line += 1
return pclass, result return pclass, result
def buildParagraph(self, pclass, pdesc, type, regtype) : def buildParagraph(self, pclass, pdesc, type, regtype) :
parares = '' parares = ''
@ -433,7 +438,7 @@ class DocParser(object):
br_lb = (regtype == 'fixed') or (regtype == 'chapterheading') or (regtype == 'vertical') br_lb = (regtype == 'fixed') or (regtype == 'chapterheading') or (regtype == 'vertical')
handle_links = len(self.link_id) > 0 handle_links = len(self.link_id) > 0
if (type == 'full') or (type == 'begin') : if (type == 'full') or (type == 'begin') :
parares += '<p' + classres + '>' parares += '<p' + classres + '>'
@ -462,7 +467,7 @@ class DocParser(object):
if linktype == 'external' : if linktype == 'external' :
linkhref = self.link_href[link-1] linkhref = self.link_href[link-1]
linkhtml = '<a href="%s">' % linkhref linkhtml = '<a href="%s">' % linkhref
else : else :
if len(self.link_page) >= link : if len(self.link_page) >= link :
ptarget = self.link_page[link-1] - 1 ptarget = self.link_page[link-1] - 1
linkhtml = '<a href="#page%04d">' % ptarget linkhtml = '<a href="#page%04d">' % ptarget
@ -509,7 +514,7 @@ class DocParser(object):
elif wtype == 'svg' : elif wtype == 'svg' :
sep = '' sep = ''
parares += '<img src="img/' + self.id + '_%04d.svg" alt="" />' % num parares += '<img src="img/' + self.id + '_%04d.svg" alt="" />' % num
parares += sep parares += sep
if len(sep) > 0 : parares = parares[0:-1] if len(sep) > 0 : parares = parares[0:-1]
@ -551,7 +556,7 @@ class DocParser(object):
title = '' title = ''
alt_title = '' alt_title = ''
linkpage = '' linkpage = ''
else : else :
if len(self.link_page) >= link : if len(self.link_page) >= link :
ptarget = self.link_page[link-1] - 1 ptarget = self.link_page[link-1] - 1
linkpage = '%04d' % ptarget linkpage = '%04d' % ptarget
@ -584,14 +589,14 @@ class DocParser(object):
# walk the document tree collecting the information needed # walk the document tree collecting the information needed
# to build an html page using the ocrText # to build an html page using the ocrText
def process(self): def process(self):
htmlpage = ''
tocinfo = '' tocinfo = ''
hlst = []
# get the ocr text # get the ocr text
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1) (pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
@ -602,8 +607,8 @@ class DocParser(object):
# determine if first paragraph is continued from previous page # determine if first paragraph is continued from previous page
(pos, self.parastems_stemid) = self.findinDoc('info.paraStems.stemID',0,-1) (pos, self.parastems_stemid) = self.findinDoc('info.paraStems.stemID',0,-1)
first_para_continued = (self.parastems_stemid != None) first_para_continued = (self.parastems_stemid != None)
# determine if last paragraph is continued onto the next page # determine if last paragraph is continued onto the next page
(pos, self.paracont_stemid) = self.findinDoc('info.paraCont.stemID',0,-1) (pos, self.paracont_stemid) = self.findinDoc('info.paraCont.stemID',0,-1)
last_para_continued = (self.paracont_stemid != None) last_para_continued = (self.paracont_stemid != None)
@ -631,25 +636,25 @@ class DocParser(object):
# get a descriptions of the starting points of the regions # get a descriptions of the starting points of the regions
# and groups on the page # and groups on the page
(pagetype, pageDesc) = self.PageDescription() (pagetype, pageDesc) = self.PageDescription()
regcnt = len(pageDesc) - 1 regcnt = len(pageDesc) - 1
anchorSet = False anchorSet = False
breakSet = False breakSet = False
inGroup = False inGroup = False
# process each region on the page and convert what you can to html # process each region on the page and convert what you can to html
for j in xrange(regcnt): for j in xrange(regcnt):
(etype, start) = pageDesc[j] (etype, start) = pageDesc[j]
(ntype, end) = pageDesc[j+1] (ntype, end) = pageDesc[j+1]
# set anchor for link target on this page # set anchor for link target on this page
if not anchorSet and not first_para_continued: if not anchorSet and not first_para_continued:
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="' hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n' hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
anchorSet = True anchorSet = True
# handle groups of graphics with text captions # handle groups of graphics with text captions
@ -658,12 +663,12 @@ class DocParser(object):
if grptype != None: if grptype != None:
if grptype == 'graphic': if grptype == 'graphic':
gcstr = ' class="' + grptype + '"' gcstr = ' class="' + grptype + '"'
htmlpage += '<div' + gcstr + '>' hlst.append('<div' + gcstr + '>')
inGroup = True inGroup = True
elif (etype == 'grpend'): elif (etype == 'grpend'):
if inGroup: if inGroup:
htmlpage += '</div>\n' hlst.append('</div>\n')
inGroup = False inGroup = False
else: else:
@ -673,25 +678,25 @@ class DocParser(object):
(pos, simgsrc) = self.findinDoc('img.src',start,end) (pos, simgsrc) = self.findinDoc('img.src',start,end)
if simgsrc: if simgsrc:
if inGroup: if inGroup:
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc) hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
else: else:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc) hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
elif regtype == 'chapterheading' : elif regtype == 'chapterheading' :
(pclass, pdesc) = self.getParaDescription(start,end, regtype) (pclass, pdesc) = self.getParaDescription(start,end, regtype)
if not breakSet: if not breakSet:
htmlpage += '<div style="page-break-after: always;">&nbsp;</div>\n' hlst.append('<div style="page-break-after: always;">&nbsp;</div>\n')
breakSet = True breakSet = True
tag = 'h1' tag = 'h1'
if pclass and (len(pclass) >= 7): if pclass and (len(pclass) >= 7):
if pclass[3:7] == 'ch1-' : tag = 'h1' if pclass[3:7] == 'ch1-' : tag = 'h1'
if pclass[3:7] == 'ch2-' : tag = 'h2' if pclass[3:7] == 'ch2-' : tag = 'h2'
if pclass[3:7] == 'ch3-' : tag = 'h3' if pclass[3:7] == 'ch3-' : tag = 'h3'
htmlpage += '<' + tag + ' class="' + pclass + '">' hlst.append('<' + tag + ' class="' + pclass + '">')
else: else:
htmlpage += '<' + tag + '>' hlst.append('<' + tag + '>')
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype) hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
htmlpage += '</' + tag + '>' hlst.append('</' + tag + '>')
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'): elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
ptype = 'full' ptype = 'full'
@ -705,11 +710,11 @@ class DocParser(object):
if pclass[3:6] == 'h1-' : tag = 'h4' if pclass[3:6] == 'h1-' : tag = 'h4'
if pclass[3:6] == 'h2-' : tag = 'h5' if pclass[3:6] == 'h2-' : tag = 'h5'
if pclass[3:6] == 'h3-' : tag = 'h6' if pclass[3:6] == 'h3-' : tag = 'h6'
htmlpage += '<' + tag + ' class="' + pclass + '">' hlst.append('<' + tag + ' class="' + pclass + '">')
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype) hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
htmlpage += '</' + tag + '>' hlst.append('</' + tag + '>')
else : else :
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
elif (regtype == 'tocentry') : elif (regtype == 'tocentry') :
ptype = 'full' ptype = 'full'
@ -718,7 +723,7 @@ class DocParser(object):
first_para_continued = False first_para_continued = False
(pclass, pdesc) = self.getParaDescription(start,end, regtype) (pclass, pdesc) = self.getParaDescription(start,end, regtype)
tocinfo += self.buildTOCEntry(pdesc) tocinfo += self.buildTOCEntry(pdesc)
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
elif (regtype == 'vertical') or (regtype == 'table') : elif (regtype == 'vertical') or (regtype == 'table') :
ptype = 'full' ptype = 'full'
@ -728,13 +733,13 @@ class DocParser(object):
ptype = 'end' ptype = 'end'
first_para_continued = False first_para_continued = False
(pclass, pdesc) = self.getParaDescription(start, end, regtype) (pclass, pdesc) = self.getParaDescription(start, end, regtype)
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
elif (regtype == 'synth_fcvr.center'): elif (regtype == 'synth_fcvr.center'):
(pos, simgsrc) = self.findinDoc('img.src',start,end) (pos, simgsrc) = self.findinDoc('img.src',start,end)
if simgsrc: if simgsrc:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc) hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
else : else :
print ' Making region type', regtype, print ' Making region type', regtype,
@ -760,18 +765,19 @@ class DocParser(object):
if pclass[3:6] == 'h1-' : tag = 'h4' if pclass[3:6] == 'h1-' : tag = 'h4'
if pclass[3:6] == 'h2-' : tag = 'h5' if pclass[3:6] == 'h2-' : tag = 'h5'
if pclass[3:6] == 'h3-' : tag = 'h6' if pclass[3:6] == 'h3-' : tag = 'h6'
htmlpage += '<' + tag + ' class="' + pclass + '">' hlst.append('<' + tag + ' class="' + pclass + '">')
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype) hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
htmlpage += '</' + tag + '>' hlst.append('</' + tag + '>')
else : else :
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
else : else :
print ' a "graphic" region' print ' a "graphic" region'
(pos, simgsrc) = self.findinDoc('img.src',start,end) (pos, simgsrc) = self.findinDoc('img.src',start,end)
if simgsrc: if simgsrc:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc) hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
htmlpage = "".join(hlst)
if last_para_continued : if last_para_continued :
if htmlpage[-4:] == '</p>': if htmlpage[-4:] == '</p>':
htmlpage = htmlpage[0:-4] htmlpage = htmlpage[0:-4]


@ -15,7 +15,7 @@ class PParser(object):
self.flatdoc = flatxml.split('\n') self.flatdoc = flatxml.split('\n')
self.docSize = len(self.flatdoc) self.docSize = len(self.flatdoc)
self.temp = [] self.temp = []
self.ph = -1 self.ph = -1
self.pw = -1 self.pw = -1
startpos = self.posinDoc('page.h') or self.posinDoc('book.h') startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
@ -26,7 +26,7 @@ class PParser(object):
for p in startpos: for p in startpos:
(name, argres) = self.lineinDoc(p) (name, argres) = self.lineinDoc(p)
self.pw = max(self.pw, int(argres)) self.pw = max(self.pw, int(argres))
if self.ph <= 0: if self.ph <= 0:
self.ph = int(meta_array.get('pageHeight', '11000')) self.ph = int(meta_array.get('pageHeight', '11000'))
if self.pw <= 0: if self.pw <= 0:
@ -181,70 +181,69 @@ class PParser(object):
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi): def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
ml = '' mlst = []
pp = PParser(gdict, flat_xml, meta_array) pp = PParser(gdict, flat_xml, meta_array)
ml += '<?xml version="1.0" standalone="no"?>\n' mlst.append('<?xml version="1.0" standalone="no"?>\n')
if (raw): if (raw):
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n' mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1) mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']) mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
else: else:
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n' mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']) mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
ml += '<script><![CDATA[\n' mlst.append('<script><![CDATA[\n')
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n' mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
ml += 'var dpi=%d;\n' % scaledpi mlst.append('var dpi=%d;\n' % scaledpi)
if (previd) : if (previd) :
ml += 'var prevpage="page%04d.xhtml";\n' % (previd) mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
if (nextid) : if (nextid) :
ml += 'var nextpage="page%04d.xhtml";\n' % (nextid) mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph) mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n' mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n' mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n' mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n' mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n' mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n' mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
ml += 'window.onload=setsize;\n' mlst.append('window.onload=setsize;\n')
ml += ']]></script>\n' mlst.append(']]></script>\n')
ml += '</head>\n' mlst.append('</head>\n')
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n' mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
ml += '<div style="white-space:nowrap;">\n' mlst.append('<div style="white-space:nowrap;">\n')
if previd == None: if previd == None:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n' mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
else: else:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n' mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph) mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
if (pp.gid != None): if (pp.gid != None):
ml += '<defs>\n' mlst.append('<defs>\n')
gdefs = pp.getGlyphs() gdefs = pp.getGlyphs()
for j in xrange(0,len(gdefs)): for j in xrange(0,len(gdefs)):
ml += gdefs[j] mlst.append(gdefs[j])
ml += '</defs>\n' mlst.append('</defs>\n')
img = pp.getImages() img = pp.getImages()
if (img != None): if (img != None):
for j in xrange(0,len(img)): for j in xrange(0,len(img)):
ml += img[j] mlst.append(img[j])
if (pp.gid != None): if (pp.gid != None):
for j in xrange(0,len(pp.gid)): for j in xrange(0,len(pp.gid)):
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]) mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0): if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
xpos = "%d" % (pp.pw // 3) xpos = "%d" % (pp.pw // 3)
ypos = "%d" % (pp.ph // 3) ypos = "%d" % (pp.ph // 3)
ml += '<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n' mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
if (raw) : if (raw) :
ml += '</svg>' mlst.append('</svg>')
else : else :
ml += '</svg></a>\n' mlst.append('</svg></a>\n')
if nextid == None: if nextid == None:
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n' mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
else : else :
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n' mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
ml += '</div>\n' mlst.append('</div>\n')
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n' mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
ml += '</body>\n' mlst.append('</body>\n')
ml += '</html>\n' mlst.append('</html>\n')
return ml return "".join(mlst)


@ -39,6 +39,8 @@ else :
import flatxml2svg import flatxml2svg
import stylexml2css import stylexml2css
# global switch
buildXML = False
# Get a 7 bit encoded number from a file # Get a 7 bit encoded number from a file
def readEncodedNumber(file): def readEncodedNumber(file):
@ -46,27 +48,27 @@ def readEncodedNumber(file):
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data == 0xFF: if data == 0xFF:
flag = True flag = True
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data >= 0x80: if data >= 0x80:
datax = (data & 0x7F) datax = (data & 0x7F)
while data >= 0x80 : while data >= 0x80 :
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
datax = (datax <<7) + (data & 0x7F) datax = (datax <<7) + (data & 0x7F)
data = datax data = datax
if flag: if flag:
data = -data data = -data
return data return data
# Get a length prefixed string from the file # Get a length prefixed string from the file
def lengthPrefixString(data): def lengthPrefixString(data):
return encodeNumber(len(data))+data return encodeNumber(len(data))+data
@ -77,7 +79,7 @@ def readString(file):
sv = file.read(stringLength) sv = file.read(stringLength)
if (len(sv) != stringLength): if (len(sv) != stringLength):
return "" return ""
return unpack(str(stringLength)+"s",sv)[0] return unpack(str(stringLength)+"s",sv)[0]
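A small round trip of the length-prefixed string helpers (assuming readString first decodes the 7-bit length in the lines elided above, which is how lengthPrefixString writes it):

from StringIO import StringIO

blob = lengthPrefixString('abc')              # -> '\x03abc'
assert readString(StringIO(blob)) == 'abc'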
def getMetaArray(metaFile): def getMetaArray(metaFile):
# parse the meta file # parse the meta file
@ -141,10 +143,10 @@ class PageDimParser(object):
item = docList[j] item = docList[j]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=') (name, argres) = item.split('=')
else : else :
name = item name = item
argres = '' argres = ''
if name.endswith(tagpath) : if name.endswith(tagpath) :
result = argres result = argres
foundat = j foundat = j
break break
@ -298,9 +300,10 @@ def generateBook(bookDir, raw, fixedimage):
if not os.path.exists(svgDir) : if not os.path.exists(svgDir) :
os.makedirs(svgDir) os.makedirs(svgDir)
xmlDir = os.path.join(bookDir,'xml') if buildXML:
if not os.path.exists(xmlDir) : xmlDir = os.path.join(bookDir,'xml')
os.makedirs(xmlDir) if not os.path.exists(xmlDir) :
os.makedirs(xmlDir)
otherFile = os.path.join(bookDir,'other0000.dat') otherFile = os.path.join(bookDir,'other0000.dat')
if not os.path.exists(otherFile) : if not os.path.exists(otherFile) :
@ -336,7 +339,7 @@ def generateBook(bookDir, raw, fixedimage):
print 'Processing Meta Data and creating OPF' print 'Processing Meta Data and creating OPF'
meta_array = getMetaArray(metaFile) meta_array = getMetaArray(metaFile)
# replace special chars in title and authors like & < > # replace special chars in title and authors like & < >
title = meta_array.get('Title','No Title Provided') title = meta_array.get('Title','No Title Provided')
title = title.replace('&','&amp;') title = title.replace('&','&amp;')
title = title.replace('<','&lt;') title = title.replace('<','&lt;')
@ -348,11 +351,14 @@ def generateBook(bookDir, raw, fixedimage):
authors = authors.replace('>','&gt;') authors = authors.replace('>','&gt;')
meta_array['Authors'] = authors meta_array['Authors'] = authors
xname = os.path.join(xmlDir, 'metadata.xml') if buildXML:
metastr = '' xname = os.path.join(xmlDir, 'metadata.xml')
for key in meta_array: mlst = []
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n' for key in meta_array:
file(xname, 'wb').write(metastr) mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
metastr = "".join(mlst)
mlst = None
file(xname, 'wb').write(metastr)
print 'Processing StyleSheet' print 'Processing StyleSheet'
# get some scaling info from metadata to use while processing styles # get some scaling info from metadata to use while processing styles
@ -404,8 +410,9 @@ def generateBook(bookDir, raw, fixedimage):
# now get the css info # now get the css info
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw) cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
file(xname, 'wb').write(cssstr) file(xname, 'wb').write(cssstr)
xname = os.path.join(xmlDir, 'other0000.xml') if buildXML:
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile)) xname = os.path.join(xmlDir, 'other0000.xml')
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
print 'Processing Glyphs' print 'Processing Glyphs'
gd = GlyphDict() gd = GlyphDict()
@ -425,8 +432,9 @@ def generateBook(bookDir, raw, fixedimage):
fname = os.path.join(glyphsDir,filename) fname = os.path.join(glyphsDir,filename)
flat_xml = convert2xml.fromData(dict, fname) flat_xml = convert2xml.fromData(dict, fname)
xname = os.path.join(xmlDir, filename.replace('.dat','.xml')) if buildXML:
file(xname, 'wb').write(convert2xml.getXML(dict, fname)) xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
gp = GParser(flat_xml) gp = GParser(flat_xml)
for i in xrange(0, gp.count): for i in xrange(0, gp.count):
@ -441,29 +449,29 @@ def generateBook(bookDir, raw, fixedimage):
glyfile.close() glyfile.close()
print " " print " "
# build up tocentries while processing html
tocentries = ''
# start up the html # start up the html
# also build up tocentries while processing html
htmlFileName = "book.html" htmlFileName = "book.html"
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n' hlst = []
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n' hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n' hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
htmlstr += '<head>\n' hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n' hlst.append('<head>\n')
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n' hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n' hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n' hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n' hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n' hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n' hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
htmlstr += '</head>\n<body>\n' hlst.append('</head>\n<body>\n')
print 'Processing Pages' print 'Processing Pages'
# Books are at 1440 DPI. This is rendering at twice that size for # Books are at 1440 DPI. This is rendering at twice that size for
# readability when rendering to the screen. # readability when rendering to the screen.
scaledpi = 1440.0 scaledpi = 1440.0
filenames = os.listdir(pageDir) filenames = os.listdir(pageDir)
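Since Topaz page coordinates are in units of 1/1440 inch (the 1440 DPI noted above), the page metadata converts directly to physical size; for instance, the 11000 fallback pageHeight used earlier corresponds to roughly a 7.6-inch-tall page:

print int(meta_array.get('pageHeight', '11000')) / 1440.0    # ~7.64 inches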
@ -471,6 +479,7 @@ def generateBook(bookDir, raw, fixedimage):
numfiles = len(filenames) numfiles = len(filenames)
xmllst = [] xmllst = []
elst = []
for filename in filenames: for filename in filenames:
# print ' ', filename # print ' ', filename
@ -481,45 +490,51 @@ def generateBook(bookDir, raw, fixedimage):
# keep flat_xml for later svg processing # keep flat_xml for later svg processing
xmllst.append(flat_xml) xmllst.append(flat_xml)
xname = os.path.join(xmlDir, filename.replace('.dat','.xml')) if buildXML:
file(xname, 'wb').write(convert2xml.getXML(dict, fname)) xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
# first get the html # first get the html
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage) pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
tocentries += tocinfo elst.append(tocinfo)
htmlstr += pagehtml hlst.append(pagehtml)
# finish up the html string and output it # finish up the html string and output it
htmlstr += '</body>\n</html>\n' hlst.append('</body>\n</html>\n')
htmlstr = "".join(hlst)
hlst = None
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr) file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
print " " print " "
print 'Extracting Table of Contents from Amazon OCR' print 'Extracting Table of Contents from Amazon OCR'
# first create a table of contents file for the svg images # first create a table of contents file for the svg images
tochtml = '<?xml version="1.0" encoding="utf-8"?>\n' tlst = []
tochtml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
tochtml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >' tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
tochtml += '<head>\n' tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
tochtml += '<title>' + meta_array['Title'] + '</title>\n' tlst.append('<head>\n')
tochtml += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n' tlst.append('<title>' + meta_array['Title'] + '</title>\n')
tochtml += '<meta name="Title" content="' + meta_array['Title'] + '" />\n' tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
tochtml += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n' tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
tochtml += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n' tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
tochtml += '</head>\n' tlst.append('</head>\n')
tochtml += '<body>\n' tlst.append('<body>\n')
tochtml += '<h2>Table of Contents</h2>\n' tlst.append('<h2>Table of Contents</h2>\n')
start = pageidnums[0] start = pageidnums[0]
if (raw): if (raw):
startname = 'page%04d.svg' % start startname = 'page%04d.svg' % start
else: else:
startname = 'page%04d.xhtml' % start startname = 'page%04d.xhtml' % start
tochtml += '<h3><a href="' + startname + '">Start of Book</a></h3>\n' tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
# build up a table of contents for the svg xhtml output # build up a table of contents for the svg xhtml output
tocentries = "".join(elst)
elst = None
toclst = tocentries.split('\n') toclst = tocentries.split('\n')
toclst.pop() toclst.pop()
for entry in toclst: for entry in toclst:
@ -530,30 +545,32 @@ def generateBook(bookDir, raw, fixedimage):
fname = 'page%04d.svg' % id fname = 'page%04d.svg' % id
else: else:
fname = 'page%04d.xhtml' % id fname = 'page%04d.xhtml' % id
tochtml += '<h3><a href="'+ fname + '">' + title + '</a></h3>\n' tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
tochtml += '</body>\n' tlst.append('</body>\n')
tochtml += '</html>\n' tlst.append('</html>\n')
tochtml = "".join(tlst)
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml) file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
# now create index_svg.xhtml that points to all required files # now create index_svg.xhtml that points to all required files
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n' slst = []
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >' slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
svgindex += '<head>\n' slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
svgindex += '<title>' + meta_array['Title'] + '</title>\n' slst.append('<head>\n')
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n' slst.append('<title>' + meta_array['Title'] + '</title>\n')
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n' slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n' slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n' slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
svgindex += '</head>\n' slst.append('</head>\n')
svgindex += '<body>\n' slst.append('<body>\n')
print "Building svg images of each book page" print "Building svg images of each book page"
svgindex += '<h2>List of Pages</h2>\n' slst.append('<h2>List of Pages</h2>\n')
svgindex += '<div>\n' slst.append('<div>\n')
idlst = sorted(pageIDMap.keys()) idlst = sorted(pageIDMap.keys())
numids = len(idlst) numids = len(idlst)
cnt = len(idlst) cnt = len(idlst)
@ -566,49 +583,54 @@ def generateBook(bookDir, raw, fixedimage):
nextid = None nextid = None
print '.', print '.',
pagelst = pageIDMap[pageid] pagelst = pageIDMap[pageid]
flat_svg = '' flst = []
for page in pagelst: for page in pagelst:
flat_svg += xmllst[page] flst.append(xmllst[page])
flat_svg = "".join(flst)
flst=None
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi) svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
if (raw) : if (raw) :
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w') pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid) slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
else : else :
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w') pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid) slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
previd = pageid previd = pageid
pfile.write(svgxml) pfile.write(svgxml)
pfile.close() pfile.close()
counter += 1 counter += 1
svgindex += '</div>\n' slst.append('</div>\n')
svgindex += '<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n' slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
svgindex += '</body>\n</html>\n' slst.append('</body>\n</html>\n')
svgindex = "".join(slst)
slst = None
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex) file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
print " " print " "
# build the opf file # build the opf file
opfname = os.path.join(bookDir, 'book.opf') opfname = os.path.join(bookDir, 'book.opf')
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n' olst = []
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n' olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
# adding metadata # adding metadata
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n' olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n' olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n' olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
if 'oASIN' in meta_array: if 'oASIN' in meta_array:
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n' olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n' olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n' olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
opfstr += ' <dc:language>en</dc:language>\n' olst.append(' <dc:language>en</dc:language>\n')
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n' olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
if isCover: if isCover:
opfstr += ' <meta name="cover" content="bookcover"/>\n' olst.append(' <meta name="cover" content="bookcover"/>\n')
opfstr += ' </metadata>\n' olst.append(' </metadata>\n')
opfstr += '<manifest>\n' olst.append('<manifest>\n')
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n' olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n' olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
# adding image files to manifest # adding image files to manifest
filenames = os.listdir(imgDir) filenames = os.listdir(imgDir)
filenames = sorted(filenames) filenames = sorted(filenames)
@ -618,17 +640,19 @@ def generateBook(bookDir, raw, fixedimage):
imgext = 'jpeg' imgext = 'jpeg'
if imgext == '.svg': if imgext == '.svg':
imgext = 'svg+xml' imgext = 'svg+xml'
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n' olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
if isCover: if isCover:
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n' olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
opfstr += '</manifest>\n' olst.append('</manifest>\n')
# adding spine # adding spine
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n' olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
if isCover: if isCover:
opfstr += ' <guide>\n' olst.append(' <guide>\n')
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n' olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
opfstr += ' </guide>\n' olst.append(' </guide>\n')
opfstr += '</package>\n' olst.append('</package>\n')
opfstr = "".join(olst)
olst = None
file(opfname, 'wb').write(opfstr) file(opfname, 'wb').write(opfstr)
print 'Processing Complete' print 'Processing Complete'
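
The change running through the whole generateBook diff above is one pattern: toc.xhtml, index_svg.xhtml and book.opf are now assembled by appending fragments to a list (tlst, slst, olst) and joining once at the end, instead of growing a string with +=. A minimal sketch of the two forms, with illustrative names only (not taken from the source):

    # Repeated str += re-copies the growing buffer and can degrade to
    # quadratic time on large books; list-append plus one join stays linear.
    def build_page_links_slow(page_ids):
        html = ''
        for pid in page_ids:
            html += '<a href="page%04d.xhtml">Page %d</a>\n' % (pid, pid)
        return html

    def build_page_links_fast(page_ids):
        parts = []
        for pid in page_ids:
            parts.append('<a href="page%04d.xhtml">Page %d</a>\n' % (pid, pid))
        return ''.join(parts)
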
@ -649,7 +673,6 @@ def usage():
def main(argv): def main(argv):
bookDir = '' bookDir = ''
if len(argv) == 0: if len(argv) == 0:
argv = sys.argv argv = sys.argv
@ -663,7 +686,7 @@ def main(argv):
if len(opts) == 0 and len(args) == 0 : if len(opts) == 0 and len(args) == 0 :
usage() usage()
return 1 return 1
raw = 0 raw = 0
fixedimage = True fixedimage = True
View file
@ -14,7 +14,7 @@ from __future__ import with_statement
# 2 - Added OS X support by using OpenSSL when available # 2 - Added OS X support by using OpenSSL when available
# 3 - screen out improper key lengths to prevent segfaults on Linux # 3 - screen out improper key lengths to prevent segfaults on Linux
# 3.1 - Allow Windows versions of libcrypto to be found # 3.1 - Allow Windows versions of libcrypto to be found
# 3.2 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml # 3.2 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
# 3.3 - On Windows try PyCrypto first and OpenSSL next # 3.3 - On Windows try PyCrypto first and OpenSSL next
# 3.4 - Modify interface to allow use with import # 3.4 - Modify interface to allow use with import
@ -50,7 +50,7 @@ def _load_crypto_libcrypto():
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -58,13 +58,13 @@ def _load_crypto_libcrypto():
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
@ -73,7 +73,7 @@ def _load_crypto_libcrypto():
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
class AES(object): class AES(object):
def __init__(self, userkey): def __init__(self, userkey):
self._blocksize = len(userkey) self._blocksize = len(userkey)
@ -84,7 +84,7 @@ def _load_crypto_libcrypto():
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key) rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
if rv < 0: if rv < 0:
raise IGNOBLEError('Failed to initialize AES key') raise IGNOBLEError('Failed to initialize AES key')
def decrypt(self, data): def decrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
iv = ("\x00" * self._blocksize) iv = ("\x00" * self._blocksize)
@ -122,7 +122,7 @@ def _load_crypto():
AES = _load_crypto() AES = _load_crypto()
""" """
Decrypt Barnes & Noble ADEPT encrypted EPUB books. Decrypt Barnes & Noble ADEPT encrypted EPUB books.
View file
@ -53,7 +53,7 @@ def _load_crypto_libcrypto():
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -61,28 +61,28 @@ def _load_crypto_libcrypto():
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key', AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key',
[c_char_p, c_int, AES_KEY_p]) [c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
class AES(object): class AES(object):
def __init__(self, userkey, iv): def __init__(self, userkey, iv):
self._blocksize = len(userkey) self._blocksize = len(userkey)
self._iv = iv self._iv = iv
key = self._key = AES_KEY() key = self._key = AES_KEY()
rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key) rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key)
if rv < 0: if rv < 0:
raise IGNOBLEError('Failed to initialize AES Encrypt key') raise IGNOBLEError('Failed to initialize AES Encrypt key')
def encrypt(self, data): def encrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1) rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
if rv == 0: if rv == 0:
View file
@ -67,25 +67,25 @@ def _load_crypto_libcrypto():
RSA_NO_PADDING = 3 RSA_NO_PADDING = 3
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
class RSA(Structure): class RSA(Structure):
pass pass
RSA_p = POINTER(RSA) RSA_p = POINTER(RSA)
class AES_KEY(Structure): class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey', d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey',
[RSA_p, c_char_pp, c_long]) [RSA_p, c_char_pp, c_long])
RSA_size = F(c_int, 'RSA_size', [RSA_p]) RSA_size = F(c_int, 'RSA_size', [RSA_p])
@ -97,7 +97,7 @@ def _load_crypto_libcrypto():
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
class RSA(object): class RSA(object):
def __init__(self, der): def __init__(self, der):
buf = create_string_buffer(der) buf = create_string_buffer(der)
@ -105,7 +105,7 @@ def _load_crypto_libcrypto():
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der)) rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
if rsa is None: if rsa is None:
raise ADEPTError('Error parsing ADEPT user key DER') raise ADEPTError('Error parsing ADEPT user key DER')
def decrypt(self, from_): def decrypt(self, from_):
rsa = self._rsa rsa = self._rsa
to = create_string_buffer(RSA_size(rsa)) to = create_string_buffer(RSA_size(rsa))
@ -114,7 +114,7 @@ def _load_crypto_libcrypto():
if dlen < 0: if dlen < 0:
raise ADEPTError('RSA decryption failed') raise ADEPTError('RSA decryption failed')
return to[:dlen] return to[:dlen]
def __del__(self): def __del__(self):
if self._rsa is not None: if self._rsa is not None:
RSA_free(self._rsa) RSA_free(self._rsa)
@ -130,7 +130,7 @@ def _load_crypto_libcrypto():
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key) rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
if rv < 0: if rv < 0:
raise ADEPTError('Failed to initialize AES key') raise ADEPTError('Failed to initialize AES key')
def decrypt(self, data): def decrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
iv = ("\x00" * self._blocksize) iv = ("\x00" * self._blocksize)
@ -148,13 +148,13 @@ def _load_crypto_pycrypto():
# ASN.1 parsing code from tlslite # ASN.1 parsing code from tlslite
class ASN1Error(Exception): class ASN1Error(Exception):
pass pass
class ASN1Parser(object): class ASN1Parser(object):
class Parser(object): class Parser(object):
def __init__(self, bytes): def __init__(self, bytes):
self.bytes = bytes self.bytes = bytes
self.index = 0 self.index = 0
def get(self, length): def get(self, length):
if self.index + length > len(self.bytes): if self.index + length > len(self.bytes):
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
@ -164,22 +164,22 @@ def _load_crypto_pycrypto():
x |= self.bytes[self.index] x |= self.bytes[self.index]
self.index += 1 self.index += 1
return x return x
def getFixBytes(self, lengthBytes): def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes] bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes self.index += lengthBytes
return bytes return bytes
def getVarBytes(self, lengthLength): def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength) lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes) return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList): def getFixList(self, length, lengthList):
l = [0] * lengthList l = [0] * lengthList
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def getVarList(self, length, lengthLength): def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength) lengthList = self.get(lengthLength)
if lengthList % length != 0: if lengthList % length != 0:
@ -189,19 +189,19 @@ def _load_crypto_pycrypto():
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def startLengthCheck(self, lengthLength): def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength) self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index self.indexCheck = self.index
def setLengthCheck(self, length): def setLengthCheck(self, length):
self.lengthCheck = length self.lengthCheck = length
self.indexCheck = self.index self.indexCheck = self.index
def stopLengthCheck(self): def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck: if (self.index - self.indexCheck) != self.lengthCheck:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def atLengthCheck(self): def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck: if (self.index - self.indexCheck) < self.lengthCheck:
return False return False
@ -209,13 +209,13 @@ def _load_crypto_pycrypto():
return True return True
else: else:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def __init__(self, bytes): def __init__(self, bytes):
p = self.Parser(bytes) p = self.Parser(bytes)
p.get(1) p.get(1)
self.length = self._getASN1Length(p) self.length = self._getASN1Length(p)
self.value = p.getFixBytes(self.length) self.value = p.getFixBytes(self.length)
def getChild(self, which): def getChild(self, which):
p = self.Parser(self.value) p = self.Parser(self.value)
for x in range(which+1): for x in range(which+1):
@ -224,7 +224,7 @@ def _load_crypto_pycrypto():
length = self._getASN1Length(p) length = self._getASN1Length(p)
p.getFixBytes(length) p.getFixBytes(length)
return ASN1Parser(p.bytes[markIndex:p.index]) return ASN1Parser(p.bytes[markIndex:p.index])
def _getASN1Length(self, p): def _getASN1Length(self, p):
firstLength = p.get(1) firstLength = p.get(1)
if firstLength<=127: if firstLength<=127:
@ -252,7 +252,7 @@ def _load_crypto_pycrypto():
for byte in bytes: for byte in bytes:
total = (total << 8) + byte total = (total << 8) + byte
return total return total
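
The two-line loop ending above is the tail of the helper that folds a byte sequence into a single big-endian integer (presumably the bytesToNumber routine borrowed from tlslite, used to turn parsed ASN.1 key fields into the numbers handed to PyCrypto's RSA constructor). A worked value:

    # Big-endian fold over the byte values [0x12, 0x34]:
    #   total = (0    << 8) + 0x12  -> 0x12
    #   total = (0x12 << 8) + 0x34  -> 0x1234
    total = 0
    for byte in [0x12, 0x34]:
        total = (total << 8) + byte
    assert total == 0x1234  # 4660
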
def decrypt(self, data): def decrypt(self, data):
return self._rsa.decrypt(data) return self._rsa.decrypt(data)
View file
@ -76,13 +76,13 @@ if sys.platform.startswith('win'):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key', AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
[c_char_p, c_int, AES_KEY_p]) [c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
@ -427,8 +427,8 @@ def extractKeyfile(keypath):
print "Key generation Error: " + str(e) print "Key generation Error: " + str(e)
return 1 return 1
except Exception, e: except Exception, e:
print "General Error: " + str(e) print "General Error: " + str(e)
return 1 return 1
if not success: if not success:
return 1 return 1
return 0 return 0
View file
@ -4,7 +4,7 @@
from __future__ import with_statement from __future__ import with_statement
# To run this program install Python 2.6 from http://www.python.org/download/ # To run this program install Python 2.6 from http://www.python.org/download/
# and OpenSSL (already installed on Mac OS X and Linux) OR # and OpenSSL (already installed on Mac OS X and Linux) OR
# PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto # PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# (make sure to install the version for Python 2.6). Save this script file as # (make sure to install the version for Python 2.6). Save this script file as
# ineptpdf.pyw and double-click on it to run it. # ineptpdf.pyw and double-click on it to run it.
@ -83,7 +83,7 @@ def _load_crypto_libcrypto():
AES_MAXNR = 14 AES_MAXNR = 14
RSA_NO_PADDING = 3 RSA_NO_PADDING = 3
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -98,13 +98,13 @@ def _load_crypto_libcrypto():
class RSA(Structure): class RSA(Structure):
pass pass
RSA_p = POINTER(RSA) RSA_p = POINTER(RSA)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int]) AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p]) AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
@ -125,7 +125,7 @@ def _load_crypto_libcrypto():
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der)) rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
if rsa is None: if rsa is None:
raise ADEPTError('Error parsing ADEPT user key DER') raise ADEPTError('Error parsing ADEPT user key DER')
def decrypt(self, from_): def decrypt(self, from_):
rsa = self._rsa rsa = self._rsa
to = create_string_buffer(RSA_size(rsa)) to = create_string_buffer(RSA_size(rsa))
@ -134,7 +134,7 @@ def _load_crypto_libcrypto():
if dlen < 0: if dlen < 0:
raise ADEPTError('RSA decryption failed') raise ADEPTError('RSA decryption failed')
return to[1:dlen] return to[1:dlen]
def __del__(self): def __del__(self):
if self._rsa is not None: if self._rsa is not None:
RSA_free(self._rsa) RSA_free(self._rsa)
@ -196,13 +196,13 @@ def _load_crypto_pycrypto():
# ASN.1 parsing code from tlslite # ASN.1 parsing code from tlslite
class ASN1Error(Exception): class ASN1Error(Exception):
pass pass
class ASN1Parser(object): class ASN1Parser(object):
class Parser(object): class Parser(object):
def __init__(self, bytes): def __init__(self, bytes):
self.bytes = bytes self.bytes = bytes
self.index = 0 self.index = 0
def get(self, length): def get(self, length):
if self.index + length > len(self.bytes): if self.index + length > len(self.bytes):
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
@ -212,22 +212,22 @@ def _load_crypto_pycrypto():
x |= self.bytes[self.index] x |= self.bytes[self.index]
self.index += 1 self.index += 1
return x return x
def getFixBytes(self, lengthBytes): def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes] bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes self.index += lengthBytes
return bytes return bytes
def getVarBytes(self, lengthLength): def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength) lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes) return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList): def getFixList(self, length, lengthList):
l = [0] * lengthList l = [0] * lengthList
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def getVarList(self, length, lengthLength): def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength) lengthList = self.get(lengthLength)
if lengthList % length != 0: if lengthList % length != 0:
@ -237,19 +237,19 @@ def _load_crypto_pycrypto():
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def startLengthCheck(self, lengthLength): def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength) self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index self.indexCheck = self.index
def setLengthCheck(self, length): def setLengthCheck(self, length):
self.lengthCheck = length self.lengthCheck = length
self.indexCheck = self.index self.indexCheck = self.index
def stopLengthCheck(self): def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck: if (self.index - self.indexCheck) != self.lengthCheck:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def atLengthCheck(self): def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck: if (self.index - self.indexCheck) < self.lengthCheck:
return False return False
@ -257,13 +257,13 @@ def _load_crypto_pycrypto():
return True return True
else: else:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def __init__(self, bytes): def __init__(self, bytes):
p = self.Parser(bytes) p = self.Parser(bytes)
p.get(1) p.get(1)
self.length = self._getASN1Length(p) self.length = self._getASN1Length(p)
self.value = p.getFixBytes(self.length) self.value = p.getFixBytes(self.length)
def getChild(self, which): def getChild(self, which):
p = self.Parser(self.value) p = self.Parser(self.value)
for x in range(which+1): for x in range(which+1):
@ -272,7 +272,7 @@ def _load_crypto_pycrypto():
length = self._getASN1Length(p) length = self._getASN1Length(p)
p.getFixBytes(length) p.getFixBytes(length)
return ASN1Parser(p.bytes[markIndex:p.index]) return ASN1Parser(p.bytes[markIndex:p.index])
def _getASN1Length(self, p): def _getASN1Length(self, p):
firstLength = p.get(1) firstLength = p.get(1)
if firstLength<=127: if firstLength<=127:
@ -293,6 +293,7 @@ def _load_crypto_pycrypto():
return self._arc4.decrypt(data) return self._arc4.decrypt(data)
class AES(object): class AES(object):
MODE_CBC = _AES.MODE_CBC
@classmethod @classmethod
def new(cls, userkey, mode, iv): def new(cls, userkey, mode, iv):
self = AES() self = AES()
@ -315,7 +316,7 @@ def _load_crypto_pycrypto():
for byte in bytes: for byte in bytes:
total = (total << 8) + byte total = (total << 8) + byte
return total return total
def decrypt(self, data): def decrypt(self, data):
return self._rsa.decrypt(data) return self._rsa.decrypt(data)
@ -410,7 +411,7 @@ class PSLiteral(PSObject):
def __init__(self, name): def __init__(self, name):
self.name = name self.name = name
return return
def __repr__(self): def __repr__(self):
name = [] name = []
for char in self.name: for char in self.name:
@ -429,22 +430,22 @@ class PSKeyword(PSObject):
def __init__(self, name): def __init__(self, name):
self.name = name self.name = name
return return
def __repr__(self): def __repr__(self):
return self.name return self.name
# PSSymbolTable # PSSymbolTable
class PSSymbolTable(object): class PSSymbolTable(object):
''' '''
Symbol table that stores PSLiteral or PSKeyword. Symbol table that stores PSLiteral or PSKeyword.
''' '''
def __init__(self, classe): def __init__(self, classe):
self.dic = {} self.dic = {}
self.classe = classe self.classe = classe
return return
def intern(self, name): def intern(self, name):
if name in self.dic: if name in self.dic:
lit = self.dic[name] lit = self.dic[name]
@ -514,11 +515,11 @@ class PSBaseParser(object):
def flush(self): def flush(self):
return return
def close(self): def close(self):
self.flush() self.flush()
return return
def tell(self): def tell(self):
return self.bufpos+self.charpos return self.bufpos+self.charpos
@ -554,7 +555,7 @@ class PSBaseParser(object):
raise PSEOF('Unexpected EOF') raise PSEOF('Unexpected EOF')
self.charpos = 0 self.charpos = 0
return return
def parse_main(self, s, i): def parse_main(self, s, i):
m = NONSPC.search(s, i) m = NONSPC.search(s, i)
if not m: if not m:
@ -589,11 +590,11 @@ class PSBaseParser(object):
return (self.parse_wclose, j+1) return (self.parse_wclose, j+1)
self.add_token(KWD(c)) self.add_token(KWD(c))
return (self.parse_main, j+1) return (self.parse_main, j+1)
def add_token(self, obj): def add_token(self, obj):
self.tokens.append((self.tokenstart, obj)) self.tokens.append((self.tokenstart, obj))
return return
def parse_comment(self, s, i): def parse_comment(self, s, i):
m = EOL.search(s, i) m = EOL.search(s, i)
if not m: if not m:
@ -604,7 +605,7 @@ class PSBaseParser(object):
# We ignore comments. # We ignore comments.
#self.tokens.append(self.token) #self.tokens.append(self.token)
return (self.parse_main, j) return (self.parse_main, j)
def parse_literal(self, s, i): def parse_literal(self, s, i):
m = END_LITERAL.search(s, i) m = END_LITERAL.search(s, i)
if not m: if not m:
@ -618,7 +619,7 @@ class PSBaseParser(object):
return (self.parse_literal_hex, j+1) return (self.parse_literal_hex, j+1)
self.add_token(LIT(self.token)) self.add_token(LIT(self.token))
return (self.parse_main, j) return (self.parse_main, j)
def parse_literal_hex(self, s, i): def parse_literal_hex(self, s, i):
c = s[i] c = s[i]
if HEX.match(c) and len(self.hex) < 2: if HEX.match(c) and len(self.hex) < 2:
@ -653,7 +654,7 @@ class PSBaseParser(object):
self.token += s[i:j] self.token += s[i:j]
self.add_token(float(self.token)) self.add_token(float(self.token))
return (self.parse_main, j) return (self.parse_main, j)
def parse_keyword(self, s, i): def parse_keyword(self, s, i):
m = END_KEYWORD.search(s, i) m = END_KEYWORD.search(s, i)
if not m: if not m:
@ -801,7 +802,7 @@ class PSStackParser(PSBaseParser):
PSBaseParser.__init__(self, fp) PSBaseParser.__init__(self, fp)
self.reset() self.reset()
return return
def reset(self): def reset(self):
self.context = [] self.context = []
self.curtype = None self.curtype = None
@ -842,10 +843,10 @@ class PSStackParser(PSBaseParser):
def do_keyword(self, pos, token): def do_keyword(self, pos, token):
return return
def nextobject(self, direct=False): def nextobject(self, direct=False):
''' '''
Yields a list of objects: keywords, literals, strings, Yields a list of objects: keywords, literals, strings,
numbers, arrays and dictionaries. Arrays and dictionaries numbers, arrays and dictionaries. Arrays and dictionaries
are represented as Python sequence and dictionaries. are represented as Python sequence and dictionaries.
''' '''
@ -914,7 +915,7 @@ class PDFNotImplementedError(PSException): pass
## PDFObjRef ## PDFObjRef
## ##
class PDFObjRef(PDFObject): class PDFObjRef(PDFObject):
def __init__(self, doc, objid, genno): def __init__(self, doc, objid, genno):
if objid == 0: if objid == 0:
if STRICT: if STRICT:
@ -1029,25 +1030,25 @@ def stream_value(x):
# ascii85decode(data) # ascii85decode(data)
def ascii85decode(data): def ascii85decode(data):
n = b = 0 n = b = 0
out = '' out = ''
for c in data: for c in data:
if '!' <= c and c <= 'u': if '!' <= c and c <= 'u':
n += 1 n += 1
b = b*85+(ord(c)-33) b = b*85+(ord(c)-33)
if n == 5: if n == 5:
out += struct.pack('>L',b) out += struct.pack('>L',b)
n = b = 0 n = b = 0
elif c == 'z': elif c == 'z':
assert n == 0 assert n == 0
out += '\0\0\0\0' out += '\0\0\0\0'
elif c == '~': elif c == '~':
if n: if n:
for _ in range(5-n): for _ in range(5-n):
b = b*85+84 b = b*85+84
out += struct.pack('>L',b)[:n-1] out += struct.pack('>L',b)[:n-1]
break break
return out return out
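
The ascii85decode above implements standard Ascii85: each run of five characters in '!'..'u' is a base-85 number packed as a 32-bit big-endian word, 'z' abbreviates four zero bytes, and '~' terminates the stream. Two worked values (checked by hand, not taken from the source; they assume the ascii85decode from the hunk above is in scope):

    # '8','7','c','U','R' -> base-85 digits 23,22,66,52,49
    #   -> 23*85**4 + 22*85**3 + 66*85**2 + 52*85 + 49 = 0x48656C6C
    assert ascii85decode('87cUR') == 'Hell'
    # 'z' is shorthand for an all-zero group
    assert ascii85decode('z') == '\x00\x00\x00\x00'
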
## PDFStream type ## PDFStream type
@ -1064,7 +1065,7 @@ class PDFStream(PDFObject):
else: else:
if eol in ('\r', '\n', '\r\n'): if eol in ('\r', '\n', '\r\n'):
rawdata = rawdata[:length] rawdata = rawdata[:length]
self.dic = dic self.dic = dic
self.rawdata = rawdata self.rawdata = rawdata
self.decipher = decipher self.decipher = decipher
@ -1078,7 +1079,7 @@ class PDFStream(PDFObject):
self.objid = objid self.objid = objid
self.genno = genno self.genno = genno
return return
def __repr__(self): def __repr__(self):
if self.rawdata: if self.rawdata:
return '<PDFStream(%r): raw=%d, %r>' % \ return '<PDFStream(%r): raw=%d, %r>' % \
@ -1162,7 +1163,7 @@ class PDFStream(PDFObject):
data = self.decipher(self.objid, self.genno, data) data = self.decipher(self.objid, self.genno, data)
return data return data
## PDF Exceptions ## PDF Exceptions
## ##
class PDFSyntaxError(PDFException): pass class PDFSyntaxError(PDFException): pass
@ -1227,7 +1228,7 @@ class PDFXRef(object):
self.offsets[objid] = (int(genno), int(pos)) self.offsets[objid] = (int(genno), int(pos))
self.load_trailer(parser) self.load_trailer(parser)
return return
KEYWORD_TRAILER = PSKeywordTable.intern('trailer') KEYWORD_TRAILER = PSKeywordTable.intern('trailer')
def load_trailer(self, parser): def load_trailer(self, parser):
try: try:
@ -1268,7 +1269,7 @@ class PDFXRefStream(object):
for first, size in self.index: for first, size in self.index:
for objid in xrange(first, first + size): for objid in xrange(first, first + size):
yield objid yield objid
def load(self, parser, debug=0): def load(self, parser, debug=0):
(_,objid) = parser.nexttoken() # ignored (_,objid) = parser.nexttoken() # ignored
(_,genno) = parser.nexttoken() # ignored (_,genno) = parser.nexttoken() # ignored
@ -1286,7 +1287,7 @@ class PDFXRefStream(object):
self.entlen = self.fl1+self.fl2+self.fl3 self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.dic self.trailer = stream.dic
return return
def getpos(self, objid): def getpos(self, objid):
offset = 0 offset = 0
for first, size in self.index: for first, size in self.index:
@ -1337,7 +1338,7 @@ class PDFDocument(object):
self.parser = parser self.parser = parser
# The document is set to be temporarily ready during collecting # The document is set to be temporarily ready during collecting
# all the basic information about the document, e.g. # all the basic information about the document, e.g.
# the header, the encryption information, and the access rights # the header, the encryption information, and the access rights
# for the document. # for the document.
self.ready = True self.ready = True
# Retrieve the information of each header that was appended # Retrieve the information of each header that was appended
@ -1413,7 +1414,7 @@ class PDFDocument(object):
length = int_value(param.get('Length', 0)) / 8 length = int_value(param.get('Length', 0)) / 8
edcdata = str_value(param.get('EDCData')).decode('base64') edcdata = str_value(param.get('EDCData')).decode('base64')
pdrllic = str_value(param.get('PDRLLic')).decode('base64') pdrllic = str_value(param.get('PDRLLic')).decode('base64')
pdrlpol = str_value(param.get('PDRLPol')).decode('base64') pdrlpol = str_value(param.get('PDRLPol')).decode('base64')
edclist = [] edclist = []
for pair in edcdata.split('\n'): for pair in edcdata.split('\n'):
edclist.append(pair) edclist.append(pair)
@ -1433,9 +1434,9 @@ class PDFDocument(object):
raise ADEPTError('Could not decrypt PDRLPol, aborting ...') raise ADEPTError('Could not decrypt PDRLPol, aborting ...')
else: else:
cutter = -1 * ord(pdrlpol[-1]) cutter = -1 * ord(pdrlpol[-1])
pdrlpol = pdrlpol[:cutter] pdrlpol = pdrlpol[:cutter]
return plaintext[:16] return plaintext[:16]
PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \ PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \
'\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz' '\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
# experimental aes pw support # experimental aes pw support
@ -1455,14 +1456,14 @@ class PDFDocument(object):
EncMetadata = str_value(param['EncryptMetadata']) EncMetadata = str_value(param['EncryptMetadata'])
except: except:
EncMetadata = 'True' EncMetadata = 'True'
self.is_printable = bool(P & 4) self.is_printable = bool(P & 4)
self.is_modifiable = bool(P & 8) self.is_modifiable = bool(P & 8)
self.is_extractable = bool(P & 16) self.is_extractable = bool(P & 16)
self.is_annotationable = bool(P & 32) self.is_annotationable = bool(P & 32)
self.is_formsenabled = bool(P & 256) self.is_formsenabled = bool(P & 256)
self.is_textextractable = bool(P & 512) self.is_textextractable = bool(P & 512)
self.is_assemblable = bool(P & 1024) self.is_assemblable = bool(P & 1024)
self.is_formprintable = bool(P & 2048) self.is_formprintable = bool(P & 2048)
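
The eight assignments above decode the PDF /P permissions integer one bit at a time. A standalone sketch of the same masking; the bit values are copied from the hunk, while PERMISSION_BITS and decode_permissions are illustrative names that only mirror the attributes set on PDFDocument:

    PERMISSION_BITS = {
        'printable':       4,
        'modifiable':      8,
        'extractable':     16,
        'annotationable':  32,
        'formsenabled':    256,
        'textextractable': 512,
        'assemblable':     1024,
        'formprintable':   2048,
    }

    def decode_permissions(p_value):
        # flag name -> bool for a given /P value
        return dict((name, bool(p_value & bit)) for name, bit in PERMISSION_BITS.items())
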
# Algorithm 3.2 # Algorithm 3.2
password = (password+self.PASSWORD_PADDING)[:32] # 1 password = (password+self.PASSWORD_PADDING)[:32] # 1
hash = hashlib.md5(password) # 2 hash = hashlib.md5(password) # 2
@ -1537,10 +1538,10 @@ class PDFDocument(object):
if length > 0: if length > 0:
if len(bookkey) == length: if len(bookkey) == length:
if ebx_V == 3: if ebx_V == 3:
V = 3 V = 3
else: else:
V = 2 V = 2
elif len(bookkey) == length + 1: elif len(bookkey) == length + 1:
V = ord(bookkey[0]) V = ord(bookkey[0])
bookkey = bookkey[1:] bookkey = bookkey[1:]
else: else:
@ -1554,7 +1555,7 @@ class PDFDocument(object):
print "length is %d and len(bookkey) is %d" % (length, len(bookkey)) print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
print "bookkey[0] is %d" % ord(bookkey[0]) print "bookkey[0] is %d" % ord(bookkey[0])
if ebx_V == 3: if ebx_V == 3:
V = 3 V = 3
else: else:
V = 2 V = 2
self.decrypt_key = bookkey self.decrypt_key = bookkey
@ -1571,7 +1572,7 @@ class PDFDocument(object):
hash = hashlib.md5(key) hash = hashlib.md5(key)
key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)] key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)]
return key return key
def genkey_v3(self, objid, genno): def genkey_v3(self, objid, genno):
objid = struct.pack('<L', objid ^ 0x3569ac) objid = struct.pack('<L', objid ^ 0x3569ac)
genno = struct.pack('<L', genno ^ 0xca96) genno = struct.pack('<L', genno ^ 0xca96)
@ -1611,14 +1612,14 @@ class PDFDocument(object):
#print cutter #print cutter
plaintext = plaintext[:cutter] plaintext = plaintext[:cutter]
return plaintext return plaintext
def decrypt_rc4(self, objid, genno, data): def decrypt_rc4(self, objid, genno, data):
key = self.genkey(objid, genno) key = self.genkey(objid, genno)
return ARC4.new(key).decrypt(data) return ARC4.new(key).decrypt(data)
KEYWORD_OBJ = PSKeywordTable.intern('obj') KEYWORD_OBJ = PSKeywordTable.intern('obj')
def getobj(self, objid): def getobj(self, objid):
if not self.ready: if not self.ready:
raise PDFException('PDFDocument not initialized') raise PDFException('PDFDocument not initialized')
@ -1688,7 +1689,7 @@ class PDFDocument(object):
## if x: ## if x:
## objid1 = x[-2] ## objid1 = x[-2]
## genno = x[-1] ## genno = x[-1]
## ##
if kwd is not self.KEYWORD_OBJ: if kwd is not self.KEYWORD_OBJ:
raise PDFSyntaxError( raise PDFSyntaxError(
'Invalid object spec: offset=%r' % index) 'Invalid object spec: offset=%r' % index)
@ -1700,7 +1701,7 @@ class PDFDocument(object):
self.objs[objid] = obj self.objs[objid] = obj
return obj return obj
class PDFObjStmRef(object): class PDFObjStmRef(object):
maxindex = 0 maxindex = 0
def __init__(self, objid, stmid, index): def __init__(self, objid, stmid, index):
@ -1710,7 +1711,7 @@ class PDFObjStmRef(object):
if index > PDFObjStmRef.maxindex: if index > PDFObjStmRef.maxindex:
PDFObjStmRef.maxindex = index PDFObjStmRef.maxindex = index
## PDFParser ## PDFParser
## ##
class PDFParser(PSStackParser): class PDFParser(PSStackParser):
@ -1736,7 +1737,7 @@ class PDFParser(PSStackParser):
if token is self.KEYWORD_ENDOBJ: if token is self.KEYWORD_ENDOBJ:
self.add_results(*self.pop(4)) self.add_results(*self.pop(4))
return return
if token is self.KEYWORD_R: if token is self.KEYWORD_R:
# reference to indirect object # reference to indirect object
try: try:
@ -1747,7 +1748,7 @@ class PDFParser(PSStackParser):
except PSSyntaxError: except PSSyntaxError:
pass pass
return return
if token is self.KEYWORD_STREAM: if token is self.KEYWORD_STREAM:
# stream object # stream object
((_,dic),) = self.pop(1) ((_,dic),) = self.pop(1)
@ -1787,7 +1788,7 @@ class PDFParser(PSStackParser):
obj = PDFStream(dic, data, self.doc.decipher) obj = PDFStream(dic, data, self.doc.decipher)
self.push((pos, obj)) self.push((pos, obj))
return return
# others # others
self.push((pos, token)) self.push((pos, token))
return return
@ -1823,7 +1824,7 @@ class PDFParser(PSStackParser):
xref.load(self) xref.load(self)
else: else:
if token is not self.KEYWORD_XREF: if token is not self.KEYWORD_XREF:
raise PDFNoValidXRef('xref not found: pos=%d, token=%r' % raise PDFNoValidXRef('xref not found: pos=%d, token=%r' %
(pos, token)) (pos, token))
self.nextline() self.nextline()
xref = PDFXRef() xref = PDFXRef()
@ -1838,7 +1839,7 @@ class PDFParser(PSStackParser):
pos = int_value(trailer['Prev']) pos = int_value(trailer['Prev'])
self.read_xref_from(pos, xrefs) self.read_xref_from(pos, xrefs)
return return
# read xref tables and trailers # read xref tables and trailers
def read_xref(self): def read_xref(self):
xrefs = [] xrefs = []
@ -1957,7 +1958,7 @@ class PDFSerializer(object):
self.write("%010d 00000 n \n" % xrefs[objid][0]) self.write("%010d 00000 n \n" % xrefs[objid][0])
else: else:
self.write("%010d %05d f \n" % (0, 65535)) self.write("%010d %05d f \n" % (0, 65535))
self.write('trailer\n') self.write('trailer\n')
self.serialize_object(trailer) self.serialize_object(trailer)
self.write('\nstartxref\n%d\n%%%%EOF' % startxref) self.write('\nstartxref\n%d\n%%%%EOF' % startxref)
@ -1977,7 +1978,7 @@ class PDFSerializer(object):
while maxindex >= power: while maxindex >= power:
fl3 += 1 fl3 += 1
power *= 256 power *= 256
index = [] index = []
first = None first = None
prev = None prev = None
@ -2004,14 +2005,14 @@ class PDFSerializer(object):
# we force all generation numbers to be 0 # we force all generation numbers to be 0
# f3 = objref[1] # f3 = objref[1]
f3 = 0 f3 = 0
data.append(struct.pack('>B', f1)) data.append(struct.pack('>B', f1))
data.append(struct.pack('>L', f2)[-fl2:]) data.append(struct.pack('>L', f2)[-fl2:])
data.append(struct.pack('>L', f3)[-fl3:]) data.append(struct.pack('>L', f3)[-fl3:])
index.extend((first, prev - first + 1)) index.extend((first, prev - first + 1))
data = zlib.compress(''.join(data)) data = zlib.compress(''.join(data))
dic = {'Type': LITERAL_XREF, 'Size': prev + 1, 'Index': index, dic = {'Type': LITERAL_XREF, 'Size': prev + 1, 'Index': index,
'W': [1, fl2, fl3], 'Length': len(data), 'W': [1, fl2, fl3], 'Length': len(data),
'Filter': LITERALS_FLATE_DECODE[0], 'Filter': LITERALS_FLATE_DECODE[0],
'Root': trailer['Root'],} 'Root': trailer['Root'],}
if 'Info' in trailer: if 'Info' in trailer:
@ -2033,9 +2034,9 @@ class PDFSerializer(object):
string = string.replace(')', r'\)') string = string.replace(')', r'\)')
# get rid of ciando id # get rid of ciando id
regularexp = re.compile(r'http://www.ciando.com/index.cfm/intRefererID/\d{5}') regularexp = re.compile(r'http://www.ciando.com/index.cfm/intRefererID/\d{5}')
if regularexp.match(string): return ('http://www.ciando.com') if regularexp.match(string): return ('http://www.ciando.com')
return string return string
def serialize_object(self, obj): def serialize_object(self, obj):
if isinstance(obj, dict): if isinstance(obj, dict):
# Correct malformed Mac OS resource forks for Stanza # Correct malformed Mac OS resource forks for Stanza
@ -2059,21 +2060,21 @@ class PDFSerializer(object):
elif isinstance(obj, bool): elif isinstance(obj, bool):
if self.last.isalnum(): if self.last.isalnum():
self.write(' ') self.write(' ')
self.write(str(obj).lower()) self.write(str(obj).lower())
elif isinstance(obj, (int, long, float)): elif isinstance(obj, (int, long, float)):
if self.last.isalnum(): if self.last.isalnum():
self.write(' ') self.write(' ')
self.write(str(obj)) self.write(str(obj))
elif isinstance(obj, PDFObjRef): elif isinstance(obj, PDFObjRef):
if self.last.isalnum(): if self.last.isalnum():
self.write(' ') self.write(' ')
self.write('%d %d R' % (obj.objid, 0)) self.write('%d %d R' % (obj.objid, 0))
elif isinstance(obj, PDFStream): elif isinstance(obj, PDFStream):
### If we don't generate cross ref streams the object streams ### If we don't generate cross ref streams the object streams
### are no longer useful, as we have extracted all objects from ### are no longer useful, as we have extracted all objects from
### them. Therefore leave them out from the output. ### them. Therefore leave them out from the output.
if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm: if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm:
self.write('(deleted)') self.write('(deleted)')
else: else:
data = obj.get_decdata() data = obj.get_decdata()
self.serialize_object(obj.dic) self.serialize_object(obj.dic)
@ -2085,7 +2086,7 @@ class PDFSerializer(object):
if data[0].isalnum() and self.last.isalnum(): if data[0].isalnum() and self.last.isalnum():
self.write(' ') self.write(' ')
self.write(data) self.write(data)
def serialize_indirect(self, objid, obj): def serialize_indirect(self, objid, obj):
self.write('%d 0 obj' % (objid,)) self.write('%d 0 obj' % (objid,))
self.serialize_object(obj) self.serialize_object(obj)
@ -2097,7 +2098,7 @@ class PDFSerializer(object):
class DecryptionDialog(Tkinter.Frame): class DecryptionDialog(Tkinter.Frame):
def __init__(self, root): def __init__(self, root):
Tkinter.Frame.__init__(self, root, border=5) Tkinter.Frame.__init__(self, root, border=5)
ltext='Select file for decryption\n' ltext='Select file for decryption\n'
self.status = Tkinter.Label(self, text=ltext) self.status = Tkinter.Label(self, text=ltext)
self.status.pack(fill=Tkconstants.X, expand=1) self.status.pack(fill=Tkconstants.X, expand=1)
body = Tkinter.Frame(self) body = Tkinter.Frame(self)
@ -2123,7 +2124,7 @@ class DecryptionDialog(Tkinter.Frame):
button.grid(row=2, column=2) button.grid(row=2, column=2)
buttons = Tkinter.Frame(self) buttons = Tkinter.Frame(self)
buttons.pack() buttons.pack()
botton = Tkinter.Button( botton = Tkinter.Button(
buttons, text="Decrypt", width=10, command=self.decrypt) buttons, text="Decrypt", width=10, command=self.decrypt)
@ -2132,7 +2133,7 @@ class DecryptionDialog(Tkinter.Frame):
button = Tkinter.Button( button = Tkinter.Button(
buttons, text="Quit", width=10, command=self.quit) buttons, text="Quit", width=10, command=self.quit)
button.pack(side=Tkconstants.RIGHT) button.pack(side=Tkconstants.RIGHT)
def get_keypath(self): def get_keypath(self):
keypath = tkFileDialog.askopenfilename( keypath = tkFileDialog.askopenfilename(
View file
@ -5,19 +5,19 @@ from __future__ import with_statement
# engine to remove drm from Kindle for Mac and Kindle for PC books # engine to remove drm from Kindle for Mac and Kindle for PC books
# for personal use for archiving and converting your ebooks # for personal use for archiving and converting your ebooks
# PLEASE DO NOT PIRATE EBOOKS! # PLEASE DO NOT PIRATE EBOOKS!
# We want all authors and publishers, and eBook stores to live # We want all authors and publishers, and eBook stores to live
# long and prosperous lives but at the same time we just want to # long and prosperous lives but at the same time we just want to
# be able to read OUR books on whatever device we want and to keep # be able to read OUR books on whatever device we want and to keep
# readable for a long, long time # readable for a long, long time
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle, # This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates # unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
# and many many others # and many many others
__version__ = '3.9' __version__ = '4.0'
class Unbuffered: class Unbuffered:
def __init__(self, stream): def __init__(self, stream):
@ -34,6 +34,8 @@ import string
import re import re
import traceback import traceback
buildXML = False
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -50,7 +52,7 @@ else:
import mobidedrm import mobidedrm
import topazextract import topazextract
import kgenpids import kgenpids
# cleanup bytestring filenames # cleanup bytestring filenames
# borrowed from calibre from calibre/src/calibre/__init__.py # borrowed from calibre from calibre/src/calibre/__init__.py
@ -75,6 +77,8 @@ def cleanup_name(name):
return one return one
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids): def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
global buildXML
# handle the obvious cases at the beginning # handle the obvious cases at the beginning
if not os.path.isfile(infile): if not os.path.isfile(infile):
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist" print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
@ -100,14 +104,14 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
outfilename = outfilename + "_" + filenametitle outfilename = outfilename + "_" + filenametitle
elif outfilename[:8] != filenametitle[:8]: elif outfilename[:8] != filenametitle[:8]:
outfilename = outfilename[:8] + "_" + filenametitle outfilename = outfilename[:8] + "_" + filenametitle
# avoid excessively long file names # avoid excessively long file names
if len(outfilename)>150: if len(outfilename)>150:
outfilename = outfilename[:150] outfilename = outfilename[:150]
# build pid list # build pid list
md1, md2 = mb.getPIDMetaInfo() md1, md2 = mb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles) pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
try: try:
mb.processBook(pidlst) mb.processBook(pidlst)
@ -128,9 +132,9 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
else: else:
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi') outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
mb.getMobiFile(outfile) mb.getMobiFile(outfile)
return 0 return 0
# topaz: # topaz:
print " Creating NoDRM HTMLZ Archive" print " Creating NoDRM HTMLZ Archive"
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz') zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
mb.getHTMLZip(zipname) mb.getHTMLZip(zipname)
@ -139,9 +143,10 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip') zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
mb.getSVGZip(zipname) mb.getSVGZip(zipname)
print " Creating XML ZIP Archive" if buildXML:
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip') print " Creating XML ZIP Archive"
mb.getXMLZip(zipname) zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
mb.getXMLZip(zipname)
# remove internal temporary directory of Topaz pieces # remove internal temporary directory of Topaz pieces
mb.cleanup() mb.cleanup()
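
With this change the Topaz path always writes the HTMLZ and SVG archives but only writes the *_XML.zip when the module-level buildXML flag (added near the top of the file, default False) is set. A hedged sketch of how an importing script could turn it on; decryptBook's signature comes from the hunks above, while importing the script as a module and flipping the flag from outside are assumptions about how the switch is meant to be used:

    # Hypothetical driver; 'book.azw1' and '/tmp/out' are placeholder paths.
    import k4mobidedrm

    k4mobidedrm.buildXML = True   # also emit the *_XML.zip archive for Topaz books
    ret = k4mobidedrm.decryptBook('book.azw1', '/tmp/out',
                                  k4=True, kInfoFiles=[], serials=[], pids=[])
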
@ -156,7 +161,7 @@ def usage(progname):
# #
# Main # Main
# #
def main(argv=sys.argv): def main(argv=sys.argv):
progname = os.path.basename(argv[0]) progname = os.path.basename(argv[0])
@ -164,9 +169,9 @@ def main(argv=sys.argv):
kInfoFiles = [] kInfoFiles = []
serials = [] serials = []
pids = [] pids = []
print ('K4MobiDeDrm v%(__version__)s ' print ('K4MobiDeDrm v%(__version__)s '
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals()) 'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
try: try:
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:") opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
@ -177,7 +182,7 @@ def main(argv=sys.argv):
if len(args)<2: if len(args)<2:
usage(progname) usage(progname)
sys.exit(2) sys.exit(2)
for o, a in opts: for o, a in opts:
if o == "-k": if o == "-k":
if a == None : if a == None :
@ -195,8 +200,8 @@ def main(argv=sys.argv):
# try with built in Kindle Info files # try with built in Kindle Info files
k4 = True k4 = True
if sys.platform.startswith('linux'): if sys.platform.startswith('linux'):
k4 = False k4 = False
kInfoFiles = None kInfoFiles = None
infile = args[0] infile = args[0]
outdir = args[1] outdir = args[1]
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids) return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
@ -205,4 +210,3 @@ def main(argv=sys.argv):
if __name__ == '__main__': if __name__ == '__main__':
sys.stdout=Unbuffered(sys.stdout) sys.stdout=Unbuffered(sys.stdout)
sys.exit(main()) sys.exit(main())
View file
@ -5,7 +5,8 @@ from __future__ import with_statement
import sys import sys
import os import os
import os.path import os.path
import re
import copy
import subprocess import subprocess
from struct import pack, unpack, unpack_from from struct import pack, unpack, unpack_from
@ -24,6 +25,25 @@ def _load_crypto_libcrypto():
raise DrmException('libcrypto not found') raise DrmException('libcrypto not found')
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
# From OpenSSL's crypto aes header
#
# AES_ENCRYPT 1
# AES_DECRYPT 0
# AES_MAXNR 14 (in bytes)
# AES_BLOCK_SIZE 16 (in bytes)
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
# note: the ivec string, and output buffer are mutable
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -31,25 +51,31 @@ def _load_crypto_libcrypto():
class AES_KEY(Structure): class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)] _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int]) AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p]) AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1', # From OpenSSL's Crypto evp/p5_crpt2.c
#
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# int keylen, unsigned char *out);
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p]) [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
class LibCrypto(object): class LibCrypto(object):
def __init__(self): def __init__(self):
self._blocksize = 0 self._blocksize = 0
self._keyctx = None self._keyctx = None
self.iv = 0 self._iv = 0
def set_decrypt_key(self, userkey, iv): def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey) self._blocksize = len(userkey)
@ -57,14 +83,17 @@ def _load_crypto_libcrypto():
raise DrmException('AES improper key used') raise DrmException('AES improper key used')
return return
keyctx = self._keyctx = AES_KEY() keyctx = self._keyctx = AES_KEY()
self.iv = iv self._iv = iv
self._userkey = userkey
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx) rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0: if rv < 0:
raise DrmException('Failed to initialize AES key') raise DrmException('Failed to initialize AES key')
def decrypt(self, data): def decrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0) mutable_iv = create_string_buffer(self._iv, len(self._iv))
keyctx = self._keyctx
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
if rv == 0: if rv == 0:
raise DrmException('AES decryption failed') raise DrmException('AES decryption failed')
return out.raw return out.raw
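
The substantive fix in this hunk is the IV handling: AES_cbc_encrypt updates its ivec argument in place (as the new header comment notes), so the wrapper now keeps the caller's IV in self._iv and hands OpenSSL a fresh mutable copy built with create_string_buffer on every decrypt call, instead of passing the stored Python string. A short usage sketch of the class as shown; it assumes the libcrypto path loaded successfully, and the key, IV and ciphertext values are placeholders:

    crp = LibCrypto()
    key = '0123456789abcdef'        # placeholder 16-byte key -> AES-128
    iv  = '\x00' * 16               # placeholder IV
    crp.set_decrypt_key(key, iv)
    ciphertext = '\x00' * 32        # placeholder: two 16-byte AES blocks
    first  = crp.decrypt(ciphertext)
    second = crp.decrypt(ciphertext)
    assert first == second          # the stored IV is no longer clobbered between calls
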
@ -111,13 +140,17 @@ def SHA256(message):
# Various character maps used to decrypt books. Probably supposed to act as obfuscation # Various character maps used to decrypt books. Probably supposed to act as obfuscation
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M" charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM" charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
# For kinf approach of K4PC/K4Mac # For kinf approach of K4Mac 1.6.X or later
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE" # On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# For Mac they seem to re-use charMap2 here # For Mac they seem to re-use charMap2 here
charMap5 = charMap2 charMap5 = charMap2
# new in K4M 1.9.X
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
def encode(data, map): def encode(data, map):
result = "" result = ""
for char in data: for char in data:
@ -144,7 +177,7 @@ def decode(data,map):
result += pack("B",value) result += pack("B",value)
return result return result
# For .kinf approach of K4PC and now K4Mac # For K4M 1.6.X and later
# generate table of prime number less than or equal to int n # generate table of prime number less than or equal to int n
def primes(n): def primes(n):
if n==2: return [2] if n==2: return [2]
@ -271,7 +304,7 @@ def GetDiskPartitionUUID(diskpart):
if not foundIt: if not foundIt:
uuidnum = '' uuidnum = ''
return uuidnum return uuidnum
def GetMACAddressMunged(): def GetMACAddressMunged():
macnum = os.getenv('MYMACNUM') macnum = os.getenv('MYMACNUM')
if macnum != None: if macnum != None:
@ -315,33 +348,11 @@ def GetMACAddressMunged():
return macnum return macnum
# uses unix env to get username instead of using sysctlbyname # uses unix env to get username instead of using sysctlbyname
def GetUserName(): def GetUserName():
username = os.getenv('USER') username = os.getenv('USER')
return username return username
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
def CryptUnprotectData(encryptedData):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
iter = 0x3e8
keylen = 0x80
crp = LibCrypto()
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32]
iv = key_iv[32:48]
crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
def isNewInstall(): def isNewInstall():
home = os.getenv('HOME') home = os.getenv('HOME')
# soccer game fan anyone # soccer game fan anyone
@ -350,7 +361,7 @@ def isNewInstall():
if os.path.exists(dpath): if os.path.exists(dpath):
return True return True
return False return False
def GetIDString(): def GetIDString():
# K4Mac now has an extensive set of ids strings it uses # K4Mac now has an extensive set of ids strings it uses
@ -359,13 +370,13 @@ def GetIDString():
# BUT Amazon has now become nasty enough to detect when its app # BUT Amazon has now become nasty enough to detect when its app
# is being run under a debugger and actually changes code paths # is being run under a debugger and actually changes code paths
# including which one of these strings is chosen, all to try # including which one of these strings is chosen, all to try
# to prevent reverse engineering # to prevent reverse engineering
# Sad really ... they will only hurt their own sales ... # Sad really ... they will only hurt their own sales ...
# true book lovers really want to keep their books forever # true book lovers really want to keep their books forever
# and move them to their devices and DRM prevents that so they # and move them to their devices and DRM prevents that so they
# will just buy from someplace else that they can remove # will just buy from someplace else that they can remove
# the DRM from # the DRM from
# Amazon should know by now that true book lovers are not like # Amazon should know by now that true book lovers are not like
@ -388,27 +399,91 @@ def GetIDString():
return '9999999999' return '9999999999'
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
class CryptUnprotectData(object):
def __init__(self):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
self.crp = LibCrypto()
iter = 0x3e8
keylen = 0x80
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
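This class derives its AES key and IV once, from the volume serial number and user name, via LibCrypto's keyivgen. If keyivgen is the usual PKCS#5 PBKDF2-HMAC-SHA1 derivation (which the KeyIVGen().pbkdf2 and PKCS5_PBKDF2_HMAC_SHA1 references elsewhere in this commit suggest), the same bytes could be produced with the standard library; a sketch under that assumption, for Python 2.7.8+ where hashlib.pbkdf2_hmac accepts byte strings, reusing this module's encode and charMap1:

    import hashlib

    def derive_key_iv(sernum, username):
        # same recipe as the constructor above: SHA-256 of the secret string,
        # re-encoded with charMap1, then PBKDF2 over a fixed salt
        sp = sernum + '!@#' + username
        passwd = encode(hashlib.sha256(sp).digest(), charMap1)
        key_iv = hashlib.pbkdf2_hmac('sha1', passwd, '16743', 0x3e8, 0x80)
        return key_iv[0:32], key_iv[32:48]
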
# implements a Pseudo Mac Version of the Windows built-in Crypto routine # implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.6.0 # used for Kindle for Mac Versions >= 1.6.0
def CryptUnprotectDataV2(encryptedData): class CryptUnprotectDataV2(object):
sp = GetUserName() + ':&%:' + GetIDString() def __init__(self):
passwdData = encode(SHA256(sp),charMap5) sp = GetUserName() + ':&%:' + GetIDString()
# salt generation as per the code passwdData = encode(SHA256(sp),charMap5)
salt = 0x0512981d * 2 * 1 * 1 # salt generation as per the code
salt = str(salt) + GetUserName() salt = 0x0512981d * 2 * 1 * 1
salt = encode(salt,charMap5) salt = str(salt) + GetUserName()
salt = encode(salt,charMap5)
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext
# unprotect the new header blob in .kinf2011
# used in Kindle for Mac Version >= 1.9.0
def UnprotectHeaderData(encryptedData):
passwdData = 'header_key_data'
salt = 'HEADER.2011'
iter = 0x80
keylen = 0x100
crp = LibCrypto() crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = crp.keyivgen(passwdData, salt, iter, keylen) key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32] key = key_iv[0:32]
iv = key_iv[32:48] iv = key_iv[32:48]
crp.set_decrypt_key(key,iv) crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData) cleartext = crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext return cleartext
# implements a Pseudo Mac Version of the Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.9.0
class CryptUnprotectDataV3(object):
def __init__(self, entropy):
sp = GetUserName() + '+@#$%+' + GetIDString()
passwdData = encode(SHA256(sp),charMap2)
salt = entropy
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap2)
return cleartext
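Taken together, the three classes cover the three on-disk formats: CryptUnprotectData for the old [-delimited .kindle-info files (K4Mac before 1.6.0), CryptUnprotectDataV2 for the /-delimited .rainier*-kinf files (1.6.x up to 1.9), and CryptUnprotectDataV3 for .kinf2011 (1.9.x), which also needs the entropy recovered from the header blob. A sketch of the selection that getDBfromFile below performs, with data, hdr and entropy standing in for the values it reads:

    # hedged sketch of the dispatch logic in getDBfromFile
    if '[' in data:
        cud = CryptUnprotectData()              # old .kindle-info layout
    elif hdr == '/':
        cud = CryptUnprotectDataV2()            # .rainier*-kinf layout
    else:
        cud = CryptUnprotectDataV3(entropy)     # .kinf2011 layout, entropy from header blob
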
# Locate the .kindle-info files # Locate the .kindle-info files
def getKindleInfoFiles(kInfoFiles): def getKindleInfoFiles(kInfoFiles):
# first search for current .kindle-info files # first search for current .kindle-info files
@ -424,12 +499,22 @@ def getKindleInfoFiles(kInfoFiles):
if os.path.isfile(resline): if os.path.isfile(resline):
kInfoFiles.append(resline) kInfoFiles.append(resline)
found = True found = True
# add any .kinf files # add any .rainier*-kinf files
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"' cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
cmdline = cmdline.encode(sys.getfilesystemencoding()) cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate() out1, out2 = p1.communicate()
reslst = out1.split('\n') reslst = out1.split('\n')
for resline in reslst:
if os.path.isfile(resline):
kInfoFiles.append(resline)
found = True
# add any .kinf2011 files
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate()
reslst = out1.split('\n')
for resline in reslst: for resline in reslst:
if os.path.isfile(resline): if os.path.isfile(resline):
kInfoFiles.append(resline) kInfoFiles.append(resline)
@ -438,7 +523,7 @@ def getKindleInfoFiles(kInfoFiles):
print('No kindle-info files have been found.') print('No kindle-info files have been found.')
return kInfoFiles return kInfoFiles
# determine type of kindle info provided and return a # determine type of kindle info provided and return a
# database of keynames and values # database of keynames and values
def getDBfromFile(kInfoFile): def getDBfromFile(kInfoFile):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"] names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
@ -449,7 +534,9 @@ def getDBfromFile(kInfoFile):
data = infoReader.read() data = infoReader.read()
if data.find('[') != -1 : if data.find('[') != -1 :
# older style kindle-info file # older style kindle-info file
cud = CryptUnprotectData()
items = data.split('[') items = data.split('[')
for item in items: for item in items:
if item != '': if item != '':
@ -462,87 +549,175 @@ def getDBfromFile(kInfoFile):
if keyname == "unknown": if keyname == "unknown":
keyname = keyhash keyname = keyhash
encryptedValue = decode(rawdata,charMap2) encryptedValue = decode(rawdata,charMap2)
cleartext = CryptUnprotectData(encryptedValue) cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
if cnt == 0: if cnt == 0:
DB = None DB = None
return DB return DB
# else newer style .kinf file used by K4Mac >= 1.6.0 if hdr == '/':
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split # else newer style .kinf file used by K4Mac >= 1.6.0
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split
data = data[:-1]
items = data.split('/')
cud = CryptUnprotectDataV2()
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual
# CryptProtectData Blob that represents that key's contents
# "entropy" not used for K4Mac only K4PC
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,charMap5) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5
encdata = "".join(edlst)
contlen = len(encdata)
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5)
cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
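The realignment described in the comments is a fixed rotation: noffset is the blob length minus the largest prime not exceeding a third of it, and the first noffset characters are moved to the end before decoding. For a 100-character blob, int(100/3) is 33, the largest prime not above 33 is 31, so noffset is 69. A compact sketch of just that step, assuming the primes() helper from this module:

    def realign(encdata):
        # undo the split-and-swap obfuscation before charMap5 decoding
        contlen = len(encdata)
        noffset = contlen - primes(int(contlen / 3))[-1]
        return encdata[noffset:] + encdata[0:noffset]
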
# the latest .kinf2011 version for K4M 1.9.1
# put back the hdr char, it is needed
data = hdr + data
data = data[:-1] data = data[:-1]
items = data.split('/') items = data.split('/')
# the headerblob is the encrypted information needed to build the entropy string
headerblob = items.pop(0)
encryptedValue = decode(headerblob, charMap1)
cleartext = UnprotectHeaderData(encryptedValue)
# now extract the pieces in the same way
# this version is different from K4PC: it scales the build number by multiplying by 735
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
for m in re.finditer(pattern, cleartext):
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
cud = CryptUnprotectDataV3(entropy)
# loop through the item records until all are processed # loop through the item records until all are processed
while len(items) > 0: while len(items) > 0:
# get the first item record # get the first item record
item = items.pop(0) item = items.pop(0)
# the first 32 chars of the first record of a group # the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5 # is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32] keyhash = item[0:32]
keyname = "unknown" keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual # unlike K4PC the keyhash is not used in generating entropy
# CryptProtectData Blob that represents that key's contents # entropy = SHA1(keyhash) + added_entropy
# "entropy" not used for K4Mac only K4PC # entropy = added_entropy
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation # has the ':' split char followed by the string representation
# of the number of records that follow # of the number of records that follow
# and make up the contents # and make up the contents
srcnt = decode(item[34:],charMap5) srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt) rcnt = int(srcnt)
# read and store in rcnt records of data # read and store in rcnt records of data
# that make up the contents value # that make up the contents value
edlst = [] edlst = []
for i in xrange(rcnt): for i in xrange(rcnt):
item = items.pop(0) item = items.pop(0)
edlst.append(item) edlst.append(item)
keyname = "unknown" keyname = "unknown"
for name in names: for name in names:
if encodeHash(name,charMap5) == keyhash: if encodeHash(name,testMap8) == keyhash:
keyname = name keyname = name
break break
if keyname == "unknown": if keyname == "unknown":
keyname = keyhash keyname = keyhash
# the charMap5 encoded contents data has had a length # the testMap8 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved # of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from # to the end to prevent decoding using testMap8 from
# working properly, and thereby preventing the ensuing # working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding. # CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be: # The offset into the testMap8 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3) # len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through) # (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5 # move first offsets chars to end to align for decode by testMap8
encdata = "".join(edlst) encdata = "".join(edlst)
contlen = len(encdata) contlen = len(encdata)
# now properly split and recombine # now properly split and recombine
# by moving noffset chars from the start of the # by moving noffset chars from the start of the
# string to the end of the string # string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1] noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset] pfx = encdata[0:noffset]
encdata = encdata[noffset:] encdata = encdata[noffset:]
encdata = encdata + pfx encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data # decode using testMap8 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5) encryptedValue = decode(encdata,testMap8)
cleartext = CryptUnprotectDataV2(encryptedValue) cleartext = cud.decrypt(encryptedValue)
# Debugging
# print keyname # print keyname
# print cleartext # print cleartext
# print cleartext.encode('hex')
# print
DB[keyname] = cleartext DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
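For the .kinf2011 layout the added entropy comes entirely from the decrypted header blob: the Build number, scaled by 0x2df (735) on the Mac side, concatenated with the Guid. A small self-contained illustration of that extraction, using a hypothetical header cleartext shaped like the pattern above:

    import re

    cleartext = '[Version:1][Build:1034][Cksum:ABCD][Guid:{12345678-aaaa-bbbb-cccc-1234567890ab}]'
    pattern = re.compile(r'\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]', re.IGNORECASE)
    m = pattern.search(cleartext)
    entropy = str(int(m.group(2)) * 0x2df) + m.group(4)   # str(1034 * 735) followed by the Guid
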
View file
@ -3,7 +3,7 @@
from __future__ import with_statement from __future__ import with_statement
import sys, os import sys, os, re
from struct import pack, unpack, unpack_from from struct import pack, unpack, unpack_from
from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
@ -11,9 +11,7 @@ from ctypes import windll, c_char_p, c_wchar_p, c_uint, POINTER, byref, \
string_at, Structure, c_void_p, cast string_at, Structure, c_void_p, cast
import _winreg as winreg import _winreg as winreg
MAX_PATH = 255 MAX_PATH = 255
kernel32 = windll.kernel32 kernel32 = windll.kernel32
advapi32 = windll.advapi32 advapi32 = windll.advapi32
crypt32 = windll.crypt32 crypt32 = windll.crypt32
@ -33,9 +31,35 @@ def SHA1(message):
ctx.update(message) ctx.update(message)
return ctx.digest() return ctx.digest()
def SHA256(message):
ctx = hashlib.sha256()
ctx.update(message)
return ctx.digest()
# For K4PC 1.9.X
# use routines in alfcrypto:
# AES_cbc_encrypt
# AES_set_decrypt_key
# PKCS5_PBKDF2_HMAC_SHA1
from alfcrypto import AES_CBC, KeyIVGen
def UnprotectHeaderData(encryptedData):
passwdData = 'header_key_data'
salt = 'HEADER.2011'
iter = 0x80
keylen = 0x100
key_iv = KeyIVGen().pbkdf2(passwdData, salt, iter, keylen)
key = key_iv[0:32]
iv = key_iv[32:48]
aes=AES_CBC()
aes.set_decrypt_key(key, iv)
cleartext = aes.decrypt(encryptedData)
return cleartext
# simple primes table (<= n) calculator # simple primes table (<= n) calculator
def primes(n): def primes(n):
if n==2: return [2] if n==2: return [2]
elif n<2: return [] elif n<2: return []
s=range(3,n+1,2) s=range(3,n+1,2)
@ -59,6 +83,10 @@ def primes(n):
# Probably supposed to act as obfuscation # Probably supposed to act as obfuscation
charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_" charMap2 = "AaZzB0bYyCc1XxDdW2wEeVv3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_"
charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE" charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# New maps in K4PC 1.9.0
testMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
testMap6 = "9YzAb0Cd1Ef2n5Pr6St7Uvh3Jk4M8WxG"
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -73,7 +101,7 @@ def encode(data, map):
result += map[Q] result += map[Q]
result += map[R] result += map[R]
return result return result
# Hash the bytes in data and then encode the digest with the characters in map # Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map): def encodeHash(data,map):
return encode(MD5(data),map) return encode(MD5(data),map)
@ -165,7 +193,8 @@ def CryptUnprotectData():
outdata = DataBlob() outdata = DataBlob()
if not _CryptUnprotectData(byref(indata), None, byref(entropy), if not _CryptUnprotectData(byref(indata), None, byref(entropy),
None, None, flags, byref(outdata)): None, None, flags, byref(outdata)):
raise DrmException("Failed to Unprotect Data") # raise DrmException("Failed to Unprotect Data")
return 'failed'
return string_at(outdata.pbData, outdata.cbData) return string_at(outdata.pbData, outdata.cbData)
return CryptUnprotectData return CryptUnprotectData
CryptUnprotectData = CryptUnprotectData() CryptUnprotectData = CryptUnprotectData()
@ -198,10 +227,17 @@ def getKindleInfoFiles(kInfoFiles):
else: else:
kInfoFiles.append(kinfopath) kInfoFiles.append(kinfopath)
# now look for even newer (K4PC 1.9.0 and later) .kinf2011 file
kinfopath = path +'\\Amazon\\Kindle\\storage\\.kinf2011'
if not os.path.isfile(kinfopath):
print('No K4PC 1.9.X .kinf files have been found.')
else:
kInfoFiles.append(kinfopath)
return kInfoFiles return kInfoFiles
# determine type of kindle info provided and return a # determine type of kindle info provided and return a
# database of keynames and values # database of keynames and values
def getDBfromFile(kInfoFile): def getDBfromFile(kInfoFile):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"] names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
@ -232,12 +268,97 @@ def getDBfromFile(kInfoFile):
DB = None DB = None
return DB return DB
# else newer style .kinf file if hdr == '/':
# else rainier-2-1-1 .kinf file
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split
data = data[:-1]
items = data.split('/')
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
# the raw keyhash string is used to create entropy for the actual
# CryptProtectData Blob that represents that key's contents
entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,charMap5) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be:
# len(contents)-largest prime number <= int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5
encdata = "".join(edlst)
contlen = len(encdata)
noffset = contlen - primes(int(contlen/3))[-1]
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using Map5 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5)
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1)
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
# else newest .kinf2011 style .kinf file
# the .kinf file uses "/" to separate it into records # the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split # so remove the trailing "/" to make it easy to use split
data = data[:-1] # need to put back the first char read because it is part
# of the added entropy blob
data = hdr + data[:-1]
items = data.split('/') items = data.split('/')
# starts with an encoded and encrypted header blob
headerblob = items.pop(0)
encryptedValue = decode(headerblob, testMap1)
cleartext = UnprotectHeaderData(encryptedValue)
# now extract the pieces that form the added entropy
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
for m in re.finditer(pattern, cleartext):
added_entropy = m.group(2) + m.group(4)
# loop through the item records until all are processed # loop through the item records until all are processed
while len(items) > 0: while len(items) > 0:
@ -248,11 +369,11 @@ def getDBfromFile(kInfoFile):
# is the MD5 hash of the key name encoded by charMap5 # is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32] keyhash = item[0:32]
# the raw keyhash string is also used to create entropy for the actual # the SHA1 of the raw keyhash string is used to create entropy along
# CryptProtectData Blob that represents that key's contents # with the added entropy provided above from the headerblob
entropy = SHA1(keyhash) entropy = SHA1(keyhash) + added_entropy
# the remainder of the first record when decoded with charMap5 # the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation # has the ':' split char followed by the string representation
# of the number of records that follow # of the number of records that follow
# and make up the contents # and make up the contents
@ -266,43 +387,39 @@ def getDBfromFile(kInfoFile):
item = items.pop(0) item = items.pop(0)
edlst.append(item) edlst.append(item)
# key names now use the new testMap8 encoding
keyname = "unknown" keyname = "unknown"
for name in names: for name in names:
if encodeHash(name,charMap5) == keyhash: if encodeHash(name,testMap8) == keyhash:
keyname = name keyname = name
break break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length # the testMap8 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved # of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from # to the end to prevent decoding using testMap8 from
# working properly, and thereby preventing the ensuing # working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding. # CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be: # The offset into the testMap8 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3) # len(contents)-largest prime number <= int(len(content)/3)
# (in other words split "about" 2/3rds of the way through) # (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5 # move first offsets chars to end to align for decode by testMap8
# by moving noffset chars from the start of the
# string to the end of the string
encdata = "".join(edlst) encdata = "".join(edlst)
contlen = len(encdata) contlen = len(encdata)
noffset = contlen - primes(int(contlen/3))[-1] noffset = contlen - primes(int(contlen/3))[-1]
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
pfx = encdata[0:noffset] pfx = encdata[0:noffset]
encdata = encdata[noffset:] encdata = encdata[noffset:]
encdata = encdata + pfx encdata = encdata + pfx
# decode using Map5 to get the CryptProtect Data # decode using new testMap8 to get the original CryptProtect Data
encryptedValue = decode(encdata,charMap5) encryptedValue = decode(encdata,testMap8)
DB[keyname] = CryptUnprotectData(encryptedValue, entropy, 1) cleartext = CryptUnprotectData(encryptedValue, entropy, 1)
DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
if cnt == 0: if cnt == 0:
DB = None DB = None
return DB return DB
View file
@ -62,7 +62,7 @@ def encode(data, map):
result += map[Q] result += map[Q]
result += map[R] result += map[R]
return result return result
# Hash the bytes in data and then encode the digest with the characters in map # Hash the bytes in data and then encode the digest with the characters in map
def encodeHash(data,map): def encodeHash(data,map):
return encode(MD5(data),map) return encode(MD5(data),map)
@ -78,11 +78,11 @@ def decode(data,map):
value = (((high * len(map)) ^ 0x80) & 0xFF) + low value = (((high * len(map)) ^ 0x80) & 0xFF) + low
result += pack("B",value) result += pack("B",value)
return result return result
# #
# PID generation routines # PID generation routines
# #
# Returns two bits at offset from a bit field # Returns two bits at offset from a bit field
def getTwoBitsFromBitField(bitField,offset): def getTwoBitsFromBitField(bitField,offset):
byteNumber = offset // 4 byteNumber = offset // 4
@ -91,10 +91,10 @@ def getTwoBitsFromBitField(bitField,offset):
# Returns the six bits at offset from a bit field # Returns the six bits at offset from a bit field
def getSixBitsFromBitField(bitField,offset): def getSixBitsFromBitField(bitField,offset):
offset *= 3 offset *= 3
value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2) value = (getTwoBitsFromBitField(bitField,offset) <<4) + (getTwoBitsFromBitField(bitField,offset+1) << 2) +getTwoBitsFromBitField(bitField,offset+2)
return value return value
# 8 bits to six bits encoding from hash to generate PID string # 8 bits to six bits encoding from hash to generate PID string
def encodePID(hash): def encodePID(hash):
global charMap3 global charMap3
@ -121,8 +121,8 @@ def generatePidEncryptionTable() :
def generatePidSeed(table,dsn) : def generatePidSeed(table,dsn) :
value = 0 value = 0
for counter in range (0,4) : for counter in range (0,4) :
index = (ord(dsn[counter]) ^ value) &0xFF index = (ord(dsn[counter]) ^ value) &0xFF
value = (value >> 8) ^ table[index] value = (value >> 8) ^ table[index]
return value return value
# Generate the device PID # Generate the device PID
@ -141,7 +141,7 @@ def generateDevicePID(table,dsn,nbRoll):
return pidAscii return pidAscii
def crc32(s): def crc32(s):
return (~binascii.crc32(s,-1))&0xFFFFFFFF return (~binascii.crc32(s,-1))&0xFFFFFFFF
# convert from 8 digit PID to 10 digit PID with checksum # convert from 8 digit PID to 10 digit PID with checksum
def checksumPid(s): def checksumPid(s):
@ -204,10 +204,10 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
print(message) print(message)
kindleDatabase = None kindleDatabase = None
pass pass
if kindleDatabase == None : if kindleDatabase == None :
return pidlst return pidlst
try: try:
# Get the Mazama Random number # Get the Mazama Random number
MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"] MazamaRandomNumber = kindleDatabase["MazamaRandomNumber"]
@ -217,7 +217,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
except KeyError: except KeyError:
print "Keys not found in " + kInfoFile print "Keys not found in " + kInfoFile
return pidlst return pidlst
# Get the ID string used # Get the ID string used
encodedIDString = encodeHash(GetIDString(),charMap1) encodedIDString = encodeHash(GetIDString(),charMap1)
@ -226,7 +226,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
# concat, hash and encode to calculate the DSN # concat, hash and encode to calculate the DSN
DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1) DSN = encode(SHA1(MazamaRandomNumber+encodedIDString+encodedUsername),charMap1)
# Compute the device PID (which, for all I can tell, is used for nothing). # Compute the device PID (which, for all I can tell, is used for nothing).
table = generatePidEncryptionTable() table = generatePidEncryptionTable()
devicePID = generateDevicePID(table,DSN,4) devicePID = generateDevicePID(table,DSN,4)
@ -258,7 +258,7 @@ def getK4Pids(pidlst, rec209, token, kInfoFile):
def getPidList(md1, md2, k4, pids, serials, kInfoFiles): def getPidList(md1, md2, k4, pids, serials, kInfoFiles):
pidlst = [] pidlst = []
if kInfoFiles is None: if kInfoFiles is None:
kInfoFiles = [] kInfoFiles = []
if k4: if k4:
kInfoFiles = getKindleInfoFiles(kInfoFiles) kInfoFiles = getKindleInfoFiles(kInfoFiles)
for infoFile in kInfoFiles: for infoFile in kInfoFiles:
View file
@ -27,8 +27,8 @@
# files reveals that a confusion has arisen because trailing data entries # files reveals that a confusion has arisen because trailing data entries
# are not encrypted, but it turns out that the multibyte entries # are not encrypted, but it turns out that the multibyte entries
# in utf8 file are encrypted. (Although neither kind gets compressed.) # in utf8 file are encrypted. (Although neither kind gets compressed.)
# This knowledge leads to a simplification of the test for the # This knowledge leads to a simplification of the test for the
# trailing data byte flags - version 5 and higher AND header size >= 0xE4. # trailing data byte flags - version 5 and higher AND header size >= 0xE4.
# 0.15 - Now outputs 'heartbeat', and is also quicker for long files. # 0.15 - Now outputs 'heartbeat', and is also quicker for long files.
# 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility. # 0.16 - And reverts to 'done' not 'done.' at the end for unswindle compatibility.
# 0.17 - added modifications to support its use as an imported python module # 0.17 - added modifications to support its use as an imported python module
@ -42,7 +42,7 @@
# 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file. # 0.20 - Correction: It seems that multibyte entries are encrypted in a v6 file.
# 0.21 - Added support for multiple pids # 0.21 - Added support for multiple pids
# 0.22 - revised structure to hold MobiBook as a class to allow an extended interface # 0.22 - revised structure to hold MobiBook as a class to allow an extended interface
# 0.23 - fixed problem with older files with no EXTH section # 0.23 - fixed problem with older files with no EXTH section
# 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well # 0.24 - add support for type 1 encryption and 'TEXtREAd' books as well
# 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption # 0.25 - Fixed support for 'BOOKMOBI' type 1 encryption
# 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100% # 0.26 - Now enables Text-To-Speech flag and sets clipping limit to 100%
@ -54,8 +54,10 @@
# 0.30 - Modified interface slightly to work better with new calibre plugin style # 0.30 - Modified interface slightly to work better with new calibre plugin style
# 0.31 - The multibyte encryption info is true for version 7 files too. # 0.31 - The multibyte encryption info is true for version 7 files too.
# 0.32 - Added support for "Print Replica" Kindle ebooks # 0.32 - Added support for "Print Replica" Kindle ebooks
# 0.33 - Performance improvements for large files (concatenation)
# 0.34 - Performance improvements in decryption (libalfcrypto)
__version__ = '0.32' __version__ = '0.34'
import sys import sys
@ -72,6 +74,7 @@ sys.stdout=Unbuffered(sys.stdout)
import os import os
import struct import struct
import binascii import binascii
from alfcrypto import Pukall_Cipher
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -83,36 +86,37 @@ class DrmException(Exception):
# Implementation of Pukall Cipher 1 # Implementation of Pukall Cipher 1
def PC1(key, src, decryption=True): def PC1(key, src, decryption=True):
sum1 = 0; return Pukall_Cipher().PC1(key,src,decryption)
sum2 = 0; # sum1 = 0;
keyXorVal = 0; # sum2 = 0;
if len(key)!=16: # keyXorVal = 0;
print "Bad key length!" # if len(key)!=16:
return None # print "Bad key length!"
wkey = [] # return None
for i in xrange(8): # wkey = []
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1])) # for i in xrange(8):
dst = "" # wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
for i in xrange(len(src)): # dst = ""
temp1 = 0; # for i in xrange(len(src)):
byteXorVal = 0; # temp1 = 0;
for j in xrange(8): # byteXorVal = 0;
temp1 ^= wkey[j] # for j in xrange(8):
sum2 = (sum2+j)*20021 + sum1 # temp1 ^= wkey[j]
sum1 = (temp1*346)&0xFFFF # sum2 = (sum2+j)*20021 + sum1
sum2 = (sum2+sum1)&0xFFFF # sum1 = (temp1*346)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF # sum2 = (sum2+sum1)&0xFFFF
byteXorVal ^= temp1 ^ sum2 # temp1 = (temp1*20021+1)&0xFFFF
curByte = ord(src[i]) # byteXorVal ^= temp1 ^ sum2
if not decryption: # curByte = ord(src[i])
keyXorVal = curByte * 257; # if not decryption:
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF # keyXorVal = curByte * 257;
if decryption: # curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
keyXorVal = curByte * 257; # if decryption:
for j in xrange(8): # keyXorVal = curByte * 257;
wkey[j] ^= keyXorVal; # for j in xrange(8):
dst+=chr(curByte) # wkey[j] ^= keyXorVal;
return dst # dst+=chr(curByte)
# return dst
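PC1 is the inner loop of Mobipocket decryption, which is why this commit hands it to the native alfcrypto library and keeps the pure-Python version above only as a commented reference. Assuming the wrapper behaves like that reference, it is a symmetric cipher keyed by a 16-byte key, so a round trip looks like:

    # usage sketch: encrypt with decryption=False, decrypt with the default
    key = '0123456789abcdef'                 # 16 bytes, as the reference checks
    section = 'example record payload'
    assert PC1(key, PC1(key, section, decryption=False)) == section
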
def checksumPid(s): def checksumPid(s):
letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789" letters = "ABCDEFGHIJKLMNPQRSTUVWXYZ123456789"
@ -236,7 +240,7 @@ class MobiBook:
self.meta_array = {} self.meta_array = {}
pass pass
self.print_replica = False self.print_replica = False
def getBookTitle(self): def getBookTitle(self):
codec_map = { codec_map = {
1252 : 'windows-1252', 1252 : 'windows-1252',
@ -319,7 +323,7 @@ class MobiBook:
def getMobiFile(self, outpath): def getMobiFile(self, outpath):
file(outpath,'wb').write(self.mobi_data) file(outpath,'wb').write(self.mobi_data)
def getPrintReplica(self): def getPrintReplica(self):
return self.print_replica return self.print_replica
@ -355,9 +359,9 @@ class MobiBook:
if self.magic == 'TEXtREAd': if self.magic == 'TEXtREAd':
bookkey_data = self.sect[0x0E:0x0E+16] bookkey_data = self.sect[0x0E:0x0E+16]
elif self.mobi_version < 0: elif self.mobi_version < 0:
bookkey_data = self.sect[0x90:0x90+16] bookkey_data = self.sect[0x90:0x90+16]
else: else:
bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32] bookkey_data = self.sect[self.mobi_length+16:self.mobi_length+32]
pid = "00000000" pid = "00000000"
found_key = PC1(t1_keyvec, bookkey_data) found_key = PC1(t1_keyvec, bookkey_data)
else : else :
@ -372,7 +376,7 @@ class MobiBook:
self.patchSection(0, "\0" * drm_size, drm_ptr) self.patchSection(0, "\0" * drm_size, drm_ptr)
# kill the drm pointers # kill the drm pointers
self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8) self.patchSection(0, "\xff" * 4 + "\0" * 12, 0xA8)
if pid=="00000000": if pid=="00000000":
print "File has default encryption, no specific PID." print "File has default encryption, no specific PID."
else: else:
@ -383,7 +387,8 @@ class MobiBook:
# decrypt sections # decrypt sections
print "Decrypting. Please wait . . .", print "Decrypting. Please wait . . .",
self.mobi_data = self.data_file[:self.sections[1][0]] mobidataList = []
mobidataList.append(self.data_file[:self.sections[1][0]])
for i in xrange(1, self.records+1): for i in xrange(1, self.records+1):
data = self.loadSection(i) data = self.loadSection(i)
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags) extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
@ -393,11 +398,12 @@ class MobiBook:
decoded_data = PC1(found_key, data[0:len(data) - extra_size]) decoded_data = PC1(found_key, data[0:len(data) - extra_size])
if i==1: if i==1:
self.print_replica = (decoded_data[0:4] == '%MOP') self.print_replica = (decoded_data[0:4] == '%MOP')
self.mobi_data += decoded_data mobidataList.append(decoded_data)
if extra_size > 0: if extra_size > 0:
self.mobi_data += data[-extra_size:] mobidataList.append(data[-extra_size:])
if self.num_sections > self.records+1: if self.num_sections > self.records+1:
self.mobi_data += self.data_file[self.sections[self.records+1][0]:] mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
self.mobi_data = "".join(mobidataList)
print "done" print "done"
return return
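The switch from repeated += on self.mobi_data to appending into mobidataList and joining once is the 0.33 performance change: every += copies the whole accumulated string, so concatenation is quadratic on large books, while a single join is linear. The general pattern, in miniature:

    # accumulate parts, then pay for one copy at the end
    parts = []
    for chunk in ('header', 'record1', 'record2'):
        parts.append(chunk)
    book = "".join(parts)
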
View file
@ -18,7 +18,7 @@ def load_libcrypto():
return None return None
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
# typedef struct DES_ks # typedef struct DES_ks
# { # {
# union # union
@ -30,7 +30,7 @@ def load_libcrypto():
# } ks[16]; # } ks[16];
# } DES_key_schedule; # } DES_key_schedule;
# just create a big enough place to hold everything # just create a big enough place to hold everything
# it will have alignment of structure so we should be okay (16 byte aligned?) # it will have alignment of structure so we should be okay (16 byte aligned?)
class DES_KEY_SCHEDULE(Structure): class DES_KEY_SCHEDULE(Structure):
_fields_ = [('DES_cblock1', c_char * 16), _fields_ = [('DES_cblock1', c_char * 16),
@ -61,7 +61,7 @@ def load_libcrypto():
DES_set_key = F(None, 'DES_set_key',[c_char_p, DES_KEY_SCHEDULE_p]) DES_set_key = F(None, 'DES_set_key',[c_char_p, DES_KEY_SCHEDULE_p])
DES_ecb_encrypt = F(None, 'DES_ecb_encrypt',[c_char_p, c_char_p, DES_KEY_SCHEDULE_p, c_int]) DES_ecb_encrypt = F(None, 'DES_ecb_encrypt',[c_char_p, c_char_p, DES_KEY_SCHEDULE_p, c_int])
class DES(object): class DES(object):
def __init__(self, key): def __init__(self, key):
if len(key) != 8 : if len(key) != 8 :
@ -87,4 +87,3 @@ def load_libcrypto():
return ''.join(result) return ''.join(result)
return DES return DES
View file
@ -0,0 +1,68 @@
# A simple implementation of pbkdf2 using stock python modules. See RFC2898
# for details. Basically, it derives a key from a password and salt.
# Copyright 2004 Matt Johnston <matt @ ucc asn au>
# Copyright 2009 Daniel Holth <dholth@fastmail.fm>
# This code may be freely used and modified for any purpose.
# Revision history
# v0.1 October 2004 - Initial release
# v0.2 8 March 2007 - Make usable with hashlib in Python 2.5 and use
# v0.3 "" the correct digest_size rather than always 20
# v0.4 Oct 2009 - Rescue from chandler svn, test and optimize.
import sys
import hmac
from struct import pack
try:
# only in python 2.5
import hashlib
sha = hashlib.sha1
md5 = hashlib.md5
sha256 = hashlib.sha256
except ImportError: # pragma: NO COVERAGE
# fallback
import sha
import md5
# this is what you want to call.
def pbkdf2( password, salt, itercount, keylen, hashfn = sha ):
try:
# depending whether the hashfn is from hashlib or sha/md5
digest_size = hashfn().digest_size
except TypeError: # pragma: NO COVERAGE
digest_size = hashfn.digest_size
# l - number of output blocks to produce
l = keylen / digest_size
if keylen % digest_size != 0:
l += 1
h = hmac.new( password, None, hashfn )
T = ""
for i in range(1, l+1):
T += pbkdf2_F( h, salt, itercount, i )
return T[0: keylen]
def xorstr( a, b ):
if len(a) != len(b):
raise ValueError("xorstr(): lengths differ")
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
def prf( h, data ):
hm = h.copy()
hm.update( data )
return hm.digest()
# Helper as per the spec. h is a hmac which has been created seeded with the
# password, it will be copy()ed and not modified.
def pbkdf2_F( h, salt, itercount, blocknum ):
U = prf( h, salt + pack('>i',blocknum ) )
T = U
for i in range(2, itercount+1):
U = prf( h, U )
T = xorstr( T, U )
return T
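This pure-Python pbkdf2 appears intended as a stand-in for the PKCS5_PBKDF2_HMAC_SHA1 routine used by the header and key derivations elsewhere in this commit. If it is substituted for the native call, the UnprotectHeaderData derivation becomes, with the same constants:

    # derive 0x100 bytes with 0x80 iterations, then split off key and IV
    key_iv = pbkdf2('header_key_data', 'HEADER.2011', 0x80, 0x100)
    key = key_iv[0:32]
    iv = key_iv[32:48]
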
View file
@ -28,4 +28,3 @@ def load_pycrypto():
i += 8 i += 8
return ''.join(result) return ''.join(result)
return DES return DES
View file
@ -2,8 +2,8 @@
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab # vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
import sys import sys
ECB = 0 ECB = 0
CBC = 1 CBC = 1
class Des(object): class Des(object):
__pc1 = [56, 48, 40, 32, 24, 16, 8, 0, 57, 49, 41, 33, 25, 17, __pc1 = [56, 48, 40, 32, 24, 16, 8, 0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35,
@ -11,13 +11,13 @@ class Des(object):
13, 5, 60, 52, 44, 36, 28, 20, 12, 4, 27, 19, 11, 3] 13, 5, 60, 52, 44, 36, 28, 20, 12, 4, 27, 19, 11, 3]
__left_rotations = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1] __left_rotations = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]
__pc2 = [13, 16, 10, 23, 0, 4,2, 27, 14, 5, 20, 9, __pc2 = [13, 16, 10, 23, 0, 4,2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1, 22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47, 40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31] 43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31]
__ip = [57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, __ip = [57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0, 58, 50, 42, 34, 26, 18, 10, 2, 56, 48, 40, 32, 24, 16, 8, 0, 58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6] 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6]
__expansion_table = [31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, __expansion_table = [31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,11, 12, 13, 14, 15, 16, 7, 8, 9, 10, 11, 12,11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,19, 20, 21, 22, 23, 24, 15, 16, 17, 18, 19, 20,19, 20, 21, 22, 23, 24,
@ -61,8 +61,8 @@ class Des(object):
35, 3, 43, 11, 51, 19, 59, 27,34, 2, 42, 10, 50, 18, 58, 26, 35, 3, 43, 11, 51, 19, 59, 27,34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,32, 0, 40, 8, 48, 16, 56, 24] 33, 1, 41, 9, 49, 17, 57, 25,32, 0, 40, 8, 48, 16, 56, 24]
# Type of crypting being done # Type of crypting being done
ENCRYPT = 0x00 ENCRYPT = 0x00
DECRYPT = 0x01 DECRYPT = 0x01
def __init__(self, key, mode=ECB, IV=None): def __init__(self, key, mode=ECB, IV=None):
if len(key) != 8: if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.") raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
@ -74,7 +74,7 @@ class Des(object):
self.setIV(IV) self.setIV(IV)
self.L = [] self.L = []
self.R = [] self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16) self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
self.final = [] self.final = []
self.setKey(key) self.setKey(key)
def getKey(self): def getKey(self):
View file
@ -43,7 +43,7 @@ class SimplePrefs(object):
key = self.file2key[filename] key = self.file2key[filename]
filepath = os.path.join(self.prefdir,filename) filepath = os.path.join(self.prefdir,filename)
if os.path.isfile(filepath): if os.path.isfile(filepath):
try : try :
data = file(filepath,'rb').read() data = file(filepath,'rb').read()
self.prefs[key] = data self.prefs[key] = data
except Exception, e: except Exception, e:
@ -75,4 +75,3 @@ class SimplePrefs(object):
pass pass
self.prefs = newprefs self.prefs = newprefs
return return
View file
@ -6,6 +6,7 @@ import csv
import sys import sys
import os import os
import getopt import getopt
import re
from struct import pack from struct import pack
from struct import unpack from struct import unpack
@ -43,8 +44,8 @@ class DocParser(object):
'pos-right' : 'text-align: right;', 'pos-right' : 'text-align: right;',
'pos-center' : 'text-align: center; margin-left: auto; margin-right: auto;', 'pos-center' : 'text-align: center; margin-left: auto; margin-right: auto;',
} }
# find tag if within pos to end inclusive # find tag if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) : def findinDoc(self, tagpath, pos, end) :
result = None result = None
@ -59,10 +60,10 @@ class DocParser(object):
item = docList[j] item = docList[j]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=',1) (name, argres) = item.split('=',1)
else : else :
name = item name = item
argres = '' argres = ''
if name.endswith(tagpath) : if name.endswith(tagpath) :
result = argres result = argres
foundat = j foundat = j
break break
@ -82,12 +83,19 @@ class DocParser(object):
return startpos return startpos
# returns a vector of integers for the tagpath # returns a vector of integers for the tagpath
def getData(self, tagpath, pos, end): def getData(self, tagpath, pos, end, clean=False):
if clean:
digits_only = re.compile(r'''([0-9]+)''')
argres=[] argres=[]
(foundat, argt) = self.findinDoc(tagpath, pos, end) (foundat, argt) = self.findinDoc(tagpath, pos, end)
if (argt != None) and (len(argt) > 0) : if (argt != None) and (len(argt) > 0) :
argList = argt.split('|') argList = argt.split('|')
argres = [ int(strval) for strval in argList] for strval in argList:
if clean:
m = re.search(digits_only, strval)
if m != None:
strval = m.group()
argres.append(int(strval))
return argres return argres
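The new clean flag strips any non-digit decoration from a value before the int() conversion, which is what getpageIDMap at the bottom of the file relies on. A self-contained illustration of just the regex step, with hypothetical raw values:

    import re

    digits_only = re.compile(r'([0-9]+)')
    for strval in ['24', 'p-37', '(112)']:    # hypothetical raw page-id strings
        m = digits_only.search(strval)
        if m != None:
            strval = m.group()
        print(int(strval))                    # 24, 37, 112
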
def process(self): def process(self):
@ -112,7 +120,7 @@ class DocParser(object):
(pos, tag) = self.findinDoc('style._tag',start,end) (pos, tag) = self.findinDoc('style._tag',start,end)
if tag == None : if tag == None :
(pos, tag) = self.findinDoc('style.type',start,end) (pos, tag) = self.findinDoc('style.type',start,end)
# Is this something we know how to convert to css # Is this something we know how to convert to css
if tag in self.stags : if tag in self.stags :
@ -121,7 +129,7 @@ class DocParser(object):
if sclass != None: if sclass != None:
sclass = sclass.replace(' ','-') sclass = sclass.replace(' ','-')
sclass = '.cl-' + sclass.lower() sclass = '.cl-' + sclass.lower()
else : else :
sclass = '' sclass = ''
# check for any "after class" specifiers # check for any "after class" specifiers
@ -129,7 +137,7 @@ class DocParser(object):
if aftclass != None: if aftclass != None:
aftclass = aftclass.replace(' ','-') aftclass = aftclass.replace(' ','-')
aftclass = '.cl-' + aftclass.lower() aftclass = '.cl-' + aftclass.lower()
else : else :
aftclass = '' aftclass = ''
cssargs = {} cssargs = {}
@ -140,7 +148,7 @@ class DocParser(object):
(pos2, val) = self.findinDoc('style.rule.value', start, end) (pos2, val) = self.findinDoc('style.rule.value', start, end)
if attr == None : break if attr == None : break
if (attr == 'display') or (attr == 'pos') or (attr == 'align'): if (attr == 'display') or (attr == 'pos') or (attr == 'align'):
# handle text based attributes # handle text based attributes
attr = attr + '-' + val attr = attr + '-' + val
@ -168,7 +176,7 @@ class DocParser(object):
if aftclass != "" : keep = False if aftclass != "" : keep = False
if keep : if keep :
# make sure line-space does not go below 100% or above 300% since # make sure line-space does not go below 100% or above 300% since
# it can be wacky in some styles # it can be wacky in some styles
if 'line-space' in cssargs: if 'line-space' in cssargs:
seg = cssargs['line-space'][0] seg = cssargs['line-space'][0]
@ -178,7 +186,7 @@ class DocParser(object):
del cssargs['line-space'] del cssargs['line-space']
cssargs['line-space'] = (self.attr_val_map['line-space'], val) cssargs['line-space'] = (self.attr_val_map['line-space'], val)
# handle modifications for css style hanging indents # handle modifications for css style hanging indents
if 'hang' in cssargs: if 'hang' in cssargs:
hseg = cssargs['hang'][0] hseg = cssargs['hang'][0]
@ -211,7 +219,7 @@ class DocParser(object):
if sclass != '' : if sclass != '' :
classlst += sclass + '\n' classlst += sclass + '\n'
# handle special case of paragraph class used inside chapter heading # handle special case of paragraph class used inside chapter heading
# and non-chapter headings # and non-chapter headings
if sclass != '' : if sclass != '' :
@ -232,7 +240,7 @@ class DocParser(object):
if cssline != ' { }': if cssline != ' { }':
csspage += self.stags[tag] + cssline + '\n' csspage += self.stags[tag] + cssline + '\n'
return csspage, classlst return csspage, classlst
@ -251,5 +259,5 @@ def convert2CSS(flatxml, fontsize, ph, pw):
def getpageIDMap(flatxml): def getpageIDMap(flatxml):
dp = DocParser(flatxml, 0, 0, 0) dp = DocParser(flatxml, 0, 0, 0)
pageidnumbers = dp.getData('info.original.pid', 0, -1) pageidnumbers = dp.getData('info.original.pid', 0, -1, True)
return pageidnumbers return pageidnumbers

View file

@ -52,7 +52,7 @@ class Process(object):
self.__stdout_thread = threading.Thread( self.__stdout_thread = threading.Thread(
name="stdout-thread", name="stdout-thread",
target=self.__reader, args=(self.__collected_outdata, target=self.__reader, args=(self.__collected_outdata,
self.__process.stdout)) self.__process.stdout))
self.__stdout_thread.setDaemon(True) self.__stdout_thread.setDaemon(True)
self.__stdout_thread.start() self.__stdout_thread.start()
@ -60,7 +60,7 @@ class Process(object):
self.__stderr_thread = threading.Thread( self.__stderr_thread = threading.Thread(
name="stderr-thread", name="stderr-thread",
target=self.__reader, args=(self.__collected_errdata, target=self.__reader, args=(self.__collected_errdata,
self.__process.stderr)) self.__process.stderr))
self.__stderr_thread.setDaemon(True) self.__stderr_thread.setDaemon(True)
self.__stderr_thread.start() self.__stderr_thread.start()
@ -146,4 +146,3 @@ class Process(object):
self.__quit = True self.__quit = True
self.__inputsem.release() self.__inputsem.release()
self.__lock.release() self.__lock.release()
View file
@ -16,15 +16,18 @@ if 'calibre' in sys.modules:
else: else:
inCalibre = False inCalibre = False
buildXML = False
import os, csv, getopt import os, csv, getopt
import zlib, zipfile, tempfile, shutil import zlib, zipfile, tempfile, shutil
from struct import pack from struct import pack
from struct import unpack from struct import unpack
from alfcrypto import Topaz_Cipher
class TpzDRMError(Exception): class TpzDRMError(Exception):
pass pass
# local support routines # local support routines
if inCalibre: if inCalibre:
from calibre_plugins.k4mobidedrm import kgenpids from calibre_plugins.k4mobidedrm import kgenpids
@ -58,22 +61,22 @@ def bookReadEncodedNumber(fo):
flag = False flag = False
data = ord(fo.read(1)) data = ord(fo.read(1))
if data == 0xFF: if data == 0xFF:
flag = True flag = True
data = ord(fo.read(1)) data = ord(fo.read(1))
if data >= 0x80: if data >= 0x80:
datax = (data & 0x7F) datax = (data & 0x7F)
while data >= 0x80 : while data >= 0x80 :
data = ord(fo.read(1)) data = ord(fo.read(1))
datax = (datax <<7) + (data & 0x7F) datax = (datax <<7) + (data & 0x7F)
data = datax data = datax
if flag: if flag:
data = -data data = -data
return data return data
# Get a length prefixed string from file # Get a length prefixed string from file
def bookReadString(fo): def bookReadString(fo):
stringLength = bookReadEncodedNumber(fo) stringLength = bookReadEncodedNumber(fo)
return unpack(str(stringLength)+"s",fo.read(stringLength))[0] return unpack(str(stringLength)+"s",fo.read(stringLength))[0]
# #
# crypto routines # crypto routines
@ -81,25 +84,28 @@ def bookReadString(fo):
# Context initialisation for the Topaz Crypto # Context initialisation for the Topaz Crypto
def topazCryptoInit(key): def topazCryptoInit(key):
ctx1 = 0x0CAFFE19E return Topaz_Cipher().ctx_init(key)
for keyChar in key:
keyByte = ord(keyChar) # ctx1 = 0x0CAFFE19E
ctx2 = ctx1 # for keyChar in key:
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF ) # keyByte = ord(keyChar)
return [ctx1,ctx2] # ctx2 = ctx1
# ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
# return [ctx1,ctx2]
# decrypt data with the context prepared by topazCryptoInit() # decrypt data with the context prepared by topazCryptoInit()
def topazCryptoDecrypt(data, ctx): def topazCryptoDecrypt(data, ctx):
ctx1 = ctx[0] return Topaz_Cipher().decrypt(data, ctx)
ctx2 = ctx[1] # ctx1 = ctx[0]
plainText = "" # ctx2 = ctx[1]
for dataChar in data: # plainText = ""
dataByte = ord(dataChar) # for dataChar in data:
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF # dataByte = ord(dataChar)
ctx2 = ctx1 # m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF) # ctx2 = ctx1
plainText += chr(m) # ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
return plainText # plainText += chr(m)
# return plainText
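As with PC1, the Topaz stream cipher now runs in alfcrypto, with the pure-Python versions kept above as commented reference. A minimal usage sketch, with PID and data as placeholders for the book PID string and an encrypted record, on the assumption that decryptRecord below pairs the two helpers this way:

    # seed a context from the PID, then decrypt a record with it
    ctx = topazCryptoInit(PID)
    plaintext = topazCryptoDecrypt(data, ctx)
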
# Decrypt data with the PID # Decrypt data with the PID
def decryptRecord(data,PID): def decryptRecord(data,PID):
@ -153,7 +159,7 @@ class TopazBook:
def parseTopazHeaders(self): def parseTopazHeaders(self):
def bookReadHeaderRecordData(): def bookReadHeaderRecordData():
# Read and return the data of one header record at the current book file position # Read and return the data of one header record at the current book file position
# [[offset,decompressedLength,compressedLength],...] # [[offset,decompressedLength,compressedLength],...]
nbValues = bookReadEncodedNumber(self.fo) nbValues = bookReadEncodedNumber(self.fo)
values = [] values = []
@ -213,11 +219,11 @@ class TopazBook:
self.bookKey = key self.bookKey = key
def getBookPayloadRecord(self, name, index): def getBookPayloadRecord(self, name, index):
# Get a record in the book payload, given its name and index. # Get a record in the book payload, given its name and index.
# decrypted and decompressed if necessary # decrypted and decompressed if necessary
encrypted = False encrypted = False
compressed = False compressed = False
try: try:
recordOffset = self.bookHeaderRecords[name][index][0] recordOffset = self.bookHeaderRecords[name][index][0]
except: except:
raise TpzDRMError("Parse Error : Invalid Record, record not found") raise TpzDRMError("Parse Error : Invalid Record, record not found")
@ -268,8 +274,8 @@ class TopazBook:
rv = genbook.generateBook(self.outdir, raw, fixedimage) rv = genbook.generateBook(self.outdir, raw, fixedimage)
if rv == 0: if rv == 0:
print "\nBook Successfully generated" print "\nBook Successfully generated"
return rv return rv
# try each pid to decode the file # try each pid to decode the file
bookKey = None bookKey = None
for pid in pidlst: for pid in pidlst:
@ -297,7 +303,7 @@ class TopazBook:
rv = genbook.generateBook(self.outdir, raw, fixedimage) rv = genbook.generateBook(self.outdir, raw, fixedimage)
if rv == 0: if rv == 0:
print "\nBook Successfully generated" print "\nBook Successfully generated"
return rv return rv
def createBookDirectory(self): def createBookDirectory(self):
outdir = self.outdir outdir = self.outdir
@ -361,7 +367,7 @@ class TopazBook:
zipUpDir(svgzip, self.outdir, 'svg') zipUpDir(svgzip, self.outdir, 'svg')
zipUpDir(svgzip, self.outdir, 'img') zipUpDir(svgzip, self.outdir, 'img')
svgzip.close() svgzip.close()
def getXMLZip(self, zipname): def getXMLZip(self, zipname):
xmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False) xmlzip = zipfile.ZipFile(zipname,'w',zipfile.ZIP_DEFLATED, False)
targetdir = os.path.join(self.outdir,'xml') targetdir = os.path.join(self.outdir,'xml')
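getSVGZip and getXMLZip both open a compressed archive and hand it to a zipUpDir helper together with a subdirectory name. zipUpDir itself is not part of this hunk; the sketch below shows what such a helper typically does, walking one subdirectory of the work area and writing every file into the already-open archive under a path relative to the work area (zip_up_dir is an illustrative name, not the function in the file):

import os
import zipfile

def zip_up_dir(myzip, tdir, localname):
    # add every file under <tdir>/<localname> to an open ZipFile,
    # keeping archive names relative to <tdir>
    base = os.path.join(tdir, localname)
    for root, _, files in os.walk(base):
        for name in files:
            fpath = os.path.join(root, name)
            myzip.write(fpath, os.path.relpath(fpath, tdir))

# usage mirroring getSVGZip above:
# svgzip = zipfile.ZipFile(zipname, 'w', zipfile.ZIP_DEFLATED, False)
# zip_up_dir(svgzip, outdir, 'svg')
# zip_up_dir(svgzip, outdir, 'img')
# svgzip.close()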
@ -371,23 +377,23 @@ class TopazBook:
def cleanup(self): def cleanup(self):
if os.path.isdir(self.outdir): if os.path.isdir(self.outdir):
pass shutil.rmtree(self.outdir, True)
# shutil.rmtree(self.outdir, True)
def usage(progname): def usage(progname):
print "Removes DRM protection from Topaz ebooks and extract the contents" print "Removes DRM protection from Topaz ebooks and extract the contents"
print "Usage:" print "Usage:"
print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname print " %s [-k <kindle.info>] [-p <pidnums>] [-s <kindleSerialNumbers>] <infile> <outdir> " % progname
# Main # Main
def main(argv=sys.argv): def main(argv=sys.argv):
global buildXML
progname = os.path.basename(argv[0]) progname = os.path.basename(argv[0])
k4 = False k4 = False
pids = [] pids = []
serials = [] serials = []
kInfoFiles = [] kInfoFiles = []
try: try:
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:") opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
except getopt.GetoptError, err: except getopt.GetoptError, err:
@ -397,7 +403,7 @@ def main(argv=sys.argv):
if len(args)<2: if len(args)<2:
usage(progname) usage(progname)
return 1 return 1
for o, a in opts: for o, a in opts:
if o == "-k": if o == "-k":
if a == None : if a == None :
@ -429,7 +435,7 @@ def main(argv=sys.argv):
title = tb.getBookTitle() title = tb.getBookTitle()
print "Processing Book: ", title print "Processing Book: ", title
keysRecord, keysRecordRecord = tb.getPIDMetaInfo() keysRecord, keysRecordRecord = tb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles) pidlst = kgenpids.getPidList(keysRecord, keysRecordRecord, k4, pids, serials, kInfoFiles)
try: try:
print "Decrypting Book" print "Decrypting Book"
@ -443,9 +449,10 @@ def main(argv=sys.argv):
zipname = os.path.join(outdir, bookname + '_SVG' + '.zip') zipname = os.path.join(outdir, bookname + '_SVG' + '.zip')
tb.getSVGZip(zipname) tb.getSVGZip(zipname)
print " Creating XML ZIP Archive" if buildXML:
zipname = os.path.join(outdir, bookname + '_XML' + '.zip') print " Creating XML ZIP Archive"
tb.getXMLZip(zipname) zipname = os.path.join(outdir, bookname + '_XML' + '.zip')
tb.getXMLZip(zipname)
# removing internal temporary directory of pieces # removing internal temporary directory of pieces
tb.cleanup() tb.cleanup()
@ -461,9 +468,8 @@ def main(argv=sys.argv):
return 1 return 1
return 0 return 0
if __name__ == '__main__': if __name__ == '__main__':
sys.stdout=Unbuffered(sys.stdout) sys.stdout=Unbuffered(sys.stdout)
sys.exit(main()) sys.exit(main())

View file

@ -30,8 +30,8 @@ class fixZip:
self.inzip = zipfile.ZipFile(zinput,'r') self.inzip = zipfile.ZipFile(zinput,'r')
self.outzip = zipfile.ZipFile(zoutput,'w') self.outzip = zipfile.ZipFile(zoutput,'w')
# open the input zip for reading only as a raw file # open the input zip for reading only as a raw file
self.bzf = file(zinput,'rb') self.bzf = file(zinput,'rb')
def getlocalname(self, zi): def getlocalname(self, zi):
local_header_offset = zi.header_offset local_header_offset = zi.header_offset
self.bzf.seek(local_header_offset + _FILENAME_LEN_OFFSET) self.bzf.seek(local_header_offset + _FILENAME_LEN_OFFSET)
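getlocalname exists because the name recorded in a member's local file header can differ from the one in the central directory when an archive was written by a buggy tool, which is exactly the breakage this fixer repairs. In the ZIP local file header the two-byte name length sits 26 bytes after the header start and the name itself begins at byte 30, so _FILENAME_LEN_OFFSET is presumably 26. A standalone sketch of the same lookup (the constant names here are illustrative):

import struct
import zipfile

FILENAME_LEN_OFFSET = 26   # 2-byte little-endian name length in the local header
LOCAL_HEADER_SIZE = 30     # fixed part of the local header; the name follows it

def local_member_name(zip_path, member):
    # return the file name actually stored in the member's local header
    with zipfile.ZipFile(zip_path, 'r') as zf:
        offset = zf.getinfo(member).header_offset
    with open(zip_path, 'rb') as raw:
        raw.seek(offset + FILENAME_LEN_OFFSET)
        (name_len,) = struct.unpack('<H', raw.read(2))
        raw.seek(offset + LOCAL_HEADER_SIZE)
        return raw.read(name_len)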
@ -86,7 +86,7 @@ class fixZip:
return data return data
def fix(self): def fix(self):
# get the zipinfo for each member of the input archive # get the zipinfo for each member of the input archive
@ -103,7 +103,7 @@ class fixZip:
if zinfo.filename != "mimetype" or self.ztype == '.zip': if zinfo.filename != "mimetype" or self.ztype == '.zip':
data = None data = None
nzinfo = zinfo nzinfo = zinfo
try: try:
data = self.inzip.read(zinfo.filename) data = self.inzip.read(zinfo.filename)
except (zipfile.BadZipfile, zipfile.error): except (zipfile.BadZipfile, zipfile.error):
local_name = self.getlocalname(zinfo) local_name = self.getlocalname(zinfo)
@ -126,7 +126,7 @@ def usage():
inputzip is the source zipfile to fix inputzip is the source zipfile to fix
outputzip is the fixed zip archive outputzip is the fixed zip archive
""" """
def repairBook(infile, outfile): def repairBook(infile, outfile):
if not os.path.exists(infile): if not os.path.exists(infile):
@ -152,5 +152,3 @@ def main(argv=sys.argv):
if __name__ == '__main__' : if __name__ == '__main__' :
sys.exit(main()) sys.exit(main())

View file

@ -1,7 +1,7 @@
ReadMe_DeDRM_WinApp_vX.X ReadMe_DeDRM_vX.X_WinApp
----------------------- -----------------------
DeDRM_WinApp is a pure python drag and drop application that allows users to drag and drop ebooks or folders of ebooks onto theDeDRM_Drop_Target to have the DRM removed. It repackages the"tools" python software in one easy to use program. DeDRM_vX.X_WinApp is a pure python drag and drop application that allows users to drag and drop ebooks or folders of ebooks onto the DeDRM_Drop_Target to have the DRM removed. It repackages the "tools" python software in one easy-to-use program that remembers preferences and settings.
It should work out of the box with Kindle for PC ebooks and Adobe Adept epub and pdf ebooks. It should work out of the box with Kindle for PC ebooks and Adobe Adept epub and pdf ebooks.
@ -21,9 +21,9 @@ This program requires that the proper 32 bit version of Python 2.X (tested with
Installation Installation
------------ ------------
1. Download the latest DeDRM_WinApp_vx.x.zip and fully Extract its contents. 1. From tools_vX.X\DeDRM_Applications\, right-click on DeDRM_v_X.X_WinApp.zip and fully extract its contents.
2. Move the resulting DeDRM_WinApp_vX.X folder to whereever you keep you other programs. 2. Move the resulting DeDRM_vX.X_WinApp folder to wherever you keep your other programs.
(I typically use an "Applications" folder inside my home directory) (I typically use an "Applications" folder inside my home directory)
3. Open the folder, and create a short-cut to DeDRM_Drop_Target and move that short-cut to your Desktop. 3. Open the folder, and create a short-cut to DeDRM_Drop_Target and move that short-cut to your Desktop.
@ -33,19 +33,18 @@ Installation
If you already have a correct version of Python and PyCrypto installed and in your path, you are ready to go! If you already have a correct version of Python and PyCrypto installed and in your path, you are ready to go!
If not, see below.
If not, see where you can get these additional pieces.
Installing Python on Windows Installing Python on Windows
---------------------------- ----------------------------
I strongly recommend installing ActiveStates Active Python, Community Edition for Windows (x86) 32 bits. This is a free, full version of the Python. It comes with some important additional modules that are not included in the bare-bones version from www.python.org unless you choose to install everything. I strongly recommend fully installing ActiveState's ActivePython, the free Community Edition for Windows (x86) 32 bits. This is a free, full version of Python. It comes with some important additional modules that are not included in the bare-bones version from www.python.org unless you choose to install everything.
1. Download ActivePython 2.7.1 for Windows (x86) (or later 2.7 version for Windows (x86) ) from http://www.activestate.com/activepython/downloads. Do not download the ActivePython 2.7.1 for Windows (64-bit, x64) verson, even if you are running 64-bit Windows. 1. Download ActivePython 2.7.X for Windows (x86) (or a later 2.7 version for Windows (x86)) from http://www.activestate.com/activepython/downloads. Do not download the ActivePython 2.7.X for Windows (64-bit, x64) version, even if you are running 64-bit Windows.
2. When it has finished downloading, run the installer. Accept the default options. 2. When it has finished downloading, run the installer. Accept the default options.
Installing PyCrypto on Windows Installing PyCrypto on Windows
------------------------------ ------------------------------
PyCrypto is a set of encryption/decryption routines that work with Python. The sources are freely available, and compiled versions are available from several sources. You must install a version that is for 32-bit Windows and Python 2.7. I recommend the installer linked from Michael Foord's blog. PyCrypto is a set of encryption/decryption routines that work with Python. The sources are freely available, and compiled versions are available from several sources. You must install a version that is for 32-bit Windows and Python 2.7. I recommend the installer linked from Michael Foord's blog.

View file

@ -16,15 +16,15 @@
# ** NOTE: This program does NOT decrypt or modify Topaz files in any way. It simply identifies them. # ** NOTE: This program does NOT decrypt or modify Topaz files in any way. It simply identifies them.
# PLEASE DO NOT PIRATE EBOOKS! # PLEASE DO NOT PIRATE EBOOKS!
# We want all authors and publishers, and eBook stores to live # We want all authors and publishers, and eBook stores to live
# long and prosperous lives but at the same time we just want to # long and prosperous lives but at the same time we just want to
# be able to read OUR books on whatever device we want and to keep # be able to read OUR books on whatever device we want and to keep
# readable for a long, long time # readable for a long, long time
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle, # This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates # unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
# and many many others # and many many others
# Revision history: # Revision history:
@ -71,17 +71,17 @@ def cli_main(argv=sys.argv, obj=None):
if len(argv) != 2: if len(argv) != 2:
print "usage: %s DIRECTORY" % (progname,) print "usage: %s DIRECTORY" % (progname,)
return 1 return 1
if obj == None: if obj == None:
print "\nTopaz search results:\n" print "\nTopaz search results:\n"
else: else:
obj.stext.insert(Tkconstants.END,"Topaz search results:\n\n") obj.stext.insert(Tkconstants.END,"Topaz search results:\n\n")
inpath = argv[1] inpath = argv[1]
files = os.listdir(inpath) files = os.listdir(inpath)
filefilter = re.compile("(\.azw$)|(\.azw1$)|(\.prc$)|(\.tpz$)", re.IGNORECASE) filefilter = re.compile("(\.azw$)|(\.azw1$)|(\.prc$)|(\.tpz$)", re.IGNORECASE)
files = filter(filefilter.search, files) files = filter(filefilter.search, files)
if files: if files:
topazcount = 0 topazcount = 0
totalcount = 0 totalcount = 0
@ -136,14 +136,14 @@ def cli_main(argv=sys.argv, obj=None):
else: else:
msg = "No typical Topaz file extensions found in %s.\n\n" % inpath msg = "No typical Topaz file extensions found in %s.\n\n" % inpath
obj.stext.insert(Tkconstants.END,msg) obj.stext.insert(Tkconstants.END,msg)
return 0 return 0
class DecryptionDialog(Tkinter.Frame): class DecryptionDialog(Tkinter.Frame):
def __init__(self, root): def __init__(self, root):
Tkinter.Frame.__init__(self, root, border=5) Tkinter.Frame.__init__(self, root, border=5)
ltext='Search a directory for Topaz eBooks\n' ltext='Search a directory for Topaz eBooks\n'
self.status = Tkinter.Label(self, text=ltext) self.status = Tkinter.Label(self, text=ltext)
self.status.pack(fill=Tkconstants.X, expand=1) self.status.pack(fill=Tkconstants.X, expand=1)
body = Tkinter.Frame(self) body = Tkinter.Frame(self)
@ -162,7 +162,7 @@ class DecryptionDialog(Tkinter.Frame):
#self.stext.insert(Tkconstants.END,msg1) #self.stext.insert(Tkconstants.END,msg1)
buttons = Tkinter.Frame(self) buttons = Tkinter.Frame(self)
buttons.pack() buttons.pack()
self.botton = Tkinter.Button( self.botton = Tkinter.Button(
buttons, text="Search", width=10, command=self.search) buttons, text="Search", width=10, command=self.search)
@ -171,7 +171,7 @@ class DecryptionDialog(Tkinter.Frame):
self.button = Tkinter.Button( self.button = Tkinter.Button(
buttons, text="Quit", width=10, command=self.quit) buttons, text="Quit", width=10, command=self.quit)
self.button.pack(side=Tkconstants.RIGHT) self.button.pack(side=Tkconstants.RIGHT)
def get_inpath(self): def get_inpath(self):
cwd = os.getcwdu() cwd = os.getcwdu()
cwd = cwd.encode('utf-8') cwd = cwd.encode('utf-8')
@ -183,8 +183,8 @@ class DecryptionDialog(Tkinter.Frame):
self.inpath.delete(0, Tkconstants.END) self.inpath.delete(0, Tkconstants.END)
self.inpath.insert(0, inpath) self.inpath.insert(0, inpath)
return return
def search(self): def search(self):
inpath = self.inpath.get() inpath = self.inpath.get()
if not inpath or not os.path.exists(inpath): if not inpath or not os.path.exists(inpath):
@ -213,4 +213,4 @@ def gui_main():
if __name__ == '__main__': if __name__ == '__main__':
if len(sys.argv) > 1: if len(sys.argv) > 1:
sys.exit(cli_main()) sys.exit(cli_main())
sys.exit(gui_main()) sys.exit(gui_main())
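The finder above relies on the same two-stage test throughout: first narrow the directory listing down to the extensions Kindle and Topaz books normally use, then sniff the file contents to decide whether a candidate really is Topaz. The content check itself is outside this hunk; Topaz containers conventionally begin with the ASCII signature TPZ0, so a condensed sketch of the whole scan might look like this (the magic constant is an assumption, the extension filter is copied from the code above):

import os
import re

TOPAZ_MAGIC = b'TPZ0'   # assumed signature at the start of a Topaz container

def find_topaz_files(inpath):
    filefilter = re.compile(r'(\.azw$)|(\.azw1$)|(\.prc$)|(\.tpz$)', re.IGNORECASE)
    hits = []
    for name in filter(filefilter.search, os.listdir(inpath)):
        with open(os.path.join(inpath, name), 'rb') as f:
            if f.read(4) == TOPAZ_MAGIC:
                hits.append(name)
    return hits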

View file

@ -54,8 +54,9 @@
# 0.30 - Modified interface slightly to work better with new calibre plugin style # 0.30 - Modified interface slightly to work better with new calibre plugin style
# 0.31 - The multibyte encryption info is true for version 7 files too. # 0.31 - The multibyte encryption info is true for version 7 files too.
# 0.32 - Added support for "Print Replica" Kindle ebooks # 0.32 - Added support for "Print Replica" Kindle ebooks
# 0.33 - Performance improvements for large files (concatenation)
__version__ = '0.32' __version__ = '0.33'
import sys import sys
@ -383,7 +384,8 @@ class MobiBook:
# decrypt sections # decrypt sections
print "Decrypting. Please wait . . .", print "Decrypting. Please wait . . .",
self.mobi_data = self.data_file[:self.sections[1][0]] mobidataList = []
mobidataList.append(self.data_file[:self.sections[1][0]])
for i in xrange(1, self.records+1): for i in xrange(1, self.records+1):
data = self.loadSection(i) data = self.loadSection(i)
extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags) extra_size = getSizeOfTrailingDataEntries(data, len(data), self.extra_data_flags)
@ -393,11 +395,12 @@ class MobiBook:
decoded_data = PC1(found_key, data[0:len(data) - extra_size]) decoded_data = PC1(found_key, data[0:len(data) - extra_size])
if i==1: if i==1:
self.print_replica = (decoded_data[0:4] == '%MOP') self.print_replica = (decoded_data[0:4] == '%MOP')
self.mobi_data += decoded_data mobidataList.append(decoded_data)
if extra_size > 0: if extra_size > 0:
self.mobi_data += data[-extra_size:] mobidataList.append(data[-extra_size:])
if self.num_sections > self.records+1: if self.num_sections > self.records+1:
self.mobi_data += self.data_file[self.sections[self.records+1][0]:] mobidataList.append(self.data_file[self.sections[self.records+1][0]:])
self.mobi_data = "".join(mobidataList)
print "done" print "done"
return return
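The 0.33 change above is purely about how the decrypted sections are reassembled. Repeatedly extending self.mobi_data copies the whole accumulated string on every section, because the attribute keeps an extra reference alive and CPython cannot grow the string in place, so very large Print Replica files degrade towards quadratic time. Collecting the pieces in a list and joining once at the end copies each byte only once more. A small illustrative comparison (the class names are ours, and absolute timings will vary):

import time

class ConcatBuilder(object):
    # shape of the old code: self.mobi_data += decoded_data
    def build(self, chunks):
        self.data = ''
        for c in chunks:
            self.data += c
        return self.data

class JoinBuilder(object):
    # shape of the new code: mobidataList.append(...); ''.join(mobidataList)
    def build(self, chunks):
        parts = []
        for c in chunks:
            parts.append(c)
        return ''.join(parts)

if __name__ == '__main__':
    chunks = ['x' * 1024] * 5000   # keep this modest; the concat case is the slow one
    for builder in (JoinBuilder(), ConcatBuilder()):
        start = time.time()
        builder.build(chunks)
        print('%s: %.2fs' % (builder.__class__.__name__, time.time() - start))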

View file

@ -76,13 +76,13 @@ if sys.platform.startswith('win'):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key', AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
[c_char_p, c_int, AES_KEY_p]) [c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
@ -427,8 +427,8 @@ def extractKeyfile(keypath):
print "Key generation Error: " + str(e) print "Key generation Error: " + str(e)
return 1 return 1
except Exception, e: except Exception, e:
print "General Error: " + str(e) print "General Error: " + str(e)
return 1 return 1
if not success: if not success:
return 1 return 1
return 0 return 0
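The little F helper that appears in every one of these scripts only does one thing: it fetches a symbol from the already-loaded libcrypto and pins down its C return and argument types, so that ctypes marshals the arguments correctly instead of guessing. A trimmed sketch of the pattern for the two AES entry points used above; library discovery is simplified here (the real scripts probe several platform-specific names), and the data passed in must already be a multiple of the AES block size:

from ctypes import (CDLL, POINTER, Structure, c_char_p, c_int, c_long,
                    c_ulong, create_string_buffer)
from ctypes.util import find_library

libcrypto = CDLL(find_library('crypto'))     # assumes a system OpenSSL is present

AES_MAXNR = 14

class AES_KEY(Structure):
    _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
                ('rounds', c_int)]

AES_KEY_p = POINTER(AES_KEY)

def F(restype, name, argtypes):
    # declare one libcrypto function: fetch it, then fix its C signature
    func = getattr(libcrypto, name)
    func.restype = restype
    func.argtypes = argtypes
    return func

AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
                        [c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
                    [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])

def aes_cbc_decrypt(userkey, data, iv=b'\x00' * 16):
    # one-shot CBC decrypt, the same calls the wrapper classes above make
    key = AES_KEY()
    if AES_set_decrypt_key(userkey, len(userkey) * 8, key) < 0:
        raise RuntimeError('Failed to initialize AES key')
    out = create_string_buffer(len(data))
    AES_cbc_encrypt(data, out, len(data), key, iv, 0)   # final 0 selects decryption
    return out.raw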

View file

@ -4,7 +4,7 @@
from __future__ import with_statement from __future__ import with_statement
# To run this program install Python 2.6 from http://www.python.org/download/ # To run this program install Python 2.6 from http://www.python.org/download/
# and OpenSSL (already installed on Mac OS X and Linux) OR # and OpenSSL (already installed on Mac OS X and Linux) OR
# PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto # PyCrypto from http://www.voidspace.org.uk/python/modules.shtml#pycrypto
# (make sure to install the version for Python 2.6). Save this script file as # (make sure to install the version for Python 2.6). Save this script file as
# ineptpdf.pyw and double-click on it to run it. # ineptpdf.pyw and double-click on it to run it.
@ -83,7 +83,7 @@ def _load_crypto_libcrypto():
AES_MAXNR = 14 AES_MAXNR = 14
RSA_NO_PADDING = 3 RSA_NO_PADDING = 3
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -98,13 +98,13 @@ def _load_crypto_libcrypto():
class RSA(Structure): class RSA(Structure):
pass pass
RSA_p = POINTER(RSA) RSA_p = POINTER(RSA)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int]) AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p]) AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
@ -125,7 +125,7 @@ def _load_crypto_libcrypto():
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der)) rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
if rsa is None: if rsa is None:
raise ADEPTError('Error parsing ADEPT user key DER') raise ADEPTError('Error parsing ADEPT user key DER')
def decrypt(self, from_): def decrypt(self, from_):
rsa = self._rsa rsa = self._rsa
to = create_string_buffer(RSA_size(rsa)) to = create_string_buffer(RSA_size(rsa))
@ -134,7 +134,7 @@ def _load_crypto_libcrypto():
if dlen < 0: if dlen < 0:
raise ADEPTError('RSA decryption failed') raise ADEPTError('RSA decryption failed')
return to[1:dlen] return to[1:dlen]
def __del__(self): def __del__(self):
if self._rsa is not None: if self._rsa is not None:
RSA_free(self._rsa) RSA_free(self._rsa)
@ -196,13 +196,13 @@ def _load_crypto_pycrypto():
# ASN.1 parsing code from tlslite # ASN.1 parsing code from tlslite
class ASN1Error(Exception): class ASN1Error(Exception):
pass pass
class ASN1Parser(object): class ASN1Parser(object):
class Parser(object): class Parser(object):
def __init__(self, bytes): def __init__(self, bytes):
self.bytes = bytes self.bytes = bytes
self.index = 0 self.index = 0
def get(self, length): def get(self, length):
if self.index + length > len(self.bytes): if self.index + length > len(self.bytes):
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
@ -212,22 +212,22 @@ def _load_crypto_pycrypto():
x |= self.bytes[self.index] x |= self.bytes[self.index]
self.index += 1 self.index += 1
return x return x
def getFixBytes(self, lengthBytes): def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes] bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes self.index += lengthBytes
return bytes return bytes
def getVarBytes(self, lengthLength): def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength) lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes) return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList): def getFixList(self, length, lengthList):
l = [0] * lengthList l = [0] * lengthList
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def getVarList(self, length, lengthLength): def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength) lengthList = self.get(lengthLength)
if lengthList % length != 0: if lengthList % length != 0:
@ -237,19 +237,19 @@ def _load_crypto_pycrypto():
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def startLengthCheck(self, lengthLength): def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength) self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index self.indexCheck = self.index
def setLengthCheck(self, length): def setLengthCheck(self, length):
self.lengthCheck = length self.lengthCheck = length
self.indexCheck = self.index self.indexCheck = self.index
def stopLengthCheck(self): def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck: if (self.index - self.indexCheck) != self.lengthCheck:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def atLengthCheck(self): def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck: if (self.index - self.indexCheck) < self.lengthCheck:
return False return False
@ -257,13 +257,13 @@ def _load_crypto_pycrypto():
return True return True
else: else:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def __init__(self, bytes): def __init__(self, bytes):
p = self.Parser(bytes) p = self.Parser(bytes)
p.get(1) p.get(1)
self.length = self._getASN1Length(p) self.length = self._getASN1Length(p)
self.value = p.getFixBytes(self.length) self.value = p.getFixBytes(self.length)
def getChild(self, which): def getChild(self, which):
p = self.Parser(self.value) p = self.Parser(self.value)
for x in range(which+1): for x in range(which+1):
@ -272,7 +272,7 @@ def _load_crypto_pycrypto():
length = self._getASN1Length(p) length = self._getASN1Length(p)
p.getFixBytes(length) p.getFixBytes(length)
return ASN1Parser(p.bytes[markIndex:p.index]) return ASN1Parser(p.bytes[markIndex:p.index])
def _getASN1Length(self, p): def _getASN1Length(self, p):
firstLength = p.get(1) firstLength = p.get(1)
if firstLength<=127: if firstLength<=127:
@ -293,6 +293,7 @@ def _load_crypto_pycrypto():
return self._arc4.decrypt(data) return self._arc4.decrypt(data)
class AES(object): class AES(object):
MODE_CBC = _AES.MODE_CBC
@classmethod @classmethod
def new(cls, userkey, mode, iv): def new(cls, userkey, mode, iv):
self = AES() self = AES()
@ -315,7 +316,7 @@ def _load_crypto_pycrypto():
for byte in bytes: for byte in bytes:
total = (total << 8) + byte total = (total << 8) + byte
return total return total
def decrypt(self, data): def decrypt(self, data):
return self._rsa.decrypt(data) return self._rsa.decrypt(data)
@ -410,7 +411,7 @@ class PSLiteral(PSObject):
def __init__(self, name): def __init__(self, name):
self.name = name self.name = name
return return
def __repr__(self): def __repr__(self):
name = [] name = []
for char in self.name: for char in self.name:
@ -429,22 +430,22 @@ class PSKeyword(PSObject):
def __init__(self, name): def __init__(self, name):
self.name = name self.name = name
return return
def __repr__(self): def __repr__(self):
return self.name return self.name
# PSSymbolTable # PSSymbolTable
class PSSymbolTable(object): class PSSymbolTable(object):
''' '''
Symbol table that stores PSLiteral or PSKeyword. Symbol table that stores PSLiteral or PSKeyword.
''' '''
def __init__(self, classe): def __init__(self, classe):
self.dic = {} self.dic = {}
self.classe = classe self.classe = classe
return return
def intern(self, name): def intern(self, name):
if name in self.dic: if name in self.dic:
lit = self.dic[name] lit = self.dic[name]
@ -514,11 +515,11 @@ class PSBaseParser(object):
def flush(self): def flush(self):
return return
def close(self): def close(self):
self.flush() self.flush()
return return
def tell(self): def tell(self):
return self.bufpos+self.charpos return self.bufpos+self.charpos
@ -554,7 +555,7 @@ class PSBaseParser(object):
raise PSEOF('Unexpected EOF') raise PSEOF('Unexpected EOF')
self.charpos = 0 self.charpos = 0
return return
def parse_main(self, s, i): def parse_main(self, s, i):
m = NONSPC.search(s, i) m = NONSPC.search(s, i)
if not m: if not m:
@ -589,11 +590,11 @@ class PSBaseParser(object):
return (self.parse_wclose, j+1) return (self.parse_wclose, j+1)
self.add_token(KWD(c)) self.add_token(KWD(c))
return (self.parse_main, j+1) return (self.parse_main, j+1)
def add_token(self, obj): def add_token(self, obj):
self.tokens.append((self.tokenstart, obj)) self.tokens.append((self.tokenstart, obj))
return return
def parse_comment(self, s, i): def parse_comment(self, s, i):
m = EOL.search(s, i) m = EOL.search(s, i)
if not m: if not m:
@ -604,7 +605,7 @@ class PSBaseParser(object):
# We ignore comments. # We ignore comments.
#self.tokens.append(self.token) #self.tokens.append(self.token)
return (self.parse_main, j) return (self.parse_main, j)
def parse_literal(self, s, i): def parse_literal(self, s, i):
m = END_LITERAL.search(s, i) m = END_LITERAL.search(s, i)
if not m: if not m:
@ -618,7 +619,7 @@ class PSBaseParser(object):
return (self.parse_literal_hex, j+1) return (self.parse_literal_hex, j+1)
self.add_token(LIT(self.token)) self.add_token(LIT(self.token))
return (self.parse_main, j) return (self.parse_main, j)
def parse_literal_hex(self, s, i): def parse_literal_hex(self, s, i):
c = s[i] c = s[i]
if HEX.match(c) and len(self.hex) < 2: if HEX.match(c) and len(self.hex) < 2:
@ -653,7 +654,7 @@ class PSBaseParser(object):
self.token += s[i:j] self.token += s[i:j]
self.add_token(float(self.token)) self.add_token(float(self.token))
return (self.parse_main, j) return (self.parse_main, j)
def parse_keyword(self, s, i): def parse_keyword(self, s, i):
m = END_KEYWORD.search(s, i) m = END_KEYWORD.search(s, i)
if not m: if not m:
@ -801,7 +802,7 @@ class PSStackParser(PSBaseParser):
PSBaseParser.__init__(self, fp) PSBaseParser.__init__(self, fp)
self.reset() self.reset()
return return
def reset(self): def reset(self):
self.context = [] self.context = []
self.curtype = None self.curtype = None
@ -842,10 +843,10 @@ class PSStackParser(PSBaseParser):
def do_keyword(self, pos, token): def do_keyword(self, pos, token):
return return
def nextobject(self, direct=False): def nextobject(self, direct=False):
''' '''
Yields a list of objects: keywords, literals, strings, Yields a list of objects: keywords, literals, strings,
numbers, arrays and dictionaries. Arrays and dictionaries numbers, arrays and dictionaries. Arrays and dictionaries
are represented as Python sequence and dictionaries. are represented as Python sequence and dictionaries.
''' '''
@ -914,7 +915,7 @@ class PDFNotImplementedError(PSException): pass
## PDFObjRef ## PDFObjRef
## ##
class PDFObjRef(PDFObject): class PDFObjRef(PDFObject):
def __init__(self, doc, objid, genno): def __init__(self, doc, objid, genno):
if objid == 0: if objid == 0:
if STRICT: if STRICT:
@ -1029,25 +1030,25 @@ def stream_value(x):
# ascii85decode(data) # ascii85decode(data)
def ascii85decode(data): def ascii85decode(data):
n = b = 0 n = b = 0
out = '' out = ''
for c in data: for c in data:
if '!' <= c and c <= 'u': if '!' <= c and c <= 'u':
n += 1 n += 1
b = b*85+(ord(c)-33) b = b*85+(ord(c)-33)
if n == 5: if n == 5:
out += struct.pack('>L',b) out += struct.pack('>L',b)
n = b = 0 n = b = 0
elif c == 'z': elif c == 'z':
assert n == 0 assert n == 0
out += '\0\0\0\0' out += '\0\0\0\0'
elif c == '~': elif c == '~':
if n: if n:
for _ in range(5-n): for _ in range(5-n):
b = b*85+84 b = b*85+84
out += struct.pack('>L',b)[:n-1] out += struct.pack('>L',b)[:n-1]
break break
return out return out
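ascii85decode packs each run of five characters from '!' to 'u' into one 32-bit big-endian word, treats 'z' as shorthand for four zero bytes, and stops at '~', padding any short final group with 'u' (value 84) before truncating the packed bytes to the real length. A quick worked check, using the Python 2 string semantics this file targets:

# '9', 'j', 'q', 'o', '^' are the base-85 digits 24, 73, 80, 78, 61, so
#   24*85**4 + 73*85**3 + 80*85**2 + 78*85 + 61 == 1298230816
# and struct.pack('>L', 1298230816) == 'Man '
assert ascii85decode('9jqo^') == 'Man '
assert ascii85decode('zz') == '\x00' * 8     # each 'z' stands for four NUL bytes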
## PDFStream type ## PDFStream type
@ -1064,7 +1065,7 @@ class PDFStream(PDFObject):
else: else:
if eol in ('\r', '\n', '\r\n'): if eol in ('\r', '\n', '\r\n'):
rawdata = rawdata[:length] rawdata = rawdata[:length]
self.dic = dic self.dic = dic
self.rawdata = rawdata self.rawdata = rawdata
self.decipher = decipher self.decipher = decipher
@ -1078,7 +1079,7 @@ class PDFStream(PDFObject):
self.objid = objid self.objid = objid
self.genno = genno self.genno = genno
return return
def __repr__(self): def __repr__(self):
if self.rawdata: if self.rawdata:
return '<PDFStream(%r): raw=%d, %r>' % \ return '<PDFStream(%r): raw=%d, %r>' % \
@ -1162,7 +1163,7 @@ class PDFStream(PDFObject):
data = self.decipher(self.objid, self.genno, data) data = self.decipher(self.objid, self.genno, data)
return data return data
## PDF Exceptions ## PDF Exceptions
## ##
class PDFSyntaxError(PDFException): pass class PDFSyntaxError(PDFException): pass
@ -1227,7 +1228,7 @@ class PDFXRef(object):
self.offsets[objid] = (int(genno), int(pos)) self.offsets[objid] = (int(genno), int(pos))
self.load_trailer(parser) self.load_trailer(parser)
return return
KEYWORD_TRAILER = PSKeywordTable.intern('trailer') KEYWORD_TRAILER = PSKeywordTable.intern('trailer')
def load_trailer(self, parser): def load_trailer(self, parser):
try: try:
@ -1268,7 +1269,7 @@ class PDFXRefStream(object):
for first, size in self.index: for first, size in self.index:
for objid in xrange(first, first + size): for objid in xrange(first, first + size):
yield objid yield objid
def load(self, parser, debug=0): def load(self, parser, debug=0):
(_,objid) = parser.nexttoken() # ignored (_,objid) = parser.nexttoken() # ignored
(_,genno) = parser.nexttoken() # ignored (_,genno) = parser.nexttoken() # ignored
@ -1286,7 +1287,7 @@ class PDFXRefStream(object):
self.entlen = self.fl1+self.fl2+self.fl3 self.entlen = self.fl1+self.fl2+self.fl3
self.trailer = stream.dic self.trailer = stream.dic
return return
def getpos(self, objid): def getpos(self, objid):
offset = 0 offset = 0
for first, size in self.index: for first, size in self.index:
@ -1337,7 +1338,7 @@ class PDFDocument(object):
self.parser = parser self.parser = parser
# The document is set to be temporarily ready during collecting # The document is set to be temporarily ready during collecting
# all the basic information about the document, e.g. # all the basic information about the document, e.g.
# the header, the encryption information, and the access rights # the header, the encryption information, and the access rights
# for the document. # for the document.
self.ready = True self.ready = True
# Retrieve the information of each header that was appended # Retrieve the information of each header that was appended
@ -1413,7 +1414,7 @@ class PDFDocument(object):
length = int_value(param.get('Length', 0)) / 8 length = int_value(param.get('Length', 0)) / 8
edcdata = str_value(param.get('EDCData')).decode('base64') edcdata = str_value(param.get('EDCData')).decode('base64')
pdrllic = str_value(param.get('PDRLLic')).decode('base64') pdrllic = str_value(param.get('PDRLLic')).decode('base64')
pdrlpol = str_value(param.get('PDRLPol')).decode('base64') pdrlpol = str_value(param.get('PDRLPol')).decode('base64')
edclist = [] edclist = []
for pair in edcdata.split('\n'): for pair in edcdata.split('\n'):
edclist.append(pair) edclist.append(pair)
@ -1433,9 +1434,9 @@ class PDFDocument(object):
raise ADEPTError('Could not decrypt PDRLPol, aborting ...') raise ADEPTError('Could not decrypt PDRLPol, aborting ...')
else: else:
cutter = -1 * ord(pdrlpol[-1]) cutter = -1 * ord(pdrlpol[-1])
pdrlpol = pdrlpol[:cutter] pdrlpol = pdrlpol[:cutter]
return plaintext[:16] return plaintext[:16]
PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \ PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..' \
'\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz' '\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
# experimental aes pw support # experimental aes pw support
@ -1455,14 +1456,14 @@ class PDFDocument(object):
EncMetadata = str_value(param['EncryptMetadata']) EncMetadata = str_value(param['EncryptMetadata'])
except: except:
EncMetadata = 'True' EncMetadata = 'True'
self.is_printable = bool(P & 4) self.is_printable = bool(P & 4)
self.is_modifiable = bool(P & 8) self.is_modifiable = bool(P & 8)
self.is_extractable = bool(P & 16) self.is_extractable = bool(P & 16)
self.is_annotationable = bool(P & 32) self.is_annotationable = bool(P & 32)
self.is_formsenabled = bool(P & 256) self.is_formsenabled = bool(P & 256)
self.is_textextractable = bool(P & 512) self.is_textextractable = bool(P & 512)
self.is_assemblable = bool(P & 1024) self.is_assemblable = bool(P & 1024)
self.is_formprintable = bool(P & 2048) self.is_formprintable = bool(P & 2048)
# Algorithm 3.2 # Algorithm 3.2
password = (password+self.PASSWORD_PADDING)[:32] # 1 password = (password+self.PASSWORD_PADDING)[:32] # 1
hash = hashlib.md5(password) # 2 hash = hashlib.md5(password) # 2
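The is_* assignments a few lines up are just reading individual bits out of the /P permissions integer from the encryption dictionary. A compact way to see which rights a given P value grants, using the same masks (the helper and its table are illustrative, not code from the file):

PERMISSION_BITS = {
    4: 'printable', 8: 'modifiable', 16: 'extractable', 32: 'annotationable',
    256: 'formsenabled', 512: 'textextractable', 1024: 'assemblable',
    2048: 'formprintable',
}

def decode_permissions(P):
    return [name for bit, name in sorted(PERMISSION_BITS.items()) if P & bit]

# decode_permissions(4 | 16 | 512) -> ['printable', 'extractable', 'textextractable']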
@ -1537,10 +1538,10 @@ class PDFDocument(object):
if length > 0: if length > 0:
if len(bookkey) == length: if len(bookkey) == length:
if ebx_V == 3: if ebx_V == 3:
V = 3 V = 3
else: else:
V = 2 V = 2
elif len(bookkey) == length + 1: elif len(bookkey) == length + 1:
V = ord(bookkey[0]) V = ord(bookkey[0])
bookkey = bookkey[1:] bookkey = bookkey[1:]
else: else:
@ -1554,7 +1555,7 @@ class PDFDocument(object):
print "length is %d and len(bookkey) is %d" % (length, len(bookkey)) print "length is %d and len(bookkey) is %d" % (length, len(bookkey))
print "bookkey[0] is %d" % ord(bookkey[0]) print "bookkey[0] is %d" % ord(bookkey[0])
if ebx_V == 3: if ebx_V == 3:
V = 3 V = 3
else: else:
V = 2 V = 2
self.decrypt_key = bookkey self.decrypt_key = bookkey
@ -1571,7 +1572,7 @@ class PDFDocument(object):
hash = hashlib.md5(key) hash = hashlib.md5(key)
key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)] key = hash.digest()[:min(len(self.decrypt_key) + 5, 16)]
return key return key
def genkey_v3(self, objid, genno): def genkey_v3(self, objid, genno):
objid = struct.pack('<L', objid ^ 0x3569ac) objid = struct.pack('<L', objid ^ 0x3569ac)
genno = struct.pack('<L', genno ^ 0xca96) genno = struct.pack('<L', genno ^ 0xca96)
@ -1611,14 +1612,14 @@ class PDFDocument(object):
#print cutter #print cutter
plaintext = plaintext[:cutter] plaintext = plaintext[:cutter]
return plaintext return plaintext
def decrypt_rc4(self, objid, genno, data): def decrypt_rc4(self, objid, genno, data):
key = self.genkey(objid, genno) key = self.genkey(objid, genno)
return ARC4.new(key).decrypt(data) return ARC4.new(key).decrypt(data)
KEYWORD_OBJ = PSKeywordTable.intern('obj') KEYWORD_OBJ = PSKeywordTable.intern('obj')
def getobj(self, objid): def getobj(self, objid):
if not self.ready: if not self.ready:
raise PDFException('PDFDocument not initialized') raise PDFException('PDFDocument not initialized')
@ -1688,7 +1689,7 @@ class PDFDocument(object):
## if x: ## if x:
## objid1 = x[-2] ## objid1 = x[-2]
## genno = x[-1] ## genno = x[-1]
## ##
if kwd is not self.KEYWORD_OBJ: if kwd is not self.KEYWORD_OBJ:
raise PDFSyntaxError( raise PDFSyntaxError(
'Invalid object spec: offset=%r' % index) 'Invalid object spec: offset=%r' % index)
@ -1700,7 +1701,7 @@ class PDFDocument(object):
self.objs[objid] = obj self.objs[objid] = obj
return obj return obj
class PDFObjStmRef(object): class PDFObjStmRef(object):
maxindex = 0 maxindex = 0
def __init__(self, objid, stmid, index): def __init__(self, objid, stmid, index):
@ -1710,7 +1711,7 @@ class PDFObjStmRef(object):
if index > PDFObjStmRef.maxindex: if index > PDFObjStmRef.maxindex:
PDFObjStmRef.maxindex = index PDFObjStmRef.maxindex = index
## PDFParser ## PDFParser
## ##
class PDFParser(PSStackParser): class PDFParser(PSStackParser):
@ -1736,7 +1737,7 @@ class PDFParser(PSStackParser):
if token is self.KEYWORD_ENDOBJ: if token is self.KEYWORD_ENDOBJ:
self.add_results(*self.pop(4)) self.add_results(*self.pop(4))
return return
if token is self.KEYWORD_R: if token is self.KEYWORD_R:
# reference to indirect object # reference to indirect object
try: try:
@ -1747,7 +1748,7 @@ class PDFParser(PSStackParser):
except PSSyntaxError: except PSSyntaxError:
pass pass
return return
if token is self.KEYWORD_STREAM: if token is self.KEYWORD_STREAM:
# stream object # stream object
((_,dic),) = self.pop(1) ((_,dic),) = self.pop(1)
@ -1787,7 +1788,7 @@ class PDFParser(PSStackParser):
obj = PDFStream(dic, data, self.doc.decipher) obj = PDFStream(dic, data, self.doc.decipher)
self.push((pos, obj)) self.push((pos, obj))
return return
# others # others
self.push((pos, token)) self.push((pos, token))
return return
@ -1823,7 +1824,7 @@ class PDFParser(PSStackParser):
xref.load(self) xref.load(self)
else: else:
if token is not self.KEYWORD_XREF: if token is not self.KEYWORD_XREF:
raise PDFNoValidXRef('xref not found: pos=%d, token=%r' % raise PDFNoValidXRef('xref not found: pos=%d, token=%r' %
(pos, token)) (pos, token))
self.nextline() self.nextline()
xref = PDFXRef() xref = PDFXRef()
@ -1838,7 +1839,7 @@ class PDFParser(PSStackParser):
pos = int_value(trailer['Prev']) pos = int_value(trailer['Prev'])
self.read_xref_from(pos, xrefs) self.read_xref_from(pos, xrefs)
return return
# read xref tables and trailers # read xref tables and trailers
def read_xref(self): def read_xref(self):
xrefs = [] xrefs = []
@ -1957,7 +1958,7 @@ class PDFSerializer(object):
self.write("%010d 00000 n \n" % xrefs[objid][0]) self.write("%010d 00000 n \n" % xrefs[objid][0])
else: else:
self.write("%010d %05d f \n" % (0, 65535)) self.write("%010d %05d f \n" % (0, 65535))
self.write('trailer\n') self.write('trailer\n')
self.serialize_object(trailer) self.serialize_object(trailer)
self.write('\nstartxref\n%d\n%%%%EOF' % startxref) self.write('\nstartxref\n%d\n%%%%EOF' % startxref)
@ -1977,7 +1978,7 @@ class PDFSerializer(object):
while maxindex >= power: while maxindex >= power:
fl3 += 1 fl3 += 1
power *= 256 power *= 256
index = [] index = []
first = None first = None
prev = None prev = None
@ -2004,14 +2005,14 @@ class PDFSerializer(object):
# we force all generation numbers to be 0 # we force all generation numbers to be 0
# f3 = objref[1] # f3 = objref[1]
f3 = 0 f3 = 0
data.append(struct.pack('>B', f1)) data.append(struct.pack('>B', f1))
data.append(struct.pack('>L', f2)[-fl2:]) data.append(struct.pack('>L', f2)[-fl2:])
data.append(struct.pack('>L', f3)[-fl3:]) data.append(struct.pack('>L', f3)[-fl3:])
index.extend((first, prev - first + 1)) index.extend((first, prev - first + 1))
data = zlib.compress(''.join(data)) data = zlib.compress(''.join(data))
dic = {'Type': LITERAL_XREF, 'Size': prev + 1, 'Index': index, dic = {'Type': LITERAL_XREF, 'Size': prev + 1, 'Index': index,
'W': [1, fl2, fl3], 'Length': len(data), 'W': [1, fl2, fl3], 'Length': len(data),
'Filter': LITERALS_FLATE_DECODE[0], 'Filter': LITERALS_FLATE_DECODE[0],
'Root': trailer['Root'],} 'Root': trailer['Root'],}
if 'Info' in trailer: if 'Info' in trailer:
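Each entry of the rebuilt cross-reference stream is written exactly as wide as the /W array advertises: one byte for the entry type, then the second and third fields truncated to fl2 and fl3 bytes, big-endian, which is why the code above slices struct.pack('>L', ...) from the right. A small sketch of that packing (the helper name and the sample widths are illustrative):

import struct

def pack_xref_entry(entry_type, field2, field3, fl2, fl3):
    # one xref stream entry for a /W array of [1, fl2, fl3]
    return (struct.pack('>B', entry_type)
            + struct.pack('>L', field2)[-fl2:]
            + struct.pack('>L', field3)[-fl3:])

# a type-1 (in-use) entry for an object at byte offset 0x1234 with the
# generation forced to 0, packed with fl2=3 and fl3=1:
assert pack_xref_entry(1, 0x1234, 0, 3, 1) == b'\x01\x00\x12\x34\x00'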
@ -2033,9 +2034,9 @@ class PDFSerializer(object):
string = string.replace(')', r'\)') string = string.replace(')', r'\)')
# get rid of ciando id # get rid of ciando id
regularexp = re.compile(r'http://www.ciando.com/index.cfm/intRefererID/\d{5}') regularexp = re.compile(r'http://www.ciando.com/index.cfm/intRefererID/\d{5}')
if regularexp.match(string): return ('http://www.ciando.com') if regularexp.match(string): return ('http://www.ciando.com')
return string return string
def serialize_object(self, obj): def serialize_object(self, obj):
if isinstance(obj, dict): if isinstance(obj, dict):
# Correct malformed Mac OS resource forks for Stanza # Correct malformed Mac OS resource forks for Stanza
@ -2059,21 +2060,21 @@ class PDFSerializer(object):
elif isinstance(obj, bool): elif isinstance(obj, bool):
if self.last.isalnum(): if self.last.isalnum():
self.write(' ') self.write(' ')
self.write(str(obj).lower()) self.write(str(obj).lower())
elif isinstance(obj, (int, long, float)): elif isinstance(obj, (int, long, float)):
if self.last.isalnum(): if self.last.isalnum():
self.write(' ') self.write(' ')
self.write(str(obj)) self.write(str(obj))
elif isinstance(obj, PDFObjRef): elif isinstance(obj, PDFObjRef):
if self.last.isalnum(): if self.last.isalnum():
self.write(' ') self.write(' ')
self.write('%d %d R' % (obj.objid, 0)) self.write('%d %d R' % (obj.objid, 0))
elif isinstance(obj, PDFStream): elif isinstance(obj, PDFStream):
### If we don't generate cross ref streams the object streams ### If we don't generate cross ref streams the object streams
### are no longer useful, as we have extracted all objects from ### are no longer useful, as we have extracted all objects from
### them. Therefore leave them out from the output. ### them. Therefore leave them out from the output.
if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm: if obj.dic.get('Type') == LITERAL_OBJSTM and not gen_xref_stm:
self.write('(deleted)') self.write('(deleted)')
else: else:
data = obj.get_decdata() data = obj.get_decdata()
self.serialize_object(obj.dic) self.serialize_object(obj.dic)
@ -2085,7 +2086,7 @@ class PDFSerializer(object):
if data[0].isalnum() and self.last.isalnum(): if data[0].isalnum() and self.last.isalnum():
self.write(' ') self.write(' ')
self.write(data) self.write(data)
def serialize_indirect(self, objid, obj): def serialize_indirect(self, objid, obj):
self.write('%d 0 obj' % (objid,)) self.write('%d 0 obj' % (objid,))
self.serialize_object(obj) self.serialize_object(obj)
@ -2097,7 +2098,7 @@ class PDFSerializer(object):
class DecryptionDialog(Tkinter.Frame): class DecryptionDialog(Tkinter.Frame):
def __init__(self, root): def __init__(self, root):
Tkinter.Frame.__init__(self, root, border=5) Tkinter.Frame.__init__(self, root, border=5)
ltext='Select file for decryption\n' ltext='Select file for decryption\n'
self.status = Tkinter.Label(self, text=ltext) self.status = Tkinter.Label(self, text=ltext)
self.status.pack(fill=Tkconstants.X, expand=1) self.status.pack(fill=Tkconstants.X, expand=1)
body = Tkinter.Frame(self) body = Tkinter.Frame(self)
@ -2123,7 +2124,7 @@ class DecryptionDialog(Tkinter.Frame):
button.grid(row=2, column=2) button.grid(row=2, column=2)
buttons = Tkinter.Frame(self) buttons = Tkinter.Frame(self)
buttons.pack() buttons.pack()
botton = Tkinter.Button( botton = Tkinter.Button(
buttons, text="Decrypt", width=10, command=self.decrypt) buttons, text="Decrypt", width=10, command=self.decrypt)
@ -2132,7 +2133,7 @@ class DecryptionDialog(Tkinter.Frame):
button = Tkinter.Button( button = Tkinter.Button(
buttons, text="Quit", width=10, command=self.quit) buttons, text="Quit", width=10, command=self.quit)
button.pack(side=Tkconstants.RIGHT) button.pack(side=Tkconstants.RIGHT)
def get_keypath(self): def get_keypath(self):
keypath = tkFileDialog.askopenfilename( keypath = tkFileDialog.askopenfilename(

View file

@ -67,25 +67,25 @@ def _load_crypto_libcrypto():
RSA_NO_PADDING = 3 RSA_NO_PADDING = 3
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
class RSA(Structure): class RSA(Structure):
pass pass
RSA_p = POINTER(RSA) RSA_p = POINTER(RSA)
class AES_KEY(Structure): class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey', d2i_RSAPrivateKey = F(RSA_p, 'd2i_RSAPrivateKey',
[RSA_p, c_char_pp, c_long]) [RSA_p, c_char_pp, c_long])
RSA_size = F(c_int, 'RSA_size', [RSA_p]) RSA_size = F(c_int, 'RSA_size', [RSA_p])
@ -97,7 +97,7 @@ def _load_crypto_libcrypto():
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
class RSA(object): class RSA(object):
def __init__(self, der): def __init__(self, der):
buf = create_string_buffer(der) buf = create_string_buffer(der)
@ -105,7 +105,7 @@ def _load_crypto_libcrypto():
rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der)) rsa = self._rsa = d2i_RSAPrivateKey(None, pp, len(der))
if rsa is None: if rsa is None:
raise ADEPTError('Error parsing ADEPT user key DER') raise ADEPTError('Error parsing ADEPT user key DER')
def decrypt(self, from_): def decrypt(self, from_):
rsa = self._rsa rsa = self._rsa
to = create_string_buffer(RSA_size(rsa)) to = create_string_buffer(RSA_size(rsa))
@ -114,7 +114,7 @@ def _load_crypto_libcrypto():
if dlen < 0: if dlen < 0:
raise ADEPTError('RSA decryption failed') raise ADEPTError('RSA decryption failed')
return to[:dlen] return to[:dlen]
def __del__(self): def __del__(self):
if self._rsa is not None: if self._rsa is not None:
RSA_free(self._rsa) RSA_free(self._rsa)
@ -130,7 +130,7 @@ def _load_crypto_libcrypto():
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key) rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
if rv < 0: if rv < 0:
raise ADEPTError('Failed to initialize AES key') raise ADEPTError('Failed to initialize AES key')
def decrypt(self, data): def decrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
iv = ("\x00" * self._blocksize) iv = ("\x00" * self._blocksize)
@ -148,13 +148,13 @@ def _load_crypto_pycrypto():
# ASN.1 parsing code from tlslite # ASN.1 parsing code from tlslite
class ASN1Error(Exception): class ASN1Error(Exception):
pass pass
class ASN1Parser(object): class ASN1Parser(object):
class Parser(object): class Parser(object):
def __init__(self, bytes): def __init__(self, bytes):
self.bytes = bytes self.bytes = bytes
self.index = 0 self.index = 0
def get(self, length): def get(self, length):
if self.index + length > len(self.bytes): if self.index + length > len(self.bytes):
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
@ -164,22 +164,22 @@ def _load_crypto_pycrypto():
x |= self.bytes[self.index] x |= self.bytes[self.index]
self.index += 1 self.index += 1
return x return x
def getFixBytes(self, lengthBytes): def getFixBytes(self, lengthBytes):
bytes = self.bytes[self.index : self.index+lengthBytes] bytes = self.bytes[self.index : self.index+lengthBytes]
self.index += lengthBytes self.index += lengthBytes
return bytes return bytes
def getVarBytes(self, lengthLength): def getVarBytes(self, lengthLength):
lengthBytes = self.get(lengthLength) lengthBytes = self.get(lengthLength)
return self.getFixBytes(lengthBytes) return self.getFixBytes(lengthBytes)
def getFixList(self, length, lengthList): def getFixList(self, length, lengthList):
l = [0] * lengthList l = [0] * lengthList
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def getVarList(self, length, lengthLength): def getVarList(self, length, lengthLength):
lengthList = self.get(lengthLength) lengthList = self.get(lengthLength)
if lengthList % length != 0: if lengthList % length != 0:
@ -189,19 +189,19 @@ def _load_crypto_pycrypto():
for x in range(lengthList): for x in range(lengthList):
l[x] = self.get(length) l[x] = self.get(length)
return l return l
def startLengthCheck(self, lengthLength): def startLengthCheck(self, lengthLength):
self.lengthCheck = self.get(lengthLength) self.lengthCheck = self.get(lengthLength)
self.indexCheck = self.index self.indexCheck = self.index
def setLengthCheck(self, length): def setLengthCheck(self, length):
self.lengthCheck = length self.lengthCheck = length
self.indexCheck = self.index self.indexCheck = self.index
def stopLengthCheck(self): def stopLengthCheck(self):
if (self.index - self.indexCheck) != self.lengthCheck: if (self.index - self.indexCheck) != self.lengthCheck:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def atLengthCheck(self): def atLengthCheck(self):
if (self.index - self.indexCheck) < self.lengthCheck: if (self.index - self.indexCheck) < self.lengthCheck:
return False return False
@ -209,13 +209,13 @@ def _load_crypto_pycrypto():
return True return True
else: else:
raise ASN1Error("Error decoding ASN.1") raise ASN1Error("Error decoding ASN.1")
def __init__(self, bytes): def __init__(self, bytes):
p = self.Parser(bytes) p = self.Parser(bytes)
p.get(1) p.get(1)
self.length = self._getASN1Length(p) self.length = self._getASN1Length(p)
self.value = p.getFixBytes(self.length) self.value = p.getFixBytes(self.length)
def getChild(self, which): def getChild(self, which):
p = self.Parser(self.value) p = self.Parser(self.value)
for x in range(which+1): for x in range(which+1):
@ -224,7 +224,7 @@ def _load_crypto_pycrypto():
length = self._getASN1Length(p) length = self._getASN1Length(p)
p.getFixBytes(length) p.getFixBytes(length)
return ASN1Parser(p.bytes[markIndex:p.index]) return ASN1Parser(p.bytes[markIndex:p.index])
def _getASN1Length(self, p): def _getASN1Length(self, p):
firstLength = p.get(1) firstLength = p.get(1)
if firstLength<=127: if firstLength<=127:
@ -252,7 +252,7 @@ def _load_crypto_pycrypto():
for byte in bytes: for byte in bytes:
total = (total << 8) + byte total = (total << 8) + byte
return total return total
def decrypt(self, data): def decrypt(self, data):
return self._rsa.decrypt(data) return self._rsa.decrypt(data)
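The ASN1Parser above is only ever pointed at one structure: the ADEPT user key, a PKCS#1 RSAPrivateKey in DER, which is a SEQUENCE of version, modulus, public exponent, private exponent and further CRT values. Children 1 to 3 of the outer sequence are therefore enough to build a working key object. The constructor elided between these hunks most likely does something close to the sketch below; treat it as a reconstruction of the usual pattern in these scripts, not as the literal file contents:

from Crypto.PublicKey import RSA as _RSA

def bytes_to_number(byte_seq):
    # same big-endian fold as the bytesToNumber helper above
    total = 0
    for b in byte_seq:
        total = (total << 8) + b
    return total

def rsa_from_der(der_bytes):
    # uses the ASN1Parser class defined above
    key = ASN1Parser(bytearray(der_bytes))
    n = bytes_to_number(key.getChild(1).value)   # modulus
    e = bytes_to_number(key.getChild(2).value)   # public exponent
    d = bytes_to_number(key.getChild(3).value)   # private exponent
    return _RSA.construct((n, e, d))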

View file

@ -76,13 +76,13 @@ if sys.platform.startswith('win'):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key', AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',
[c_char_p, c_int, AES_KEY_p]) [c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
@ -427,8 +427,8 @@ def extractKeyfile(keypath):
print "Key generation Error: " + str(e) print "Key generation Error: " + str(e)
return 1 return 1
except Exception, e: except Exception, e:
print "General Error: " + str(e) print "General Error: " + str(e)
return 1 return 1
if not success: if not success:
return 1 return 1
return 0 return 0

View file

@ -14,7 +14,7 @@ from __future__ import with_statement
# 2 - Added OS X support by using OpenSSL when available # 2 - Added OS X support by using OpenSSL when available
# 3 - screen out improper key lengths to prevent segfaults on Linux # 3 - screen out improper key lengths to prevent segfaults on Linux
# 3.1 - Allow Windows versions of libcrypto to be found # 3.1 - Allow Windows versions of libcrypto to be found
# 3.2 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml # 3.2 - add support for encoding to 'utf-8' when building up list of files to decrypt from encryption.xml
# 3.3 - On Windows try PyCrypto first and OpenSSL next # 3.3 - On Windows try PyCrypto first and OpenSSL next
# 3.4 - Modify interface to allow use with import # 3.4 - Modify interface to allow use with import
@ -50,7 +50,7 @@ def _load_crypto_libcrypto():
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -58,13 +58,13 @@ def _load_crypto_libcrypto():
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
@ -73,7 +73,7 @@ def _load_crypto_libcrypto():
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
class AES(object): class AES(object):
def __init__(self, userkey): def __init__(self, userkey):
self._blocksize = len(userkey) self._blocksize = len(userkey)
@ -84,7 +84,7 @@ def _load_crypto_libcrypto():
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key) rv = AES_set_decrypt_key(userkey, len(userkey) * 8, key)
if rv < 0: if rv < 0:
raise IGNOBLEError('Failed to initialize AES key') raise IGNOBLEError('Failed to initialize AES key')
def decrypt(self, data): def decrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
iv = ("\x00" * self._blocksize) iv = ("\x00" * self._blocksize)
@ -122,7 +122,7 @@ def _load_crypto():
AES = _load_crypto() AES = _load_crypto()
""" """
Decrypt Barnes & Noble ADEPT encrypted EPUB books. Decrypt Barnes & Noble ADEPT encrypted EPUB books.

View file

@ -53,7 +53,7 @@ def _load_crypto_libcrypto():
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -61,28 +61,28 @@ def _load_crypto_libcrypto():
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))),
('rounds', c_int)] ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key', AES_set_encrypt_key = F(c_int, 'AES_set_encrypt_key',
[c_char_p, c_int, AES_KEY_p]) [c_char_p, c_int, AES_KEY_p])
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt', AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',
[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, [c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,
c_int]) c_int])
class AES(object): class AES(object):
def __init__(self, userkey, iv): def __init__(self, userkey, iv):
self._blocksize = len(userkey) self._blocksize = len(userkey)
self._iv = iv self._iv = iv
key = self._key = AES_KEY() key = self._key = AES_KEY()
rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key) rv = AES_set_encrypt_key(userkey, len(userkey) * 8, key)
if rv < 0: if rv < 0:
raise IGNOBLEError('Failed to initialize AES Encrypt key') raise IGNOBLEError('Failed to initialize AES Encrypt key')
def encrypt(self, data): def encrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1) rv = AES_cbc_encrypt(data, out, len(data), self._key, self._iv, 1)
if rv == 0: if rv == 0:

View file

@ -0,0 +1,568 @@
#! /usr/bin/env python
"""
Routines for doing AES CBC in one file
Modified by some_updates to extract
and combine only those parts needed for AES CBC
into one simple to add python file
Original Version
Copyright (c) 2002 by Paul A. Lambert
Under:
CryptoPy Artistic License Version 1.0
See the wonderful pure python package cryptopy-1.2.5
and read its LICENSE.txt for complete license details.
"""
class CryptoError(Exception):
""" Base class for crypto exceptions """
def __init__(self,errorMessage='Error!'):
self.message = errorMessage
def __str__(self):
return self.message
class InitCryptoError(CryptoError):
""" Crypto errors during algorithm initialization """
class BadKeySizeError(InitCryptoError):
""" Bad key size error """
class EncryptError(CryptoError):
""" Error in encryption processing """
class DecryptError(CryptoError):
""" Error in decryption processing """
class DecryptNotBlockAlignedError(DecryptError):
""" Error in decryption processing """
def xorS(a,b):
""" XOR two strings """
assert len(a)==len(b)
x = []
for i in range(len(a)):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x)
def xor(a,b):
""" XOR two strings """
x = []
for i in range(min(len(a),len(b))):
x.append( chr(ord(a[i])^ord(b[i])))
return ''.join(x)
"""
Base 'BlockCipher' and Pad classes for cipher instances.
BlockCipher supports automatic padding and type conversion. The BlockCipher
class was written to make the actual algorithm code more readable and
not for performance.
"""
class BlockCipher:
""" Block ciphers """
def __init__(self):
self.reset()
def reset(self):
self.resetEncrypt()
self.resetDecrypt()
def resetEncrypt(self):
self.encryptBlockCount = 0
self.bytesToEncrypt = ''
def resetDecrypt(self):
self.decryptBlockCount = 0
self.bytesToDecrypt = ''
def encrypt(self, plainText, more = None):
""" Encrypt a string and return a binary string """
self.bytesToEncrypt += plainText # append plainText to any bytes from prior encrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToEncrypt), self.blockSize)
cipherText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ctBlock = self.encryptBlock(self.bytesToEncrypt[bStart:bStart+self.blockSize])
self.encryptBlockCount += 1
cipherText += ctBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
self.bytesToEncrypt = self.bytesToEncrypt[-numExtraBytes:]
else:
self.bytesToEncrypt = ''
if more == None: # no more data expected from caller
finalBytes = self.padding.addPad(self.bytesToEncrypt,self.blockSize)
if len(finalBytes) > 0:
ctBlock = self.encryptBlock(finalBytes)
self.encryptBlockCount += 1
cipherText += ctBlock
self.resetEncrypt()
return cipherText
def decrypt(self, cipherText, more = None):
""" Decrypt a string and return a string """
self.bytesToDecrypt += cipherText # append to any bytes from prior decrypt
numBlocks, numExtraBytes = divmod(len(self.bytesToDecrypt), self.blockSize)
if more == None: # no more calls to decrypt, should have all the data
if numExtraBytes != 0:
raise DecryptNotBlockAlignedError, 'Data not block aligned on decrypt'
# hold back some bytes in case last decrypt has zero len
if (more != None) and (numExtraBytes == 0) and (numBlocks >0) :
numBlocks -= 1
numExtraBytes = self.blockSize
plainText = ''
for i in range(numBlocks):
bStart = i*self.blockSize
ptBlock = self.decryptBlock(self.bytesToDecrypt[bStart : bStart+self.blockSize])
self.decryptBlockCount += 1
plainText += ptBlock
if numExtraBytes > 0: # save any bytes that are not block aligned
self.bytesToDecrypt = self.bytesToDecrypt[-numExtraBytes:]
else:
self.bytesToDecrypt = ''
if more == None: # last decrypt remove padding
plainText = self.padding.removePad(plainText, self.blockSize)
self.resetDecrypt()
return plainText
class Pad:
def __init__(self):
pass # eventually could put in calculation of min and max size extension
class padWithPadLen(Pad):
""" Pad a binary string with the length of the padding """
def addPad(self, extraBytes, blockSize):
""" Add padding to a binary string to make it an even multiple
of the block size """
blocks, numExtraBytes = divmod(len(extraBytes), blockSize)
padLength = blockSize - numExtraBytes
return extraBytes + padLength*chr(padLength)
def removePad(self, paddedBinaryString, blockSize):
""" Remove padding from a binary string """
if not(0<len(paddedBinaryString)):
raise DecryptNotBlockAlignedError, 'Expected More Data'
return paddedBinaryString[:-ord(paddedBinaryString[-1])]
class noPadding(Pad):
""" No padding. Use this to get ECB behavior from encrypt/decrypt """
def addPad(self, extraBytes, blockSize):
""" Add no padding """
return extraBytes
def removePad(self, paddedBinaryString, blockSize):
""" Remove no padding """
return paddedBinaryString
"""
Rijndael encryption algorithm
This byte oriented implementation is intended to closely
match FIPS specification for readability. It is not implemented
for performance.
"""
class Rijndael(BlockCipher):
""" Rijndael encryption algorithm """
def __init__(self, key = None, padding = padWithPadLen(), keySize=16, blockSize=16 ):
self.name = 'RIJNDAEL'
self.keySize = keySize
self.strength = keySize*8
self.blockSize = blockSize # blockSize is in bytes
self.padding = padding # change default to noPadding() to get normal ECB behavior
assert( keySize%4==0 and NrTable[4].has_key(keySize/4)),'key size must be 16,20,24,28 or 32 bytes'
assert( blockSize%4==0 and NrTable.has_key(blockSize/4)), 'block size must be 16,20,24,28 or 32 bytes'
self.Nb = self.blockSize/4 # Nb is number of columns of 32 bit words
self.Nk = keySize/4 # Nk is the key length in 32-bit words
self.Nr = NrTable[self.Nb][self.Nk] # The number of rounds (Nr) is a function of
# the block (Nb) and key (Nk) sizes.
if key != None:
self.setKey(key)
def setKey(self, key):
""" Set a key and generate the expanded key """
assert( len(key) == (self.Nk*4) ), 'Key length must be same as keySize parameter'
self.__expandedKey = keyExpansion(self, key)
self.reset() # BlockCipher.reset()
def encryptBlock(self, plainTextBlock):
""" Encrypt a block, plainTextBlock must be a array of bytes [Nb by 4] """
self.state = self._toBlock(plainTextBlock)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
for round in range(1,self.Nr): #for round = 1 step 1 to Nr
SubBytes(self)
ShiftRows(self)
MixColumns(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
SubBytes(self)
ShiftRows(self)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
return self._toBString(self.state)
def decryptBlock(self, encryptedBlock):
""" decrypt a block (array of bytes) """
self.state = self._toBlock(encryptedBlock)
AddRoundKey(self, self.__expandedKey[self.Nr*self.Nb:(self.Nr+1)*self.Nb])
for round in range(self.Nr-1,0,-1):
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[round*self.Nb:(round+1)*self.Nb])
InvMixColumns(self)
InvShiftRows(self)
InvSubBytes(self)
AddRoundKey(self, self.__expandedKey[0:self.Nb])
return self._toBString(self.state)
def _toBlock(self, bs):
""" Convert binary string to array of bytes, state[col][row]"""
assert ( len(bs) == 4*self.Nb ), 'Rijndael blocks must be of size blockSize'
return [[ord(bs[4*i]),ord(bs[4*i+1]),ord(bs[4*i+2]),ord(bs[4*i+3])] for i in range(self.Nb)]
def _toBString(self, block):
""" Convert block (array of bytes) to binary string """
l = []
for col in block:
for rowElement in col:
l.append(chr(rowElement))
return ''.join(l)
#-------------------------------------
""" Number of rounds Nr = NrTable[Nb][Nk]
Nb Nk=4 Nk=5 Nk=6 Nk=7 Nk=8
------------------------------------- """
NrTable = {4: {4:10, 5:11, 6:12, 7:13, 8:14},
5: {4:11, 5:11, 6:12, 7:13, 8:14},
6: {4:12, 5:12, 6:12, 7:13, 8:14},
7: {4:13, 5:13, 6:13, 7:13, 8:14},
8: {4:14, 5:14, 6:14, 7:14, 8:14}}
#-------------------------------------
def keyExpansion(algInstance, keyString):
""" Expand a string of size keySize into a larger array """
Nk, Nb, Nr = algInstance.Nk, algInstance.Nb, algInstance.Nr # for readability
key = [ord(byte) for byte in keyString] # convert string to list
w = [[key[4*i],key[4*i+1],key[4*i+2],key[4*i+3]] for i in range(Nk)]
for i in range(Nk,Nb*(Nr+1)):
temp = w[i-1] # a four byte column
if (i%Nk) == 0 :
temp = temp[1:]+[temp[0]] # RotWord(temp)
temp = [ Sbox[byte] for byte in temp ]
temp[0] ^= Rcon[i/Nk]
elif Nk > 6 and i%Nk == 4 :
temp = [ Sbox[byte] for byte in temp ] # SubWord(temp)
w.append( [ w[i-Nk][byte]^temp[byte] for byte in range(4) ] )
return w
Rcon = (0,0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1b,0x36, # note extra '0' !!!
0x6c,0xd8,0xab,0x4d,0x9a,0x2f,0x5e,0xbc,0x63,0xc6,
0x97,0x35,0x6a,0xd4,0xb3,0x7d,0xfa,0xef,0xc5,0x91)
#-------------------------------------
def AddRoundKey(algInstance, keyBlock):
""" XOR the algorithm state with a block of key material """
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] ^= keyBlock[column][row]
#-------------------------------------
def SubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = Sbox[algInstance.state[column][row]]
def InvSubBytes(algInstance):
for column in range(algInstance.Nb):
for row in range(4):
algInstance.state[column][row] = InvSbox[algInstance.state[column][row]]
Sbox = (0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,
0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,
0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,
0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,
0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,
0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,
0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,
0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,
0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,
0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,
0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,
0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,
0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,
0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,
0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,
0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,
0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16)
InvSbox = (0x52,0x09,0x6a,0xd5,0x30,0x36,0xa5,0x38,
0xbf,0x40,0xa3,0x9e,0x81,0xf3,0xd7,0xfb,
0x7c,0xe3,0x39,0x82,0x9b,0x2f,0xff,0x87,
0x34,0x8e,0x43,0x44,0xc4,0xde,0xe9,0xcb,
0x54,0x7b,0x94,0x32,0xa6,0xc2,0x23,0x3d,
0xee,0x4c,0x95,0x0b,0x42,0xfa,0xc3,0x4e,
0x08,0x2e,0xa1,0x66,0x28,0xd9,0x24,0xb2,
0x76,0x5b,0xa2,0x49,0x6d,0x8b,0xd1,0x25,
0x72,0xf8,0xf6,0x64,0x86,0x68,0x98,0x16,
0xd4,0xa4,0x5c,0xcc,0x5d,0x65,0xb6,0x92,
0x6c,0x70,0x48,0x50,0xfd,0xed,0xb9,0xda,
0x5e,0x15,0x46,0x57,0xa7,0x8d,0x9d,0x84,
0x90,0xd8,0xab,0x00,0x8c,0xbc,0xd3,0x0a,
0xf7,0xe4,0x58,0x05,0xb8,0xb3,0x45,0x06,
0xd0,0x2c,0x1e,0x8f,0xca,0x3f,0x0f,0x02,
0xc1,0xaf,0xbd,0x03,0x01,0x13,0x8a,0x6b,
0x3a,0x91,0x11,0x41,0x4f,0x67,0xdc,0xea,
0x97,0xf2,0xcf,0xce,0xf0,0xb4,0xe6,0x73,
0x96,0xac,0x74,0x22,0xe7,0xad,0x35,0x85,
0xe2,0xf9,0x37,0xe8,0x1c,0x75,0xdf,0x6e,
0x47,0xf1,0x1a,0x71,0x1d,0x29,0xc5,0x89,
0x6f,0xb7,0x62,0x0e,0xaa,0x18,0xbe,0x1b,
0xfc,0x56,0x3e,0x4b,0xc6,0xd2,0x79,0x20,
0x9a,0xdb,0xc0,0xfe,0x78,0xcd,0x5a,0xf4,
0x1f,0xdd,0xa8,0x33,0x88,0x07,0xc7,0x31,
0xb1,0x12,0x10,0x59,0x27,0x80,0xec,0x5f,
0x60,0x51,0x7f,0xa9,0x19,0xb5,0x4a,0x0d,
0x2d,0xe5,0x7a,0x9f,0x93,0xc9,0x9c,0xef,
0xa0,0xe0,0x3b,0x4d,0xae,0x2a,0xf5,0xb0,
0xc8,0xeb,0xbb,0x3c,0x83,0x53,0x99,0x61,
0x17,0x2b,0x04,0x7e,0xba,0x77,0xd6,0x26,
0xe1,0x69,0x14,0x63,0x55,0x21,0x0c,0x7d)
#-------------------------------------
""" For each block size (Nb), the ShiftRow operation shifts row i
by the amount Ci. Note that row 0 is not shifted.
Nb C1 C2 C3
------------------- """
shiftOffset = { 4 : ( 0, 1, 2, 3),
5 : ( 0, 1, 2, 3),
6 : ( 0, 1, 2, 3),
7 : ( 0, 1, 2, 4),
8 : ( 0, 1, 3, 4) }
def ShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
def InvShiftRows(algInstance):
tmp = [0]*algInstance.Nb # list of size Nb
for r in range(1,4): # row 0 remains unchanged and can be skipped
for c in range(algInstance.Nb):
tmp[c] = algInstance.state[(c+algInstance.Nb-shiftOffset[algInstance.Nb][r]) % algInstance.Nb][r]
for c in range(algInstance.Nb):
algInstance.state[c][r] = tmp[c]
#-------------------------------------
def MixColumns(a):
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(2,a.state[j][0])^mul(3,a.state[j][1])^mul(1,a.state[j][2])^mul(1,a.state[j][3])
Sprime[1] = mul(1,a.state[j][0])^mul(2,a.state[j][1])^mul(3,a.state[j][2])^mul(1,a.state[j][3])
Sprime[2] = mul(1,a.state[j][0])^mul(1,a.state[j][1])^mul(2,a.state[j][2])^mul(3,a.state[j][3])
Sprime[3] = mul(3,a.state[j][0])^mul(1,a.state[j][1])^mul(1,a.state[j][2])^mul(2,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
def InvMixColumns(a):
""" Mix the four bytes of every column in a linear way
This is the opposite operation of MixColumns """
Sprime = [0,0,0,0]
for j in range(a.Nb): # for each column
Sprime[0] = mul(0x0E,a.state[j][0])^mul(0x0B,a.state[j][1])^mul(0x0D,a.state[j][2])^mul(0x09,a.state[j][3])
Sprime[1] = mul(0x09,a.state[j][0])^mul(0x0E,a.state[j][1])^mul(0x0B,a.state[j][2])^mul(0x0D,a.state[j][3])
Sprime[2] = mul(0x0D,a.state[j][0])^mul(0x09,a.state[j][1])^mul(0x0E,a.state[j][2])^mul(0x0B,a.state[j][3])
Sprime[3] = mul(0x0B,a.state[j][0])^mul(0x0D,a.state[j][1])^mul(0x09,a.state[j][2])^mul(0x0E,a.state[j][3])
for i in range(4):
a.state[j][i] = Sprime[i]
#-------------------------------------
def mul(a, b):
""" Multiply two elements of GF(2^m)
needed for MixColumn and InvMixColumn """
if (a !=0 and b!=0):
return Alogtable[(Logtable[a] + Logtable[b])%255]
else:
return 0
Logtable = ( 0, 0, 25, 1, 50, 2, 26, 198, 75, 199, 27, 104, 51, 238, 223, 3,
100, 4, 224, 14, 52, 141, 129, 239, 76, 113, 8, 200, 248, 105, 28, 193,
125, 194, 29, 181, 249, 185, 39, 106, 77, 228, 166, 114, 154, 201, 9, 120,
101, 47, 138, 5, 33, 15, 225, 36, 18, 240, 130, 69, 53, 147, 218, 142,
150, 143, 219, 189, 54, 208, 206, 148, 19, 92, 210, 241, 64, 70, 131, 56,
102, 221, 253, 48, 191, 6, 139, 98, 179, 37, 226, 152, 34, 136, 145, 16,
126, 110, 72, 195, 163, 182, 30, 66, 58, 107, 40, 84, 250, 133, 61, 186,
43, 121, 10, 21, 155, 159, 94, 202, 78, 212, 172, 229, 243, 115, 167, 87,
175, 88, 168, 80, 244, 234, 214, 116, 79, 174, 233, 213, 231, 230, 173, 232,
44, 215, 117, 122, 235, 22, 11, 245, 89, 203, 95, 176, 156, 169, 81, 160,
127, 12, 246, 111, 23, 196, 73, 236, 216, 67, 31, 45, 164, 118, 123, 183,
204, 187, 62, 90, 251, 96, 177, 134, 59, 82, 161, 108, 170, 85, 41, 157,
151, 178, 135, 144, 97, 190, 220, 252, 188, 149, 207, 205, 55, 63, 91, 209,
83, 57, 132, 60, 65, 162, 109, 71, 20, 42, 158, 93, 86, 242, 211, 171,
68, 17, 146, 217, 35, 32, 46, 137, 180, 124, 184, 38, 119, 153, 227, 165,
103, 74, 237, 222, 197, 49, 254, 24, 13, 99, 140, 128, 192, 247, 112, 7)
Alogtable= ( 1, 3, 5, 15, 17, 51, 85, 255, 26, 46, 114, 150, 161, 248, 19, 53,
95, 225, 56, 72, 216, 115, 149, 164, 247, 2, 6, 10, 30, 34, 102, 170,
229, 52, 92, 228, 55, 89, 235, 38, 106, 190, 217, 112, 144, 171, 230, 49,
83, 245, 4, 12, 20, 60, 68, 204, 79, 209, 104, 184, 211, 110, 178, 205,
76, 212, 103, 169, 224, 59, 77, 215, 98, 166, 241, 8, 24, 40, 120, 136,
131, 158, 185, 208, 107, 189, 220, 127, 129, 152, 179, 206, 73, 219, 118, 154,
181, 196, 87, 249, 16, 48, 80, 240, 11, 29, 39, 105, 187, 214, 97, 163,
254, 25, 43, 125, 135, 146, 173, 236, 47, 113, 147, 174, 233, 32, 96, 160,
251, 22, 58, 78, 210, 109, 183, 194, 93, 231, 50, 86, 250, 21, 63, 65,
195, 94, 226, 61, 71, 201, 64, 192, 91, 237, 44, 116, 156, 191, 218, 117,
159, 186, 213, 100, 172, 239, 42, 126, 130, 157, 188, 223, 122, 142, 137, 128,
155, 182, 193, 88, 232, 35, 101, 175, 234, 37, 111, 177, 200, 67, 197, 84,
252, 31, 33, 99, 165, 244, 7, 9, 27, 45, 119, 153, 176, 203, 70, 202,
69, 207, 74, 222, 121, 139, 134, 145, 168, 227, 62, 66, 198, 81, 243, 14,
18, 54, 90, 238, 41, 123, 141, 140, 143, 138, 133, 148, 167, 242, 13, 23,
57, 75, 221, 124, 132, 151, 162, 253, 28, 36, 108, 180, 199, 82, 246, 1)
"""
AES Encryption Algorithm
The AES algorithm is just Rijndael algorithm restricted to the default
blockSize of 128 bits.
"""
class AES(Rijndael):
""" The AES algorithm is the Rijndael block cipher restricted to block
sizes of 128 bits and key sizes of 128, 192 or 256 bits
"""
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
""" Initialize AES, keySize is in bytes """
if not (keySize == 16 or keySize == 24 or keySize == 32) :
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
self.name = 'AES'
"""
CBC mode of encryption for block ciphers.
This algorithm mode wraps any BlockCipher to make a
Cipher Block Chaining mode.
"""
from random import Random # should change to crypto.random!!!
class CBC(BlockCipher):
""" The CBC class wraps block ciphers to make cipher block chaining (CBC) mode
algorithms. The initialization vector (IV) is generated automatically if set to None. Padding
is also automatic based on the Pad class used to initialize the algorithm
"""
def __init__(self, blockCipherInstance, padding = padWithPadLen()):
""" CBC algorithms are created by initializing with a BlockCipher instance """
self.baseCipher = blockCipherInstance
self.name = self.baseCipher.name + '_CBC'
self.blockSize = self.baseCipher.blockSize
self.keySize = self.baseCipher.keySize
self.padding = padding
self.baseCipher.padding = noPadding() # baseCipher should NOT pad!!
self.r = Random() # for IV generation, currently uses
# mediocre standard distro version <----------------
import time
newSeed = time.ctime()+str(self.r) # seed with instance location
self.r.seed(newSeed) # to make unique
self.reset()
def setKey(self, key):
self.baseCipher.setKey(key)
# Overload to reset both CBC state and the wrapped baseCipher
def resetEncrypt(self):
BlockCipher.resetEncrypt(self) # reset CBC encrypt state (super class)
self.baseCipher.resetEncrypt() # reset base cipher encrypt state
def resetDecrypt(self):
BlockCipher.resetDecrypt(self) # reset CBC state (super class)
self.baseCipher.resetDecrypt() # reset base cipher decrypt state
def encrypt(self, plainText, iv=None, more=None):
""" CBC encryption - overloads baseCipher to allow optional explicit IV
when iv=None, iv is auto generated!
"""
if self.encryptBlockCount == 0:
self.iv = iv
else:
assert(iv==None), 'IV used only on first call to encrypt'
return BlockCipher.encrypt(self,plainText, more=more)
def decrypt(self, cipherText, iv=None, more=None):
""" CBC decryption - overloads baseCipher to allow optional explicit IV
when iv=None, iv is auto generated!
"""
if self.decryptBlockCount == 0:
self.iv = iv
else:
assert(iv==None), 'IV used only on first call to decrypt'
return BlockCipher.decrypt(self, cipherText, more=more)
def encryptBlock(self, plainTextBlock):
""" CBC block encryption, IV is set with 'encrypt' """
auto_IV = ''
if self.encryptBlockCount == 0:
if self.iv == None:
# generate IV and use
self.iv = ''.join([chr(self.r.randrange(256)) for i in range(self.blockSize)])
self.prior_encr_CT_block = self.iv
auto_IV = self.prior_encr_CT_block # prepend IV if it's automatic
else: # application provided IV
assert(len(self.iv) == self.blockSize ),'IV must be same length as block'
self.prior_encr_CT_block = self.iv
""" encrypt the prior CT XORed with the PT """
ct = self.baseCipher.encryptBlock( xor(self.prior_encr_CT_block, plainTextBlock) )
self.prior_encr_CT_block = ct
return auto_IV+ct
def decryptBlock(self, encryptedBlock):
""" Decrypt a single block """
if self.decryptBlockCount == 0: # first call, process IV
if self.iv == None: # auto decrypt IV?
self.prior_CT_block = encryptedBlock
return ''
else:
assert(len(self.iv)==self.blockSize),"Bad IV size on CBC decryption"
self.prior_CT_block = self.iv
dct = self.baseCipher.decryptBlock(encryptedBlock)
""" XOR the prior decrypted CT with the prior CT """
dct_XOR_priorCT = xor( self.prior_CT_block, dct )
self.prior_CT_block = encryptedBlock
return dct_XOR_priorCT
"""
AES_CBC Encryption Algorithm
"""
class AES_CBC(CBC):
""" AES encryption in CBC feedback mode """
def __init__(self, key=None, padding=padWithPadLen(), keySize=16):
CBC.__init__( self, AES(key, noPadding(), keySize), padding)
self.name = 'AES_CBC'
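To make the pure-Python classes above concrete, a short round-trip sketch with made-up values (the key and message are examples only):

# illustrative round trip using the classes defined above
key = '0123456789abcdef'                 # 16 bytes selects AES-128
cipher = AES_CBC(key)                    # padWithPadLen() and an auto-generated IV by default
ct = cipher.encrypt('an arbitrary length message')   # the random IV is prepended to the ciphertext
pt = AES_CBC(key).decrypt(ct)            # the leading block is consumed as the IV
assert pt == 'an arbitrary length message'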

Binary file not shown.

View file

@ -0,0 +1,290 @@
#! /usr/bin/env python
import sys, os
import hmac
from struct import pack
import hashlib
# interface to needed routines libalfcrypto
def _load_libalfcrypto():
import ctypes
from ctypes import CDLL, byref, POINTER, c_void_p, c_char_p, c_int, c_long, \
Structure, c_ulong, create_string_buffer, addressof, string_at, cast, sizeof
pointer_size = ctypes.sizeof(ctypes.c_voidp)
name_of_lib = None
if sys.platform.startswith('darwin'):
name_of_lib = 'libalfcrypto.dylib'
elif sys.platform.startswith('win'):
if pointer_size == 4:
name_of_lib = 'alfcrypto.dll'
else:
name_of_lib = 'alfcrypto64.dll'
else:
if pointer_size == 4:
name_of_lib = 'libalfcrypto32.so'
else:
name_of_lib = 'libalfcrypto64.so'
libalfcrypto = sys.path[0] + os.sep + name_of_lib
if not os.path.isfile(libalfcrypto):
raise Exception('libalfcrypto not found')
libalfcrypto = CDLL(libalfcrypto)
c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int)
def F(restype, name, argtypes):
func = getattr(libalfcrypto, name)
func.restype = restype
func.argtypes = argtypes
return func
# aes cbc decryption
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
#
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
#
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key,
# unsigned char *ivec, const int enc);
AES_MAXNR = 14
class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY)
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p, c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
# Pukall 1 Cipher
# unsigned char *PC1(const unsigned char *key, unsigned int klen, const unsigned char *src,
# unsigned char *dest, unsigned int len, int decryption);
PC1 = F(c_char_p, 'PC1', [c_char_p, c_ulong, c_char_p, c_char_p, c_ulong, c_ulong])
# Topaz Encryption
# typedef struct _TpzCtx {
# unsigned int v[2];
# } TpzCtx;
#
# void topazCryptoInit(TpzCtx *ctx, const unsigned char *key, int klen);
# void topazCryptoDecrypt(const TpzCtx *ctx, const unsigned char *in, unsigned char *out, int len);
class TPZ_CTX(Structure):
_fields_ = [('v', c_long * 2)]
TPZ_CTX_p = POINTER(TPZ_CTX)
topazCryptoInit = F(None, 'topazCryptoInit', [TPZ_CTX_p, c_char_p, c_ulong])
topazCryptoDecrypt = F(None, 'topazCryptoDecrypt', [TPZ_CTX_p, c_char_p, c_char_p, c_ulong])
class AES_CBC(object):
def __init__(self):
self._blocksize = 0
self._keyctx = None
self._iv = 0
def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey)
if (self._blocksize != 16) and (self._blocksize != 24) and (self._blocksize != 32) :
raise Exception('AES CBC improper key used')
return
keyctx = self._keyctx = AES_KEY()
self._iv = iv
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0:
raise Exception('Failed to initialize AES CBC key')
def decrypt(self, data):
out = create_string_buffer(len(data))
mutable_iv = create_string_buffer(self._iv, len(self._iv))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, mutable_iv, 0)
if rv == 0:
raise Exception('AES CBC decryption failed')
return out.raw
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
self.key = key
out = create_string_buffer(len(src))
de = 0
if decryption:
de = 1
rv = PC1(key, len(key), src, out, len(src), de)
return out.raw
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
tpz_ctx = self._ctx = TPZ_CTX()
topazCryptoInit(tpz_ctx, key, len(key))
return tpz_ctx
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
out = create_string_buffer(len(data))
topazCryptoDecrypt(ctx, data, out, len(data))
return out.raw
print "Using Library AlfCrypto DLL/DYLIB/SO"
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_python_alfcrypto():
import aescbc
class Pukall_Cipher(object):
def __init__(self):
self.key = None
def PC1(self, key, src, decryption=True):
sum1 = 0;
sum2 = 0;
keyXorVal = 0;
if len(key)!=16:
print "Bad key length!"
return None
wkey = []
for i in xrange(8):
wkey.append(ord(key[i*2])<<8 | ord(key[i*2+1]))
dst = ""
for i in xrange(len(src)):
temp1 = 0;
byteXorVal = 0;
for j in xrange(8):
temp1 ^= wkey[j]
sum2 = (sum2+j)*20021 + sum1
sum1 = (temp1*346)&0xFFFF
sum2 = (sum2+sum1)&0xFFFF
temp1 = (temp1*20021+1)&0xFFFF
byteXorVal ^= temp1 ^ sum2
curByte = ord(src[i])
if not decryption:
keyXorVal = curByte * 257;
curByte = ((curByte ^ (byteXorVal >> 8)) ^ byteXorVal) & 0xFF
if decryption:
keyXorVal = curByte * 257;
for j in xrange(8):
wkey[j] ^= keyXorVal;
dst+=chr(curByte)
return dst
class Topaz_Cipher(object):
def __init__(self):
self._ctx = None
def ctx_init(self, key):
ctx1 = 0x0CAFFE19E
for keyChar in key:
keyByte = ord(keyChar)
ctx2 = ctx1
ctx1 = ((((ctx1 >>2) * (ctx1 >>7))&0xFFFFFFFF) ^ (keyByte * keyByte * 0x0F902007)& 0xFFFFFFFF )
self._ctx = [ctx1, ctx2]
return [ctx1,ctx2]
def decrypt(self, data, ctx=None):
if ctx == None:
ctx = self._ctx
ctx1 = ctx[0]
ctx2 = ctx[1]
plainText = ""
for dataChar in data:
dataByte = ord(dataChar)
m = (dataByte ^ ((ctx1 >> 3) &0xFF) ^ ((ctx2<<3) & 0xFF)) &0xFF
ctx2 = ctx1
ctx1 = (((ctx1 >> 2) * (ctx1 >> 7)) &0xFFFFFFFF) ^((m * m * 0x0F902007) &0xFFFFFFFF)
plainText += chr(m)
return plainText
class AES_CBC(object):
def __init__(self):
self._key = None
self._iv = None
self.aes = None
def set_decrypt_key(self, userkey, iv):
self._key = userkey
self._iv = iv
self.aes = aescbc.AES_CBC(userkey, aescbc.noPadding(), len(userkey))
def decrypt(self, data):
iv = self._iv
cleartext = self.aes.decrypt(iv + data)
return cleartext
return (AES_CBC, Pukall_Cipher, Topaz_Cipher)
def _load_crypto():
AES_CBC = Pukall_Cipher = Topaz_Cipher = None
cryptolist = (_load_libalfcrypto, _load_python_alfcrypto)
for loader in cryptolist:
try:
AES_CBC, Pukall_Cipher, Topaz_Cipher = loader()
break
except (ImportError, Exception):
pass
return AES_CBC, Pukall_Cipher, Topaz_Cipher
AES_CBC, Pukall_Cipher, Topaz_Cipher = _load_crypto()
class KeyIVGen(object):
# this only exists in openssl so we will use a pure python implementation instead
# PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
# [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
def pbkdf2(self, passwd, salt, iter, keylen):
def xorstr( a, b ):
if len(a) != len(b):
raise Exception("xorstr(): lengths differ")
return ''.join((chr(ord(x)^ord(y)) for x, y in zip(a, b)))
def prf( h, data ):
hm = h.copy()
hm.update( data )
return hm.digest()
def pbkdf2_F( h, salt, itercount, blocknum ):
U = prf( h, salt + pack('>i',blocknum ) )
T = U
for i in range(2, itercount+1):
U = prf( h, U )
T = xorstr( T, U )
return T
sha = hashlib.sha1
digest_size = sha().digest_size
# l - number of output blocks to produce
l = keylen / digest_size
if keylen % digest_size != 0:
l += 1
h = hmac.new( passwd, None, sha )
T = ""
for i in range(1, l+1):
T += pbkdf2_F( h, salt, iter, i )
return T[0: keylen]
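A brief sketch of the calling conventions for the interfaces this module exposes once _load_crypto() has picked an implementation; every key and data value below is a placeholder, not real key material.

# placeholder keys and data, purely to show the calling conventions
key = '0123456789abcdef'                     # 16-byte AES / PC1 key
iv = '\x00' * 16
aes = AES_CBC()
aes.set_decrypt_key(key, iv)                 # 16, 24 or 32 byte keys are accepted
plaintext = aes.decrypt('x' * 32)            # ciphertext must be block aligned

pc1 = Pukall_Cipher()
record = pc1.PC1(key, 'encrypted record bytes', decryption=True)

topaz = Topaz_Cipher()
topaz.ctx_init('book key bytes')             # seeds the two-word context
clear = topaz.decrypt('\x10\x20\x30')

keymat = KeyIVGen().pbkdf2('passphrase', 'salt1234', 1000, 32)   # 32 bytes of derived key material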

Binary file not shown.

Binary file not shown.

View file

@ -23,7 +23,7 @@ from struct import unpack
class TpzDRMError(Exception): class TpzDRMError(Exception):
pass pass
# Get a 7 bit encoded number from string. The most # Get a 7 bit encoded number from string. The most
# significant byte comes first and has the high bit (8th) set # significant byte comes first and has the high bit (8th) set
def readEncodedNumber(file): def readEncodedNumber(file):
@ -32,57 +32,57 @@ def readEncodedNumber(file):
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data == 0xFF: if data == 0xFF:
flag = True flag = True
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data >= 0x80: if data >= 0x80:
datax = (data & 0x7F) datax = (data & 0x7F)
while data >= 0x80 : while data >= 0x80 :
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
datax = (datax <<7) + (data & 0x7F) datax = (datax <<7) + (data & 0x7F)
data = datax data = datax
if flag: if flag:
data = -data data = -data
return data return data
# returns a binary string that encodes a number into 7 bits # returns a binary string that encodes a number into 7 bits
# most significant byte first which has the high bit set # most significant byte first which has the high bit set
def encodeNumber(number): def encodeNumber(number):
result = "" result = ""
negative = False negative = False
flag = 0 flag = 0
if number < 0 : if number < 0 :
number = -number + 1 number = -number + 1
negative = True negative = True
while True: while True:
byte = number & 0x7F byte = number & 0x7F
number = number >> 7 number = number >> 7
byte += flag byte += flag
result += chr(byte) result += chr(byte)
flag = 0x80 flag = 0x80
if number == 0 : if number == 0 :
if (byte == 0xFF and negative == False) : if (byte == 0xFF and negative == False) :
result += chr(0x80) result += chr(0x80)
break break
if negative: if negative:
result += chr(0xFF) result += chr(0xFF)
return result[::-1] return result[::-1]
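As a quick worked example of the 7-bit scheme described in the comments above (values computed by hand, not taken from the file): 300 is stored as the two bytes 0x82 0x2C, since 0x82 carries the high-order bits with the continuation bit set and 0x2C carries the low seven bits, giving 2*128 + 44 = 300.

# hand-checked example of the encode/decode pair defined above
from StringIO import StringIO
assert encodeNumber(300) == '\x82\x2c'
assert readEncodedNumber(StringIO('\x82\x2c')) == 300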
# create / read a length prefixed string from the file # create / read a length prefixed string from the file
@ -97,9 +97,9 @@ def readString(file):
sv = file.read(stringLength) sv = file.read(stringLength)
if (len(sv) != stringLength): if (len(sv) != stringLength):
return "" return ""
return unpack(str(stringLength)+"s",sv)[0] return unpack(str(stringLength)+"s",sv)[0]
# convert a binary string generated by encodeNumber (7 bit encoded number) # convert a binary string generated by encodeNumber (7 bit encoded number)
# to the value you would find inside the page*.dat files to be processed # to the value you would find inside the page*.dat files to be processed
@ -265,6 +265,8 @@ class PageParser(object):
'paragraph.gridSize' : (1, 'scalar_number', 0, 0), 'paragraph.gridSize' : (1, 'scalar_number', 0, 0),
'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0), 'paragraph.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0), 'paragraph.gridTopCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'paragraph.gridEndCenter' : (1, 'scalar_number', 0, 0),
'word_semantic' : (1, 'snippets', 1, 1), 'word_semantic' : (1, 'snippets', 1, 1),
@ -284,6 +286,8 @@ class PageParser(object):
'_span.gridSize' : (1, 'scalar_number', 0, 0), '_span.gridSize' : (1, 'scalar_number', 0, 0),
'_span.gridBottomCenter' : (1, 'scalar_number', 0, 0), '_span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'_span.gridTopCenter' : (1, 'scalar_number', 0, 0), '_span.gridTopCenter' : (1, 'scalar_number', 0, 0),
'_span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'_span.gridEndCenter' : (1, 'scalar_number', 0, 0),
'span' : (1, 'snippets', 1, 0), 'span' : (1, 'snippets', 1, 0),
'span.firstWord' : (1, 'scalar_number', 0, 0), 'span.firstWord' : (1, 'scalar_number', 0, 0),
@ -291,6 +295,8 @@ class PageParser(object):
'span.gridSize' : (1, 'scalar_number', 0, 0), 'span.gridSize' : (1, 'scalar_number', 0, 0),
'span.gridBottomCenter' : (1, 'scalar_number', 0, 0), 'span.gridBottomCenter' : (1, 'scalar_number', 0, 0),
'span.gridTopCenter' : (1, 'scalar_number', 0, 0), 'span.gridTopCenter' : (1, 'scalar_number', 0, 0),
'span.gridBeginCenter' : (1, 'scalar_number', 0, 0),
'span.gridEndCenter' : (1, 'scalar_number', 0, 0),
'extratokens' : (1, 'snippets', 1, 0), 'extratokens' : (1, 'snippets', 1, 0),
'extratokens.type' : (1, 'scalar_text', 0, 0), 'extratokens.type' : (1, 'scalar_text', 0, 0),
@ -376,14 +382,14 @@ class PageParser(object):
for j in xrange(i+1, cnt) : for j in xrange(i+1, cnt) :
result += '.' + self.tagpath[j] result += '.' + self.tagpath[j]
return result return result
# list of absolute command byte values values that indicate # list of absolute command byte values values that indicate
# various types of loop meachanisms typically used to generate vectors # various types of loop meachanisms typically used to generate vectors
cmd_list = (0x76, 0x76) cmd_list = (0x76, 0x76)
# peek at and return 1 byte that is ahead by i bytes # peek at and return 1 byte that is ahead by i bytes
def peek(self, aheadi): def peek(self, aheadi):
c = self.fo.read(aheadi) c = self.fo.read(aheadi)
if (len(c) == 0): if (len(c) == 0):
@ -416,7 +422,7 @@ class PageParser(object):
return result return result
# process the next tag token, recursively handling subtags, # process the next tag token, recursively handling subtags,
# arguments, and commands # arguments, and commands
def procToken(self, token): def procToken(self, token):
@ -438,7 +444,7 @@ class PageParser(object):
if known_token : if known_token :
# handle subtags if present # handle subtags if present
subtagres = [] subtagres = []
if (splcase == 1): if (splcase == 1):
# this type of tag uses of escape marker 0x74 indicate subtag count # this type of tag uses of escape marker 0x74 indicate subtag count
@ -447,7 +453,7 @@ class PageParser(object):
subtags = 1 subtags = 1
num_args = 0 num_args = 0
if (subtags == 1): if (subtags == 1):
ntags = readEncodedNumber(self.fo) ntags = readEncodedNumber(self.fo)
if self.debug : print 'subtags: ' + token + ' has ' + str(ntags) if self.debug : print 'subtags: ' + token + ' has ' + str(ntags)
for j in xrange(ntags): for j in xrange(ntags):
@ -478,7 +484,7 @@ class PageParser(object):
return result return result
# all tokens that need to be processed should be in the hash # all tokens that need to be processed should be in the hash
# table if it may indicate a problem, either new token # table if it may indicate a problem, either new token
# or an out of sync condition # or an out of sync condition
else: else:
result = [] result = []
@ -530,7 +536,7 @@ class PageParser(object):
# dispatches loop commands bytes with various modes # dispatches loop commands bytes with various modes
# The 0x76 style loops are used to build vectors # The 0x76 style loops are used to build vectors
# This was all derived by trial and error and # This was all derived by trial and error and
# new loop types may exist that are not handled here # new loop types may exist that are not handled here
# since they did not appear in the test cases # since they did not appear in the test cases
@ -549,7 +555,7 @@ class PageParser(object):
return result return result
# add full tag path to injected snippets # add full tag path to injected snippets
def updateName(self, tag, prefix): def updateName(self, tag, prefix):
name = tag[0] name = tag[0]
@ -577,7 +583,7 @@ class PageParser(object):
argtype = tag[2] argtype = tag[2]
argList = tag[3] argList = tag[3]
nsubtagList = [] nsubtagList = []
if len(argList) > 0 : if len(argList) > 0 :
for j in argList: for j in argList:
asnip = self.snippetList[j] asnip = self.snippetList[j]
aso, atag = self.injectSnippets(asnip) aso, atag = self.injectSnippets(asnip)
@ -609,65 +615,70 @@ class PageParser(object):
nodename = fullpathname.pop() nodename = fullpathname.pop()
ilvl = len(fullpathname) ilvl = len(fullpathname)
indent = ' ' * (3 * ilvl) indent = ' ' * (3 * ilvl)
result = indent + '<' + nodename + '>' rlst = []
rlst.append(indent + '<' + nodename + '>')
if len(argList) > 0: if len(argList) > 0:
argres = '' alst = []
for j in argList: for j in argList:
if (argtype == 'text') or (argtype == 'scalar_text') : if (argtype == 'text') or (argtype == 'scalar_text') :
argres += j + '|' alst.append(j + '|')
else : else :
argres += str(j) + ',' alst.append(str(j) + ',')
argres = "".join(alst)
argres = argres[0:-1] argres = argres[0:-1]
if argtype == 'snippets' : if argtype == 'snippets' :
result += 'snippets:' + argres rlst.append('snippets:' + argres)
else : else :
result += argres rlst.append(argres)
if len(subtagList) > 0 : if len(subtagList) > 0 :
result += '\n' rlst.append('\n')
for j in subtagList: for j in subtagList:
if len(j) > 0 : if len(j) > 0 :
result += self.formatTag(j) rlst.append(self.formatTag(j))
result += indent + '</' + nodename + '>\n' rlst.append(indent + '</' + nodename + '>\n')
else: else:
result += '</' + nodename + '>\n' rlst.append('</' + nodename + '>\n')
return result return "".join(rlst)
# flatten tag # flatten tag
def flattenTag(self, node): def flattenTag(self, node):
name = node[0] name = node[0]
subtagList = node[1] subtagList = node[1]
argtype = node[2] argtype = node[2]
argList = node[3] argList = node[3]
result = name rlst = []
rlst.append(name)
if (len(argList) > 0): if (len(argList) > 0):
argres = '' alst = []
for j in argList: for j in argList:
if (argtype == 'text') or (argtype == 'scalar_text') : if (argtype == 'text') or (argtype == 'scalar_text') :
argres += j + '|' alst.append(j + '|')
else : else :
argres += str(j) + '|' alst.append(str(j) + '|')
argres = "".join(alst)
argres = argres[0:-1] argres = argres[0:-1]
if argtype == 'snippets' : if argtype == 'snippets' :
result += '.snippets=' + argres rlst.append('.snippets=' + argres)
else : else :
result += '=' + argres rlst.append('=' + argres)
result += '\n' rlst.append('\n')
for j in subtagList: for j in subtagList:
if len(j) > 0 : if len(j) > 0 :
result += self.flattenTag(j) rlst.append(self.flattenTag(j))
return result return "".join(rlst)
# reduce create xml output # reduce create xml output
def formatDoc(self, flat_xml): def formatDoc(self, flat_xml):
result = '' rlst = []
for j in self.doc : for j in self.doc :
if len(j) > 0: if len(j) > 0:
if flat_xml: if flat_xml:
result += self.flattenTag(j) rlst.append(self.flattenTag(j))
else: else:
result += self.formatTag(j) rlst.append(self.formatTag(j))
result = "".join(rlst)
if self.debug : print result if self.debug : print result
return result return result
@ -712,7 +723,7 @@ class PageParser(object):
first_token = None first_token = None
v = self.getNext() v = self.getNext()
if (v == None): if (v == None):
break break
if (v == 0x72): if (v == 0x72):
@ -723,7 +734,7 @@ class PageParser(object):
self.doc.append(tag) self.doc.append(tag)
else: else:
if self.debug: if self.debug:
print "Main Loop: Unknown value: %x" % v print "Main Loop: Unknown value: %x" % v
if (v == 0): if (v == 0):
if (self.peek(1) == 0x5f): if (self.peek(1) == 0x5f):
skip = self.fo.read(1) skip = self.fo.read(1)
@ -776,7 +787,7 @@ def usage():
# #
# Main # Main
# #
def main(argv): def main(argv):
dictFile = "" dictFile = ""
@ -797,11 +808,11 @@ def main(argv):
print str(err) # will print something like "option -a not recognized" print str(err) # will print something like "option -a not recognized"
usage() usage()
sys.exit(2) sys.exit(2)
if len(opts) == 0 and len(args) == 0 : if len(opts) == 0 and len(args) == 0 :
usage() usage()
sys.exit(2) sys.exit(2)
for o, a in opts: for o, a in opts:
if o =="-d": if o =="-d":
debug=True debug=True
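The recurring change in these hunks (formatTag, flattenTag and formatDoc) replaces repeated string concatenation with appending fragments to a list and joining once at the end, which avoids copying an ever-growing string on every +=. A toy illustration of the idiom, with made-up names rather than the file's own:

# toy example of the append-then-join idiom adopted throughout this commit
def format_items_slow(items):
    result = ''
    for item in items:
        result += str(item) + '|'      # each += copies the whole string built so far
    return result

def format_items_fast(items):
    parts = []
    for item in items:
        parts.append(str(item) + '|')  # amortised O(1) append
    return ''.join(parts)              # one final copy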

View file

@ -68,7 +68,7 @@ class DocParser(object):
ys = [] ys = []
gdefs = [] gdefs = []
# get path defintions, positions, dimensions for each glyph # get path defintions, positions, dimensions for each glyph
# that makes up the image, and find min x and min y to reposition origin # that makes up the image, and find min x and min y to reposition origin
minx = -1 minx = -1
miny = -1 miny = -1
@ -79,7 +79,7 @@ class DocParser(object):
xs.append(gxList[j]) xs.append(gxList[j])
if minx == -1: minx = gxList[j] if minx == -1: minx = gxList[j]
else : minx = min(minx, gxList[j]) else : minx = min(minx, gxList[j])
ys.append(gyList[j]) ys.append(gyList[j])
if miny == -1: miny = gyList[j] if miny == -1: miny = gyList[j]
else : miny = min(miny, gyList[j]) else : miny = min(miny, gyList[j])
@ -124,12 +124,12 @@ class DocParser(object):
item = self.docList[pos] item = self.docList[pos]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=',1) (name, argres) = item.split('=',1)
else : else :
name = item name = item
argres = '' argres = ''
return name, argres return name, argres
# find tag in doc if within pos to end inclusive # find tag in doc if within pos to end inclusive
def findinDoc(self, tagpath, pos, end) : def findinDoc(self, tagpath, pos, end) :
result = None result = None
@ -142,10 +142,10 @@ class DocParser(object):
item = self.docList[j] item = self.docList[j]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=',1) (name, argres) = item.split('=',1)
else : else :
name = item name = item
argres = '' argres = ''
if name.endswith(tagpath) : if name.endswith(tagpath) :
result = argres result = argres
foundat = j foundat = j
break break
@ -182,13 +182,13 @@ class DocParser(object):
# class names are an issue given topaz may start them with numerals (not allowed), # class names are an issue given topaz may start them with numerals (not allowed),
# use a mix of cases (which cause some browsers problems), and actually # use a mix of cases (which cause some browsers problems), and actually
# attach numbers after "_reclustered*" to the end to deal classeses that inherit # attach numbers after "_reclustered*" to the end to deal classeses that inherit
# from a base class (but then not actually provide all of these _reclustereed # from a base class (but then not actually provide all of these _reclustereed
# classes in the stylesheet! # classes in the stylesheet!
# so we clean this up by lowercasing, prepend 'cl-', and getting any baseclass # so we clean this up by lowercasing, prepend 'cl-', and getting any baseclass
# that exists in the stylesheet first, and then adding this specific class # that exists in the stylesheet first, and then adding this specific class
# after # after
# also some class names have spaces in them so need to convert to dashes # also some class names have spaces in them so need to convert to dashes
if nclass != None : if nclass != None :
nclass = nclass.replace(' ','-') nclass = nclass.replace(' ','-')
@ -211,7 +211,7 @@ class DocParser(object):
return nclass return nclass
# develop a sorted description of the starting positions of # develop a sorted description of the starting positions of
# groups and regions on the page, as well as the page type # groups and regions on the page, as well as the page type
def PageDescription(self): def PageDescription(self):
@ -267,7 +267,7 @@ class DocParser(object):
result = [] result = []
# paragraph # paragraph
(pos, pclass) = self.findinDoc('paragraph.class',start,end) (pos, pclass) = self.findinDoc('paragraph.class',start,end)
pclass = self.getClass(pclass) pclass = self.getClass(pclass)
@ -281,17 +281,22 @@ class DocParser(object):
if (sfirst != None) and (slast != None) : if (sfirst != None) and (slast != None) :
first = int(sfirst) first = int(sfirst)
last = int(slast) last = int(slast)
makeImage = (regtype == 'vertical') or (regtype == 'table') makeImage = (regtype == 'vertical') or (regtype == 'table')
makeImage = makeImage or (extraglyphs != None) makeImage = makeImage or (extraglyphs != None)
if self.fixedimage: if self.fixedimage:
makeImage = makeImage or (regtype == 'fixed') makeImage = makeImage or (regtype == 'fixed')
if (pclass != None): if (pclass != None):
makeImage = makeImage or (pclass.find('.inverted') >= 0) makeImage = makeImage or (pclass.find('.inverted') >= 0)
if self.fixedimage : if self.fixedimage :
makeImage = makeImage or (pclass.find('cl-f-') >= 0) makeImage = makeImage or (pclass.find('cl-f-') >= 0)
# before creating an image make sure glyph info exists
gidList = self.getData('info.glyph.glyphID',0,-1)
makeImage = makeImage & (len(gidList) > 0)
if not makeImage : if not makeImage :
# standard all word paragraph # standard all word paragraph
for wordnum in xrange(first, last): for wordnum in xrange(first, last):
@ -332,10 +337,10 @@ class DocParser(object):
result.append(('svg', num)) result.append(('svg', num))
return pclass, result return pclass, result
# this type of paragraph may be made up of multiple spans, inline # this type of paragraph may be made up of multiple spans, inline
# word monograms (images), and words with semantic meaning, # word monograms (images), and words with semantic meaning,
# plus glyphs used to form starting letter of first word # plus glyphs used to form starting letter of first word
# need to parse this type line by line # need to parse this type line by line
line = start + 1 line = start + 1
word_class = '' word_class = ''
@ -344,7 +349,7 @@ class DocParser(object):
if end == -1 : if end == -1 :
end = self.docSize end = self.docSize
# seems some xml has last* coming before first* so we have to # seems some xml has last* coming before first* so we have to
# handle any order # handle any order
sp_first = -1 sp_first = -1
sp_last = -1 sp_last = -1
@ -382,10 +387,10 @@ class DocParser(object):
ws_last = int(argres) ws_last = int(argres)
elif name.endswith('word.class'): elif name.endswith('word.class'):
(cname, space) = argres.split('-',1) (cname, space) = argres.split('-',1)
if space == '' : space = '0' if space == '' : space = '0'
if (cname == 'spaceafter') and (int(space) > 0) : if (cname == 'spaceafter') and (int(space) > 0) :
word_class = 'sa' word_class = 'sa'
elif name.endswith('word.img.src'): elif name.endswith('word.img.src'):
result.append(('img' + word_class, int(argres))) result.append(('img' + word_class, int(argres)))
@ -416,11 +421,11 @@ class DocParser(object):
result.append(('ocr', wordnum)) result.append(('ocr', wordnum))
ws_first = -1 ws_first = -1
ws_last = -1 ws_last = -1
line += 1 line += 1
return pclass, result return pclass, result
def buildParagraph(self, pclass, pdesc, type, regtype) : def buildParagraph(self, pclass, pdesc, type, regtype) :
parares = '' parares = ''
@ -433,7 +438,7 @@ class DocParser(object):
br_lb = (regtype == 'fixed') or (regtype == 'chapterheading') or (regtype == 'vertical') br_lb = (regtype == 'fixed') or (regtype == 'chapterheading') or (regtype == 'vertical')
handle_links = len(self.link_id) > 0 handle_links = len(self.link_id) > 0
if (type == 'full') or (type == 'begin') : if (type == 'full') or (type == 'begin') :
parares += '<p' + classres + '>' parares += '<p' + classres + '>'
@ -462,7 +467,7 @@ class DocParser(object):
if linktype == 'external' : if linktype == 'external' :
linkhref = self.link_href[link-1] linkhref = self.link_href[link-1]
linkhtml = '<a href="%s">' % linkhref linkhtml = '<a href="%s">' % linkhref
else : else :
if len(self.link_page) >= link : if len(self.link_page) >= link :
ptarget = self.link_page[link-1] - 1 ptarget = self.link_page[link-1] - 1
linkhtml = '<a href="#page%04d">' % ptarget linkhtml = '<a href="#page%04d">' % ptarget
@ -509,7 +514,7 @@ class DocParser(object):
elif wtype == 'svg' : elif wtype == 'svg' :
sep = '' sep = ''
parares += '<img src="img/' + self.id + '_%04d.svg" alt="" />' % num parares += '<img src="img/' + self.id + '_%04d.svg" alt="" />' % num
parares += sep parares += sep
if len(sep) > 0 : parares = parares[0:-1] if len(sep) > 0 : parares = parares[0:-1]
@ -551,7 +556,7 @@ class DocParser(object):
title = '' title = ''
alt_title = '' alt_title = ''
linkpage = '' linkpage = ''
else : else :
if len(self.link_page) >= link : if len(self.link_page) >= link :
ptarget = self.link_page[link-1] - 1 ptarget = self.link_page[link-1] - 1
linkpage = '%04d' % ptarget linkpage = '%04d' % ptarget
@ -584,14 +589,14 @@ class DocParser(object):
# walk the document tree collecting the information needed # walk the document tree collecting the information needed
# to build an html page using the ocrText # to build an html page using the ocrText
def process(self): def process(self):
htmlpage = ''
tocinfo = '' tocinfo = ''
hlst = []
# get the ocr text # get the ocr text
(pos, argres) = self.findinDoc('info.word.ocrText',0,-1) (pos, argres) = self.findinDoc('info.word.ocrText',0,-1)
@ -602,8 +607,8 @@ class DocParser(object):
# determine if first paragraph is continued from previous page # determine if first paragraph is continued from previous page
(pos, self.parastems_stemid) = self.findinDoc('info.paraStems.stemID',0,-1) (pos, self.parastems_stemid) = self.findinDoc('info.paraStems.stemID',0,-1)
first_para_continued = (self.parastems_stemid != None) first_para_continued = (self.parastems_stemid != None)
# determine if last paragraph is continued onto the next page # determine if last paragraph is continued onto the next page
(pos, self.paracont_stemid) = self.findinDoc('info.paraCont.stemID',0,-1) (pos, self.paracont_stemid) = self.findinDoc('info.paraCont.stemID',0,-1)
last_para_continued = (self.paracont_stemid != None) last_para_continued = (self.paracont_stemid != None)
@ -631,25 +636,25 @@ class DocParser(object):
# get a descriptions of the starting points of the regions # get a descriptions of the starting points of the regions
# and groups on the page # and groups on the page
(pagetype, pageDesc) = self.PageDescription() (pagetype, pageDesc) = self.PageDescription()
regcnt = len(pageDesc) - 1 regcnt = len(pageDesc) - 1
anchorSet = False anchorSet = False
breakSet = False breakSet = False
inGroup = False inGroup = False
# process each region on the page and convert what you can to html # process each region on the page and convert what you can to html
for j in xrange(regcnt): for j in xrange(regcnt):
(etype, start) = pageDesc[j] (etype, start) = pageDesc[j]
(ntype, end) = pageDesc[j+1] (ntype, end) = pageDesc[j+1]
# set anchor for link target on this page # set anchor for link target on this page
if not anchorSet and not first_para_continued: if not anchorSet and not first_para_continued:
htmlpage += '<div style="visibility: hidden; height: 0; width: 0;" id="' hlst.append('<div style="visibility: hidden; height: 0; width: 0;" id="')
htmlpage += self.id + '" title="pagetype_' + pagetype + '"></div>\n' hlst.append(self.id + '" title="pagetype_' + pagetype + '"></div>\n')
anchorSet = True anchorSet = True
# handle groups of graphics with text captions # handle groups of graphics with text captions
@ -658,12 +663,12 @@ class DocParser(object):
if grptype != None: if grptype != None:
if grptype == 'graphic': if grptype == 'graphic':
gcstr = ' class="' + grptype + '"' gcstr = ' class="' + grptype + '"'
htmlpage += '<div' + gcstr + '>' hlst.append('<div' + gcstr + '>')
inGroup = True inGroup = True
elif (etype == 'grpend'): elif (etype == 'grpend'):
if inGroup: if inGroup:
htmlpage += '</div>\n' hlst.append('</div>\n')
inGroup = False inGroup = False
else: else:
@ -673,25 +678,25 @@ class DocParser(object):
(pos, simgsrc) = self.findinDoc('img.src',start,end) (pos, simgsrc) = self.findinDoc('img.src',start,end)
if simgsrc: if simgsrc:
if inGroup: if inGroup:
htmlpage += '<img src="img/img%04d.jpg" alt="" />' % int(simgsrc) hlst.append('<img src="img/img%04d.jpg" alt="" />' % int(simgsrc))
else: else:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc) hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
elif regtype == 'chapterheading' : elif regtype == 'chapterheading' :
(pclass, pdesc) = self.getParaDescription(start,end, regtype) (pclass, pdesc) = self.getParaDescription(start,end, regtype)
if not breakSet: if not breakSet:
htmlpage += '<div style="page-break-after: always;">&nbsp;</div>\n' hlst.append('<div style="page-break-after: always;">&nbsp;</div>\n')
breakSet = True breakSet = True
tag = 'h1' tag = 'h1'
if pclass and (len(pclass) >= 7): if pclass and (len(pclass) >= 7):
if pclass[3:7] == 'ch1-' : tag = 'h1' if pclass[3:7] == 'ch1-' : tag = 'h1'
if pclass[3:7] == 'ch2-' : tag = 'h2' if pclass[3:7] == 'ch2-' : tag = 'h2'
if pclass[3:7] == 'ch3-' : tag = 'h3' if pclass[3:7] == 'ch3-' : tag = 'h3'
htmlpage += '<' + tag + ' class="' + pclass + '">' hlst.append('<' + tag + ' class="' + pclass + '">')
else: else:
htmlpage += '<' + tag + '>' hlst.append('<' + tag + '>')
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype) hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
htmlpage += '</' + tag + '>' hlst.append('</' + tag + '>')
elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'): elif (regtype == 'text') or (regtype == 'fixed') or (regtype == 'insert') or (regtype == 'listitem'):
ptype = 'full' ptype = 'full'
@ -705,11 +710,11 @@ class DocParser(object):
if pclass[3:6] == 'h1-' : tag = 'h4' if pclass[3:6] == 'h1-' : tag = 'h4'
if pclass[3:6] == 'h2-' : tag = 'h5' if pclass[3:6] == 'h2-' : tag = 'h5'
if pclass[3:6] == 'h3-' : tag = 'h6' if pclass[3:6] == 'h3-' : tag = 'h6'
htmlpage += '<' + tag + ' class="' + pclass + '">' hlst.append('<' + tag + ' class="' + pclass + '">')
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype) hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
htmlpage += '</' + tag + '>' hlst.append('</' + tag + '>')
else : else :
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
elif (regtype == 'tocentry') : elif (regtype == 'tocentry') :
ptype = 'full' ptype = 'full'
@ -718,7 +723,7 @@ class DocParser(object):
first_para_continued = False first_para_continued = False
(pclass, pdesc) = self.getParaDescription(start,end, regtype) (pclass, pdesc) = self.getParaDescription(start,end, regtype)
tocinfo += self.buildTOCEntry(pdesc) tocinfo += self.buildTOCEntry(pdesc)
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
elif (regtype == 'vertical') or (regtype == 'table') : elif (regtype == 'vertical') or (regtype == 'table') :
ptype = 'full' ptype = 'full'
@ -728,13 +733,13 @@ class DocParser(object):
ptype = 'end' ptype = 'end'
first_para_continued = False first_para_continued = False
(pclass, pdesc) = self.getParaDescription(start, end, regtype) (pclass, pdesc) = self.getParaDescription(start, end, regtype)
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
elif (regtype == 'synth_fcvr.center'): elif (regtype == 'synth_fcvr.center'):
(pos, simgsrc) = self.findinDoc('img.src',start,end) (pos, simgsrc) = self.findinDoc('img.src',start,end)
if simgsrc: if simgsrc:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc) hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
else : else :
print ' Making region type', regtype, print ' Making region type', regtype,
@ -760,18 +765,19 @@ class DocParser(object):
if pclass[3:6] == 'h1-' : tag = 'h4' if pclass[3:6] == 'h1-' : tag = 'h4'
if pclass[3:6] == 'h2-' : tag = 'h5' if pclass[3:6] == 'h2-' : tag = 'h5'
if pclass[3:6] == 'h3-' : tag = 'h6' if pclass[3:6] == 'h3-' : tag = 'h6'
htmlpage += '<' + tag + ' class="' + pclass + '">' hlst.append('<' + tag + ' class="' + pclass + '">')
htmlpage += self.buildParagraph(pclass, pdesc, 'middle', regtype) hlst.append(self.buildParagraph(pclass, pdesc, 'middle', regtype))
htmlpage += '</' + tag + '>' hlst.append('</' + tag + '>')
else : else :
htmlpage += self.buildParagraph(pclass, pdesc, ptype, regtype) hlst.append(self.buildParagraph(pclass, pdesc, ptype, regtype))
else : else :
print ' a "graphic" region' print ' a "graphic" region'
(pos, simgsrc) = self.findinDoc('img.src',start,end) (pos, simgsrc) = self.findinDoc('img.src',start,end)
if simgsrc: if simgsrc:
htmlpage += '<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc) hlst.append('<div class="graphic"><img src="img/img%04d.jpg" alt="" /></div>' % int(simgsrc))
htmlpage = "".join(hlst)
if last_para_continued : if last_para_continued :
if htmlpage[-4:] == '</p>': if htmlpage[-4:] == '</p>':
htmlpage = htmlpage[0:-4] htmlpage = htmlpage[0:-4]
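The comments above getClass describe how Topaz class names are sanitised before being used in the generated HTML: lowercase them, prefix 'cl-' so they cannot start with a digit, turn embedded spaces into dashes, and emit any base class found in the stylesheet ahead of the specific one. A rough, hypothetical sketch of that clean-up (written for this note, not the file's actual getClass body):

# hypothetical illustration of the class-name clean-up described in the comments;
# the real getClass() also looks up base classes in the parsed stylesheet
def normalise_class_name(nclass):
    if nclass is None:
        return None
    nclass = nclass.replace(' ', '-')    # spaces are not valid in class names
    return 'cl-' + nclass.lower()        # avoid leading digits and mixed case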

View file

@ -15,7 +15,7 @@ class PParser(object):
self.flatdoc = flatxml.split('\n') self.flatdoc = flatxml.split('\n')
self.docSize = len(self.flatdoc) self.docSize = len(self.flatdoc)
self.temp = [] self.temp = []
self.ph = -1 self.ph = -1
self.pw = -1 self.pw = -1
startpos = self.posinDoc('page.h') or self.posinDoc('book.h') startpos = self.posinDoc('page.h') or self.posinDoc('book.h')
@ -26,7 +26,7 @@ class PParser(object):
for p in startpos: for p in startpos:
(name, argres) = self.lineinDoc(p) (name, argres) = self.lineinDoc(p)
self.pw = max(self.pw, int(argres)) self.pw = max(self.pw, int(argres))
if self.ph <= 0: if self.ph <= 0:
self.ph = int(meta_array.get('pageHeight', '11000')) self.ph = int(meta_array.get('pageHeight', '11000'))
if self.pw <= 0: if self.pw <= 0:
@ -181,70 +181,69 @@ class PParser(object):
def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi): def convert2SVG(gdict, flat_xml, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi):
ml = '' mlst = []
pp = PParser(gdict, flat_xml, meta_array) pp = PParser(gdict, flat_xml, meta_array)
ml += '<?xml version="1.0" standalone="no"?>\n' mlst.append('<?xml version="1.0" standalone="no"?>\n')
if (raw): if (raw):
ml += '<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n' mlst.append('<!DOCTYPE svg PUBLIC "-//W3C/DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">\n')
ml += '<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1) mlst.append('<svg width="%fin" height="%fin" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1">\n' % (pp.pw / scaledpi, pp.ph / scaledpi, pp.pw -1, pp.ph -1))
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']) mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
else: else:
ml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' mlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
ml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n' mlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" ><head>\n')
ml += '<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']) mlst.append('<title>Page %d - %s by %s</title>\n' % (pageid, meta_array['Title'],meta_array['Authors']))
ml += '<script><![CDATA[\n' mlst.append('<script><![CDATA[\n')
ml += 'function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n' mlst.append('function gd(){var p=window.location.href.replace(/^.*\?dpi=(\d+).*$/i,"$1");return p;}\n')
ml += 'var dpi=%d;\n' % scaledpi mlst.append('var dpi=%d;\n' % scaledpi)
if (previd) : if (previd) :
ml += 'var prevpage="page%04d.xhtml";\n' % (previd) mlst.append('var prevpage="page%04d.xhtml";\n' % (previd))
if (nextid) : if (nextid) :
ml += 'var nextpage="page%04d.xhtml";\n' % (nextid) mlst.append('var nextpage="page%04d.xhtml";\n' % (nextid))
ml += 'var pw=%d;var ph=%d;' % (pp.pw, pp.ph) mlst.append('var pw=%d;var ph=%d;' % (pp.pw, pp.ph))
ml += 'function zoomin(){dpi=dpi*(0.8);setsize();}\n' mlst.append('function zoomin(){dpi=dpi*(0.8);setsize();}\n')
ml += 'function zoomout(){dpi=dpi*1.25;setsize();}\n' mlst.append('function zoomout(){dpi=dpi*1.25;setsize();}\n')
ml += 'function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n' mlst.append('function setsize(){var svg=document.getElementById("svgimg");var prev=document.getElementById("prevsvg");var next=document.getElementById("nextsvg");var width=(pw/dpi)+"in";var height=(ph/dpi)+"in";svg.setAttribute("width",width);svg.setAttribute("height",height);prev.setAttribute("height",height);prev.setAttribute("width","50px");next.setAttribute("height",height);next.setAttribute("width","50px");}\n')
ml += 'function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n' mlst.append('function ppage(){window.location.href=prevpage+"?dpi="+Math.round(dpi);}\n')
ml += 'function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n' mlst.append('function npage(){window.location.href=nextpage+"?dpi="+Math.round(dpi);}\n')
ml += 'var gt=gd();if(gt>0){dpi=gt;}\n' mlst.append('var gt=gd();if(gt>0){dpi=gt;}\n')
ml += 'window.onload=setsize;\n' mlst.append('window.onload=setsize;\n')
ml += ']]></script>\n' mlst.append(']]></script>\n')
ml += '</head>\n' mlst.append('</head>\n')
ml += '<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n' mlst.append('<body onLoad="setsize();" style="background-color:#777;text-align:center;">\n')
ml += '<div style="white-space:nowrap;">\n' mlst.append('<div style="white-space:nowrap;">\n')
if previd == None: if previd == None:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n' mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
else: else:
ml += '<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n' mlst.append('<a href="javascript:ppage();"><svg id="prevsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,150,95,5,95,295" fill="#AAAAAA" /></svg></a>\n')
ml += '<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph) mlst.append('<a href="javascript:npage();"><svg id="svgimg" viewBox="0 0 %d %d" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.1" style="background-color:#FFF;border:1px solid black;">' % (pp.pw, pp.ph))
if (pp.gid != None): if (pp.gid != None):
ml += '<defs>\n' mlst.append('<defs>\n')
gdefs = pp.getGlyphs() gdefs = pp.getGlyphs()
for j in xrange(0,len(gdefs)): for j in xrange(0,len(gdefs)):
ml += gdefs[j] mlst.append(gdefs[j])
ml += '</defs>\n' mlst.append('</defs>\n')
img = pp.getImages() img = pp.getImages()
if (img != None): if (img != None):
for j in xrange(0,len(img)): for j in xrange(0,len(img)):
ml += img[j] mlst.append(img[j])
if (pp.gid != None): if (pp.gid != None):
for j in xrange(0,len(pp.gid)): for j in xrange(0,len(pp.gid)):
ml += '<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]) mlst.append('<use xlink:href="#gl%d" x="%d" y="%d" />\n' % (pp.gid[j], pp.gx[j], pp.gy[j]))
if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0): if (img == None or len(img) == 0) and (pp.gid == None or len(pp.gid) == 0):
xpos = "%d" % (pp.pw // 3) xpos = "%d" % (pp.pw // 3)
ypos = "%d" % (pp.ph // 3) ypos = "%d" % (pp.ph // 3)
ml += '<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n' mlst.append('<text x="' + xpos + '" y="' + ypos + '" font-size="' + meta_array['fontSize'] + '" font-family="Helvetica" stroke="black">This page intentionally left blank.</text>\n')
if (raw) : if (raw) :
ml += '</svg>' mlst.append('</svg>')
else : else :
ml += '</svg></a>\n' mlst.append('</svg></a>\n')
if nextid == None: if nextid == None:
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n' mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"></svg></a>\n')
else : else :
ml += '<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n' mlst.append('<a href="javascript:npage();"><svg id="nextsvg" viewBox="0 0 100 300" xmlns="http://www.w3.org/2000/svg" version="1.1" style="background-color:#777"><polygon points="5,5,5,295,95,150" fill="#AAAAAA" /></svg></a>\n')
ml += '</div>\n' mlst.append('</div>\n')
ml += '<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n' mlst.append('<div><a href="javascript:zoomin();">zoom in</a> - <a href="javascript:zoomout();">zoom out</a></div>\n')
ml += '</body>\n' mlst.append('</body>\n')
ml += '</html>\n' mlst.append('</html>\n')
return ml return "".join(mlst)
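The change in this hunk, and in the HTML, TOC, and OPF builders further down, is mechanical: every 'ml += ...' style concatenation becomes an append to a list that is joined once at the end. A minimal sketch of the pattern, using illustrative names that are not part of the patched modules:

    def build_markup(fragments):
        parts = []                     # accumulate pieces; list.append is cheap
        for frag in fragments:
            parts.append(frag)
        return "".join(parts)          # one final copy instead of re-copying on every '+='

Because Python strings are immutable, repeated '+=' can re-copy the growing markup on each step, while joining a list copies each piece once; that is presumably the speed motivation, since the generated output is byte-for-byte the same.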


@ -39,6 +39,8 @@ else :
import flatxml2svg import flatxml2svg
import stylexml2css import stylexml2css
# global switch
buildXML = False
# Get a 7 bit encoded number from a file # Get a 7 bit encoded number from a file
def readEncodedNumber(file): def readEncodedNumber(file):
@ -46,27 +48,27 @@ def readEncodedNumber(file):
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data == 0xFF: if data == 0xFF:
flag = True flag = True
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
if data >= 0x80: if data >= 0x80:
datax = (data & 0x7F) datax = (data & 0x7F)
while data >= 0x80 : while data >= 0x80 :
c = file.read(1) c = file.read(1)
if (len(c) == 0): if (len(c) == 0):
return None return None
data = ord(c) data = ord(c)
datax = (datax <<7) + (data & 0x7F) datax = (datax <<7) + (data & 0x7F)
data = datax data = datax
if flag: if flag:
data = -data data = -data
return data return data
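readEncodedNumber reads a variable-length integer: an optional 0xFF prefix marks the value as negative, and each byte with the high bit set contributes 7 payload bits, most significant group first, until a byte below 0x80 terminates the number. A small sketch of the same format decoded from an in-memory string instead of a file object (the helper name and buffer handling are illustrative, not part of the patch):

    def decode_number(buf, pos):
        # mirrors readEncodedNumber, but returns (value, next_position)
        flag = False
        data = ord(buf[pos]); pos += 1
        if data == 0xFF:                      # 0xFF prefix: the number is negative
            flag = True
            data = ord(buf[pos]); pos += 1
        if data >= 0x80:                      # high bit set: 7 bits per byte, big-endian
            datax = data & 0x7F
            while data >= 0x80:
                data = ord(buf[pos]); pos += 1
                datax = (datax << 7) + (data & 0x7F)
            data = datax
        if flag:
            data = -data
        return data, pos

For example, decode_number('\x81\x23', 0) returns (163, 2), since (1 << 7) + 0x23 is 163.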
# Get a length prefixed string from the file # Get a length prefixed string from the file
def lengthPrefixString(data): def lengthPrefixString(data):
return encodeNumber(len(data))+data return encodeNumber(len(data))+data
@ -77,7 +79,7 @@ def readString(file):
sv = file.read(stringLength) sv = file.read(stringLength)
if (len(sv) != stringLength): if (len(sv) != stringLength):
return "" return ""
return unpack(str(stringLength)+"s",sv)[0] return unpack(str(stringLength)+"s",sv)[0]
def getMetaArray(metaFile): def getMetaArray(metaFile):
# parse the meta file # parse the meta file
@ -141,10 +143,10 @@ class PageDimParser(object):
item = docList[j] item = docList[j]
if item.find('=') >= 0: if item.find('=') >= 0:
(name, argres) = item.split('=') (name, argres) = item.split('=')
else : else :
name = item name = item
argres = '' argres = ''
if name.endswith(tagpath) : if name.endswith(tagpath) :
result = argres result = argres
foundat = j foundat = j
break break
@ -298,9 +300,10 @@ def generateBook(bookDir, raw, fixedimage):
if not os.path.exists(svgDir) : if not os.path.exists(svgDir) :
os.makedirs(svgDir) os.makedirs(svgDir)
xmlDir = os.path.join(bookDir,'xml') if buildXML:
if not os.path.exists(xmlDir) : xmlDir = os.path.join(bookDir,'xml')
os.makedirs(xmlDir) if not os.path.exists(xmlDir) :
os.makedirs(xmlDir)
otherFile = os.path.join(bookDir,'other0000.dat') otherFile = os.path.join(bookDir,'other0000.dat')
if not os.path.exists(otherFile) : if not os.path.exists(otherFile) :
@ -336,7 +339,7 @@ def generateBook(bookDir, raw, fixedimage):
print 'Processing Meta Data and creating OPF' print 'Processing Meta Data and creating OPF'
meta_array = getMetaArray(metaFile) meta_array = getMetaArray(metaFile)
# replace special chars in title and authors like & < > # replace special chars in title and authors like & < >
title = meta_array.get('Title','No Title Provided') title = meta_array.get('Title','No Title Provided')
title = title.replace('&','&amp;') title = title.replace('&','&amp;')
title = title.replace('<','&lt;') title = title.replace('<','&lt;')
@ -348,11 +351,14 @@ def generateBook(bookDir, raw, fixedimage):
authors = authors.replace('>','&gt;') authors = authors.replace('>','&gt;')
meta_array['Authors'] = authors meta_array['Authors'] = authors
xname = os.path.join(xmlDir, 'metadata.xml') if buildXML:
metastr = '' xname = os.path.join(xmlDir, 'metadata.xml')
for key in meta_array: mlst = []
metastr += '<meta name="' + key + '" content="' + meta_array[key] + '" />\n' for key in meta_array:
file(xname, 'wb').write(metastr) mlst.append('<meta name="' + key + '" content="' + meta_array[key] + '" />\n')
metastr = "".join(mlst)
mlst = None
file(xname, 'wb').write(metastr)
print 'Processing StyleSheet' print 'Processing StyleSheet'
# get some scaling info from metadata to use while processing styles # get some scaling info from metadata to use while processing styles
@ -404,8 +410,9 @@ def generateBook(bookDir, raw, fixedimage):
# now get the css info # now get the css info
cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw) cssstr , classlst = stylexml2css.convert2CSS(flat_xml, fontsize, ph, pw)
file(xname, 'wb').write(cssstr) file(xname, 'wb').write(cssstr)
xname = os.path.join(xmlDir, 'other0000.xml') if buildXML:
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile)) xname = os.path.join(xmlDir, 'other0000.xml')
file(xname, 'wb').write(convert2xml.getXML(dict, otherFile))
print 'Processing Glyphs' print 'Processing Glyphs'
gd = GlyphDict() gd = GlyphDict()
@ -425,8 +432,9 @@ def generateBook(bookDir, raw, fixedimage):
fname = os.path.join(glyphsDir,filename) fname = os.path.join(glyphsDir,filename)
flat_xml = convert2xml.fromData(dict, fname) flat_xml = convert2xml.fromData(dict, fname)
xname = os.path.join(xmlDir, filename.replace('.dat','.xml')) if buildXML:
file(xname, 'wb').write(convert2xml.getXML(dict, fname)) xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
gp = GParser(flat_xml) gp = GParser(flat_xml)
for i in xrange(0, gp.count): for i in xrange(0, gp.count):
@ -441,29 +449,29 @@ def generateBook(bookDir, raw, fixedimage):
glyfile.close() glyfile.close()
print " " print " "
# build up tocentries while processing html
tocentries = ''
# start up the html # start up the html
# also build up tocentries while processing html
htmlFileName = "book.html" htmlFileName = "book.html"
htmlstr = '<?xml version="1.0" encoding="utf-8"?>\n' hlst = []
htmlstr += '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n' hlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
htmlstr += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n' hlst.append('<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.1 Strict//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11-strict.dtd">\n')
htmlstr += '<head>\n' hlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">\n')
htmlstr += '<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n' hlst.append('<head>\n')
htmlstr += '<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n' hlst.append('<meta http-equiv="content-type" content="text/html; charset=utf-8"/>\n')
htmlstr += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n' hlst.append('<title>' + meta_array['Title'] + ' by ' + meta_array['Authors'] + '</title>\n')
htmlstr += '<meta name="Title" content="' + meta_array['Title'] + '" />\n' hlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
hlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
htmlstr += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n' hlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
htmlstr += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n' hlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
htmlstr += '<link href="style.css" rel="stylesheet" type="text/css" />\n' hlst.append('<link href="style.css" rel="stylesheet" type="text/css" />\n')
htmlstr += '</head>\n<body>\n' hlst.append('</head>\n<body>\n')
print 'Processing Pages' print 'Processing Pages'
# Books are at 1440 DPI. This is rendering at twice that size for # Books are at 1440 DPI. This is rendering at twice that size for
# readability when rendering to the screen. # readability when rendering to the screen.
scaledpi = 1440.0 scaledpi = 1440.0
filenames = os.listdir(pageDir) filenames = os.listdir(pageDir)
@ -471,6 +479,7 @@ def generateBook(bookDir, raw, fixedimage):
numfiles = len(filenames) numfiles = len(filenames)
xmllst = [] xmllst = []
elst = []
for filename in filenames: for filename in filenames:
# print ' ', filename # print ' ', filename
@ -481,45 +490,51 @@ def generateBook(bookDir, raw, fixedimage):
# keep flat_xml for later svg processing # keep flat_xml for later svg processing
xmllst.append(flat_xml) xmllst.append(flat_xml)
xname = os.path.join(xmlDir, filename.replace('.dat','.xml')) if buildXML:
file(xname, 'wb').write(convert2xml.getXML(dict, fname)) xname = os.path.join(xmlDir, filename.replace('.dat','.xml'))
file(xname, 'wb').write(convert2xml.getXML(dict, fname))
# first get the html # first get the html
pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage) pagehtml, tocinfo = flatxml2html.convert2HTML(flat_xml, classlst, fname, bookDir, gd, fixedimage)
tocentries += tocinfo elst.append(tocinfo)
htmlstr += pagehtml hlst.append(pagehtml)
# finish up the html string and output it # finish up the html string and output it
htmlstr += '</body>\n</html>\n' hlst.append('</body>\n</html>\n')
htmlstr = "".join(hlst)
hlst = None
file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr) file(os.path.join(bookDir, htmlFileName), 'wb').write(htmlstr)
print " " print " "
print 'Extracting Table of Contents from Amazon OCR' print 'Extracting Table of Contents from Amazon OCR'
# first create a table of contents file for the svg images # first create a table of contents file for the svg images
tochtml = '<?xml version="1.0" encoding="utf-8"?>\n' tlst = []
tochtml += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' tlst.append('<?xml version="1.0" encoding="utf-8"?>\n')
tochtml += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >' tlst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
tochtml += '<head>\n' tlst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
tochtml += '<title>' + meta_array['Title'] + '</title>\n' tlst.append('<head>\n')
tochtml += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n' tlst.append('<title>' + meta_array['Title'] + '</title>\n')
tochtml += '<meta name="Title" content="' + meta_array['Title'] + '" />\n' tlst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
tlst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
tochtml += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n' tlst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
tochtml += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n' tlst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
tochtml += '</head>\n' tlst.append('</head>\n')
tochtml += '<body>\n' tlst.append('<body>\n')
tochtml += '<h2>Table of Contents</h2>\n' tlst.append('<h2>Table of Contents</h2>\n')
start = pageidnums[0] start = pageidnums[0]
if (raw): if (raw):
startname = 'page%04d.svg' % start startname = 'page%04d.svg' % start
else: else:
startname = 'page%04d.xhtml' % start startname = 'page%04d.xhtml' % start
tochtml += '<h3><a href="' + startname + '">Start of Book</a></h3>\n' tlst.append('<h3><a href="' + startname + '">Start of Book</a></h3>\n')
# build up a table of contents for the svg xhtml output # build up a table of contents for the svg xhtml output
tocentries = "".join(elst)
elst = None
toclst = tocentries.split('\n') toclst = tocentries.split('\n')
toclst.pop() toclst.pop()
for entry in toclst: for entry in toclst:
@ -530,30 +545,32 @@ def generateBook(bookDir, raw, fixedimage):
fname = 'page%04d.svg' % id fname = 'page%04d.svg' % id
else: else:
fname = 'page%04d.xhtml' % id fname = 'page%04d.xhtml' % id
tochtml += '<h3><a href="'+ fname + '">' + title + '</a></h3>\n' tlst.append('<h3><a href="'+ fname + '">' + title + '</a></h3>\n')
tochtml += '</body>\n' tlst.append('</body>\n')
tochtml += '</html>\n' tlst.append('</html>\n')
tochtml = "".join(tlst)
file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml) file(os.path.join(svgDir, 'toc.xhtml'), 'wb').write(tochtml)
# now create index_svg.xhtml that points to all required files # now create index_svg.xhtml that points to all required files
svgindex = '<?xml version="1.0" encoding="utf-8"?>\n' slst = []
svgindex += '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n' slst.append('<?xml version="1.0" encoding="utf-8"?>\n')
svgindex += '<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >' slst.append('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n')
svgindex += '<head>\n' slst.append('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" >')
svgindex += '<title>' + meta_array['Title'] + '</title>\n' slst.append('<head>\n')
svgindex += '<meta name="Author" content="' + meta_array['Authors'] + '" />\n' slst.append('<title>' + meta_array['Title'] + '</title>\n')
svgindex += '<meta name="Title" content="' + meta_array['Title'] + '" />\n' slst.append('<meta name="Author" content="' + meta_array['Authors'] + '" />\n')
slst.append('<meta name="Title" content="' + meta_array['Title'] + '" />\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
svgindex += '<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n' slst.append('<meta name="ASIN" content="' + meta_array['ASIN'] + '" />\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
svgindex += '<meta name="GUID" content="' + meta_array['GUID'] + '" />\n' slst.append('<meta name="GUID" content="' + meta_array['GUID'] + '" />\n')
svgindex += '</head>\n' slst.append('</head>\n')
svgindex += '<body>\n' slst.append('<body>\n')
print "Building svg images of each book page" print "Building svg images of each book page"
svgindex += '<h2>List of Pages</h2>\n' slst.append('<h2>List of Pages</h2>\n')
svgindex += '<div>\n' slst.append('<div>\n')
idlst = sorted(pageIDMap.keys()) idlst = sorted(pageIDMap.keys())
numids = len(idlst) numids = len(idlst)
cnt = len(idlst) cnt = len(idlst)
@ -566,49 +583,54 @@ def generateBook(bookDir, raw, fixedimage):
nextid = None nextid = None
print '.', print '.',
pagelst = pageIDMap[pageid] pagelst = pageIDMap[pageid]
flat_svg = '' flst = []
for page in pagelst: for page in pagelst:
flat_svg += xmllst[page] flst.append(xmllst[page])
flat_svg = "".join(flst)
flst=None
svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi) svgxml = flatxml2svg.convert2SVG(gd, flat_svg, pageid, previd, nextid, svgDir, raw, meta_array, scaledpi)
if (raw) : if (raw) :
pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w') pfile = open(os.path.join(svgDir,'page%04d.svg' % pageid),'w')
svgindex += '<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid) slst.append('<a href="svg/page%04d.svg">Page %d</a>\n' % (pageid, pageid))
else : else :
pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w') pfile = open(os.path.join(svgDir,'page%04d.xhtml' % pageid), 'w')
svgindex += '<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid) slst.append('<a href="svg/page%04d.xhtml">Page %d</a>\n' % (pageid, pageid))
previd = pageid previd = pageid
pfile.write(svgxml) pfile.write(svgxml)
pfile.close() pfile.close()
counter += 1 counter += 1
svgindex += '</div>\n' slst.append('</div>\n')
svgindex += '<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n' slst.append('<h2><a href="svg/toc.xhtml">Table of Contents</a></h2>\n')
svgindex += '</body>\n</html>\n' slst.append('</body>\n</html>\n')
svgindex = "".join(slst)
slst = None
file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex) file(os.path.join(bookDir, 'index_svg.xhtml'), 'wb').write(svgindex)
print " " print " "
# build the opf file # build the opf file
opfname = os.path.join(bookDir, 'book.opf') opfname = os.path.join(bookDir, 'book.opf')
opfstr = '<?xml version="1.0" encoding="utf-8"?>\n' olst = []
opfstr += '<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n' olst.append('<?xml version="1.0" encoding="utf-8"?>\n')
olst.append('<package xmlns="http://www.idpf.org/2007/opf" unique-identifier="guid_id">\n')
# adding metadata # adding metadata
opfstr += ' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n' olst.append(' <metadata xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:opf="http://www.idpf.org/2007/opf">\n')
if 'GUID' in meta_array: if 'GUID' in meta_array:
opfstr += ' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n' olst.append(' <dc:identifier opf:scheme="GUID" id="guid_id">' + meta_array['GUID'] + '</dc:identifier>\n')
if 'ASIN' in meta_array: if 'ASIN' in meta_array:
opfstr += ' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n' olst.append(' <dc:identifier opf:scheme="ASIN">' + meta_array['ASIN'] + '</dc:identifier>\n')
if 'oASIN' in meta_array: if 'oASIN' in meta_array:
opfstr += ' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n' olst.append(' <dc:identifier opf:scheme="oASIN">' + meta_array['oASIN'] + '</dc:identifier>\n')
opfstr += ' <dc:title>' + meta_array['Title'] + '</dc:title>\n' olst.append(' <dc:title>' + meta_array['Title'] + '</dc:title>\n')
opfstr += ' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n' olst.append(' <dc:creator opf:role="aut">' + meta_array['Authors'] + '</dc:creator>\n')
opfstr += ' <dc:language>en</dc:language>\n' olst.append(' <dc:language>en</dc:language>\n')
opfstr += ' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n' olst.append(' <dc:date>' + meta_array['UpdateTime'] + '</dc:date>\n')
if isCover: if isCover:
opfstr += ' <meta name="cover" content="bookcover"/>\n' olst.append(' <meta name="cover" content="bookcover"/>\n')
opfstr += ' </metadata>\n' olst.append(' </metadata>\n')
opfstr += '<manifest>\n' olst.append('<manifest>\n')
opfstr += ' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n' olst.append(' <item id="book" href="book.html" media-type="application/xhtml+xml"/>\n')
opfstr += ' <item id="stylesheet" href="style.css" media-type="text/css"/>\n' olst.append(' <item id="stylesheet" href="style.css" media-type="text/css"/>\n')
# adding image files to manifest # adding image files to manifest
filenames = os.listdir(imgDir) filenames = os.listdir(imgDir)
filenames = sorted(filenames) filenames = sorted(filenames)
@ -618,17 +640,19 @@ def generateBook(bookDir, raw, fixedimage):
imgext = 'jpeg' imgext = 'jpeg'
if imgext == '.svg': if imgext == '.svg':
imgext = 'svg+xml' imgext = 'svg+xml'
opfstr += ' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n' olst.append(' <item id="' + imgname + '" href="img/' + filename + '" media-type="image/' + imgext + '"/>\n')
if isCover: if isCover:
opfstr += ' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n' olst.append(' <item id="bookcover" href="cover.jpg" media-type="image/jpeg" />\n')
opfstr += '</manifest>\n' olst.append('</manifest>\n')
# adding spine # adding spine
opfstr += '<spine>\n <itemref idref="book" />\n</spine>\n' olst.append('<spine>\n <itemref idref="book" />\n</spine>\n')
if isCover: if isCover:
opfstr += ' <guide>\n' olst.append(' <guide>\n')
opfstr += ' <reference href="cover.jpg" type="cover" title="Cover"/>\n' olst.append(' <reference href="cover.jpg" type="cover" title="Cover"/>\n')
opfstr += ' </guide>\n' olst.append(' </guide>\n')
opfstr += '</package>\n' olst.append('</package>\n')
opfstr = "".join(olst)
olst = None
file(opfname, 'wb').write(opfstr) file(opfname, 'wb').write(opfstr)
print 'Processing Complete' print 'Processing Complete'
@ -649,7 +673,6 @@ def usage():
def main(argv): def main(argv):
bookDir = '' bookDir = ''
if len(argv) == 0: if len(argv) == 0:
argv = sys.argv argv = sys.argv
@ -663,7 +686,7 @@ def main(argv):
if len(opts) == 0 and len(args) == 0 : if len(opts) == 0 and len(args) == 0 :
usage() usage()
return 1 return 1
raw = 0 raw = 0
fixedimage = True fixedimage = True


@ -5,19 +5,19 @@ from __future__ import with_statement
# engine to remove drm from Kindle for Mac and Kindle for PC books # engine to remove drm from Kindle for Mac and Kindle for PC books
# for personal use for archiving and converting your ebooks # for personal use for archiving and converting your ebooks
# PLEASE DO NOT PIRATE EBOOKS! # PLEASE DO NOT PIRATE EBOOKS!
# We want all authors and publishers, and eBook stores to live # We want all authors and publishers, and eBook stores to live
# long and prosperous lives but at the same time we just want to # long and prosperous lives but at the same time we just want to
# be able to read OUR books on whatever device we want and to keep # be able to read OUR books on whatever device we want and to keep
# readable for a long, long time # readable for a long, long time
# This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle, # This borrows very heavily from works by CMBDTC, IHeartCabbages, skindle,
# unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates # unswindle, DarkReverser, ApprenticeAlf, DiapDealer, some_updates
# and many many others # and many many others
__version__ = '3.9' __version__ = '4.0'
class Unbuffered: class Unbuffered:
def __init__(self, stream): def __init__(self, stream):
@ -34,6 +34,8 @@ import string
import re import re
import traceback import traceback
buildXML = False
class DrmException(Exception): class DrmException(Exception):
pass pass
@ -50,7 +52,7 @@ else:
import mobidedrm import mobidedrm
import topazextract import topazextract
import kgenpids import kgenpids
# cleanup bytestring filenames # cleanup bytestring filenames
# borrowed from calibre from calibre/src/calibre/__init__.py # borrowed from calibre from calibre/src/calibre/__init__.py
@ -75,6 +77,8 @@ def cleanup_name(name):
return one return one
def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids): def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
global buildXML
# handle the obvious cases at the beginning # handle the obvious cases at the beginning
if not os.path.isfile(infile): if not os.path.isfile(infile):
print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist" print >>sys.stderr, ('K4MobiDeDrm v%(__version__)s\n' % globals()) + "Error: Input file does not exist"
@ -100,14 +104,14 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
outfilename = outfilename + "_" + filenametitle outfilename = outfilename + "_" + filenametitle
elif outfilename[:8] != filenametitle[:8]: elif outfilename[:8] != filenametitle[:8]:
outfilename = outfilename[:8] + "_" + filenametitle outfilename = outfilename[:8] + "_" + filenametitle
# avoid excessively long file names # avoid excessively long file names
if len(outfilename)>150: if len(outfilename)>150:
outfilename = outfilename[:150] outfilename = outfilename[:150]
# build pid list # build pid list
md1, md2 = mb.getPIDMetaInfo() md1, md2 = mb.getPIDMetaInfo()
pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles) pidlst = kgenpids.getPidList(md1, md2, k4, pids, serials, kInfoFiles)
try: try:
mb.processBook(pidlst) mb.processBook(pidlst)
@ -128,9 +132,9 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
else: else:
outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi') outfile = os.path.join(outdir, outfilename + '_nodrm' + '.mobi')
mb.getMobiFile(outfile) mb.getMobiFile(outfile)
return 0 return 0
# topaz: # topaz:
print " Creating NoDRM HTMLZ Archive" print " Creating NoDRM HTMLZ Archive"
zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz') zipname = os.path.join(outdir, outfilename + '_nodrm' + '.htmlz')
mb.getHTMLZip(zipname) mb.getHTMLZip(zipname)
@ -139,9 +143,10 @@ def decryptBook(infile, outdir, k4, kInfoFiles, serials, pids):
zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip') zipname = os.path.join(outdir, outfilename + '_SVG' + '.zip')
mb.getSVGZip(zipname) mb.getSVGZip(zipname)
print " Creating XML ZIP Archive" if buildXML:
zipname = os.path.join(outdir, outfilename + '_XML' + '.zip') print " Creating XML ZIP Archive"
mb.getXMLZip(zipname) zipname = os.path.join(outdir, outfilename + '_XML' + '.zip')
mb.getXMLZip(zipname)
# remove internal temporary directory of Topaz pieces # remove internal temporary directory of Topaz pieces
mb.cleanup() mb.cleanup()
@ -156,7 +161,7 @@ def usage(progname):
# #
# Main # Main
# #
def main(argv=sys.argv): def main(argv=sys.argv):
progname = os.path.basename(argv[0]) progname = os.path.basename(argv[0])
@ -164,9 +169,9 @@ def main(argv=sys.argv):
kInfoFiles = [] kInfoFiles = []
serials = [] serials = []
pids = [] pids = []
print ('K4MobiDeDrm v%(__version__)s ' print ('K4MobiDeDrm v%(__version__)s '
'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals()) 'provided by the work of many including DiapDealer, SomeUpdates, IHeartCabbages, CMBDTC, Skindle, DarkReverser, ApprenticeAlf, etc .' % globals())
try: try:
opts, args = getopt.getopt(sys.argv[1:], "k:p:s:") opts, args = getopt.getopt(sys.argv[1:], "k:p:s:")
@ -177,7 +182,7 @@ def main(argv=sys.argv):
if len(args)<2: if len(args)<2:
usage(progname) usage(progname)
sys.exit(2) sys.exit(2)
for o, a in opts: for o, a in opts:
if o == "-k": if o == "-k":
if a == None : if a == None :
@ -195,8 +200,8 @@ def main(argv=sys.argv):
# try with built in Kindle Info files # try with built in Kindle Info files
k4 = True k4 = True
if sys.platform.startswith('linux'): if sys.platform.startswith('linux'):
k4 = False k4 = False
kInfoFiles = None kInfoFiles = None
infile = args[0] infile = args[0]
outdir = args[1] outdir = args[1]
return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids) return decryptBook(infile, outdir, k4, kInfoFiles, serials, pids)
@ -205,4 +210,3 @@ def main(argv=sys.argv):
if __name__ == '__main__': if __name__ == '__main__':
sys.stdout=Unbuffered(sys.stdout) sys.stdout=Unbuffered(sys.stdout)
sys.exit(main()) sys.exit(main())


@ -5,7 +5,8 @@ from __future__ import with_statement
import sys import sys
import os import os
import os.path import os.path
import re
import copy
import subprocess import subprocess
from struct import pack, unpack, unpack_from from struct import pack, unpack, unpack_from
@ -24,6 +25,25 @@ def _load_crypto_libcrypto():
raise DrmException('libcrypto not found') raise DrmException('libcrypto not found')
libcrypto = CDLL(libcrypto) libcrypto = CDLL(libcrypto)
# From OpenSSL's crypto aes header
#
# AES_ENCRYPT 1
# AES_DECRYPT 0
# AES_MAXNR 14 (in bytes)
# AES_BLOCK_SIZE 16 (in bytes)
#
# struct aes_key_st {
# unsigned long rd_key[4 *(AES_MAXNR + 1)];
# int rounds;
# };
# typedef struct aes_key_st AES_KEY;
#
# int AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
#
# note: the ivec string, and output buffer are both mutable
# void AES_cbc_encrypt(const unsigned char *in, unsigned char *out,
# const unsigned long length, const AES_KEY *key, unsigned char *ivec, const int enc);
AES_MAXNR = 14 AES_MAXNR = 14
c_char_pp = POINTER(c_char_p) c_char_pp = POINTER(c_char_p)
c_int_p = POINTER(c_int) c_int_p = POINTER(c_int)
@ -31,25 +51,31 @@ def _load_crypto_libcrypto():
class AES_KEY(Structure): class AES_KEY(Structure):
_fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)] _fields_ = [('rd_key', c_long * (4 * (AES_MAXNR + 1))), ('rounds', c_int)]
AES_KEY_p = POINTER(AES_KEY) AES_KEY_p = POINTER(AES_KEY)
def F(restype, name, argtypes): def F(restype, name, argtypes):
func = getattr(libcrypto, name) func = getattr(libcrypto, name)
func.restype = restype func.restype = restype
func.argtypes = argtypes func.argtypes = argtypes
return func return func
AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int]) AES_cbc_encrypt = F(None, 'AES_cbc_encrypt',[c_char_p, c_char_p, c_ulong, AES_KEY_p, c_char_p,c_int])
AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p]) AES_set_decrypt_key = F(c_int, 'AES_set_decrypt_key',[c_char_p, c_int, AES_KEY_p])
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1', # From OpenSSL's Crypto evp/p5_crpt2.c
#
# int PKCS5_PBKDF2_HMAC_SHA1(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# int keylen, unsigned char *out);
PKCS5_PBKDF2_HMAC_SHA1 = F(c_int, 'PKCS5_PBKDF2_HMAC_SHA1',
[c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p]) [c_char_p, c_ulong, c_char_p, c_ulong, c_ulong, c_ulong, c_char_p])
class LibCrypto(object): class LibCrypto(object):
def __init__(self): def __init__(self):
self._blocksize = 0 self._blocksize = 0
self._keyctx = None self._keyctx = None
self.iv = 0 self._iv = 0
def set_decrypt_key(self, userkey, iv): def set_decrypt_key(self, userkey, iv):
self._blocksize = len(userkey) self._blocksize = len(userkey)
@ -57,14 +83,17 @@ def _load_crypto_libcrypto():
raise DrmException('AES improper key used') raise DrmException('AES improper key used')
return return
keyctx = self._keyctx = AES_KEY() keyctx = self._keyctx = AES_KEY()
self.iv = iv self._iv = iv
self._userkey = userkey
rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx) rv = AES_set_decrypt_key(userkey, len(userkey) * 8, keyctx)
if rv < 0: if rv < 0:
raise DrmException('Failed to initialize AES key') raise DrmException('Failed to initialize AES key')
def decrypt(self, data): def decrypt(self, data):
out = create_string_buffer(len(data)) out = create_string_buffer(len(data))
rv = AES_cbc_encrypt(data, out, len(data), self._keyctx, self.iv, 0) mutable_iv = create_string_buffer(self._iv, len(self._iv))
keyctx = self._keyctx
rv = AES_cbc_encrypt(data, out, len(data), keyctx, mutable_iv, 0)
if rv == 0: if rv == 0:
raise DrmException('AES decryption failed') raise DrmException('AES decryption failed')
return out.raw return out.raw
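The mutable_iv copy appears to serve two purposes: OpenSSL's AES_cbc_encrypt writes the updated chaining IV back into the ivec buffer it is given, so the old code let the library scribble over the immutable Python string held in self.iv, and taking a fresh copy per call keeps self._iv intact so one LibCrypto object can decrypt several blobs that all start from the same IV. A short usage sketch of the class as patched (the key, IV and ciphertext names are placeholders):

    crp = LibCrypto()
    key = '0123456789abcdef'              # placeholder 16-byte (AES-128) key
    iv = '\x00' * 16                      # placeholder IV
    crp.set_decrypt_key(key, iv)
    plaintext = crp.decrypt(ciphertext)   # ciphertext: CBC data whose length is a multiple of 16
    more = crp.decrypt(ciphertext2)       # safe to call again; the stored IV is not clobbered

This construct-once, decrypt-many calling pattern is what the CryptUnprotectData classes added below depend on.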
@ -111,13 +140,17 @@ def SHA256(message):
# Various character maps used to decrypt books. Probably supposed to act as obfuscation # Various character maps used to decrypt books. Probably supposed to act as obfuscation
charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M" charMap1 = "n5Pr6St7Uv8Wx9YzAb0Cd1Ef2Gh3Jk4M"
charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM" charMap2 = "ZB0bYyc1xDdW2wEV3Ff7KkPpL8UuGA4gz-Tme9Nn_tHh5SvXCsIiR6rJjQaqlOoM"
# For kinf approach of K4PC/K4Mac # For kinf approach of K4Mac 1.6.X or later
# On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE" # On K4PC charMap5 = "AzB0bYyCeVvaZ3FfUuG4g-TtHh5SsIiR6rJjQq7KkPpL8lOoMm9Nn_c1XxDdW2wE"
# For Mac they seem to re-use charMap2 here # For Mac they seem to re-use charMap2 here
charMap5 = charMap2 charMap5 = charMap2
# new in K4M 1.9.X
testMap8 = "YvaZ3FfUm9Nn_c1XuG4yCAzB0beVg-TtHh5SsIiR6rJjQdW2wEq7KkPpL8lOoMxD"
def encode(data, map): def encode(data, map):
result = "" result = ""
for char in data: for char in data:
@ -144,7 +177,7 @@ def decode(data,map):
result += pack("B",value) result += pack("B",value)
return result return result
# For .kinf approach of K4PC and now K4Mac # For K4M 1.6.X and later
# generate table of prime number less than or equal to int n # generate table of prime number less than or equal to int n
def primes(n): def primes(n):
if n==2: return [2] if n==2: return [2]
@ -271,7 +304,7 @@ def GetDiskPartitionUUID(diskpart):
if not foundIt: if not foundIt:
uuidnum = '' uuidnum = ''
return uuidnum return uuidnum
def GetMACAddressMunged(): def GetMACAddressMunged():
macnum = os.getenv('MYMACNUM') macnum = os.getenv('MYMACNUM')
if macnum != None: if macnum != None:
@ -315,33 +348,11 @@ def GetMACAddressMunged():
return macnum return macnum
# uses unix env to get username instead of using sysctlbyname # uses unix env to get username instead of using sysctlbyname
def GetUserName(): def GetUserName():
username = os.getenv('USER') username = os.getenv('USER')
return username return username
# implements an Pseudo Mac Version of Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
def CryptUnprotectData(encryptedData):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
iter = 0x3e8
keylen = 0x80
crp = LibCrypto()
key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32]
iv = key_iv[32:48]
crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
def isNewInstall(): def isNewInstall():
home = os.getenv('HOME') home = os.getenv('HOME')
# soccer game fan anyone # soccer game fan anyone
@ -350,7 +361,7 @@ def isNewInstall():
if os.path.exists(dpath): if os.path.exists(dpath):
return True return True
return False return False
def GetIDString(): def GetIDString():
# K4Mac now has an extensive set of ids strings it uses # K4Mac now has an extensive set of ids strings it uses
@ -359,13 +370,13 @@ def GetIDString():
# BUT Amazon has now become nasty enough to detect when its app # BUT Amazon has now become nasty enough to detect when its app
# is being run under a debugger and actually changes code paths # is being run under a debugger and actually changes code paths
# including which one of these strings is chosen, all to try # including which one of these strings is chosen, all to try
# to prevent reverse engineering # to prevent reverse engineering
# Sad really ... they will only hurt their own sales ... # Sad really ... they will only hurt their own sales ...
# true book lovers really want to keep their books forever # true book lovers really want to keep their books forever
# and move them to their devices and DRM prevents that so they # and move them to their devices and DRM prevents that so they
# will just buy from someplace else that they can remove # will just buy from someplace else that they can remove
# the DRM from # the DRM from
# Amazon should know by now that true book lover's are not like # Amazon should know by now that true book lover's are not like
@ -388,27 +399,91 @@ def GetIDString():
return '9999999999' return '9999999999'
# implements a Pseudo Mac Version of Windows built-in Crypto routine
# used by Kindle for Mac versions < 1.6.0
class CryptUnprotectData(object):
def __init__(self):
sernum = GetVolumeSerialNumber()
if sernum == '':
sernum = '9999999999'
sp = sernum + '!@#' + GetUserName()
passwdData = encode(SHA256(sp),charMap1)
salt = '16743'
self.crp = LibCrypto()
iter = 0x3e8
keylen = 0x80
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext,charMap1)
return cleartext
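Recasting CryptUnprotectData as a class moves the PBKDF2 work in keyivgen into __init__, so the key and IV are derived once per kindle-info file instead of once per record; getDBfromFile below builds the object once and reuses it inside its parsing loop. The calling pattern, with a placeholder records list standing in for the decoded items:

    cud = CryptUnprotectData()                 # derive key and IV once
    for encryptedValue in records:             # records: placeholder for the decoded kinf items
        cleartext = cud.decrypt(encryptedValue)

The V2 and V3 variants below follow the same construct-once, decrypt-many shape.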
# implements a Pseudo Mac Version of Windows built-in Crypto routine # implements a Pseudo Mac Version of Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.6.0 # used for Kindle for Mac Versions >= 1.6.0
def CryptUnprotectDataV2(encryptedData): class CryptUnprotectDataV2(object):
sp = GetUserName() + ':&%:' + GetIDString() def __init__(self):
passwdData = encode(SHA256(sp),charMap5) sp = GetUserName() + ':&%:' + GetIDString()
# salt generation as per the code passwdData = encode(SHA256(sp),charMap5)
salt = 0x0512981d * 2 * 1 * 1 # salt generation as per the code
salt = str(salt) + GetUserName() salt = 0x0512981d * 2 * 1 * 1
salt = encode(salt,charMap5) salt = str(salt) + GetUserName()
salt = encode(salt,charMap5)
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext
# unprotect the new header blob in .kinf2011
# used in Kindle for Mac Version >= 1.9.0
def UnprotectHeaderData(encryptedData):
passwdData = 'header_key_data'
salt = 'HEADER.2011'
iter = 0x80
keylen = 0x100
crp = LibCrypto() crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = crp.keyivgen(passwdData, salt, iter, keylen) key_iv = crp.keyivgen(passwdData, salt, iter, keylen)
key = key_iv[0:32] key = key_iv[0:32]
iv = key_iv[32:48] iv = key_iv[32:48]
crp.set_decrypt_key(key,iv) crp.set_decrypt_key(key,iv)
cleartext = crp.decrypt(encryptedData) cleartext = crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap5)
return cleartext return cleartext
# implements a Pseudo Mac Version of Windows built-in Crypto routine
# used for Kindle for Mac Versions >= 1.9.0
class CryptUnprotectDataV3(object):
def __init__(self, entropy):
sp = GetUserName() + '+@#$%+' + GetIDString()
passwdData = encode(SHA256(sp),charMap2)
salt = entropy
self.crp = LibCrypto()
iter = 0x800
keylen = 0x400
key_iv = self.crp.keyivgen(passwdData, salt, iter, keylen)
self.key = key_iv[0:32]
self.iv = key_iv[32:48]
self.crp.set_decrypt_key(self.key, self.iv)
def decrypt(self, encryptedData):
cleartext = self.crp.decrypt(encryptedData)
cleartext = decode(cleartext, charMap2)
return cleartext
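CryptUnprotectDataV3 differs from V2 in the separator used inside the password string, in the character map (charMap2 rather than charMap5), and in its salt, which is caller-supplied entropy instead of a value built from the user name. As the kinf2011 parsing further down shows, that entropy comes from the decrypted header blob: its Version/Build/Cksum/Guid fields are picked apart, and the Build number, scaled by 0x2df (735), is concatenated with the Guid. A condensed sketch of the derivation using a fabricated header string (illustrative only):

    import re
    cleartext = '[Version:1][Build:1234][Cksum:ABCD][Guid:{deadbeef-0000}]'    # fabricated example
    pattern = re.compile(r'\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]', re.IGNORECASE)
    m = pattern.search(cleartext)
    entropy = str(int(m.group(2)) * 0x2df) + m.group(4)    # '906990{deadbeef-0000}'
    cud = CryptUnprotectDataV3(entropy)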
# Locate the .kindle-info files # Locate the .kindle-info files
def getKindleInfoFiles(kInfoFiles): def getKindleInfoFiles(kInfoFiles):
# first search for current .kindle-info files # first search for current .kindle-info files
@ -424,12 +499,22 @@ def getKindleInfoFiles(kInfoFiles):
if os.path.isfile(resline): if os.path.isfile(resline):
kInfoFiles.append(resline) kInfoFiles.append(resline)
found = True found = True
# add any .kinf files # add any .rainier*-kinf files
cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"' cmdline = 'find "' + home + '/Library/Application Support" -name ".rainier*-kinf"'
cmdline = cmdline.encode(sys.getfilesystemencoding()) cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False) p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate() out1, out2 = p1.communicate()
reslst = out1.split('\n') reslst = out1.split('\n')
for resline in reslst:
if os.path.isfile(resline):
kInfoFiles.append(resline)
found = True
# add any .kinf2011 files
cmdline = 'find "' + home + '/Library/Application Support" -name ".kinf2011"'
cmdline = cmdline.encode(sys.getfilesystemencoding())
p1 = subprocess.Popen(cmdline, shell=True, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=False)
out1, out2 = p1.communicate()
reslst = out1.split('\n')
for resline in reslst: for resline in reslst:
if os.path.isfile(resline): if os.path.isfile(resline):
kInfoFiles.append(resline) kInfoFiles.append(resline)
@ -438,7 +523,7 @@ def getKindleInfoFiles(kInfoFiles):
print('No kindle-info files have been found.') print('No kindle-info files have been found.')
return kInfoFiles return kInfoFiles
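The three search blocks are near-duplicates, each spawning an external find process for one filename pattern. A possible consolidation, offered purely as a sketch and not something this patch does, is a single os.walk pass that matches all three names at once (the '.kindle-info' pattern is inferred from the comment above; the exact command line of that first search sits outside this hunk):

    import os, fnmatch

    def find_kindle_info_files(home):
        # one traversal of ~/Library/Application Support for all known info-file names
        patterns = ['.kindle-info', '.rainier*-kinf', '.kinf2011']
        found = []
        root = os.path.join(home, 'Library', 'Application Support')
        for dirpath, dirnames, filenames in os.walk(root):
            for name in filenames:
                if any(fnmatch.fnmatch(name, p) for p in patterns):
                    found.append(os.path.join(dirpath, name))
        return found

Shelling out to find, as the patch does, keeps each new file type a copy-and-paste addition, at the cost of three process launches per run.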
# determine type of kindle info provided and return a # determine type of kindle info provided and return a
# database of keynames and values # database of keynames and values
def getDBfromFile(kInfoFile): def getDBfromFile(kInfoFile):
names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"] names = ["kindle.account.tokens","kindle.cookie.item","eulaVersionAccepted","login_date","kindle.token.item","login","kindle.key.item","kindle.name.info","kindle.device.info", "MazamaRandomNumber", "max_date", "SIGVERIF"]
@ -449,7 +534,9 @@ def getDBfromFile(kInfoFile):
data = infoReader.read() data = infoReader.read()
if data.find('[') != -1 : if data.find('[') != -1 :
# older style kindle-info file # older style kindle-info file
cud = CryptUnprotectData()
items = data.split('[') items = data.split('[')
for item in items: for item in items:
if item != '': if item != '':
@ -462,87 +549,175 @@ def getDBfromFile(kInfoFile):
if keyname == "unknown": if keyname == "unknown":
keyname = keyhash keyname = keyhash
encryptedValue = decode(rawdata,charMap2) encryptedValue = decode(rawdata,charMap2)
cleartext = CryptUnprotectData(encryptedValue) cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
if cnt == 0: if cnt == 0:
DB = None DB = None
return DB return DB
# else newer style .kinf file used by K4Mac >= 1.6.0 if hdr == '/':
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split # else newer style .kinf file used by K4Mac >= 1.6.0
# the .kinf file uses "/" to separate it into records
# so remove the trailing "/" to make it easy to use split
data = data[:-1]
items = data.split('/')
cud = CryptUnprotectDataV2()
# loop through the item records until all are processed
while len(items) > 0:
# get the first item record
item = items.pop(0)
# the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32]
keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual
# CryptProtectData Blob that represents that keys contents
# "entropy" not used for K4Mac only K4PC
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation
# of the number of records that follow
# and make up the contents
srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt)
# read and store in rcnt records of data
# that make up the contents value
edlst = []
for i in xrange(rcnt):
item = items.pop(0)
edlst.append(item)
keyname = "unknown"
for name in names:
if encodeHash(name,charMap5) == keyhash:
keyname = name
break
if keyname == "unknown":
keyname = keyhash
# the charMap5 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from
# working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5
encdata = "".join(edlst)
contlen = len(encdata)
# now properly split and recombine
# by moving noffset chars from the start of the
# string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset]
encdata = encdata[noffset:]
encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5)
cleartext = cud.decrypt(encryptedValue)
DB[keyname] = cleartext
cnt = cnt + 1
if cnt == 0:
DB = None
return DB
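Both decoding paths, charMap5 for the 1.6.X kinf above and testMap8 for the kinf2011 handling below, realign the record data the same way before decoding: the first noffset characters are moved to the end, where noffset is the total length minus the largest prime not exceeding a third of that length. A worked example of the step, reusing the primes() helper defined earlier in this file (the 100-character length is illustrative):

    encdata = "".join(edlst)                           # say this yields 100 encoded characters
    contlen = len(encdata)                             # 100
    noffset = contlen - primes(int(contlen/3))[-1]     # int(100/3) = 33, largest prime <= 33 is 31, so noffset = 69
    encdata = encdata[noffset:] + encdata[0:noffset]   # rotate the 69-character prefix to the end

Only after this rotation does the data decode cleanly with the appropriate character map and get handed to the matching CryptUnprotectData variant.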
# the latest .kinf2011 version for K4M 1.9.1
# put back the hdr char, it is needed
data = hdr + data
data = data[:-1] data = data[:-1]
items = data.split('/') items = data.split('/')
# the headerblob is the encrypted information needed to build the entropy string
headerblob = items.pop(0)
encryptedValue = decode(headerblob, charMap1)
cleartext = UnprotectHeaderData(encryptedValue)
# now extract the pieces in the same way
# this version is different from K4PC it scales the build number by multipying by 735
pattern = re.compile(r'''\[Version:(\d+)\]\[Build:(\d+)\]\[Cksum:([^\]]+)\]\[Guid:([\{\}a-z0-9\-]+)\]''', re.IGNORECASE)
for m in re.finditer(pattern, cleartext):
entropy = str(int(m.group(2)) * 0x2df) + m.group(4)
cud = CryptUnprotectDataV3(entropy)
# loop through the item records until all are processed # loop through the item records until all are processed
while len(items) > 0: while len(items) > 0:
# get the first item record # get the first item record
item = items.pop(0) item = items.pop(0)
# the first 32 chars of the first record of a group # the first 32 chars of the first record of a group
# is the MD5 hash of the key name encoded by charMap5 # is the MD5 hash of the key name encoded by charMap5
keyhash = item[0:32] keyhash = item[0:32]
keyname = "unknown" keyname = "unknown"
# the raw keyhash string is also used to create entropy for the actual # unlike K4PC the keyhash is not used in generating entropy
# CryptProtectData Blob that represents that keys contents # entropy = SHA1(keyhash) + added_entropy
# "entropy" not used for K4Mac only K4PC # entropy = added_entropy
# entropy = SHA1(keyhash)
# the remainder of the first record when decoded with charMap5
# the remainder of the first record when decoded with charMap5
# has the ':' split char followed by the string representation # has the ':' split char followed by the string representation
# of the number of records that follow # of the number of records that follow
# and make up the contents # and make up the contents
srcnt = decode(item[34:],charMap5) srcnt = decode(item[34:],charMap5)
rcnt = int(srcnt) rcnt = int(srcnt)
# read and store in rcnt records of data # read and store in rcnt records of data
# that make up the contents value # that make up the contents value
edlst = [] edlst = []
for i in xrange(rcnt): for i in xrange(rcnt):
item = items.pop(0) item = items.pop(0)
edlst.append(item) edlst.append(item)
keyname = "unknown" keyname = "unknown"
for name in names: for name in names:
if encodeHash(name,charMap5) == keyhash: if encodeHash(name,testMap8) == keyhash:
keyname = name keyname = name
break break
if keyname == "unknown": if keyname == "unknown":
keyname = keyhash keyname = keyhash
# the charMap5 encoded contents data has had a length # the testMap8 encoded contents data has had a length
# of chars (always odd) cut off of the front and moved # of chars (always odd) cut off of the front and moved
# to the end to prevent decoding using charMap5 from # to the end to prevent decoding using testMap8 from
# working properly, and thereby preventing the ensuing # working properly, and thereby preventing the ensuing
# CryptUnprotectData call from succeeding. # CryptUnprotectData call from succeeding.
# The offset into the charMap5 encoded contents seems to be: # The offset into the testMap8 encoded contents seems to be:
# len(contents) - largest prime number less than or equal to int(len(content)/3) # len(contents) - largest prime number less than or equal to int(len(content)/3)
# (in other words split "about" 2/3rds of the way through) # (in other words split "about" 2/3rds of the way through)
# move first offsets chars to end to align for decode by charMap5 # move first offsets chars to end to align for decode by testMap8
encdata = "".join(edlst) encdata = "".join(edlst)
contlen = len(encdata) contlen = len(encdata)
# now properly split and recombine # now properly split and recombine
# by moving noffset chars from the start of the # by moving noffset chars from the start of the
# string to the end of the string # string to the end of the string
noffset = contlen - primes(int(contlen/3))[-1] noffset = contlen - primes(int(contlen/3))[-1]
pfx = encdata[0:noffset] pfx = encdata[0:noffset]
encdata = encdata[noffset:] encdata = encdata[noffset:]
encdata = encdata + pfx encdata = encdata + pfx
# decode using charMap5 to get the CryptProtect Data # decode using testMap8 to get the CryptProtect Data
encryptedValue = decode(encdata,charMap5) encryptedValue = decode(encdata,testMap8)
cleartext = CryptUnprotectDataV2(encryptedValue) cleartext = cud.decrypt(encryptedValue)
# Debugging
# print keyname # print keyname
# print cleartext # print cleartext
# print cleartext.encode('hex')
# print
DB[keyname] = cleartext DB[keyname] = cleartext
cnt = cnt + 1 cnt = cnt + 1
