code
stringlengths 501
5.19M
| package
stringlengths 2
81
| path
stringlengths 9
304
| filename
stringlengths 4
145
|
---|---|---|---|
from dataclasses import dataclass
from click import password_option
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.fernet import Fernet
from threading import Thread
import datetime
import cryptography
import asyncio
import json
import base64
import time
import sys
import ssl
import os
if sys.platform == "win32":
    directory_cutter = "\\"
else:
    # BUGFIX: previously only "linux" was handled, leaving `directory_cutter`
    # undefined (NameError on first use) on macOS/BSD and any other platform.
    # Default to the POSIX-style separator.
    directory_cutter = "/"
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ModuleNotFoundError:
    # uvloop not installed
    # uvloop is not required for the script to run but it won't be as fast
    pass
def encrypt(plainText, password):
    """Encrypt *plainText* with a key derived from *password*.

    Derives a 32-byte Fernet key via PBKDF2-HMAC-SHA256 (100k iterations)
    over a fresh random 16-byte salt.

    Returns a ``(ciphertext, salt)`` tuple; the salt is needed by
    ``decrypt`` to re-derive the same key.
    """
    if isinstance(password, str):
        password = password.encode()
    if isinstance(plainText, str):
        # Generalization: Fernet.encrypt only accepts bytes; allow str input
        # for convenience instead of raising TypeError.
        plainText = plainText.encode()
    salt = os.urandom(16)
    kdf = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend(),
    )
    key = base64.urlsafe_b64encode(kdf.derive(password))
    f = Fernet(key)
    cText = f.encrypt(plainText)
    return cText, salt
def encryptFile(filePath, password):
    """Encrypt the file at *filePath* in place.

    The rewritten file layout is ``b"ENCRYPTED_FILE" + salt + ciphertext``.
    Returns True on success, False when the path is missing, is a
    directory, or any I/O/crypto error occurs.
    """
    if isinstance(password, str):
        password = password.encode()
    # os.path.isfile is False both for missing paths and for directories,
    # mirroring the two separate guard checks of the original flow.
    if not (os.path.exists(filePath) and os.path.isfile(filePath)):
        return False
    try:
        with open(filePath, "rb") as inFile:
            plainData = inFile.read()
        cipherData, salt = encrypt(plainData, password)
        with open(filePath, "wb") as outFile:
            outFile.write(b"ENCRYPTED_FILE" + salt + cipherData)
        return True
    except Exception as err:
        print("Error encrypting file: {}".format(err))
        return False
def decrypt(cText, salt, password):
    """Reverse of ``encrypt``: decrypt *cText* using *password* and *salt*.

    Returns the plaintext bytes, or False when the password is wrong /
    token invalid or any other error occurs.
    """
    if isinstance(password, str):
        password = password.encode()
    keyMaterial = PBKDF2HMAC(
        algorithm=hashes.SHA256(),
        length=32,
        salt=salt,
        iterations=100000,
        backend=default_backend(),
    ).derive(password)
    fernet = Fernet(base64.urlsafe_b64encode(keyMaterial))
    try:
        return fernet.decrypt(cText)
    except cryptography.fernet.InvalidToken:
        # Wrong password or corrupted token -- signalled to callers as False.
        return False
    except Exception as err:
        print("Error decrypting data: {}".format(err))
        return False
def decryptFile(filePath, password):
    """Decrypt a file produced by ``encryptFile``, in place.

    Returns True when the file ends up decrypted (including when it was
    never encrypted in the first place), False on any failure.
    """
    if isinstance(password, str):
        password = password.encode()
    if not os.path.exists(filePath):
        print("ERROR: Path does not exist")
        return False
    if not os.path.isfile(filePath):
        print("ERROR: Path is a directory")
        return False
    try:
        with open(filePath, "rb") as f:
            data = f.read()
        marker = b"ENCRYPTED_FILE"
        if not data.startswith(marker):
            # Not an encrypted file -- nothing to do.
            return True
        # BUGFIX: the original used bytes.lstrip(), which strips a character
        # *set* (not a prefix) and could silently eat leading bytes of the
        # salt/ciphertext. Slice the marker and 16-byte salt off positionally.
        data = data[len(marker):]
        if len(data) <= 16:
            print("ERROR: File data corrupt or not encrypted")
            return False
        salt = data[:16]
        cipherData = data[16:]
        plainData = decrypt(cipherData, salt, password)
        if plainData is False:
            # BUGFIX: decrypt() returns False on a bad password; the original
            # would have tried to write the literal False into the file.
            print("Error decrypting file: invalid password or corrupt data")
            return False
        with open(filePath, "wb") as f:
            f.write(plainData)
        return True
    except Exception as e:
        print("Error decrypting file: {}".format(e))
        return False
def make_bytes(var):
    """Coerce *var* to bytes.

    Bytes pass straight through; str is UTF-8 encoded; anything else is
    stringified first and then encoded.
    """
    if isinstance(var, bytes):
        return var
    if not isinstance(var, str):
        var = str(var)
    return var.encode()
def convVarType(var, t):
    """Convert *var* according to a one-letter type code *t*.

    Codes "s"/"i"/"f" map to str/int/float. Code "b" (case-insensitive)
    parses the strings "true"/"false" and otherwise falls back to bool().
    Unknown codes return *var* unchanged.
    """
    if t.lower() == "b":
        lowered = var.lower()
        if lowered == "true":
            return True
        if lowered == "false":
            return False
        return bool(var)
    converter = {"s": str, "i": int, "f": float}.get(t)
    if converter is not None:
        return converter(var)
    return var
def prepData(data, metaData=None):
    """Serialize *data* (and optional *metaData*) for transmission.

    Wire format (spaces for clarity only):
        ``DATA:| <json-length zero-padded to 18 digits> <json-data>
        [META:| <json-metadata>]``
    Returns the framed bytes.
    """
    # json.dumps always returns str, so a plain .encode() suffices here
    # (the original routed it through make_bytes, which did the same).
    payload = json.dumps(data).encode()
    # 18 zero-padded digits comfortably covers any realistic payload size.
    pData = b"DATA:|" + str(len(payload)).encode().zfill(18) + payload
    if metaData:
        pData += b"META:|" + json.dumps(metaData).encode()
    return pData
def dissectData(data):
    """Parse bytes framed by ``prepData``.

    Returns ``(payload, metaData, isRaw)``: for framed input, the decoded
    JSON payload, the decoded metadata (or None) and False; for anything
    else, the original bytes, None and True.
    """
    if not data.startswith(b"DATA:|"):
        # Raw (unframed) control message -- hand it back untouched.
        return data, None, True
    # BUGFIX: slicing instead of bytes.lstrip() -- lstrip strips a character
    # *set*, not a prefix, and could swallow leading payload bytes.
    body = data[len(b"DATA:|"):]
    dataLen = int(body[:18])  # 18-digit zero-padded payload length
    body = body[18:]
    rawData = json.loads(body[:dataLen])
    metaData = None
    metaStr = body[dataLen:]
    # BUGFIX: the original compared bytes against "" (bytes never equal str,
    # so the check was always True); truthiness/startswith is what's meant.
    if metaStr.startswith(b"META:|"):
        metaData = json.loads(metaStr[len(b"META:|"):])
    return rawData, metaData, False
@dataclass
class Login_Attempt():
    """Record of a single login attempt: when, which username, from where."""
    timestamp: float
    username: str
    address: str

    def _key(self):
        # Identity tuple shared by the equality comparisons below.
        return (self.timestamp, self.username, self.address)

    def to_datetime(self):
        """The attempt time as a datetime object."""
        return datetime.datetime.fromtimestamp(self.timestamp)

    def to_string(self):
        """Render as "<datetime> - <username> - <address>"."""
        return "{} - {} - {}".format(self.to_datetime(), self.username, self.address)

    def to_json(self):
        """JSON-serializable dict form (inverse of from_json)."""
        return {
            "timestamp": self.timestamp,
            "username": self.username,
            "address": self.address
        }

    def from_json(self, data):
        """Populate this instance from a to_json() dict; returns self."""
        self.timestamp = data["timestamp"]
        self.username = data["username"]
        self.address = data["address"]
        return self

    def __eq__(self, other):
        return self._key() == other._key()

    def __ne__(self, other):
        return self._key() != other._key()

    def __hash__(self):
        return hash(self.timestamp) ^ hash(self.username) ^ hash(self.address)

    # Ordering compares by timestamp only.
    def __lt__(self, other):
        return self.timestamp < other.timestamp

    def __le__(self, other):
        return self.timestamp <= other.timestamp

    def __gt__(self, other):
        return self.timestamp > other.timestamp

    def __ge__(self, other):
        return self.timestamp >= other.timestamp

    def __str__(self):
        return self.to_string()
class User:
    """A server-side user account with optional password-based encryption.

    The password itself is stored on disk encrypted *with itself* (see
    addPassword/verify); the plaintext is only cached in memory while at
    least one connection is logged in.
    """

    def __init__(self, username):
        self.username = username
        self._cipher_pass = None  # The encrypted password as [ciphertext, salt]
        self.password = None  # Stays None until the user is verified/logged in
        self.hasPassword = False
        self.connections = []  # Connection objects currently logged in as this user
        self.loginHistory = []
        # Structure: [ [<time.time()>, <IP_Address>], [<time.time()>, <IP_Address>] ]
        #              Login 1                         Login 2
        self.loginAttempts = []  # Failed Login_Attempt records

    def encryptData(self, data):
        """Encrypt *data* with the user's password; pass-through when unset."""
        if self.hasPassword and self.password:
            cData, salt = encrypt(data, self.password.encode())
            return salt + cData
        return data

    def decryptData(self, data):
        """Decrypt salt-prefixed *data* with the user's password."""
        if self.hasPassword and self.password:
            salt = data[:16]
            cData = data[16:]
            return decrypt(cData, salt, self.password.encode())
        return data

    def reset(self):
        """Forget the cached plaintext password and drop all connections."""
        self.password = None
        self.connections = []

    def save(self, userDir):
        """Write this user to <userDir>/<username>.json plus a "-secret" blob.

        Returns the path of the JSON file written.
        """
        user_info = {
            "username": self.username,
            "hasPassword": self.hasPassword,
            "loginHistory": self.loginHistory,
            "loginAttempts": [attempt.to_json() for attempt in self.loginAttempts],
        }
        savePath = os.path.join(userDir, self.username + ".json")
        with open(savePath, "w") as f:
            json.dump(user_info, f)
        if self._cipher_pass and self.hasPassword:
            # Secret blob layout: 16-byte salt followed by the ciphertext.
            secretPath = os.path.join(userDir, self.username + "-secret")
            with open(secretPath, "wb") as f:
                f.write(self._cipher_pass[1] + self._cipher_pass[0])
        return savePath

    def load(self, filePath):
        """Load user data written by save(); returns self (None if missing)."""
        filePath = filePath.rstrip(directory_cutter)
        if not os.path.exists(filePath):
            return
        fileName = os.path.basename(filePath)
        # BUGFIX: the original derived the directory via str.rstrip(fileName),
        # which strips a character *set* from the right and could mangle the
        # path; os.path.dirname is the correct operation.
        userDir = os.path.dirname(filePath)
        if fileName.endswith(".json"):
            secretPath = os.path.join(userDir, fileName.replace(".json", "-secret"))
            with open(filePath, "r") as f:
                user_info = json.load(f)
            secret_info = None
            if os.path.exists(secretPath):
                with open(secretPath, "rb") as f:
                    secret_info = f.read()
            if secret_info and user_info["hasPassword"]:
                if len(secret_info) > 16:
                    # Blob is salt (16 bytes) + ciphertext; kept as [ciphertext, salt].
                    self._cipher_pass = [secret_info[16:], secret_info[:16]]
            self.username = user_info["username"]
            self.hasPassword = user_info["hasPassword"]
            self.loginHistory = user_info["loginHistory"]
            self.loginAttempts = [Login_Attempt(**attempt) for attempt in user_info["loginAttempts"]]
        return self

    def verify(self, password):
        """Return True when *password* matches this account.

        Accounts without a password accept anything.
        """
        if isinstance(password, str):
            password = password.encode()
        if not self.hasPassword:
            return True
        if self._cipher_pass and password:
            # The stored secret is the password encrypted with itself, so the
            # correct password decrypts back to itself.
            return password == decrypt(
                self._cipher_pass[0], self._cipher_pass[1], password
            )
        # BUGFIX: the original fell through here and implicitly returned None.
        return False

    def login(self, username, password, connection):
        """Attempt a login; on success register *connection* and return True."""
        if username == self.username and self.verify(password):
            if not connection in self.connections:
                self.connections.append(connection)
            self.password = password
            self.loginHistory.append([time.time(), connection.addr])
            connection.verifiedUser = True
            connection.currentUser = self
            return True
        # Record the failure for auditing / rate limiting.
        self.loginAttempts.append(Login_Attempt(time.time(), username, connection.addr))
        return False

    def logout(self, client):
        """Detach *client*; clear the cached password once nobody is logged in."""
        client.verifiedUser = False
        client.currentUser = None
        if client in self.connections:
            self.connections.remove(client)
        if len(self.connections) == 0:
            self.password = None

    def addPassword(self, password):
        """Set the account password (no-op if one is already set)."""
        if isinstance(password, str):
            password = password.encode()
        if not self.hasPassword:
            # Store the password encrypted with itself; see verify().
            cText, salt = encrypt(password, password)
            self._cipher_pass = [cText, salt]
            self.password = password
            self.hasPassword = True
class Connection:
    # One accepted client on the Host side: wraps the asyncio stream pair and
    # tracks login / encryption state for that peer.
    # Message delimiter appended to every outgoing payload.
    sepChar = b"\n\t_SEPARATOR_\t\n"

    def __init__(self, addr, port, reader, writer, server):
        """Wrap one accepted client socket (asyncio reader/writer pair)."""
        self.connectionTime = time.time()  # when the client connected
        self.addr = addr
        self.port = port
        self.reader = reader
        self.writer = writer
        self.server = server  # owning Host instance
        self.verifiedUser = False  # True once a login succeeds
        self.currentUser = None  # User object logged in on this connection
        self._next_message_length = 0  # size advertised by the last msgLen notice
        self.downloading = False
        self._usr_enc = False  # whether payloads are encrypted with the user's password

    def getDownloadProgress(self):
        """Return (buffered bytes, expected bytes), or 0 if the socket is closed."""
        if not self.writer.is_closing():
            if self.reader:
                # NOTE(review): relies on the private StreamReader._buffer
                # attribute -- not a public asyncio API.
                return len(self.reader._buffer), self._next_message_length
                # <current buffer length>, <target buffer length>
        return 0

    async def _send_data(self, data, metaData=None, enc=True):
        """Frame *data* with prepData and send it, preceded by a msgLen notice."""
        data = prepData(data, metaData=metaData)
        if self.verifiedUser and enc and self._usr_enc:
            data = self.currentUser.encryptData(data)
        data = data + self.sepChar
        await self.send_raw("msgLen={}".format(str(len(data))))
        self.writer.write(data)
        await self.writer.drain()

    def sendData(self, data, metaData=None, enc=True):
        """Thread-safe wrapper: schedule _send_data on the server's loop."""
        if self.server.loop:
            try:
                asyncio.run_coroutine_threadsafe(
                    self._send_data(data, metaData=metaData, enc=enc), self.server.loop
                )
            except Exception as e:
                print("Error sending data")
                raise e

    async def send_raw(self, data, enc=True):
        """Send an unframed (control) message terminated by sepChar."""
        try:
            data = make_bytes(data)
            if self.verifiedUser and enc and self._usr_enc:
                data = self.currentUser.encryptData(data)
            data = data + self.sepChar
            self.writer.write(data)
            await self.writer.drain()
        except ConnectionResetError:
            # Peer vanished mid-send; nothing sensible to do here.
            pass

    def sendRaw(self, data, enc=True):
        """Thread-safe wrapper: schedule send_raw on the server's loop."""
        if self.server.loop:
            try:
                asyncio.run_coroutine_threadsafe(
                    self.send_raw(data, enc=enc), self.server.loop
                )
            except Exception as e:
                print("Error sending data")
                raise e

    def disconnect(self, reason=None):
        """Notify the peer, close the stream and log the user out."""
        if self.writer.is_closing():
            return
        if self.server:
            if self.server.loop:
                self.server.log(
                    "Disconnecting {} - {}...".format(self.addr, reason), "red"
                )
                # Best-effort courtesy notice; sendRaw itself re-checks the loop.
                self.sendRaw("disconnect")
        try:
            self.writer.close()
        except Exception as e:
            print("Error closing stream")
            raise e
        self.logout()

    def blacklist(self, bTime=600):
        """Schedule an IP blacklist of this client's address on the server loop."""
        if self.server.loop:
            asyncio.run_coroutine_threadsafe(
                self.server.blacklistIP(self.addr, bTime=bTime), self.server.loop
            )

    def logout(self):
        """Log the attached user out of this connection, if any."""
        if self.currentUser:
            self.currentUser.logout(self)
class Host:
    """Asyncio TCP server with optional SSL, user logins and IP blacklisting."""

    # Message delimiter shared with Connection/Client.
    sepChar = b"\n\t_SEPARATOR_\t\n"

    def __init__(
        self,
        addr,
        port,
        verbose=False,
        logging=False,
        logFile=None,
        loginRequired=False,
        multithreading=True,
        useTermColors=True,
    ):
        self.running = False
        self.addr = addr
        self.port = port
        self.clients = []
        self.verbose = verbose
        self.loginRequired = loginRequired
        self.loginTimeout = 12.0  # The amount of time to wait for a login before disconnecting the client (if logins are required)
        self.loginDelay = 0.6  # Artificial delay per login attempt (throttles brute force)
        self.multithreading = multithreading
        self.loop = None
        self.defaultBlacklistTime = 600
        self.download_indication_size = 1024 * 10
        self.buffer_update_interval = 0.01
        self.default_buffer_limit = 644245094400
        self._enable_buffer_monitor = True
        self.loginAttempts = []
        self.blacklistThreshold = 1800  # (In seconds)
        # If too many login attempts are made within this threshold, the address will be blacklisted
        # 1800 = 30 minutes
        self.blacklistLimit = 5
        self.blacklist = {}
        # Structure: { <IP_address>: <time.time() + duration> }
        self._lock = asyncio.Lock()
        self.userPath = "users"
        self.users = {}
        # Structure: {"username": <User Class>}
        self.logging = logging
        self.logFile = logFile
        self._save_vars = ["blacklist", "loginAttempts"]
        # A list containing all server variables that will be saved when `self.save` is executed
        self.termColors = {
            "end": "\033[0m",
            "bold": "\033[1m",
            "italic": "\033[3m",
            "underline": "\033[4m",
            "blinking": "\033[5m",
            "highlight": "\033[7m",
            "red": "\033[31m",
            "green": "\033[32m",
            "yellow": "\033[33m",
            "blue": "\033[34m",
            "white": "\033[37m",
            "grey_bg": "\033[40m",
            "red_bg": "\033[41m",
        }
        self.useTermColors = useTermColors

    def _pack_server_info(self):
        """Collect the _save_vars attributes into a JSON-serializable dict."""
        server_info = {}
        for sVar in self._save_vars:
            if sVar in self.__dict__:
                if sVar == "loginAttempts":
                    server_info[sVar] = [attempt.to_json() for attempt in self.loginAttempts]
                else:
                    server_info[sVar] = self.__dict__[sVar]
        return server_info

    def _start_loop(self, loop, task, finishFunc):
        """Thread entry point: run *task* to completion on *loop*."""
        asyncio.set_event_loop(loop)
        loop.run_until_complete(asyncio.ensure_future(task()))
        if finishFunc:
            asyncio.run(finishFunc())

    def newLoop(self, task, finishFunc=None):
        """Run coroutine factory *task* on a fresh event loop in a new thread."""
        new_loop = asyncio.new_event_loop()
        t = Thread(target=self._start_loop, args=(new_loop, task, finishFunc))
        t.start()

    def get_login_attempts(self, username=None, address=None, start_date=None, end_date=None):
        """Return login attempts filtered by username, address and date range.

        *start_date* / *end_date* may be datetime objects or float timestamps.
        """
        attempts = self.loginAttempts[:]
        # Change start_date and end_date to float timestamps if possible
        if isinstance(start_date, datetime.datetime):
            start_date = start_date.timestamp()
        if isinstance(end_date, datetime.datetime):
            end_date = end_date.timestamp()
        if username is not None:
            # Filter out all attempts that do not match the username
            attempts = [a for a in attempts if a.username == username]
        if address is not None:
            # Filter out all attempts that do not match the address
            attempts = [a for a in attempts if a.address == address]
        if start_date is not None:
            # Filter out all login attempts before the start date
            attempts = [a for a in attempts if a.timestamp >= start_date]
        if end_date is not None:
            # Filter out all login attempts after the end date
            attempts = [a for a in attempts if a.timestamp <= end_date]
        # BUGFIX: the original returned the unfiltered self.loginAttempts,
        # discarding all the filtering above and breaking the per-address
        # blacklist counting in _got_login_info.
        return attempts

    def save(self, location, password=None):
        """Save _save_vars to a JSON file, optionally encrypting it."""
        location = location.rstrip(directory_cutter)
        # BUGFIX: derive the parent directory with os.path.dirname instead of
        # str.rstrip(base_name), which strips a character set, not a suffix.
        location_directory = os.path.dirname(location)
        # Empty dirname means a bare filename relative to the CWD.
        if os.path.exists(location_directory) or location_directory == "":
            if not (location.endswith(".json")):
                location += ".json"
            with open(location, "w") as f:
                json.dump(self._pack_server_info(), f)
            if password:
                if isinstance(password, str):
                    password = password.encode()
                encryptFile(location, password)

    def load(self, location, password=None):
        """Load _save_vars from a file produced by save()."""
        if os.path.exists(location):
            if os.path.isfile(location):
                if password:
                    if isinstance(password, str):
                        password = password.encode()
                    if isinstance(password, bytes):
                        decryptFile(location, password)
                with open(location, "rb") as f:
                    server_info = json.load(f)
                if password:
                    # Re-encrypt so the file on disk stays protected.
                    if isinstance(password, bytes):
                        encryptFile(location, password)
                for sVar in self._save_vars:
                    if sVar in self.__dict__ and sVar in server_info:
                        if sVar == "loginAttempts":
                            self.loginAttempts = [Login_Attempt(**attempt) for attempt in server_info[sVar]]
                        else:
                            self.__dict__[sVar] = server_info[sVar]

    def loadUsers(self, customPath=None):
        """Load every <user>.json from the user directory into self.users."""
        if customPath is not None:
            self.userPath = customPath
        self.log("Loading users...")
        for i in os.listdir(self.userPath):
            iPath = os.path.join(self.userPath, i)
            if os.path.isfile(iPath) and iPath.endswith(".json"):
                user = User("").load(iPath)
                self.users[user.username] = user
        self.log("Users loaded")

    def saveUsers(self, customPath=None):
        """Persist every known user to the user directory."""
        if customPath is not None:
            self.userPath = customPath
        self.log("Saving users...")
        for username in self.users:
            self.users[username].save(self.userPath)
        self.log("Users saved")

    def addUser(self, username, password=None):
        """Register a new user; returns False if the name is taken or invalid."""
        if "." in username:
            # Dots would collide with the "<name>.json" save-file naming.
            return False
        user = User(username)
        if password:
            if isinstance(password, bytes):
                password = password.decode()
            if not isinstance(password, str):
                password = str(password)
            user.addPassword(password)
        if not username in self.users:
            self.users[username] = user
            return True
        return False

    async def _add_to_log(self, text, modifier, blinking):
        """Append to the log file and/or stdout (serialized by self._lock)."""
        await self._lock.acquire()
        if modifier:
            modifier = modifier.lower()
            if not modifier in self.termColors:
                modifier = None
        if not modifier:
            modifier = "end"
        if isinstance(text, bytes):
            text = text.decode()
        logTime = time.time()
        logText = "[{}]\t{}\n".format(logTime, text)
        if self.logging:
            if not os.path.exists(self.logFile):
                with open(self.logFile, "wb") as f:
                    pass  # Create the path
            with open(self.logFile, "ab") as f:
                f.write(logText.encode())
        if self.verbose:
            if self.useTermColors:
                textMod = self.termColors[modifier]
                if blinking:
                    textMod += self.termColors["blinking"]
                stdoutText = (
                    self.termColors["bold"]
                    + "[{}]".format(logTime)
                    + self.termColors["end"]
                    + "\t"
                    + textMod
                    + str(text)
                    + self.termColors["end"]
                    + "\n"
                )
            else:
                stdoutText = logText
            sys.stdout.write(stdoutText)
            sys.stdout.flush()
        self._lock.release()

    def log(self, text, modifier=None, blinking=False):
        """Thread-safe logging entry point; requires a running event loop."""
        if not self.loop:
            print("Loop not running - unable to log text")
            # BUGFIX: bail out; the original fell through and crashed trying
            # to schedule a coroutine onto a None loop.
            return
        asyncio.run_coroutine_threadsafe(
            self._add_to_log(text, modifier, blinking), self.loop
        )

    # --- Overridable event hooks ------------------------------------------

    def gotData(self, client, data, metaData):
        """Called when framed data arrives from *client*."""
        pass

    def lostClient(self, client):
        """Called after a client disconnects."""
        pass

    def newClient(self, client):
        """Called when a new client connects."""
        pass

    def blacklisted(self, addr):
        """Called when *addr* gets blacklisted."""
        pass

    def loggedIn(self, client, user):
        """Called after a successful login."""
        pass

    def downloadStarted(self, client):
        """Called when a large inbound transfer begins."""
        pass

    def downloadStopped(self, client):
        """Called when a large inbound transfer finishes."""
        pass

    def serverStarted(self, server):
        """Called once the listening socket is up."""
        pass

    async def blacklistIP(self, addr, bTime=None):
        """Blacklist *addr* for *bTime* seconds and drop its connections."""
        if not bTime:
            bTime = self.defaultBlacklistTime
        self.blacklist[addr] = time.time() + bTime
        self.log(
            "Blacklisted {} for {} seconds".format(addr, bTime), "red_bg", blinking=True
        )
        for client in self.clients:
            if client.addr == addr:
                client.disconnect("Blacklisted")
        if self.multithreading:
            Thread(target=self.blacklisted, args=[addr]).start()
        else:
            self.blacklisted(addr)

    def _buffer_monitor(self, client, reader):
        """Poll the reader buffer to fire download start/stop hooks (thread).

        NOTE(review): relies on the private StreamReader._buffer attribute.
        """
        client.downloading = False
        while self.running and not client.writer.is_closing():
            if (
                len(reader._buffer) >= self.download_indication_size
                and not client.downloading
            ):
                client.downloading = True
                Thread(target=self.downloadStarted, args=[client]).start()
            if not reader._buffer and client.downloading:
                client.downloading = False
                Thread(target=self.downloadStopped, args=[client]).start()
            time.sleep(self.buffer_update_interval)

    async def getData(self, client, reader):
        """Read one sepChar-terminated message from *client* (b"" on error)."""
        data = b""
        try:
            data = await reader.readuntil(self.sepChar)
        except asyncio.LimitOverrunError as e:
            self.log(
                "ERROR: Buffer limit too small for incoming data ("
                " asyncio.LimitOverrunError ) - {}:{}".format(client.addr, client.port),
                "red_bg",
            )
        except asyncio.exceptions.IncompleteReadError:
            self.log(
                "asyncio.exceptions.IncompleteReadError - {}:{}".format(
                    client.addr, client.port
                ),
                "red",
            )
        except Exception as e:
            self.log("{} - {}:{}".format(e, client.addr, client.port))
            raise e
        # BUGFIX: slice the separator off instead of rstrip(), which strips a
        # character *set* and could eat trailing payload bytes.
        if data.endswith(self.sepChar):
            data = data[: -len(self.sepChar)]
        return data

    async def _got_login_info(self, client, username, password):
        """Verify a LOGIN message; blacklists the address after repeat failures."""
        self.log("Login acquired - verifying {}...".format(client.addr), "yellow")
        user = self.users[username]
        # Fixed delay throttles brute-force attempts.
        await asyncio.sleep(self.loginDelay)
        if user.login(username, password, client):
            self.log("{} logged in".format(username), "green")
            client.sendRaw(b"login accepted", enc=False)
            if self.multithreading:
                Thread(target=self.loggedIn, args=[client, user]).start()
            else:
                self.loggedIn(client, user)
            return True
        self.log(
            "Failed login attempt - {} - {}:{}".format(
                username, client.addr, client.port
            ),
            "red_bg",
        )
        new_login_attempt = Login_Attempt(time.time(), username, client.addr)
        self.loginAttempts.append(new_login_attempt)
        # Count recent failures from this address only (works now that
        # get_login_attempts actually returns the filtered list).
        number_of_attempts = len(
            self.get_login_attempts(
                address=client.addr,
                start_date=time.time() - self.blacklistThreshold,
            )
        )
        if number_of_attempts > self.blacklistLimit:
            await self.blacklistIP(client.addr)
        return False

    async def _got_msg_length(self, client, data):
        """Handle a "msgLen=<n>" notice by widening the reader limit."""
        # BUGFIX: isdigit() instead of isalnum() -- isalnum() lets letters
        # through and int() below would raise ValueError.
        if not data[7:].isdigit():
            return
        client._next_message_length = int(data[7:])
        if client._next_message_length < self.default_buffer_limit:
            client.reader._limit = client._next_message_length

    async def _got_encData_info(self, client, data):
        """Toggle per-user payload encryption from an "encData:<bool>" notice."""
        self.log(
            "{} set encryption to {}".format(
                client.currentUser.username, data.split(":")[1]
            )
        )
        if data.split(":")[1] == "True":
            client._usr_enc = True
        elif data.split(":")[1] == "False":
            client._usr_enc = False

    async def gotRawData(self, client, data):
        """Dispatch unframed control messages (msgLen, LOGIN, encData, logout)."""
        if isinstance(data, bytes):
            data = data.decode()
        if data.startswith("msgLen=") and len(data) > 7:
            await self._got_msg_length(client, data)
        elif data.startswith("LOGIN:") and "|" in data:
            if len(data.split("|")) == 2:
                data = data[6:]
                username, password = data.split("|")
                if username in self.users:
                    success = await self._got_login_info(client, username, password)
                    if not success:
                        client.disconnect("Failed login")
                else:
                    self.log(
                        "Login Failed - Username '{}' not recognized".format(username),
                        "red",
                    )
                    client.sendRaw(b"login failed")
        elif data.startswith("encData:"):
            await self._got_encData_info(client, data)
        elif data == "logout":
            if client.verifiedUser and client.currentUser:
                # BUGFIX: capture the name first -- logout() clears
                # client.currentUser, so reading it afterwards raised
                # AttributeError in the original.
                username = client.currentUser.username
                client.currentUser.logout(client)
                self.log(
                    "User logged out - {} - {}:{}".format(
                        username, client.addr, client.port
                    )
                )

    async def _process_data(self, client, data):
        """Decrypt, unframe and dispatch one received message."""
        if client.verifiedUser and client._usr_enc:
            data = client.currentUser.decryptData(data)
        if data:
            data, metaData, isRaw = dissectData(data)
            if isRaw:
                await self.gotRawData(client, data)
            elif (self.loginRequired and client.verifiedUser) or not self.loginRequired:
                if self.multithreading:
                    Thread(target=self.gotData, args=[client, data, metaData]).start()
                else:
                    self.gotData(client, data, metaData)

    async def _setup_new_client(self, reader, writer):
        """Register a new connection; immediately drops blacklisted addresses."""
        addr, port = writer.get_extra_info("peername")
        client = Connection(addr, port, reader, writer, self)
        self.clients.append(client)
        if self._enable_buffer_monitor:
            Thread(target=self._buffer_monitor, args=[client, reader]).start()
        self.log("New Connection: {}:{}".format(client.addr, client.port), "green")
        if client.addr in self.blacklist:
            if self.blacklist[client.addr] < time.time():
                # Entry expired -- forgive the address.
                self.blacklist.pop(client.addr)
            else:
                client.disconnect("Blacklisted")
        return client

    async def _handle_client(self, reader, writer):
        """Per-connection main loop: read and process messages until EOF."""
        client = await self._setup_new_client(reader, writer)
        if not client.writer.is_closing():
            if self.loginRequired and not client.verifiedUser:
                client.sendRaw(b"login required")
            if self.multithreading:
                Thread(target=self.newClient, args=[client]).start()
            else:
                self.newClient(client)
        while self.running and not writer.is_closing():
            if self.loginRequired and not client.verifiedUser:
                if (
                    time.time() - client.connectionTime >= self.loginTimeout
                    and not client.verifiedUser
                ):
                    client.disconnect("Login timeout")
            data = await self.getData(client, reader)
            if not data:
                break
            await self._process_data(client, data)
        self.log("Lost Connection: {}:{}".format(client.addr, client.port))
        self.clients.remove(client)
        try:
            writer.close()
        except Exception as e:
            print("Error closing stream")
            raise e
        client.logout()
        if self.multithreading:
            Thread(target=self.lostClient, args=[client]).start()
        else:
            self.lostClient(client)

    async def start(
        self, useSSL=False, sslCert=None, sslKey=None, buffer_limit=65536, ssl_timeout=3
    ):
        """Start the server and serve forever on (self.addr, self.port)."""
        self.running = True
        ssl_context = None
        self.loop = asyncio.get_running_loop()
        server = None
        if self.logging:
            if self.logFile is None:
                self.logFile = "log.txt"
        self.log("Starting server...", "blue")
        if not os.path.exists(self.userPath):
            self.log("Creating user directory", "blue")
            os.mkdir(self.userPath)
        self.loadUsers()
        if useSSL and sslCert and sslKey:
            self.log("Loading SSL certificate...", "blue")
            if os.path.exists(sslCert) and os.path.exists(sslKey):
                ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
                ssl_context.load_cert_chain(sslCert, sslKey)
                self.log("SSL certificate loaded", "green")
                server = await asyncio.start_server(
                    self._handle_client,
                    self.addr,
                    self.port,
                    ssl=ssl_context,
                    limit=buffer_limit,
                    ssl_handshake_timeout=ssl_timeout,
                )
            else:
                self.log("Unable to load certificate files", "red")
                return
        else:
            server = await asyncio.start_server(
                self._handle_client, self.addr, self.port, limit=buffer_limit
            )
        if server:
            self.log("Server started", "green")
            Thread(target=self.serverStarted, args=[self]).start()
            async with server:
                await server.serve_forever()
        else:
            self.running = False
            self.log("Unable to start server", "red")
class Client:
sepChar = b"\n\t_SEPARATOR_\t\n"
def __init__(self, multithreading=False):
self.connected = False
self.reader = None
self.writer = None
self.hostAddr = None
self.hostPort = None
self.connection_updated = time.time() # Last time the connection status was changed
self.login = (None, None)
self.multithreading = multithreading
self.loop = None
self.download_indication_size = 1024 * 10
self.buffer_update_interval = 0.01
self._next_message_length = 0
self.default_buffer_limit = 644245094400
self._enable_buffer_monitor = True
self.downloading = False
self._got_disconnect = False
self._login_failed = False
self.verifiedUser = False
self._usr_enc = False
def setUserEncrypt(self, newValue):
async def SET_USER_ENCD(self, newValue):
self._usr_enc = newValue
if isinstance(newValue, bool) and self.loop:
sData = "encData:{}".format(str(newValue))
self.sendRaw(sData)
asyncio.run_coroutine_threadsafe(SET_USER_ENCD(self, newValue), self.loop)
def _start_loop(self, loop, task, finishFunc):
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.ensure_future(task()))
if finishFunc:
asyncio.run(finishFunc())
def newLoop(self, task, finishFunc=None):
new_loop = asyncio.new_event_loop()
t = Thread(target=self._start_loop, args=(new_loop, task, finishFunc))
t.start()
def encryptData(self, data):
if self.login[1]:
cData, salt = encrypt(data, self.login[1].encode())
data = salt + cData
return data
return data
def decryptData(self, data):
if self.login[1]:
salt = data[:16]
cData = data[16:]
data = decrypt(cData, salt, self.login[1].encode())
return data
return data
def gotData(self, data, metaData):
pass
def lostConnection(self):
pass
def madeConnection(self):
pass
def loggedIn(self):
pass
def downloadStarted(self):
pass
def downloadStopped(self):
pass
def getDownloadProgress(self):
if not self.writer.is_closing():
if self.reader:
return len(self.reader._buffer), self._next_message_length
# <current buffer length>, <target buffer length>
return 0
def _buffer_monitor(self, reader):
self.downloading = False
while self.connected and not self.writer.is_closing():
if (
len(reader._buffer) >= self.download_indication_size
and not self.downloading
):
self.downloading = True
Thread(target=self.downloadStarted).start()
if not reader._buffer and self.downloading:
self.downloading = False
self._next_message_length = 0
Thread(target=self.downloadStopped).start()
time.sleep(self.buffer_update_interval)
async def getData(self, reader, writer):
data = b""
try:
data = await reader.readuntil(self.sepChar)
except asyncio.LimitOverrunError:
print(
"ERROR: Buffer limit too small for incoming data ("
" asyncio.LimitOverrunError )"
)
except asyncio.exceptions.IncompleteReadError:
print("asyncio.exceptions.IncompleteReadError")
except Exception as e:
print("Error retrieving data")
raise e
return data.rstrip(self.sepChar)
async def _got_msg_length(self, data):
if not data[7:].isalnum():
return
self._next_message_length = int(data[7:])
if self._next_message_length < self.default_buffer_limit:
self.reader._limit = self._next_message_length
async def _login_accepted(self):
self.verifiedUser = True
if self.multithreading:
Thread(target=self.loggedIn).start()
else:
self.loggedIn()
async def send_login_info(self):
if self.login[0] and self.login[1]:
username = self.login[0]
password = self.login[1]
username = make_bytes(username)
password = make_bytes(password)
self.sendRaw(b"LOGIN:" + username + b"|" + password)
async def gotRawData(self, data):
if isinstance(data, bytes):
data = data.decode()
if data.startswith("msgLen=") and len(data) > 7:
await self._got_msg_length(data)
elif data == "login required":
await self.send_login_info()
elif data == "login accepted":
await self._login_accepted()
elif data == "login failed":
self._login_failed = True
elif data == "disconnect":
self._got_disconnect = True
async def logout(self):
self.sendRaw(b"logout")
async def _process_data(self, data):
if self.login[1] and self.verifiedUser and self._usr_enc:
data = self.decryptData(data)
if data:
data, metaData, isRaw = dissectData(data)
if isRaw:
await self.gotRawData(data)
else:
if self.multithreading:
Thread(target=self.gotData, args=[self, data, metaData]).start()
else:
self.gotData(self, data, metaData)
async def _handle_host(self):
if self._enable_buffer_monitor:
Thread(target=self._buffer_monitor, args=[self.reader]).start()
if self.multithreading:
Thread(target=self.madeConnection).start()
else:
self.madeConnection()
while self.connected and self.reader and not self.writer.is_closing():
data = await self.getData(self.reader, self.writer)
if not data:
self.connected = False
break
await self._process_data(data)
self.connected = False
return self.lostConnection
async def _handle_self(self):
while self.connected:
await asyncio.sleep(0.2)
if not self.connected and self.reader:
self.reader.feed_data(self.sepChar)
async def connect(
self,
hostAddr,
hostPort,
login=(None, None),
useSSL=False,
sslCert=None,
buffer_limit=65536,
):
self.login = login
self._got_disconnect = False
self._login_failed = False
try:
ssl_context = None
if useSSL and sslCert:
if os.path.exists(sslCert):
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ssl_context.load_verify_locations(sslCert)
if ssl_context:
self.reader, self.writer = await asyncio.open_connection(
hostAddr, hostPort, ssl=ssl_context, limit=buffer_limit
)
else:
self.reader, self.writer = await asyncio.open_connection(
hostAddr, hostPort, limit=buffer_limit
)
self.connected = True
self.connection_updated = time.time()
self.loop = asyncio.get_running_loop()
future = asyncio.run_coroutine_threadsafe(self._handle_self(), self.loop)
result = self.loop.call_soon_threadsafe(await self._handle_host())
except Exception as e:
print("Error with connection")
self.connected = False
self.connection_updated = time.time()
raise e
self.connected = False
self.connection_updated = time.time()
async def _send_data(self, data, metaData=None):
if not self.connected:
print("ERROR: Event loop not connected. Unable to send data")
return None
data = prepData(data, metaData=metaData)
if self.login[1] and self.verifiedUser and self._usr_enc:
data = self.encryptData(data)
data = data + self.sepChar
await self.send_raw("msgLen={}".format(str(len(data))))
self.writer.write(data)
await self.writer.drain()
def sendData(self, data, metaData=None):
if self.loop:
try:
asyncio.run_coroutine_threadsafe(
self._send_data(data, metaData=metaData), self.loop
)
except Exception as e:
print("Error sending data")
self.disconnect()
else:
self.disconnect()
async def send_raw(self, data):
if not self.connected:
print("ERROR: Event loop not connected. Unable to send data")
return None
data = make_bytes(data)
if self.login[1] and self.verifiedUser and self._usr_enc:
data = self.encryptData(data)
data = data + self.sepChar
self.writer.write(data)
await self.writer.drain()
def sendRaw(self, data):
if self.loop:
try:
asyncio.run_coroutine_threadsafe(self.send_raw(data), self.loop)
except Exception as e:
print("Error sending data")
self.disconnect()
raise e
else:
self.disconnect()
def waitForConnection(self, timeout=None):
startTime = time.time()
while self.connection_updated < startTime:
if timeout:
if time.time() >= startTime + float(timeout):
break
return self.connected
def waitForLogin(self, timeout=None):
startTime = time.time()
while (
not self.verifiedUser
and not self._got_disconnect
and not self._login_failed
and self.connected
):
try:
if timeout:
if time.time() >= startTime + float(timeout):
break
except Exception as e:
print("Error waiting for login: {}".format(e))
self.disconnect()
return self.verifiedUser
def disconnect(self):
    """Mark this client as disconnected and close its writer stream, if any."""
    if self.connected:
        self.connected = False
    stream = self.writer
    if stream:
        try:
            stream.close()
        except Exception as e:
            print("Error closing stream: {}".format(e))
|
Alvaro
|
/Alvaro-1.1.3.tar.gz/Alvaro-1.1.3/alvaro/alvaro.py
|
alvaro.py
|
# Aly_facts
## What is Aly_facts?
Aly_facts provides facts about a user on discord named entity_night aka aly aka Alyssa. This module was made for private usage.
## Installation
```shell
pip install aly_facts
```
## Guide
### Syntax
Here's the basic syntax to follow when using the module
```py
import Aly_facts as aly
result = aly.function_name()
print(result)
```
### Example
Here's an example code snippet to show the usage
```py
import Aly_facts as aly
result = aly.get_facts()
print(result)
```
### Functions
| Function | Description |
|-------------------|----------------------------------------|
| get_facts()       | Returns random facts about Aly         |
| get_all_facts() | Returns every fact about Aly as a list |
| get_aly_pics() | Returns random Aly pictures |
| version() | Returns the package version |
## Want more functions or something changed?
Make a pull request to the [GitHub repo](https://github.com/NotFaizen/Aly_facts) or join the [discord](https://discord.gg/kqmyWNtbaY), make a ticket and ping me aka NotFaizen
## Shameless plug
### My socials
Instagram :: [notfaizen](https://www.instagram.com/notfaizen/)
Twitter :: [TheRealFaizen](https://twitter.com/TheRealFaizen)
Reddit :: [NotFaizen](https://www.reddit.com/user/NotFaizen)
GitHub :: [NotFaizen](https://github.com/NotFaizen)
Discord :: [NotFaizen](https://discord.com/users/672791500995690517)
### Discord servers you should join
Basement Developers - [Click here](https://discord.gg/mECmB39fyE)
Basement Bots Lab - [Click here](https://discord.gg/RPmvdnR88w)
Pop Cat Community - [Click here](https://dsc.gg/popcatcom)
CraftBlur Official - [Click here](https://discord.gg/kqmyWNtbaY)
|
Aly-facts
|
/Aly_facts-1.0.tar.gz/Aly_facts-1.0/README.md
|
README.md
|
# AlzScPred :- A Tool for Identification of Alzheimer's from Single Cell Genome using Deep Learning
A computational approach tool to predict Alzheimer's affected patients from their single cell RNA seq data.
## Introduction
Alzheimer's Disease is progressing as the most common cause of neurological disorder worldwide.
This tool aims to use an Artificial Neural Network (Deep Learning) model to classify Normal Control (NC) patients and Alzheimer's Disease (AD) patients from
their single cell RNA seq data. The tool takes 10x single cell genomics data as input and predicts whether the patient is diseased or healthy with the help of highly trained model.
An excellent feature selection method called mRMR (Minimum Redundancy Maximum Relevance) was used to find out top 100 features for classification.
Followed by Incremental Feature Selection (IFS) which led to identification of 35 conserved genes which act as promising biomarkers
in classification and prediction of Normal and Diseased patients.
Please read/cite the content about the AlzScPredict for complete information including algorithm behind the approach.
## Installation
Install my-project with pip
```bash
pip install AlzScPred
```
If you have previously installed it, please update the Python package to the latest version using the command below
```bash
pip3 install --upgrade AlzScPred
```
## Usage/Examples
After installing the AlzScPred package in your Python environment, import the library using the code below.
```python
import AlzScPred
```
The AlzScPredict comes with 1 inbuilt module .
- Prediction Module
Please import the modules into your Python environment before executing the code below.
```python
from AlzScPred import Validation
```
After importing all the important pre requisites. You can follow the demo below for your case.
```python
import pandas as pd
df = pd.read_csv("Your file path here")
# Prediction:- Execute the code below to get the output. It takes 1 argument i.e the dataframe with features in columns and rows as cells.
Validation.predict_patient(df)
```
Note: Please make sure that your single cell data file is prepared in the above example.csv format. And the file should also contain the read count data for the selected 35 genes in the above 35_genes.txt file.
## Output
The output of the code can be viewed in your python output terminal. Example can be seen in the output.png file.
## Authors
- [Aman Srivastava](https://github.com/AmanSriv97).
- Akanksha Jarwal.
- Anjali Dhall.
- Sumeet Patiyal.
- [Prof. G.P.S. Raghava](https://webs.iiitd.edu.in/raghava/)
|
AlzScPred
|
/AlzScPred-0.0.3.tar.gz/AlzScPred-0.0.3/README.md
|
README.md
|
import sys
DEFAULT_VERSION = "0.6c9"
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests for each downloadable setuptools egg, keyed by egg
# file name. _validate_md5() checks downloads against this table, and
# update_md5() rewrites it in place via the '--md5update' command-line option.
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    # Verify *data* against the known-good MD5 digest registered for
    # *egg_name* in md5_data (unknown eggs pass through unchecked).
    # Exits the whole script with status 2 on a mismatch.
    if egg_name in md5_data:
        digest = md5(data).hexdigest()
        if digest != md5_data[egg_name]:
            print >>sys.stderr, (
                "md5 validation of %s failed! (Possible download problem?)"
                % egg_name
            )
            sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg and make it importable for the rest of this run.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        # An older setuptools is installed (Python 2 exception syntax).
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
        else:
            # Not imported yet, so it is safe to drop it and bootstrap fresh.
            del pkg_resources, sys.modules['pkg_resources'] # reload ok
            return do_download()
    except pkg_resources.DistributionNotFound:
        return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                    version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            # Release both handles even when the download fails midway.
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools at all: bootstrap the installer from a fresh egg,
        # then delete the temporary egg afterwards.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        if setuptools.__version__ == '0.0.1':
            # Ancient placeholder install that cannot be upgraded in place.
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed version is too old: upgrade it with easy_install.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    import re
    # Digest each named egg file and record it under its base name.
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Rebuild the md5_data literal and splice it into this script's own
    # source file, replacing the existing dict body in place.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
# Script entry point: '--md5update <files...>' refreshes the digest table;
# any other arguments are forwarded to the installer.
if __name__=='__main__':
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/ez_setup.py
|
ez_setup.py
|
For installation instructions and other information visit:
http://code.google.com/p/amfast/
Copyright (c) 2009 Dave Thompson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/README.txt
|
README.txt
|
from amfast.encode import encode, encode_packet
from amfast.context import EncoderContext
from amfast.class_def import ClassDefMapper
class Encoder(object):
    """Convenience wrapper around amfast.encode.encode / encode_packet.

    Attributes:
     * amf3 - bool - True to encode as AMF3.
     * use_collections - bool - True to encode lists and tuples as ArrayCollections.
     * use_proxies - bool - True to encode dicts as ObjectProxies.
     * use_references - bool - True to encode multiply occurring objects by reference.
     * use_legacy_xml - bool - True to encode XML as XMLDocument instead of e4x.
     * include_private - bool - True to encode attributes starting with '_'.
     * class_def_mapper - amfast.class_def.ClassDefMapper - retrieves ClassDef objects.
     * buffer - file-like-object - output buffer; set to None to output to a string.
    """

    def __init__(self, amf3=False, use_collections=False, use_proxies=False,
        use_references=True, use_legacy_xml=False, include_private=False,
        class_def_mapper=None, buffer=None):
        self.amf3 = amf3
        self.use_collections = use_collections
        self.use_proxies = use_proxies
        self.use_references = use_references
        self.use_legacy_xml = use_legacy_xml
        self.include_private = include_private
        # Fall back to a fresh mapper when the caller supplies none.
        if class_def_mapper is None:
            class_def_mapper = ClassDefMapper()
        self.class_def_mapper = class_def_mapper
        self.buffer = buffer

    def _getContext(self, amf3=None):
        """Build an EncoderContext mirroring this encoder's settings."""
        use_amf3 = self.amf3 if amf3 is None else amf3
        kwargs = dict(
            amf3=use_amf3,
            use_collections=self.use_collections,
            use_proxies=self.use_proxies,
            use_references=self.use_references,
            use_legacy_xml=self.use_legacy_xml,
            include_private=self.include_private,
            class_def_mapper=self.class_def_mapper,
        )
        # Only pass 'buffer' through when one was configured, so the
        # EncoderContext default applies otherwise.
        if self.buffer is not None:
            kwargs['buffer'] = self.buffer
        return EncoderContext(**kwargs)

    def encode(self, val, amf3=None):
        """Encode a value to AMF."""
        return encode(val, self._getContext(amf3))

    def encode_packet(self, val, amf3=None):
        """Encode an AMF packet."""
        return encode_packet(val, self._getContext(amf3))
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/encoder.py
|
encoder.py
|
"""
Copyright (c) 2001-2010
Allen Short
Andy Gayton
Andrew Bennetts
Antoine Pitrou
Apple Computer, Inc.
Benjamin Bruheim
Bob Ippolito
Canonical Limited
Christopher Armstrong
David Reid
Donovan Preston
Eric Mangold
Eyal Lotem
Itamar Shtull-Trauring
James Knight
Jason A. Mobarak
Jean-Paul Calderone
Jessica McKellar
Jonathan Jacobs
Jonathan Lange
Jonathan D. Simms
Jürgen Hermann
Kevin Horn
Kevin Turner
Mary Gardiner
Matthew Lefkowitz
Massachusetts Institute of Technology
Moshe Zadka
Paul Swartz
Pavel Pergamenshchik
Ralph Meijer
Sean Riley
Software Freedom Conservancy
Travis B. Hartwell
Thijs Triemstra
Thomas Herve
Timothy Allen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys, os
class _inf(object):
    """
    An object that is bigger than all other objects.
    """
    def __cmp__(self, other):
        """
        @param other: Another object.
        @type other: any
        @return: 0 if other is inf, 1 otherwise.
        @rtype: C{int}
        """
        # NOTE: __cmp__ is honored by Python 2 only; this object sorts above
        # everything except the _inf singleton itself.
        if other is _inf:
            return 0
        return 1
# Replace the class with its singleton instance; only one _inf ever exists.
_inf = _inf()
# Raised by Version.__cmp__ when two Version objects belong to different
# packages and therefore cannot be meaningfully ordered.
class IncomparableVersions(TypeError):
    """
    Two versions could not be compared.
    """
class Version(object):
    """
    An object that represents a three-part version number.

    If running from an svn checkout, include the revision number in
    the version string.
    """
    def __init__(self, package, major, minor, micro, prerelease=None):
        """
        @param package: Name of the package that this is a version of.
        @type package: C{str}
        @param major: The major version number.
        @type major: C{int}
        @param minor: The minor version number.
        @type minor: C{int}
        @param micro: The micro version number.
        @type micro: C{int}
        @param prerelease: The prerelease number.
        @type prerelease: C{int}
        """
        self.package = package
        self.major = major
        self.minor = minor
        self.micro = micro
        self.prerelease = prerelease

    def short(self):
        """
        Return a string in canonical short version format,
        <major>.<minor>.<micro>[+rSVNVer].
        """
        s = self.base()
        svnver = self._getSVNVersion()
        if svnver:
            s += '+r' + str(svnver)
        return s

    def base(self):
        """
        Like L{short}, but without the +rSVNVer.
        """
        if self.prerelease is None:
            pre = ""
        else:
            # e.g. '1.2.3pre4' for prerelease 4.
            pre = "pre%s" % (self.prerelease,)
        return '%d.%d.%d%s' % (self.major,
                               self.minor,
                               self.micro,
                               pre)

    def __repr__(self):
        svnver = self._formatSVNVersion()
        if svnver:
            svnver = ' #' + svnver
        if self.prerelease is None:
            prerelease = ""
        else:
            prerelease = ", prerelease=%r" % (self.prerelease,)
        return '%s(%r, %d, %d, %d%s)%s' % (
            self.__class__.__name__,
            self.package,
            self.major,
            self.minor,
            self.micro,
            prerelease,
            svnver)

    def __str__(self):
        return '[%s, version %s]' % (
            self.package,
            self.short())

    def __cmp__(self, other):
        """
        Compare two versions, considering major versions, minor versions, micro
        versions, then prereleases.

        A version with a prerelease is always less than a version without a
        prerelease. If both versions have prereleases, they will be included in
        the comparison.

        @param other: Another version.
        @type other: L{Version}
        @return: NotImplemented when the other object is not a Version, or one
            of -1, 0, or 1.
        @raise IncomparableVersions: when the package names of the versions
            differ.
        """
        # NOTE: __cmp__ and the cmp() builtin exist in Python 2 only.
        if not isinstance(other, self.__class__):
            return NotImplemented
        if self.package != other.package:
            raise IncomparableVersions("%r != %r"
                                       % (self.package, other.package))
        # _inf makes "no prerelease" sort above any numbered prerelease.
        if self.prerelease is None:
            prerelease = _inf
        else:
            prerelease = self.prerelease
        if other.prerelease is None:
            otherpre = _inf
        else:
            otherpre = other.prerelease
        # Tuple comparison gives lexicographic (major, minor, micro, pre).
        x = cmp((self.major,
                 self.minor,
                 self.micro,
                 prerelease),
                (other.major,
                 other.minor,
                 other.micro,
                 otherpre))
        return x

    def _parseSVNEntries_4(self, entriesFile):
        """
        Given a readable file object which represents a .svn/entries file in
        format version 4, return the revision as a string. We do this by
        reading first XML element in the document that has a 'revision'
        attribute.
        """
        from xml.dom.minidom import parse
        doc = parse(entriesFile).documentElement
        for node in doc.childNodes:
            if hasattr(node, 'getAttribute'):
                rev = node.getAttribute('revision')
                if rev is not None:
                    return rev.encode('ascii')

    def _parseSVNEntries_8(self, entriesFile):
        """
        Given a readable file object which represents a .svn/entries file in
        format version 8, return the revision as a string.
        """
        # The revision is the fourth line of the version-8 entries file.
        entriesFile.readline()
        entriesFile.readline()
        entriesFile.readline()
        return entriesFile.readline().strip()

    # Add handlers for version 9 and 10 formats, which are the same as
    # version 8 as far as revision information is concerned.
    _parseSVNEntries_9 = _parseSVNEntries_8
    _parseSVNEntriesTenPlus = _parseSVNEntries_8

    def _getSVNVersion(self):
        """
        Figure out the SVN revision number based on the existance of
        <package>/.svn/entries, and its contents. This requires discovering the
        format version from the 'format' file and parsing the entries file
        accordingly.

        @return: None or string containing SVN Revision number.
        """
        mod = sys.modules.get(self.package)
        if mod:
            svn = os.path.join(os.path.dirname(mod.__file__), '.svn')
            if not os.path.exists(svn):
                # It's not an svn working copy
                return None
            formatFile = os.path.join(svn, 'format')
            if os.path.exists(formatFile):
                # It looks like a less-than-version-10 working copy.
                # NOTE: file() is the Python 2 alias for open().
                format = file(formatFile).read().strip()
                parser = getattr(self, '_parseSVNEntries_' + format, None)
            else:
                # It looks like a version-10-or-greater working copy, which
                # has version information in the entries file.
                parser = self._parseSVNEntriesTenPlus
            if parser is None:
                return 'Unknown'
            entriesFile = os.path.join(svn, 'entries')
            entries = file(entriesFile)
            try:
                try:
                    return parser(entries)
                finally:
                    entries.close()
            except:
                # Any parse failure degrades to 'Unknown' rather than raising.
                return 'Unknown'

    def _formatSVNVersion(self):
        ver = self._getSVNVersion()
        if ver is None:
            return ''
        return ' (SVN r%s)' % (ver,)
def getVersionString(version):
    """
    Get a friendly string for the given version object.

    @param version: A L{Version} object.
    @return: A string containing the package and short version number.
    """
    return ' '.join((version.package, version.short()))
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/version.py
|
version.py
|
import sys
import traceback
import calendar
from datetime import datetime
import logging
from amfast import version
version = version.Version('amfast', 0, 5, 3)
__version__ = version.short()
class AmFastError(Exception):
    """Base exception for this package."""
    # To get around 2.6 deprecation warning: Python 2.6 deprecated
    # BaseException.message, so expose it as an explicit property backed
    # by a private attribute instead.
    def _get_message(self):
        return self._message
    def _set_message(self, message):
        self._message = message
    message = property(_get_message, _set_message)
def epoch_from_date(date):
    """Returns epoch milliseconds."""
    # Treats *date* as UTC; `long` is Python 2 only.
    return long(calendar.timegm(date.timetuple()) * 1000)
def date_from_epoch(epoch_secs):
    """Convert seconds since the Unix epoch into a naive UTC datetime."""
    as_utc = datetime.utcfromtimestamp(epoch_secs)
    return as_utc
def format_byte_string(byte_string):
    """Get a human readable description of a byte string.

    Each byte is rendered as "index: decimal-HEX-'char'", where the char
    part is shown only for printable ASCII (33..126 plus space).
    Accepts Python 2 str as well as Python 3 bytes (whose iteration yields
    ints, which the original ord() call could not handle).
    """
    descriptions = []
    for i, x in enumerate(byte_string):
        # bytes iteration yields ints on Python 3, single-char strs on Python 2.
        val = x if isinstance(x, int) else ord(x)
        char = ''
        if val > 31 and val < 127:
            char = chr(val)
        descriptions.append("%d: %d-%02X-'%s'" % (i, val, val, char))
    return ' '.join(descriptions)
# --- setup module level logging --- #
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    Installed so library users who configure no logging do not see the
    'No handlers could be found...' warning, per the Python docs'
    recommendation.
    """
    def emit(self, record):
        # Intentionally a no-op.
        return None
log_debug = False # True to log verbose debug strings.
log_raw = False # True to log raw AMF byte strings.
logged_attr = '_amfast_logged' # Add to exceptions to indicate that it has been logged.
# Package-level logger; a NullHandler keeps it silent until the
# application installs its own handlers.
logger = logging.getLogger('AmFast')
logger.addHandler(NullHandler())
logger.setLevel(logging.DEBUG) # Pass everything through; handlers filter.
month_names = [
    'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
    'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def get_log_timestamp():
    """Return the current local time as '[day/Mon/year:hour:minute:second]'
    (no zero padding), for use as a log-line prefix."""
    now = datetime.now()
    month = month_names[now.month - 1]
    return '[%s/%s/%s:%s:%s:%s]' % (now.day, month, now.year,
                                    now.hour, now.minute, now.second)
def log_exc(e):
    """Log the currently-handled exception once; repeat calls for the same
    exception object are ignored (tracked via the `logged_attr` marker)."""
    if hasattr(e, logged_attr):
        return
    # Mark first so a failure below cannot cause duplicate logging later.
    setattr(e, logged_attr, True)
    error_type, error_value, trbk = sys.exc_info()
    tb_list = traceback.format_tb(trbk)
    lines = [
        get_log_timestamp() + " EXCEPTION",
        "# ---- EXCEPTION DESCRIPTION BEGIN ---- #",
        "# ---- Type ---- #\n%s\n# ---- Detail ---- #\n%s" % \
            (error_type.__name__, error_value),
        "# ---- Traceback ---- #",
        "-\n".join(tb_list),
        "# ---- EXCEPTION DESCRIPTION END ---- #",
    ]
    logger.error("\n".join(lines))
# --- Setup threading implementation --- #
# Pick a real RLock when the interpreter has thread support, otherwise fall
# back to dummy_threading's no-op lock so the rest of the package still runs.
try:
    import threading
    mutex_cls = threading.RLock
    use_dummy_threading = False
except ImportError:
    import dummy_threading
    mutex_cls = dummy_threading.RLock
    use_dummy_threading = True
    if log_debug:
        logger.debug("AmFast is using dummy_threading module.")
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/__init__.py
|
__init__.py
|
import os
class CodeGenerator(object):
    """Generates Actionscript source code from class defs.

    attributes:
    ============
     * indent - string, indention string to use. Default = '\t'
    """

    def __init__(self, indent='\t'):
        self.indent = indent

    def generateFilesFromMapper(self, class_mapper, dir='.', use_accessors=False,
        packaged=False, constructor=False, bindable=False,
        extends=None, implements=None):
        """Generates an Actionscript class file for every custom ClassDef.

        arguments:
        ===========
         * class_mapper - amfast.class_def.ClassDefMapper, ClassDefMapper being used to generate the action script classes.
         * dir - string, directory to store generated files in. Default = '.'
         * use_accessors - bool, If True create getter and setter methods for attributes. Default = False
         * packaged - bool, If True wrap created class in a package (for AS3). Default = False
         * constructor - bool, If True create a constructor method. Default = False
         * bindable - bool, If True, add the 'Bindable' metadata tag. Default = False
         * extends - string, name of the class that this class inherits from. Default = None
         * implements - list or tuple, a list of interface names that this class implements. Default = None
        """
        for class_def in class_mapper:
            # Built-in ClassDefs have no user source file to generate.
            if class_def._built_in is True:
                continue
            self.generateClassFile(class_def, dir, use_accessors,
                packaged, constructor, bindable, extends, implements)

    def generateClassFile(self, class_def, dir='.', use_accessors=False,
        packaged=False, constructor=False, bindable=False,
        extends=None, implements=None):
        """Generates an Actionscript class source file.

        arguments:
        ===========
         * class_def - amfast.class_def.ClassDef, ClassDef being used to generate the action script class.
         * dir - string, directory to store generated files in. Default = '.'
         * use_accessors - bool, If True create getter and setter methods for attributes. Default = False
         * packaged - bool, If True wrap created class in a package (for AS3). Default = False
         * constructor - bool, If True create a constructor method. Default = False
         * bindable - bool, If True, add the 'Bindable' metadata tag. Default = False
         * extends - string, name of the class that this class inherits from. Default = None
         * implements - list or tuple, a list of interface names that this class implements. Default = None
        """
        package = class_def.alias.split('.')
        class_ = package.pop()
        # Create the package directory hierarchy beneath 'dir' as needed.
        path = dir
        for part in package:
            path = os.path.join(path, part)
            if os.path.exists(path) is not True:
                os.mkdir(path)
        # Context manager guarantees the handle is closed even when
        # string generation raises (the original leaked it in that case).
        with open(os.path.join(path, '%s.as' % class_), 'w') as out:
            out.write(self.generateClassStr(class_def, use_accessors, packaged,
                constructor, bindable, extends, implements))

    def generateClassStr(self, class_def, use_accessors=False,
        packaged=False, constructor=False, bindable=False,
        extends=None, implements=None):
        """Generates an Actionscript class source string.

        arguments:
        ===========
         * class_def - amfast.class_def.ClassDef, ClassDef being used to generate the action script class.
         * use_accessors - bool, If True create getter and setter methods for attributes. Default = False
         * packaged - bool, If True wrap created class in a package (for AS3). Default = False
         * constructor - bool, If True create a constructor method. Default = False
         * bindable - bool, If True, add the 'Bindable' metadata tag. Default = False
         * extends - string, name of the class that this class inherits from. Default = None
         * implements - list or tuple, a list of interface names that this class implements. Default = None
        """
        class_str = []
        package = class_def.alias.split('.')
        class_ = package.pop()
        package = '.'.join(package)
        indent = ''
        if packaged is True:
            class_str.append('package %s' % package)
            class_str.append('{')
            indent = self.indent
        if bindable is True:
            class_str.append(indent + '[Bindable]')
        class_str.append(indent + "[RemoteClass(alias='%s')]" % class_def.alias)
        # Build the class declaration line piece by piece.
        class_def_str = indent
        if packaged is True:
            class_def_str += 'public '
        if hasattr(class_def, "DYNAMIC_CLASS_DEF") is True:
            class_def_str += 'dynamic '
        if hasattr(class_def, "EXTERNIZEABLE_CLASS_DEF") is True:
            # Externizeable classes must implement IExternalizable.
            imp = ['IExternalizable']
            if implements is not None:
                imp.extend(implements)
            implements = imp
        class_def_str += 'class %s' % class_
        if extends is not None:
            class_def_str += ' extends %s' % extends
        if implements is not None and len(implements) > 0:
            class_def_str += ' implements %s' % ', '.join(implements)
        class_str.append(class_def_str)
        class_str.append(indent + '{')
        for attr in class_def.static_attrs:
            if use_accessors is True:
                class_str.append(self.generateAccessor(attr, indent + self.indent))
            else:
                class_str.append(indent + self.indent + 'public var %s:Object;' % attr)
        if constructor is True:
            class_str.append('\n' + indent + self.indent + 'public function %s():void' % class_)
            class_str.append(indent + self.indent + '{')
            if extends is not None:
                class_str.append(indent + self.indent + self.indent + 'super();')
            class_str.append(indent + self.indent + '}')
        class_str.append(indent + '}')
        if packaged is True:
            class_str.append('}')
        return '\n'.join(class_str)

    def generateAccessor(self, attr, indent=''):
        """Generates an Actionscript getter and setter source string.

        arguments:
        ===========
         * attr - string, the attribute name to generate code for.
         * indent - string, indent to add to generated code. Default = ''.
        """
        attr_str = []
        attr_str.append('\n' + indent + "private var _%s:Object;" % attr)
        attr_str.append(indent + "public function get %s():Object" % attr)
        attr_str.append(indent + "{")
        attr_str.append(indent + self.indent + "return _%s;" % attr)
        attr_str.append(indent + "}")
        attr_str.append('\n' + indent + "public function set %s(value:Object):void" % attr)
        attr_str.append(indent + "{")
        attr_str.append(indent + self.indent + " _%s = value;" % attr)
        attr_str.append(indent + "}")
        return '\n'.join(attr_str)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/class_def/code_generator.py
|
code_generator.py
|
from amfast import class_def
from amfast.class_def.as_types import AsNoProxy
# Import from different places depending on which
# version of SA is being used.
try:
from sqlalchemy.orm import class_mapper, object_mapper
except ImportError:
from sqlalchemy.orm.util import class_mapper, object_mapper
# Exception is different depending on which
# version of SA is being used.
UnmappedInstanceError = None
try:
class_mapper(dict)
except Exception, e:
UnmappedInstanceError = e.__class__
class SaClassDef(class_def.ClassDef):
    """Defines how objects with a class mapped by SQLAlchemy should be serialized and de-serialized.

    Mapped attributes are considered static.
    Dynamic attributes are ignored.

    The class must be mapped with SQLAlchemy BEFORE calling ClassDefMapper.map_class().
    """

    KEY_ATTR = 'sa_key'  # encoded attribute carrying the SA primary key values
    LAZY_ATTR = 'sa_lazy'  # encoded attribute carrying the names of lazy (un-loaded) attrs

    # Set to True to always encode
    # KEY_ATTR and LAZY_ATTR as Array objects.
    no_proxy_sa_attrs = True

    def __init__(self, class_, alias=None, static_attrs=None, amf3=None,
        encode_types=None, decode_types=None):
        """Static attributes are inferred from the class mapper,
        so static_attrs needs to be passed only if there are additional
        un-mapped attributes that need to be considered static.

        raises ClassDefError if class_ has no SA mapper.
        """
        try:
            self.mapper = class_mapper(class_)
            # compile() configures the mapper before properties are read.
            # NOTE(review): compile() is deprecated in modern SA -- confirm
            # the supported SA versions.
            self.mapper.compile()
        except UnmappedInstanceError:
            raise class_def.ClassDefError("Class does not have a SA mapper associated with it.")

        if static_attrs is None:
            static_attrs = ()
        self.unmapped_attrs = static_attrs

        # Attribute names managed by the SA mapper.
        self.mapped_attrs = []
        for prop in self.mapper.iterate_properties:
            self.mapped_attrs.append(prop.key)

        # Check for duplicates
        for attr in self.mapped_attrs:
            if attr in self.unmapped_attrs:
                raise class_def.ClassDefError("Mapped attributes cannot be listed in the static_attrs argument.")

        # KEY_ATTR and LAZY_ATTR always come first; getStaticAttrVals
        # emits values in this exact order.
        combined_attrs = [self.KEY_ATTR, self.LAZY_ATTR]
        combined_attrs.extend(self.mapped_attrs)
        combined_attrs.extend(self.unmapped_attrs)

        class_def.ClassDef.__init__(self, class_, alias=alias,
            static_attrs=combined_attrs, amf3=amf3, encode_types=encode_types,
            decode_types=decode_types)

    def getStaticAttrVals(self, obj):
        """Returns attribute values in the same order as self.static_attrs:
        [key, lazy-list, mapped values..., un-mapped values...].
        """
        # Set key and lazy. lazy_attrs is shared with vals and filled
        # in while scanning the mapped attributes below.
        lazy_attrs = []
        if self.__class__.no_proxy_sa_attrs is True:
            vals = [AsNoProxy(self.mapper.primary_key_from_instance(obj)), AsNoProxy(lazy_attrs)]
        else:
            vals = [self.mapper.primary_key_from_instance(obj), lazy_attrs]

        # Set mapped values
        attr_count = len(self.mapped_attrs)
        for i in xrange(0, attr_count):
            attr = self.mapped_attrs[i]
            # Look at __dict__ directly,
            # otherwise SA will touch the attr.
            if attr in obj.__dict__:
                vals.append(getattr(obj, attr))
            else:
                # This attr is lazy
                vals.append(None)
                lazy_attrs.append(attr)

        # Set un-mapped values
        vals.extend([getattr(obj, attr, None) for attr in self.unmapped_attrs])
        return vals

    def getInstance(self):
        # Let SA's class manager create the instance so that the
        # instrumentation state is set up correctly.
        return self.mapper.class_manager.new_instance()

    def applyAttrVals(self, obj, vals):
        """Sets decoded values on obj, skipping lazy-loaded attributes
        so that SA can load them on demand.
        """
        # Delete lazy-loaded attrs from vals
        if self.LAZY_ATTR in vals:
            lazy_attrs = vals[self.LAZY_ATTR]
            if lazy_attrs is not None:
                for lazy_attr in lazy_attrs:
                    if lazy_attr in vals:
                        del vals[lazy_attr]
            del vals[self.LAZY_ATTR]

        class_def.ClassDef.applyAttrVals(self, obj, vals)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/class_def/sa_class_def.py
|
sa_class_def.py
|
import threading
from amfast import AmFastError
class ClassDefError(AmFastError):
    """Raised for ClassDef related errors."""
class ClassDef(object):
    """Defines how objects of a given class are serialized and de-serialized.
    This class can be sub-classed to provide custom serialization.

    attributes
    ===========
     * class_ - class, the class object mapped to this definition
     * alias - string, the AMF alias name of the mapped class
     * static_attrs - tuple or list, a tuple of static attribute names,
        all values must be strings or unicode.
     * amf3 - bool, if True, this object will be encoded in AMF3.
     * encode_types - dict, keys = attribute names, values = callables.
        Callables must accept a single parameter
        (the object being encoded) and return a new object.
     * decode_types - dict, keys = attribute names, values = callables.
        Callables must accept a single parameter
        (the object being decoded) and return a new object.
    """

    # Marker attribute checked by ClassDefMapper.mapClass to recognize
    # ClassDef instances.
    CLASS_DEF = True

    def __init__(self, class_, alias=None, static_attrs=None,
        amf3=None, encode_types=None, decode_types=None, _built_in=False):
        """arguments
        =============
         * class_ - class, the class being mapped.
         * alias - string, specifies the amf class alias. Default = module.class
         * static_attrs - tuple or list, a tuple of static attribute strings. Default = empty tuple
         * amf3 - bool, if True, this object will be encoded in AMF3. Default = True
         * encode_types - dict, keys = attribute names, values = callables. Default = None
         * decode_types - dict, keys = attribute names, values = callables. Default = None
        """
        self.class_ = class_
        # True for ClassDefs that ship with AmFast itself.
        self._built_in = _built_in

        # For alias, static_attrs and amf3: an explicit argument wins;
        # otherwise fall back to attributes assigned on the class
        # (see assign_attrs); otherwise use the documented default.
        if alias is None:
            if hasattr(class_, ALIAS):
                alias = getattr(class_, ALIAS)
            else:
                alias = '.'.join((class_.__module__, class_.__name__))
        self.alias = alias

        if static_attrs is None:
            if hasattr(class_, STATIC_ATTRS):
                static_attrs = self.static_attrs = getattr(class_, STATIC_ATTRS)
            else:
                static_attrs = ()
        self.static_attrs = static_attrs

        if amf3 is None:
            if hasattr(class_, AMF3):
                amf3 = getattr(class_, AMF3)
            else:
                amf3 = True
        self.amf3 = amf3

        self.encode_types = encode_types
        self.decode_types = decode_types

    def getStaticAttrVals(self, obj):
        """Returns a list of values of attributes defined in self.static_attrs

        If this method is overridden to provide custom behavior, please note:
        Returned values MUST BE IN THE SAME ORDER AS THEY APPEAR IN self.static_attrs.

        arguments
        ==========
         * obj - object, the object to get attribute values from.
        """
        # Missing attributes are encoded as None rather than raising.
        return [getattr(obj, attr, None) for attr in self.static_attrs]

    def getInstance(self):
        """Returns an instance of the mapped class to be used
        when an object of this type is deserialized.
        """
        # __new__ bypasses __init__; attribute values are applied
        # afterwards via applyAttrVals.
        return self.class_.__new__(self.class_)

    def applyAttrVals(self, obj, vals):
        """Set decoded attribute values on the object.

        arguments
        ==========
         * obj - object, the object to set the attribute values on.
         * vals - dict, keys == attribute name, values == attribute values.
        """
        # List comprehension used purely for its setattr side effect
        # (Python 2 idiom used throughout this code base).
        [setattr(obj, key, val) for key, val in vals.iteritems()]
class DynamicClassDef(ClassDef):
    """A ClassDef whose instances also carry dynamic attributes."""

    DYNAMIC_CLASS_DEF = True

    def __init__(self, class_, alias=None, static_attrs=None, amf3=True,
        encode_types=None, decode_types=None, include_private=None, _built_in=False):
        ClassDef.__init__(self, class_, alias, static_attrs, amf3,
            encode_types, decode_types, _built_in)
        # When not None, this overrides the include_private argument
        # passed to getDynamicAttrVals.
        self.include_private = include_private

    def getDynamicAttrVals(self, obj, include_private=False):
        """Returns a dict where keys are attribute names and values are attribute values.

        arguments
        ==========
        obj - object, the object to get attributes for.
        include_private - bool, if False do not include attributes with
            names starting with '_'. Default = False.
        """
        if self.include_private is not None:
            ip = self.include_private
        else:
            ip = include_private
        return get_dynamic_attr_vals(obj, self.static_attrs, ip)
class ExternClassDef(ClassDef):
    """A ClassDef where the byte string encoding/decoding is customized.

    The Actionscript version of the class must implement IExternalizeable.
    """

    EXTERNALIZABLE_CLASS_DEF = True

    def __init__(self, class_, alias=None, static_attrs=None, _built_in=False):
        # Externalizeable objects are always encoded with AMF3.
        ClassDef.__init__(self, class_, alias, static_attrs,
            amf3=True, _built_in=_built_in)

    def writeExternal(self, obj, context):
        """Encodes obj to a byte string. Sub-classes must override this.

        arguments
        ==========
         * obj - object, The object that is being encoded.
         * context - amfast.decoder.EncoderContext, holds encoding related properties.
        """
        raise ClassDefError("This method must be implemented by a sub-class.")

    def readExternal(self, obj, context):
        """Applies a decoded byte string to obj. Sub-classes must override this.

        arguments
        ==========
         * obj - object, The object that the byte string is being applied to.
         * context - amfast.decoder.DecoderContext, holds decoding related properties.
        """
        raise ClassDefError("This method must be implemented by a sub-class.")
class _ProxyClassDef(ExternClassDef):
    """Used internally to encode/decode proxied objects."""

    PROXY_CLASS_DEF = True
    PROXY_ALIAS = 'proxy'

    class _ProxyObject(object):
        """Stand-in class used only for mapping."""
        pass

    def __init__(self):
        ExternClassDef.__init__(self, self._ProxyObject,
            self.PROXY_ALIAS, None, _built_in=True)
class _ArrayCollectionClassDef(_ProxyClassDef):
    """Internal ClassDef for encoding/decoding a Flex ArrayCollection."""

    ARRAY_COLLECTION_CLASS_DEF = True
    PROXY_ALIAS = 'flex.messaging.io.ArrayCollection'

    def __init__(self):
        _ProxyClassDef.__init__(self)
class _ObjectProxyClassDef(_ProxyClassDef):
    """Internal ClassDef for encoding/decoding a Flex ObjectProxy."""

    OBJECT_PROXY_CLASS_DEF = True
    PROXY_ALIAS = 'flex.messaging.io.ObjectProxy'

    def __init__(self):
        _ProxyClassDef.__init__(self)
class ClassDefMapper(object):
    """Map classes to ClassDefs, retrieve class_defs by class or alias name.

    Mutating operations are serialized on an internal re-entrant lock.
    """

    def __init__(self):
        """Creates a mapper pre-populated with the built-in ClassDefs."""
        self._lock = threading.RLock()
        self._mapped_classes = {}  # class object -> ClassDef
        self._mapped_aliases = {}  # alias string -> ClassDef
        self._mapBuiltIns()

    def __iter__(self):
        # Iterates over all mapped ClassDef objects.
        return self._mapped_aliases.itervalues()

    def _mapBuiltIns(self):
        """Map built-in ClassDefs for default behavior."""
        # Imported locally -- presumably to avoid import cycles at
        # module load time; verify before moving to module level.
        from as_types import AsError
        from amfast.remoting import flex_messages as messaging

        # Proxy objects
        self.mapClass(_ArrayCollectionClassDef())
        self.mapClass(_ObjectProxyClassDef())

        # Exceptions
        self.mapClass(ClassDef(AsError, _built_in=True))
        self.mapClass(ClassDef(messaging.FaultError, _built_in=True))

        # Flex remoting messages
        self.mapClass(ClassDef(messaging.RemotingMessage, _built_in=True))
        self.mapClass(messaging.AsyncSmallMsgDef(messaging.AsyncMessage,
            alias="DSA", _built_in=True))
        self.mapClass(ClassDef(messaging.AsyncMessage, _built_in=True))
        self.mapClass(messaging.CommandSmallMsgDef(messaging.CommandMessage,
            alias="DSC", _built_in=True))
        self.mapClass(ClassDef(messaging.CommandMessage, _built_in=True))
        self.mapClass(ClassDef(messaging.AcknowledgeMessage, _built_in=True))
        self.mapClass(ClassDef(messaging.ErrorMessage, _built_in=True))

    def mapClass(self, class_def):
        """Map a class_def implementation, so that it can be retrieved based on class attributes.

        arguments
        ==========
         * class_def - ClassDef, ClassDef being mapped.

        raises ClassDefError if class_def is not a ClassDef object.
        """
        if not hasattr(class_def, 'CLASS_DEF'):
            raise ClassDefError("class_def argument must be a ClassDef object.")

        self._lock.acquire()
        try:
            self._mapped_classes[class_def.class_] = class_def
            self._mapped_aliases[class_def.alias] = class_def
        finally:
            self._lock.release()

    def getClassDefByClass(self, class_):
        """Get a ClassDef.

        Returns None if ClassDef is not found.

        arguments
        ==========
         * class_ - class, the class to find a ClassDef for.
        """
        return self._mapped_classes.get(class_, None)

    def getClassDefByAlias(self, alias):
        """Get a ClassDef.

        Returns None if no ClassDef is found.

        arguments
        ==========
         * alias - string, the alias to find a ClassDef for.
        """
        return self._mapped_aliases.get(alias, None)

    def unmapClass(self, class_):
        """Unmap a class definition.

        arguments
        ==========
         * class_ - class, the class to remove a ClassDef for.
        """
        self._lock.acquire()
        try:
            # Collect matching aliases first: mutating a dict while
            # iterating over it raises a RuntimeError.
            #
            # BUG FIX: _mapped_aliases values are ClassDef objects
            # (see mapClass), so compare against class_def.class_ --
            # the old comparison of the class to the ClassDef itself
            # never matched, so aliases were never unmapped.
            doomed = [alias for alias, class_def in self._mapped_aliases.items()
                if class_def.class_ == class_]
            for alias in doomed:
                del self._mapped_aliases[alias]

            # BUG FIX: _mapped_classes is keyed by the class object
            # itself (see mapClass), not by id(class_), so the old
            # id() lookup never found anything to delete.
            if class_ in self._mapped_classes:
                del self._mapped_classes[class_]
        finally:
            self._lock.release()
# ---- module attributes ---- #
def get_dynamic_attr_vals(obj, ignore_attrs=None, include_private=False):
    """Returns a dict of attribute values to encode.

    keys = attribute names, values = attribute values.

    arguments
    ==========
     * obj - object, object to get dynamic attribute values from.
     * ignore_attrs - list or tuple of attributes to ignore. Default = None (ignore nothing).
     * include_private - bool, if False do not include attributes that start with '_'.
         Default = False.
    """
    vals = {}
    # Objects without a __dict__ (e.g. slotted instances, builtins)
    # have no dynamic attributes; return an empty dict for them.
    if hasattr(obj, '__dict__'):
        # items() instead of iteritems() so this runs under both
        # Python 2 and Python 3 with identical behavior.
        for attr, val in obj.__dict__.items():
            if ignore_attrs is not None and attr in ignore_attrs:
                continue
            if (include_private is False) and attr.startswith('_'):
                continue
            vals[attr] = val
    return vals
# These properties can be set on a class
# to map attributes within the class.
# (See assign_attrs; ClassDef.__init__ checks for them when the
# corresponding constructor argument is None.)
ALIAS = '_AMFAST_ALIAS'  # overrides the AMF alias name
STATIC_ATTRS = '_AMFAST_STATIC_ATTRS'  # overrides the static attribute tuple
AMF3 = '_AMFAST_AMF3'  # overrides the AMF3 encoding flag
def assign_attrs(class_, alias=None, static_attrs=None, amf3=None):
    """Attach ClassDef configuration attributes directly to a class.

    Useful if you want to keep ClassDef configuration with the class
    being mapped, instead of at the point where the ClassDef is created.

    After calling this function, ClassDef(class_) picks up the assigned
    values; arguments provided to ClassDef() still override attributes
    assigned here.

    arguments
    ==========
     * class_ - class, the class to assign attributes to.
     * alias - string, the amf alias name of the mapped class
     * static_attrs - tuple, a tuple of static attribute names, all values must be strings or unicode
     * amf3 - bool, if True, this object will be encoded in AMF3.
    """
    # Only values that were explicitly provided get attached.
    for attr_name, value in ((ALIAS, alias),
            (STATIC_ATTRS, static_attrs), (AMF3, amf3)):
        if value is not None:
            setattr(class_, attr_name, value)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/class_def/__init__.py
|
__init__.py
|
import time
import uuid
import amfast
from connection import Connection, ConnectionError
import flex_messages as messaging
class NotConnectedError(ConnectionError):
    """Raised when an operation references a connection that is not connected."""
class SessionAttrError(ConnectionError):
    """Raised when a requested session attribute does not exist."""
class ConnectionManager(object):
    """Keeps track of all current connections.

    This is an abstract base class and should be
    implemented by a sub-class, which supplies the storage-specific
    operations (loadConnection, initConnection, touchConnection, ...).
    """

    def __init__(self, connection_class=Connection, connection_params=None):
        self.connection_class = connection_class
        if connection_params is None:
            connection_params = {}
        self.connection_params = connection_params

    def generateId(self):
        """Generates a unique ID for a connection."""
        return str(uuid.uuid4())

    def getConnection(self, connection_id, touch=True):
        """Retrieve an existing connection.

        arugments
        ==========
         * connection_id - string, id of client to get connection for.
             connection_id should be unique for each Flash client (flex_client_id).
         * touch - boolean, If True set 'last_accessed' to now.

        raises
        =======
         * NotConnectedError if connection doesn't exist.
        """
        if connection_id is None:
            raise NotConnectedError("Blank connection_id is not connected.")

        connection = self.loadConnection(connection_id)

        if touch is True:
            self.touchConnection(connection)
            return connection

        # Not touching: enforce the idle timeout instead.
        now = time.time() * 1000  # epoch milliseconds
        if connection.last_active < (now - connection.timeout):
            connection.delete()
            raise NotConnectedError("Connection '%s' is not connected." % connection_id)
        return connection

    def createConnection(self, channel, connection_id=None):
        """Returns a new connection object."""
        if connection_id is None:
            connection_id = self.generateId()

        new_connection = self.connection_class(self, channel.name,
            connection_id, **self.connection_params)
        self.initConnection(new_connection, channel)
        return new_connection

    def deleteConnection(self, connection):
        """Deletes a connection object."""
        if connection.notify_func is not None:
            # Call notify function,
            # which should check for
            # a disconnection.
            connection.notify_func()
            connection.unSetNotifyFunc()
        connection.disconnect()

    def setNotifyFunc(self, connection, func):
        raise ConnectionError('Not implemented')

    def unSetNotifyFunc(self, connection):
        raise ConnectionError('Not implemented')
class MemoryConnectionManager(ConnectionManager):
    """Manages connections in memory."""

    def __init__(self, connection_class=Connection, connection_params=None):
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        # amfast.mutex_cls: project-configured lock factory --
        # presumably a threading lock; verify in the amfast package.
        self._lock = amfast.mutex_cls()
        self.reset()

    def reset(self):
        # Drop all connections and per-channel connection counts.
        self._connections = {}
        self._channels = {}

    def getConnectionCount(self, channel_name):
        # Returns 0 for channels that have never had a connection.
        try:
            return self._channels[channel_name]
        except KeyError:
            return 0

    def loadConnection(self, connection_id):
        """Returns the connection with the given id.

        raises NotConnectedError if the id is unknown.
        """
        connection = self._connections.get(connection_id, None)
        if connection is None:
            raise NotConnectedError("Connection '%s' is not connected." % connection_id)
        return connection

    def initConnection(self, connection, channel):
        """Initializes per-connection state and registers the connection."""
        connection._session = {}
        connection._connected = True
        connection._last_active = time.time() * 1000  # epoch milliseconds
        connection._last_polled = 0.0
        connection._authenticated = False
        connection._flex_user = None

        self._connections[connection.id] = connection

        # Increment the channel's connection count under the lock.
        self._lock.acquire()
        try:
            try:
                self._channels[channel.name] += 1
            except KeyError:
                self._channels[channel.name] = 1
        finally:
            self._lock.release()

    def iterConnectionIds(self):
        # Under Python 2, keys() returns a snapshot list, so this
        # iterator is not affected by later dict mutation.
        return self._connections.keys().__iter__()

    # --- proxies for connection properties --- #

    def getConnected(self, connection):
        return connection._connected

    def getLastActive(self, connection):
        return connection._last_active

    def getLastPolled(self, connection):
        return connection._last_polled

    def getAuthenticated(self, connection):
        return connection._authenticated

    def getFlexUser(self, connection):
        return connection._flex_user

    def getNotifyFunc(self, connection):
        # A connection has a notify function only after setNotifyFunc.
        if not hasattr(connection, '_notify_func_id'):
            return None
        else:
            return connection._getNotifyFuncById(connection._notify_func_id)

    # --- proxies for connection methods --- #

    def connectConnection(self, connection):
        connection._connected = True

    def disconnectConnection(self, connection):
        connection._connected = False

    def deleteConnection(self, connection):
        """Removes the connection and decrements its channel's count."""
        self._lock.acquire()
        try:
            if connection.id in self._connections:
                del self._connections[connection.id]

            if connection.channel_name in self._channels:
                self._channels[connection.channel_name] -= 1
        finally:
            self._lock.release()

        # Base class handles notification and disconnect.
        ConnectionManager.deleteConnection(self, connection)

    def touchConnection(self, connection):
        connection._last_active = time.time() * 1000

    def touchPolled(self, connection):
        connection._last_polled = time.time() * 1000

    def authenticateConnection(self, connection, user):
        connection._authenticated = True
        connection._flex_user = user

    def unAuthenticateConnection(self, connection):
        connection._authenticated = False
        connection._flex_user = None

    def setNotifyFunc(self, connection, func):
        connection._notify_func_id = connection._setNotifyFunc(func)

    def unSetNotifyFunc(self, connection):
        if hasattr(connection, '_notify_func_id'):
            connection._delNotifyFunc(connection._notify_func_id)
            del connection._notify_func_id

    def getConnectionSessionAttr(self, connection, name):
        """Returns a session attribute; raises SessionAttrError if missing."""
        try:
            return connection._session[name]
        except KeyError:
            raise SessionAttrError("Attribute '%s' not found." % name)

    def setConnectionSessionAttr(self, connection, name, val):
        connection._session[name] = val

    def delConnectionSessionAttr(self, connection, name):
        # Deleting a missing session attribute is a no-op.
        try:
            del connection._session[name]
        except KeyError:
            pass
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/connection_manager.py
|
connection_manager.py
|
import time
from twisted.internet import defer, task, reactor
from twisted.web import server
from twisted.web.resource import Resource
import amfast
from amfast.class_def.as_types import AsNoProxy
from channel import ChannelSet, HttpChannel
from endpoint import AmfEndpoint
import flex_messages as messaging
import connection_manager as cm
class TwistedChannelSet(ChannelSet):
    """A ChannelSet optimized for use with Twisted."""

    def render_POST(self, request):
        # Dispatch to the channel named by the URL path
        # (path minus the leading '/').
        channel_name = request.path[1:]
        channel = self.getChannel(channel_name)
        return channel.render_POST(request)

    def scheduleClean(self):
        # Run self.clean every clean_freq seconds; the False argument
        # skips the immediate first call.
        cleaner = task.LoopingCall(self.clean)
        cleaner.start(self.clean_freq, False)

    def notifyConnections(self, topic, sub_topic):
        # Hand off to the reactor so the caller never blocks.
        reactor.callLater(0, self._notifyConnections, topic, sub_topic)

    def _notifyConnections(self, topic, sub_topic):
        """Notifies subscribers one per reactor tick so a long
        subscriber list never blocks the reactor.
        """
        iter = self.subscription_manager.iterSubscribers(topic, sub_topic)

        def _notify():
            # Process a single subscriber, then re-schedule ourselves.
            try:
                connection_id = iter.next()  # Python 2 iterator protocol
                try:
                    connection = self.connection_manager.getConnection(connection_id, False)
                except cm.NotConnectedError:
                    # Stale subscriber; skip it.
                    pass
                else:
                    if connection.notify_func is not None:
                        reactor.callLater(0, connection.notify_func)
                reactor.callLater(0, _notify)
            except StopIteration:
                pass

        reactor.callLater(0, _notify)

    def clean(self):
        """Cleans timed-out connections, one per reactor tick."""
        if amfast.log_debug is True:
            amfast.logger.debug("Cleaning channel.")

        current_time = time.time()
        iter = self.connection_manager.iterConnectionIds()

        def _clean():
            # Schedule a cleanConnection for one id, then re-schedule.
            try:
                connection_id = iter.next()
                reactor.callLater(0, self.cleanConnection, connection_id, current_time)
                reactor.callLater(0, _clean)
            except StopIteration:
                pass

        reactor.callLater(0, _clean)
class TwistedChannel(Resource, HttpChannel):
    """An AMF messaging channel that can be used with Twisted Web."""

    # This attribute is added to packets
    # that are waiting for a long-poll
    # to receive a message.
    MSG_NOT_COMPLETE = '_msg_not_complete'

    # This attribute is added to store
    # Twisted's request var on the packet,
    # so that it can be available to targets.
    TWISTED_REQUEST = '_twisted_request'

    def __init__(self, *args, **kwargs):
        Resource.__init__(self)
        HttpChannel.__init__(self, *args, **kwargs)

    def render_POST(self, request):
        """Process an incoming AMF packet."""
        if request.content:
            # Callback chain: read body -> decode -> invoke ->
            # checkComplete. Failures at any stage route to self.fail.
            d = defer.Deferred()
            d.addCallbacks(request.content.read, self.fail, errbackArgs=(request,))
            d.addCallbacks(self.decode, self.fail, callbackArgs=(request,), errbackArgs=(request,))
            d.addCallbacks(self.invoke, self.fail, errbackArgs=(request,))
            d.addCallbacks(self.checkComplete, self.fail, callbackArgs=(request,), errbackArgs=(request,))
            d.callback(int(request.getHeader('Content-Length')))

        return server.NOT_DONE_YET

    def decode(self, raw_request, request):
        """Overridden to add Twisted's request object onto the packet."""
        decoded = HttpChannel.decode(self, raw_request)
        setattr(decoded, self.TWISTED_REQUEST, request)
        return decoded

    def getDeferred(self, msg):
        """Returns a Deferred object if a message contains a Deferred in its body,
        or False if message is not deferred.
        """
        if msg.is_flex_msg:
            # Flex messages nest the payload one level deeper.
            body = msg.body.body
            if isinstance(body, defer.Deferred):
                return body
        else:
            body = msg.body
            if isinstance(body, defer.Deferred):
                return body
        return False

    def completeDeferreds(self, results, response, request, deferred_msgs):
        """A response's deferred methods have completed.

        arguments
        ==========
         * results - list of (success, value) tuples from a DeferredList.
         * response - the response packet being completed.
         * request - Twisted request object.
         * deferred_msgs - messages in the same order as results.
        """
        for i, result in enumerate(results):
            msg = deferred_msgs[i]
            if result[0] is False:
                # Invokation failed
                msg.convertFail(result[1].value)
            else:
                if msg.is_flex_msg:
                    msg.body.body = result[1]
                else:
                    msg.body = result[1]

        # All bodies are resolved: encode and send the response.
        d = defer.Deferred()
        d.addCallbacks(self.encode, self.fail, errbackArgs=(request,))
        d.addCallbacks(self.finish, self.fail, callbackArgs=(request,),
            errbackArgs=(request,))
        d.callback(response)

    def checkComplete(self, response, request):
        """Checks to determine if the response message is ready
        to be returned to the client, and defers the response if
        ready.
        """
        if hasattr(response, self.MSG_NOT_COMPLETE):
            # long-poll operation.
            # response is waiting for a message to be published.
            return

        # Check for deferred messages
        deferreds = []
        deferred_msgs = []
        for msg in response.messages:
            deferred = self.getDeferred(msg)
            if deferred is not False:
                deferreds.append(deferred)
                deferred_msgs.append(msg)

        if len(deferreds) > 0:
            # Wait for every deferred body before encoding.
            dl = defer.DeferredList(deferreds)
            dl.addCallbacks(self.completeDeferreds, self.fail,
                callbackArgs=(response, request, deferred_msgs), errbackArgs=(request,))
            return

        # Message is complete, encode and return
        d = defer.Deferred()
        d.addCallbacks(self.encode, self.fail, errbackArgs=(request,))
        d.addCallbacks(self.finish, self.fail, callbackArgs=(request,), errbackArgs=(request,))
        d.callback(response)

    def finish(self, raw_response, request):
        """Send response to client when message is complete."""
        request.setHeader('Content-Type', self.CONTENT_TYPE)
        request.write(raw_response)
        request.finish()

    def fail(self, failure, request, code=500, message='Internal Server Error'):
        """Return an error response to the client.

        arguments
        ==========
         * failure - twisted Failure that triggered the error.
         * request - Twisted request object.
         * code - int, HTTP status code. Default = 500.
         * message - string, status message and response body.
        """
        # BUG FIX: the status code used to be hard-coded to 500,
        # ignoring the code argument; honor the caller-supplied code.
        request.setResponseCode(code, message)
        self.finish(message, request)

    def setupPollRequest(self, packet, connection):
        """Setup a request for a long-poll operation."""
        # Set flag so self.finish
        # does not get called when
        # response packet is returned
        # from self.invoke.
        setattr(packet.response, self.MSG_NOT_COMPLETE, True)

        request = getattr(packet, self.TWISTED_REQUEST)
        return request

    def finishPoll(self, request, packet, message, messages):
        """Finish a request that has been waiting for messages."""
        if request.finished:
            # Someone else has already called this function,
            # or twisted has finished the request for some other reason.
            return

        if isinstance(self.endpoint, AmfEndpoint):
            # Make sure messages are not encoded as an ArrayCollection
            messages = AsNoProxy(messages)
        message.response_msg.body.body = messages

        delattr(packet.response, self.MSG_NOT_COMPLETE)
        self.checkComplete(packet.response, request)

    def _waitForMessage(self, packet, message, connection):
        """Overridden to be non-blocking."""
        request = self.setupPollRequest(packet, connection)

        timeout_call = None
        def _notify():
            # This function gets called when a message is published,
            # or wait_interval is reached.
            if timeout_call is not None and timeout_call.active():
                # Disable time out callback
                timeout_call.cancel()

            # Disable notify function.
            connection.unSetNotifyFunc()

            # Get messages and add them
            # to the response message
            messages = self.channel_set.subscription_manager.pollConnection(connection)
            self.finishPoll(request, packet, message, messages)

        connection.setNotifyFunc(_notify)

        # Notify when wait_interval is reached
        if self.wait_interval > -1:
            # wait_interval is in milliseconds; callLater takes seconds.
            timeout_call = reactor.callLater(float(self.wait_interval) / 1000, _notify)

        # Cleanup if client drops connection.
        _connectionLost = request.connectionLost
        def _connection_lost(reason):
            _connectionLost(reason)
            _notify()
        request.connectionLost = _connection_lost

        return ()

    def _pollForMessage(self, packet, message, connection):
        """Overridden to be non-blocking."""
        request = self.setupPollRequest(packet, connection)

        # The LoopingCall's function is attached below (poller.f = _poll)
        # after the closures it needs have been defined.
        poller = task.LoopingCall(None)

        def _timeout():
            # Stop polling and flush whatever messages are queued.
            poller.stop()
            messages = self.channel_set.subscription_manager.pollConnection(connection)
            self.finishPoll(request, packet, message, messages)

        # Cleanup if client drops connection.
        _connectionLost = request.connectionLost
        def _connection_lost(reason):
            _connectionLost(reason)
            _timeout()
        request.connectionLost = _connection_lost

        if self.wait_interval > -1:
            timeout_call = reactor.callLater(float(self.wait_interval) / 1000, _timeout)
        else:
            timeout_call = None

        def _poll():
            messages = self.channel_set.subscription_manager.pollConnection(connection)
            if len(messages) > 0:
                poller.stop()
                if timeout_call is not None and timeout_call.active():
                    # Disable time out callback
                    timeout_call.cancel()
                self.finishPoll(request, packet, message, messages)
            elif connection.connected is False:
                _timeout()

        poller.f = _poll
        poller.start(float(self.poll_interval) / 1000)

        return ()
class StreamingTwistedChannel(TwistedChannel):
    """Handles streaming http connections."""

    def __init__(self, name, max_connections=-1, endpoint=None,
        timeout=1200, wait_interval=0, heart_interval=30000):
        TwistedChannel.__init__(self, name, max_connections, endpoint,
            timeout, wait_interval)
        # Interval between heart-beat bytes, in milliseconds
        # (divided by 1000 when passed to the LoopingCall in startBeat).
        self.heart_interval = heart_interval

    def render_POST(self, request):
        """Process an incoming AMF packet."""
        if request.getHeader('Content-Type') == self.CONTENT_TYPE:
            # Regular AMF message
            return TwistedChannel.render_POST(self, request)

        request.setHeader('Content-Type', self.CONTENT_TYPE)

        # Create streaming message command
        msg = messaging.StreamingMessage()
        msg.parseArgs(request.args)

        d = defer.Deferred()
        if msg.operation == msg.OPEN_COMMAND:
            d.addCallbacks(self.startStream, self.fail, callbackArgs=(request,), errbackArgs=(request,))
            d.callback(msg)
            return server.NOT_DONE_YET

        if msg.operation == msg.CLOSE_COMMAND:
            d.addCallbacks(self.stopStream, self.fail, callbackArgs=(request,), errbackArgs=(request,))
            d.callback(msg)
            return server.NOT_DONE_YET
        # NOTE(review): an unrecognized operation falls through and
        # implicitly returns None -- confirm this is intentional.

    def startStream(self, msg, request):
        """Get this stream rolling!"""
        connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))

        if self.channel_set.notify_connections is True:
            # The channel set pushes notifications; no polling needed.
            poller = None
        else:
            # Repeatedly poll for messages.
            # The call target is attached below (poller.f = _notify).
            poller = task.LoopingCall(None)

        # Remove notify function if client drops connection.
        _connectionLost = request.connectionLost
        def _connection_lost(reason):
            self.channel_set.disconnect(connection)
            connection.unSetNotifyFunc()
            _connectionLost(reason)
        request.connectionLost = _connection_lost

        # This function gets called when a message is published.
        def _notify():
            if connection.connected is False:
                # Tell the client the stream is over and close it out.
                if poller is not None:
                    poller.stop()

                connection.unSetNotifyFunc()
                msg = messaging.StreamingMessage.getDisconnectMsg()
                request.write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint))
                request.finish()
                return

            # Write every queued message to the open stream.
            msgs = self.channel_set.subscription_manager.pollConnection(connection)
            for msg in msgs:
                request.write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint))

        connection.setNotifyFunc(_notify)

        if poller is not None:
            poller.f = _notify
            poller.start(float(self.poll_interval) / 1000, False)

        # Acknowledge connection
        response = msg.acknowledge()
        response.body = connection.id
        self.sendMsg(request, response)

        # Prime request with bytes so
        # client will start responding.
        request.write(chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES)

        self.startBeat(connection, request)

    def sendMsg(self, request, msg):
        """Send a message to the client."""
        request.write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint))

    def stopStream(self, msg, request):
        """Disconnects the connection named in the close command."""
        connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        self.channel_set.disconnect(connection)

    def startBeat(self, connection, request):
        # Send out heart beat.
        # The call target is attached below (looper.f = _beat).
        looper = task.LoopingCall(None)
        def _beat():
            """Keep calling this method as long as the connection is alive."""
            if connection.connected is False:
                looper.stop()
                return
            request.write(chr(messaging.StreamingMessage.NULL_BYTE))

        looper.f = _beat
        looper.start(float(self.heart_interval) / 1000, False)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/twisted_channel.py
|
twisted_channel.py
|
import time
import threading
import amfast
from amfast import AmFastError
from amfast.remoting.channel import ChannelSet, HttpChannel, ChannelError
import amfast.remoting.flex_messages as messaging
class WsgiChannelSet(ChannelSet):
    """A ChannelSet that dispatches WSGI requests to its channels."""

    def __call__(self, environ, start_response):
        # Channel name is the request path minus the leading '/'.
        name = environ['PATH_INFO'][1:]
        return self.getChannel(name)(environ, start_response)
class WsgiChannel(HttpChannel):
    """WSGI app channel."""

    def __init__(self, *args, **kwargs):
        # Determine wait_interval whether it was passed positionally
        # (4th argument) or by keyword, so it can be validated before
        # handing the arguments to HttpChannel.
        if len(args) > 3:
            wait_interval = args[3]
        elif 'wait_interval' in kwargs:
            wait_interval = kwargs['wait_interval']
        else:
            wait_interval = 0

        if wait_interval < 0:
            # The only reliable way to detect
            # when a client has disconnected with
            # WSGI is that the 'write' function will fail.
            #
            # With long-polling, nothing is written
            # until a message is published to a client,
            # so we are unable to detect if we are
            # waiting for a message for a disconnected client.
            #
            # wait_interval must be non-negative to avoid
            # zombie threads.
            raise ChannelError('wait_interval < 0 is not supported by WsgiChannel')

        HttpChannel.__init__(self, *args, **kwargs)

    def __call__(self, environ, start_response):
        """WSGI entry point: decode, invoke and encode one AMF packet."""
        if environ['REQUEST_METHOD'] != 'POST':
            return self.badMethod(start_response)

        len_str = 'CONTENT_LENGTH'
        raw_request = environ['wsgi.input'].read(int(environ[len_str]))

        try:
            request_packet = self.decode(raw_request)
        except AmFastError, exc:
            # Malformed AMF payload.
            return self.badRequest(start_response, self.getBadEncodingMsg())
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            return self.badServer(start_response, self.getBadServerMsg())

        try:
            content = self.invoke(request_packet)
            response = self.encode(content)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            return self.badServer(start_response, self.getBadServerMsg())

        return self.getResponse(start_response, response)

    def getResponse(self, start_response, response):
        # 200 with the AMF content type.
        start_response('200 OK', [
            ('Content-Type', self.CONTENT_TYPE),
            ('Content-Length', str(len(response)))
        ])
        return [response]

    def badMethod(self, start_response):
        # 405: only POST is accepted.
        response = self.getBadMethodMsg()
        start_response('405 Method Not Allowed', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(response)))
        ])
        return [response]

    def badRequest(self, start_response, response):
        # 400: the request packet could not be decoded.
        start_response('400 Bad Request', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(response)))
        ])
        return [response]

    def badPage(self, start_response, response):
        # 404: unknown resource.
        start_response('404 Not Found', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(response)))
        ])
        return [response]

    def badServer(self, start_response, response):
        # 500: unexpected server-side failure.
        start_response('500 Internal Server Error', [
            ('Content-Type', 'text/plain'),
            ('Content-Length', str(len(response)))
        ])
        return [response]
class StreamingWsgiChannel(WsgiChannel):
    """WsgiChannel that opens a persistent connection with the client to serve messages."""

    def __init__(self, name, max_connections=-1, endpoint=None, wait_interval=0, heart_interval=30000):
        # heart_interval: milliseconds between heart beats that keep the
        # streaming connection alive and detect client disconnects.
        WsgiChannel.__init__(self, name, max_connections=max_connections,
            endpoint=endpoint, wait_interval=wait_interval)
        self.heart_interval = heart_interval

    def __call__(self, environ, start_response):
        # Plain AMF requests go to the parent channel; anything else is
        # treated as a streaming command message.
        if environ['CONTENT_TYPE'] == self.CONTENT_TYPE:
            # Regular AMF message
            return WsgiChannel.__call__(self, environ, start_response)

        # Create streaming message command
        try:
            msg = messaging.StreamingMessage()
            msg.parseParams(environ['QUERY_STRING'])
            body = environ['wsgi.input'].read(int(environ['CONTENT_LENGTH']))
            msg.parseBody(body)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            return self.badServer(start_response, self.getBadServerMsg())

        if msg.operation == msg.OPEN_COMMAND:
            return self.startStream(environ, start_response, msg)

        if msg.operation == msg.CLOSE_COMMAND:
            return self.stopStream(msg)

        return self.badRequest(start_response, self.getBadRequestMsg('Streaming operation unknown: %s' % msg.operation))

    def startStream(self, environ, start_response, msg):
        """Start streaming response."""
        try:
            connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            return self.badServer(start_response, self.getBadServerMsg())

        # The legacy WSGI 'write' callable is the only way to push data
        # incrementally and to detect a dropped client (write raises).
        write = start_response('200 OK', [
            ('Content-Type', self.CONTENT_TYPE)
        ])

        try:
            # Send acknowledge message
            response = msg.acknowledge()
            response.body = connection.id
            try:
                bytes = messaging.StreamingMessage.prepareMsg(response, self.endpoint)
                write(bytes)
                # Pad with null bytes so the client's buffer flushes.
                write(chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES)
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception, exc:
                amfast.log_exc(exc)
                return []

            # Start heart beat
            timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
            timer.daemon = True
            timer.start()

            # Wait for new messages.
            event = threading.Event()
            connection.setNotifyFunc(event.set)
            # poll_interval presumably defined by the HttpChannel base — TODO confirm.
            poll_secs = float(self.poll_interval) / 1000
            while True:
                if connection.connected is False:
                    # Connection is no longer active
                    msg = messaging.StreamingMessage.getDisconnectMsg()
                    try:
                        write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint))
                    except:
                        # Client may have already disconnected
                        pass

                    # Stop stream
                    return []

                if self.channel_set.notify_connections is True:
                    # Block until notification of new message
                    event.wait()
                else:
                    # Block until poll_interval is reached
                    event.wait(poll_secs)

                # Message has been published,
                # or it's time for a heart beat

                # Remove notify_func so that
                # New messages don't trigger event.
                connection.unSetNotifyFunc()

                msgs = self.channel_set.subscription_manager.pollConnection(connection)
                if len(msgs) > 0:
                    while len(msgs) > 0:
                        # Dispatch all messages to client
                        for msg in msgs:
                            try:
                                bytes = messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except Exception, exc:
                                # Encoding failed for this message; drop the client.
                                amfast.log_exc(exc)
                                self.channel_set.disconnect(connection)
                                break

                            try:
                                write(bytes)
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except:
                                # Client has disconnected
                                self.channel_set.disconnect(connection)
                                return []

                        # Re-poll in case more messages arrived while writing.
                        msgs = self.channel_set.subscription_manager.pollConnection(connection)
                else:
                    # Send heart beat
                    try:
                        write(chr(messaging.StreamingMessage.NULL_BYTE))
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        # Client has disconnected
                        self.channel_set.disconnect(connection)
                        return []

                # Create new event to trigger new messages or heart beats
                event = threading.Event()
                connection.setNotifyFunc(event.set)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            self.channel_set.disconnect(connection)
            return []

    def beat(self, connection):
        """Send a heart beat."""
        # Waking the notify func makes the streaming loop run; with no new
        # messages queued it emits a single null byte (the heart beat).
        if hasattr(connection, "notify_func") and connection.notify_func is not None:
            connection.notify_func()
        else:
            # No active stream for this connection; stop rescheduling.
            return

        # Create timer for next beat
        timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
        timer.daemon = True
        timer.start()

    def stopStream(self, msg):
        """Stop a streaming connection."""
        connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        connection.disconnect()
        # Wake the streaming loop so it notices the disconnect promptly.
        if hasattr(connection, "notify_func") and connection.notify_func is not None:
            connection.notify_func()
        return []
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/wsgi_channel.py
|
wsgi_channel.py
|
import time
import memcache_manager
from connection import Connection, ConnectionError
from connection_manager import ConnectionManager, NotConnectedError, SessionAttrError
class MemcacheConnectionManager(ConnectionManager, memcache_manager.MemcacheManager):
    """Manages connections stored by Memcache.

    Each connection attribute is persisted under its own memcache key
    (see ATTRIBUTES); multi-step updates are guarded by a MemcacheMutex.
    """

    # Memcache key holding the list of all known connection ids.
    CONNECTIONS_ATTR = '_connection_ids'
    # Memcache key holding a dict of channel_name -> connection count.
    CHANNELS_ATTR = '_channels'

    # Per-connection attributes, each stored under its own memcache key.
    ATTRIBUTES = ('connection_info', 'connected', 'last_active',
        'last_polled', 'authenticated', 'flex_user', 'session', 'notify_func_id')

    def __init__(self, connection_class=Connection, connection_params=None,
        mc_servers=None, mc_debug=0):
        """
        arguments
        ==========
         * connection_class - class used to instantiate connections.
         * connection_params - dict, extra connection parameters.
         * mc_servers - list of 'host:port' memcache servers.
             Default = ['127.0.0.1:11211'].
         * mc_debug - int, debug level for the memcache client.
        """
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        # Avoid a mutable default argument; None means "use the default server".
        if mc_servers is None:
            mc_servers = ['127.0.0.1:11211']

        self.mc = self.createMcClient(mc_servers, mc_debug)
        self._lock = memcache_manager.MemcacheMutex(self.mc)

    def reset(self):
        """Delete all persisted connection data and start fresh."""
        self._lock.releaseAll()

        lock_name = self.getLockName('connection_reset')
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            if connection_ids is not None:
                for connection_id in connection_ids:
                    keys = [self.getKeyName(connection_id, attr) for attr in self.ATTRIBUTES]
                    self.mc.delete_multi(keys)

            self.mc.set(self.CONNECTIONS_ATTR, [])
            self.mc.set(self.CHANNELS_ATTR, {})
        finally:
            self._lock.release(lock_name)

    def incrementChannelCount(self, channel_name):
        """Add one to the connection count for a channel."""
        lock_name = self.getLockName(self.CHANNELS_ATTR)
        self._lock.acquire(lock_name)
        try:
            channels = self.mc.get(self.CHANNELS_ATTR)
            if channels is None:
                channels = {}

            if channel_name in channels:
                channels[channel_name] += 1
            else:
                channels[channel_name] = 1

            self.mc.set(self.CHANNELS_ATTR, channels)
        finally:
            self._lock.release(lock_name)

    def decrementChannelCount(self, channel_name):
        """Subtract one from the connection count for a channel."""
        lock_name = self.getLockName(self.CHANNELS_ATTR)
        self._lock.acquire(lock_name)
        try:
            channels = self.mc.get(self.CHANNELS_ATTR)
            if channels is None:
                channels = {}

            if channel_name in channels:
                channels[channel_name] -= 1
                self.mc.set(self.CHANNELS_ATTR, channels)
        finally:
            self._lock.release(lock_name)

    def getConnectionCount(self, channel_name):
        """Return the number of connections on a channel (0 if unknown)."""
        channels = self.mc.get(self.CHANNELS_ATTR)
        if channels is None:
            return 0
        return channels.get(channel_name, 0)

    def checkMultiSetResults(self, results):
        """Raise ConnectionError if set_multi failed to store any key."""
        if len(results) > 0:
            msg = 'The following parameters were not set: ' + ', '.join(results)
            raise ConnectionError(msg)

    def loadConnection(self, connection_id):
        """Instantiate a connection object from persisted info.

        Raises NotConnectedError if the connection is unknown.
        """
        connection_info = self.mc.get(self.getKeyName(connection_id, 'connection_info'))
        if connection_info is None:
            raise NotConnectedError("Connection '%s' is not connected." % connection_id)

        return self.connection_class(self, connection_info['channel_name'],
            connection_id, connection_info['timeout'])

    def initConnection(self, connection, channel):
        """Persist the initial state for a newly created connection."""
        params = {
            'connected': True,
            'last_active': time.time() * 1000,
            'last_polled': 0.0,
            'authenticated': False,
            'session': {}
        }

        connection_info = {
            'channel_name': connection.channel_name,
            'timeout': connection.timeout
        }

        cache_params = {}
        for key, val in params.iteritems():
            cache_params[self.getKeyName(connection.id, key)] = val
        cache_params[self.getKeyName(connection.id, 'connection_info')] = connection_info
        self.checkMultiSetResults(self.mc.set_multi(cache_params))

        lock_name = self.getLockName(self.CONNECTIONS_ATTR)
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            if connection_ids is None:
                connection_ids = []

            connection_ids.append(connection.id)
            self.mc.set(self.CONNECTIONS_ATTR, connection_ids)
        finally:
            self._lock.release(lock_name)

        self.incrementChannelCount(connection.channel_name)

    def iterConnectionIds(self):
        """Return an iterator over known connection ids, or None if none are stored."""
        connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
        if connection_ids is not None:
            return connection_ids.__iter__()
        # NOTE(review): returning None (not an empty iterator) preserves the
        # original contract; callers must handle it.
        return None

    # --- proxies for connection properties --- #

    def getConnected(self, connection):
        return self.mc.get(self.getKeyName(connection.id, 'connected'))

    def getLastActive(self, connection):
        return self.mc.get(self.getKeyName(connection.id, 'last_active'))

    def getLastPolled(self, connection):
        return self.mc.get(self.getKeyName(connection.id, 'last_polled'))

    def getAuthenticated(self, connection):
        return self.mc.get(self.getKeyName(connection.id, 'authenticated'))

    def getFlexUser(self, connection):
        return self.mc.get(self.getKeyName(connection.id, 'flex_user'))

    def getNotifyFunc(self, connection):
        notify_func_id = self.mc.get(self.getKeyName(connection.id, 'notify_func_id'))
        if notify_func_id is None:
            return None

        # BUG FIX: previously dereferenced connection._notify_func_id, an
        # attribute that is never set; use the id loaded from memcache.
        return connection._getNotifyFuncById(notify_func_id)

    # --- proxies for connection methods --- #

    def deleteConnection(self, connection):
        """Remove a connection and all of its persisted attributes."""
        lock_name = self.getLockName(self.CONNECTIONS_ATTR)
        self._lock.acquire(lock_name)
        try:
            connection_ids = self.mc.get(self.CONNECTIONS_ATTR)
            for i, connection_id in enumerate(connection_ids):
                if connection_id == connection.id:
                    connection_ids.pop(i)
                    break
            self.mc.set(self.CONNECTIONS_ATTR, connection_ids)
        finally:
            self._lock.release(lock_name)

        keys = [self.getKeyName(connection.id, attr) for attr in self.ATTRIBUTES]
        self.mc.delete_multi(keys)

        self.decrementChannelCount(connection.channel_name)

        ConnectionManager.deleteConnection(self, connection)

    def connectConnection(self, connection):
        self.mc.set(self.getKeyName(connection.id, 'connected'), True)

    def disconnectConnection(self, connection):
        self.mc.set(self.getKeyName(connection.id, 'connected'), False)

    def touchConnection(self, connection):
        self.mc.set(self.getKeyName(connection.id, 'last_active'), time.time() * 1000)

    def touchPolled(self, connection):
        self.mc.set(self.getKeyName(connection.id, 'last_polled'), time.time() * 1000)

    def authenticateConnection(self, connection, user):
        params = {
            self.getKeyName(connection.id, 'authenticated'): True,
            self.getKeyName(connection.id, 'flex_user'): user
        }
        self.checkMultiSetResults(self.mc.set_multi(params))

    def unAuthenticateConnection(self, connection):
        self.mc.set(self.getKeyName(connection.id, 'authenticated'), False)
        self.mc.delete(self.getKeyName(connection.id, 'flex_user'))

    def setNotifyFunc(self, connection, func):
        # Persist only the function id; the function object itself lives in
        # the Connection class's in-process registry.
        self.mc.set(self.getKeyName(connection.id, 'notify_func_id'),
            connection._setNotifyFunc(func))

    def unSetNotifyFunc(self, connection):
        self.mc.delete(self.getKeyName(connection.id, 'notify_func_id'))

    def getConnectionSessionAttr(self, connection, name):
        """Return a session attribute; raise SessionAttrError if missing."""
        session = self.mc.get(self.getKeyName(connection.id, 'session'))
        try:
            return session[name]
        except KeyError:
            raise SessionAttrError("Attribute '%s' not found." % name)

    def setConnectionSessionAttr(self, connection, name, val):
        """Set a session attribute under the session lock."""
        key = self.getKeyName(connection.id, 'session')
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            session = self.mc.get(key)
            session[name] = val
            self.mc.set(key, session)
        finally:
            self._lock.release(lock_name)

    def delConnectionSessionAttr(self, connection, name):
        """Delete a session attribute under the session lock (no-op if absent)."""
        key = self.getKeyName(connection.id, 'session')
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            session = self.mc.get(key)
            try:
                del session[name]
                self.mc.set(key, session)
            except KeyError:
                pass
        finally:
            self._lock.release(lock_name)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/memcache_connection_manager.py
|
memcache_connection_manager.py
|
import threading
import cherrypy
import cherrypy.process.plugins as cp_plugins
import amfast
import amfast.remoting.flex_messages as messaging
from amfast.remoting.channel import ChannelSet, HttpChannel, ChannelError
def amfhook():
    """CherryPy hook run before the request body is processed.

    Disables CherryPy's own body handling so the channel can decode the
    raw AMF packet itself, and rejects any non-POST request.
    """
    cherrypy.request.show_tracebacks = False
    cherrypy.request.process_request_body = False
    if cherrypy.request.method != 'POST':
        raise cherrypy.HTTPError(405, "405 Method Not Allowed\n\nAMF request must use 'POST' method.")
# Register the hook as a CherryPy tool usable via @cherrypy.tools.amfhook().
cherrypy.tools.amfhook = cherrypy.Tool('before_request_body', amfhook, priority=0)
class CherryPyChannelSet(ChannelSet):
    """A ChannelSet for use with CherryPy."""

    def __init__(self, *args, **kwargs):
        # Tracks whether the connection-cleaning Monitor has been
        # registered with the CherryPy engine yet.
        self.clean_scheduled = False
        ChannelSet.__init__(self, *args, **kwargs)

    def scheduleClean(self):
        """Overridden to use CherryPy's Monitor functionality."""
        if self.clean_scheduled is False:
            # self.clean / self.clean_freq presumably provided by the
            # ChannelSet base class — TODO confirm.
            cleaner = cp_plugins.Monitor(cherrypy.engine, self.clean, self.clean_freq)
            cleaner.name = "ConnectionCleaner"
            cleaner.subscribe()
            self.clean_scheduled = True

    def mapChannel(self, channel):
        """Overridden so that channel is added as an attribute."""
        if hasattr(self, channel.name):
            raise ChannelError("Reserved attribute name '%s' cannot be used as a channel name." % channel.name)

        ChannelSet.mapChannel(self, channel)
        # Expose the channel's __call__ as an attribute so CherryPy can
        # route requests to it as a page handler.
        setattr(self, channel.name, channel.__call__)
class CherryPyChannel(HttpChannel):
    """An AMF messaging channel that can be used with CherryPy HTTP framework.

    Instantiate a CherryPyChannel object and
    mount the __call__ method to the URL where
    AMF messaging should be available from.

    You can also use a WsgiChannel instance by
    grafting it to the CherryPy tree with
    the command cherrypy.tree.graft.

    Using WsgiChannel will be more efficient, but you
    won't have access to any of CherryPy's built-in tools
    such as cookie-based sessions.
    """

    @cherrypy.expose
    @cherrypy.tools.amfhook()
    def __call__(self):
        # amfhook has already enforced POST and disabled CherryPy's body
        # processing, so the raw AMF packet is read directly from rfile.
        try:
            c_len = int(cherrypy.request.headers['Content-Length'])
            raw_request = cherrypy.request.rfile.read(c_len)
        except KeyError:
            # No Content-Length header: hand the file-like object itself to
            # decode; presumably decode can read from a stream — TODO confirm.
            raw_request = cherrypy.request.rfile

        response = self.invoke(self.decode(raw_request))

        cherrypy.response.headers['Content-Type'] = self.CONTENT_TYPE
        return self.encode(response)
class StreamingCherryPyChannel(CherryPyChannel):
    """Allows HTTP streaming."""

    def __init__(self, name, max_connections=-1, endpoint=None,
        wait_interval=0, heart_interval=30000):
        # heart_interval: milliseconds between null-byte heart beats.
        CherryPyChannel.__init__(self, name, max_connections=max_connections,
            endpoint=endpoint, wait_interval=wait_interval)

        self.heart_interval = heart_interval

    @cherrypy.expose
    @cherrypy.tools.amfhook()
    def __call__(self, command=None, version=None):
        # Plain AMF requests go to the parent channel; anything else is
        # treated as a streaming command message.
        if cherrypy.request.headers['Content-Type'] == self.CONTENT_TYPE:
            # Regular AMF message
            return CherryPyChannel.__call__(self)

        # Create streaming message command
        cherrypy.response.headers['Content-Type'] = self.CONTENT_TYPE
        # Enable CherryPy streaming so the generator from startStream is
        # flushed to the client incrementally.
        cherrypy.response.stream = True

        try:
            msg = messaging.StreamingMessage()
            msg.parseParams(cherrypy.request.query_string)

            c_len = int(cherrypy.request.headers['Content-Length'])
            body = cherrypy.request.rfile.read(c_len)
            msg.parseBody(body)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            raise ChannelError("AMF server error.")

        if msg.operation == msg.OPEN_COMMAND:
            return self.startStream(msg)

        if msg.operation == msg.CLOSE_COMMAND:
            return self.stopStream(msg)

        raise ChannelError('Http streaming operation unknown: %s' % msg.operation)

    def startStream(self, msg):
        """Returns an iterator for streaming."""
        try:
            connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            # NOTE(review): this error text appears copy-pasted from the
            # unknown-operation branch; the actual failure here is a
            # connection lookup error.
            raise ChannelError('Http streaming operation unknown: %s' % msg.operation)

        cherrypy.response.headers['Content-Type'] = self.CONTENT_TYPE

        try:
            # Start heart beat
            timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
            timer.daemon = True
            timer.start()

            # Wait for new messages.
            inited = False
            event = threading.Event()
            connection.setNotifyFunc(event.set)
            # poll_interval presumably defined by the HttpChannel base — TODO confirm.
            poll_secs = float(self.poll_interval) / 1000
            while True:
                if connection.connected is False:
                    # Connection is no longer active
                    msg = messaging.StreamingMessage.getDisconnectMsg()
                    yield messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
                    # Stop streaming.
                    return

                if inited is False:
                    # Send acknowledge message
                    response = msg.acknowledge()
                    response.body = connection.id
                    bytes = messaging.StreamingMessage.prepareMsg(response, self.endpoint)
                    inited = True
                    # Pad with null bytes so the client's buffer flushes.
                    bytes += chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES
                    yield bytes

                if self.channel_set.notify_connections is True:
                    # Block until notification of new message
                    event.wait()
                else:
                    # Block until poll_interval is reached
                    event.wait(poll_secs)

                # Message has been published,
                # or it's time for a heart beat

                # Remove notify_func so that
                # New messages don't trigger event.
                connection.unSetNotifyFunc()

                msgs = self.channel_set.subscription_manager.pollConnection(connection)
                if len(msgs) > 0:
                    while len(msgs) > 0:
                        # Dispatch all messages to client
                        for msg in msgs:
                            try:
                                bytes = messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except Exception, exc:
                                # Encoding failed; drop the client.
                                amfast.log_exc(exc)
                                self.channel_set.disconnect(connection)
                                break

                            try:
                                # NOTE(review): a yield cannot itself observe a
                                # dropped client, so this except rarely fires.
                                yield bytes
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except:
                                # Client has disconnected
                                self.channel_set.disconnect(connection)
                                return

                        # Re-poll in case more messages arrived while yielding.
                        msgs = self.channel_set.subscription_manager.pollConnection(connection)
                else:
                    # Send heart beat
                    try:
                        yield chr(messaging.StreamingMessage.NULL_BYTE)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        # Client has disconnected
                        self.channel_set.disconnect(connection)
                        return

                # Create new event to trigger new messages or heart beats
                event = threading.Event()
                connection.setNotifyFunc(event.set)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            self.channel_set.disconnect(connection)
            return

    def stopStream(self, msg):
        """Stop a streaming connection."""
        # NOTE(review): the WSGI streaming channel resolves connections via
        # channel_set.connection_manager.getConnection; confirm that
        # ChannelSet itself exposes getConnection.
        connection = self.channel_set.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        connection.disconnect()
        # Wake the streaming loop so it notices the disconnect promptly.
        if hasattr(connection, "notify_func") and connection.notify_func is not None:
            connection.notify_func()

    def beat(self, connection):
        """Send a heart beat."""
        if hasattr(connection, "notify_func") and connection.notify_func is not None:
            # Wake the streaming loop so it emits a null byte.
            connection.notify_func()
        else:
            # No active stream for this connection; stop rescheduling.
            return

        # Create timer for next beat
        timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
        timer.daemon = True
        timer.start()
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/cherrypy_channel.py
|
cherrypy_channel.py
|
import time
import amfast
import flex_messages as messaging
class ConnectionError(amfast.AmFastError):
    """Raised when a connection-related operation fails."""
    pass
class Connection(object):
    """A client connection to a Channel.

    This class acts like a session.
    Unique to a single flash client.

    attributes (read-only)
    =======================
     * manager - ConnectionManager, the class that manages connection persistence.
     * channel_name - string, name of the channel connection is connected to.
     * id - string, Flash client's ID.
     * timeout - int, timeout in milliseconds
     * connected - boolean, True if connection is connected
     * last_active - float, epoch timestamp when connection was last accessed.
     * last_polled - float, epoch timestamp when connection last polled for messages.
     * authenticated - boolean, True if connection has been authenticated with RemoteObject style authentication.
     * flex_user - string, username if 'authenticated' is True.
    """

    # Keeps track of notification functions
    # by their IDs,
    # so they can be serialized
    # when the connection is saved.
    _notifications = {}

    @classmethod
    def _setNotifyFunc(cls, func):
        # Register func under its id() so only the integer needs to be
        # persisted with the connection; returns that id for later lookup.
        func_id = id(func)
        cls._notifications[func_id] = func
        return func_id

    @classmethod
    def _getNotifyFuncById(cls, func_id):
        # Raises KeyError if the function was never registered.
        return cls._notifications[func_id]

    @classmethod
    def _delNotifyFunc(cls, func_id):
        del cls._notifications[func_id]

    def __init__(self, manager, channel_name, id, timeout=1800000):
        # These attributes should not
        # changed during the life of a connection.
        #
        # All other attributes that may
        # change during the life of a connection
        # should be accessed through properties
        # that call methods in the connection_manager.
        self._manager = manager
        self._channel_name = channel_name
        self._id = id
        self._timeout = timeout

    # --- read-only properties --- #

    def _getManager(self):
        return self._manager
    manager = property(_getManager)

    def _getChannelName(self):
        return self._channel_name
    channel_name = property(_getChannelName)

    def _getId(self):
        return self._id
    id = property(_getId)

    def _getTimeout(self):
        return self._timeout
    timeout = property(_getTimeout)

    # --- proxied properties --- #
    # Mutable state lives in the ConnectionManager so connections can be
    # persisted out-of-process; these properties delegate every read to it.

    def _getConnected(self):
        return self._manager.getConnected(self)
    connected = property(_getConnected)

    def _getLastActive(self):
        return self._manager.getLastActive(self)
    last_active = property(_getLastActive)

    def _getLastPolled(self):
        return self._manager.getLastPolled(self)
    last_polled = property(_getLastPolled)

    def _getAuthenticated(self):
        return self._manager.getAuthenticated(self)
    authenticated = property(_getAuthenticated)

    def _getFlexUser(self):
        return self._manager.getFlexUser(self)
    flex_user = property(_getFlexUser)

    def _getNotifyFunc(self):
        return self._manager.getNotifyFunc(self)
    notify_func = property(_getNotifyFunc)

    # --- proxied methods --- #

    def touch(self):
        """Update last_active."""
        self._manager.touchConnection(self)

    def touchPolled(self):
        """Update last_polled."""
        self._manager.touchPolled(self)

    def softTouchPolled(self):
        """Update last_polled without persisting value.

        Useful when ChannelSet calls _pollForMessage.
        """
        self._manager.softTouchPolled(self)

    def connect(self):
        """Set connected=True."""
        self._manager.connectConnection(self)

    def disconnect(self):
        """Set connected=False."""
        self._manager.disconnectConnection(self)

    def delete(self):
        """Delete connection."""
        self._manager.deleteConnection(self)

    def authenticate(self, user):
        """Set authenticated = True"""
        self._manager.authenticateConnection(self, user)

    def unAuthenticate(self):
        """Set authenticated = False"""
        self._manager.unAuthenticateConnection(self)

    def setNotifyFunc(self, func):
        # Register a callable invoked when a new message is published.
        self._manager.setNotifyFunc(self, func)

    def unSetNotifyFunc(self):
        # Remove the published-message notification callable.
        self._manager.unSetNotifyFunc(self)

    def getSessionAttr(self, name):
        """Get a session attribute."""
        return self._manager.getConnectionSessionAttr(self, name)

    def setSessionAttr(self, name, val):
        """Set a session attribute."""
        self._manager.setConnectionSessionAttr(self, name, val)

    def delSessionAttr(self, name):
        """Del a session attribute."""
        self._manager.delConnectionSessionAttr(self, name)

    # --- instance methods --- #

    def personalizeMessage(self, client_id, msg):
        """Return a copy of the message with client_id set."""
        if hasattr(msg, 'headers'):
            headers = msg.headers
        else:
            headers = None

        return msg.__class__(headers=headers, body=msg.body,
            timeToLive=msg.timeToLive, clientId=client_id,
            destination=msg.destination, timestamp=msg.timestamp)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/connection.py
|
connection.py
|
import base64
from amfast.class_def.as_types import AsNoProxy
from amfast.remoting.flex_messages import CommandMessage
from amfast.remoting.channel import ChannelError, SecurityError
from amfast.remoting.endpoint import AmfEndpoint
# ---- NetConnection Operations --- #
def nc_auth(packet, msg, credentials):
    """NetConnection style authentication.

    Validates the supplied userid/password against the channel set and
    flags the packet as authenticated.
    """
    channel_set = packet.channel.channel_set
    channel_set.checkCredentials(credentials['userid'], credentials['password'])

    # Flag indicating packet was authenticated properly.
    packet._authenticated = True
# --- Flex CommandMessage Operations --- #
def client_ping(packet, msg, *args):
    """Respond to a ping request and connect to the Channel.

    Ensures the acknowledge message has a headers dict and stamps it
    with the connection id of the pinging client.
    """
    ack = msg.response_msg.body
    if getattr(ack, 'headers', None) is None:
        ack.headers = {}

    command = msg.body[0]
    ack.headers[command.FLEX_CLIENT_ID_HEADER] = command.connection.id
def login_operation(packet, msg, raw_creds):
    """RemoteObject style authentication.

    Decodes base64 'username:password' credentials, verifies them with
    the channel set, and marks the connection authenticated.
    """
    cred_str = base64.decodestring(raw_creds)

    command = msg.body[0]
    if hasattr(command, 'headers') and \
        command.CREDENTIALS_CHARSET_HEADER in command.headers:
        # Convert encoded string
        cred_str = unicode(cred_str, command.headers[command.CREDENTIALS_CHARSET_HEADER])

    # BUG FIX: split on the first ':' only, so passwords that themselves
    # contain ':' characters are preserved intact.
    creds = cred_str.split(':', 1)

    channel_set = packet.channel.channel_set
    channel_set.checkCredentials(creds[0], creds[1])
    command.connection.authenticate(creds[0])
def logout_operation(packet, msg, *args):
    """RemoteObject style de-authentication.

    Clears the authenticated flag on the command's connection.
    """
    msg.body[0].connection.unAuthenticate()
def disconnect_operation(packet, msg, *args):
    """Respond to a disconnect operation. Disconnects current Connection.

    Also strips the Flex client id header from the acknowledge message,
    signalling to the client that its session is gone.
    """
    command = msg.body[0]
    packet.channel.disconnect(command.connection)

    ack = msg.response_msg.body
    if hasattr(ack, 'headers') and ack.FLEX_CLIENT_ID_HEADER in ack.headers:
        del ack.headers[ack.FLEX_CLIENT_ID_HEADER]
def subscribe_operation(packet, msg, *args):
    """Respond to a subscribe operation.

    Raises SecurityError when the manager is secure and the connection
    is not authenticated.
    """
    command = msg.body[0]
    manager = packet.channel.channel_set.subscription_manager
    # Short-circuit keeps the authenticated check gated on 'secure',
    # exactly as the original nested ifs did.
    if manager.secure is True and command.connection.authenticated is False:
        raise SecurityError("Operation requires authentication.")

    headers = command.headers
    manager.subscribe(command.connection.id,
        command.clientId, command.destination,
        headers.get(command.SUBTOPIC_HEADER, None),
        headers.get(command.SELECTOR_HEADER, None))
def unsubscribe_operation(packet, msg, *args):
    """Respond to an unsubscribe operation."""
    command = msg.body[0]
    manager = packet.channel.channel_set.subscription_manager
    sub_topic = command.headers.get(command.SUBTOPIC_HEADER, None)
    manager.unSubscribe(command.connection.id, command.clientId,
        command.destination, sub_topic)
def poll_operation(packet, msg, *args):
    """Respond to a poll operation. Returns queued messages."""
    command = msg.body[0]
    connection = command.connection
    channel = packet.channel

    queued = channel.channel_set.subscription_manager.pollConnection(connection)
    if not queued and channel.wait_interval != 0:
        # Long polling channel, don't return response
        # until a message is available.
        queued = channel.waitForMessage(packet, msg, connection)

    if isinstance(channel.endpoint, AmfEndpoint):
        # Make sure messages are not encoded as an ArrayCollection
        return AsNoProxy(queued)
    return queued
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/targets.py
|
targets.py
|
import time
import amfast
import flex_messages as messaging
class Subscription(object):
    """Record of one messaging client's subscription to a topic key."""

    def __init__(self, connection_id=None, client_id=None, topic=None):
        # connection_id: Flash client connection id.
        # client_id: messaging (Consumer) client id.
        # topic: combined topic key (topic + optional sub-topic).
        self.connection_id = connection_id
        self.client_id = client_id
        self.topic = topic
class SubscriptionManager(object):
    """Receives and publishes Producer/Consumer style messages.

    This is an abstract base class and should be implemented by a sub-class.

    attributes
    ===========
     * secure - bool, Set to True to require publishers and subscribers to be authenticated.
     * ttl - float, Default timeToLive in milliseconds for messages that do not have the value set.
    """

    SUBTOPIC_SEPARATOR = "_;_"

    def __init__(self, secure=False, ttl=10000):
        self.secure = secure
        self.ttl = ttl

    @classmethod
    def getTopicKey(cls, topic, sub_topic=None):
        """Combine a topic and optional sub-topic into a single key string."""
        if sub_topic is None:
            return topic
        return cls.SUBTOPIC_SEPARATOR.join((topic, sub_topic))

    @classmethod
    def splitTopicKey(cls, topic):
        """Split a combined key back into its [topic, sub_topic] parts."""
        return topic.split(cls.SUBTOPIC_SEPARATOR)

    def getMessageTopicKey(self, msg):
        """Returns topic key for a message."""
        headers = getattr(msg, 'headers', None)
        if headers is not None and \
            messaging.AsyncMessage.SUBTOPIC_HEADER in headers:
            sub_topic = headers[messaging.AsyncMessage.SUBTOPIC_HEADER]
        else:
            sub_topic = None
        return self.getTopicKey(msg.destination, sub_topic)

    def publishMessage(self, msg):
        """Stamp a message with server time and default TTL, then persist it."""
        # Update timestamp to current server time.
        # Is this the correct thing to do???
        msg.timestamp = time.time() * 1000

        if msg.timeToLive in (None, 0):
            # Set timeToLive if it has not been pre-set.
            msg.timeToLive = self.ttl

        self.persistMessage(msg)

    def pollConnection(self, connection, soft_touch=False):
        """Retrieves all waiting messages for a specific connection.

        parameters
        ===========
         * connection - Connection, connection to poll.
         * soft_touch - boolean, True to call connection.softTouchPolled,
             False to call connection.touchPolled. Default = False
        """
        now = time.time() * 1000

        polled = []
        for sub in self.iterConnectionSubscriptions(connection):
            for raw_msg in self.pollMessages(sub.topic, connection.last_polled, now):
                polled.append(connection.personalizeMessage(sub.client_id, raw_msg))

        if soft_touch is True:
            connection.softTouchPolled()
        else:
            connection.touchPolled()

        return polled
class MemorySubscriptionManager(SubscriptionManager):
    """Stores all subscription information in memory."""

    # Keys used within each per-topic map.
    MSG_ATTR = 'messages'
    CONNECTION_ATTR = 'connections'

    def __init__(self, secure=False, ttl=30000):
        SubscriptionManager.__init__(self, secure=secure, ttl=ttl)

        # amfast.mutex_cls is the project-configured mutex class; guards
        # _topics against concurrent mutation.
        self._lock = amfast.mutex_cls()
        self.reset()

    def reset(self):
        # topic_key -> {MSG_ATTR: [msgs],
        #               CONNECTION_ATTR: {connection_id: {client_id: Subscription}}}
        self._topics = {}

    def _getTopicMap(self, topic):
        """Retrieves or creates a topic map."""
        topic_map = self._topics.get(topic, None)
        if topic_map is None:
            topic_map = {self.MSG_ATTR: [], self.CONNECTION_ATTR: {}}
            self._topics[topic] = topic_map
        return topic_map

    def _cleanTopicMap(self, topic, topic_map):
        """Removes un-needed subscription data for a topic."""
        # Drop the topic entirely once it has no queued messages and no
        # subscribed connections.
        if len(topic_map[self.MSG_ATTR]) == 0 and \
            len(topic_map[self.CONNECTION_ATTR]) == 0:
            del self._topics[topic]

    def subscribe(self, connection_id, client_id, topic, sub_topic=None, selector=None):
        """Subscribe a client to a topic.

        arguments
        ==========
         * connection_id - string, id of Flash client that is subscribing.
         * client_id - string, id of messaging client that is subscribing.
         * topic - string, Topic to subscribe to.
         * sub_topic - string, Sub-Topic to subscribe to. Default = None.
        """
        # NOTE(review): the 'selector' argument is accepted but unused here.
        topic = self.getTopicKey(topic, sub_topic)
        subscription = Subscription(connection_id=connection_id,
            client_id=client_id, topic=topic)

        self._lock.acquire()
        try:
            topic_map = self._getTopicMap(topic)
            connection_map = topic_map[self.CONNECTION_ATTR].get(connection_id, None)
            if connection_map is None:
                connection_map = {}
                topic_map[self.CONNECTION_ATTR][connection_id] = connection_map

            connection_map[client_id] = subscription
        finally:
            self._lock.release()

    def unSubscribe(self, connection_id, client_id, topic, sub_topic=None):
        """Un-Subscribe a client from a topic.

        arguments
        ==========
         * connection_id - string, id of Flash client that is subscribing.
         * client_id - string, id of messaging client that is subscribing.
         * topic - string, Topic to un-subscribe from.
         * sub_topic - string, Sub-Topic to un-subscribe from. Default = None.
        """
        topic = self.getTopicKey(topic, sub_topic)

        self._lock.acquire()
        try:
            topic_map = self._topics.get(topic, None)
            if topic_map is not None:
                connection_map = topic_map[self.CONNECTION_ATTR].get(connection_id, None)
                if connection_map is not None:
                    if client_id in connection_map:
                        if len(connection_map) == 1:
                            # This is the only subscribed client,
                            # delete all connection data.
                            del topic_map[self.CONNECTION_ATTR][connection_id]
                            self._cleanTopicMap(topic, topic_map) # Clean up topic data
                        else:
                            # Delete this single subscription
                            del connection_map[client_id]
        finally:
            self._lock.release()

    def deleteConnection(self, connection):
        """Remove all subscriptions for this connection.

        arguments
        ==========
         * connection - Connection, the connection being removed.
        """
        # .items() snapshots the dict, so topics can be deleted while iterating.
        for topic, topic_map in self._topics.items():
            if connection.id in topic_map[self.CONNECTION_ATTR]:
                del topic_map[self.CONNECTION_ATTR][connection.id]
                self._cleanTopicMap(topic, topic_map)

    def iterSubscribers(self, topic, sub_topic=None):
        """Iterate through Flash client ids subscribed to a specific topic."""
        topic = self.getTopicKey(topic, sub_topic)
        topic_map = self._topics.get(topic, None)
        if topic_map is None:
            # No subscribers: yield nothing.
            return [].__iter__()

        return topic_map[self.CONNECTION_ATTR].keys().__iter__()

    def iterConnectionSubscriptions(self, connection):
        """Iterate through all Subscriptions that belong to a specific connection."""
        for topic_map in self._topics.values():
            connection_map = topic_map[self.CONNECTION_ATTR].get(connection.id, {})
            for subscription in connection_map.values():
                yield subscription

    def persistMessage(self, msg):
        """Store a message."""
        topic = self.getMessageTopicKey(msg)

        self._lock.acquire()
        try:
            topic_map = self._getTopicMap(topic)
            topic_map[self.MSG_ATTR].append(msg)
        finally:
            self._lock.release()

    def pollMessages(self, topic, cutoff_time, current_time):
        """Retrieves all queued messages, and discards expired messages.

        arguments:
        ===========
         * topic - string, Topic to find messages for.
         * cutoff_time - float, epoch time, only messages published
             after this time will be returned.
         * current_time - float, epoch time, used to determine if a
             message is expired.
        """
        topic_map = self._topics.get(topic, None)
        if topic_map is None:
            return

        # NOTE(review): this is a generator, so the lock is held across
        # yields until the consumer exhausts or closes it; the bare
        # 'except' below releases the lock on GeneratorExit / errors.
        self._lock.acquire()
        try:
            msgs = topic_map[self.MSG_ATTR]
            msg_count = len(msgs)
            idx = 0
            while idx < msg_count:
                msg = msgs[idx]
                if current_time > (msg.timestamp + msg.timeToLive):
                    # Remove expired message
                    msgs.pop(idx)
                    msg_count -= 1
                else:
                    idx += 1
                    if msg.timestamp > cutoff_time:
                        yield msg
        except:
            self._lock.release()
            raise
        self._lock.release()
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/subscription_manager.py
|
subscription_manager.py
|
import time
import threading
import uuid
import amfast
from amfast.class_def import ClassDefMapper
import connection_manager as cm
import subscription_manager as sm
import flex_messages as messaging
import thread_pool
class ChannelError(amfast.AmFastError):
    """Base exception for errors raised by Channel and ChannelSet operations."""
    pass
class SecurityError(ChannelError):
    """Raised when authentication fails or credentials are invalid."""
    pass
class ChannelFullError(ChannelError):
    """Raised when a connection is attempted on a Channel that has
    reached its max_connections limit."""
    pass
class Channel(object):
"""An individual channel that can send/receive messages.
attributes
===========
* name - string, Channel name.
* endpoint - Endpoint, encodes and decodes messages.
* max_connections - int, When the number of connections exceeds this number,
an exception is raised when new clients attempt to connect. Set to -1
for no limit.
"""
def __init__(self, name, max_connections=-1, endpoint=None):
self.name = name
self.max_connections = max_connections
if endpoint is None:
from endpoint import AmfEndpoint
endpoint = AmfEndpoint()
self.endpoint = endpoint
self._lock = amfast.mutex_cls()
self._channel_set = None
def _getChannelSet(self):
# channel_set should be considered
# read-only outside of this class
return self._channel_set
channel_set = property(_getChannelSet)
def encode(self, *args, **kwargs):
"""Encode a packet."""
try:
return self.endpoint.encodePacket(*args, **kwargs)
except amfast.AmFastError, exc:
# Not much we can do if packet is not encoded properly
amfast.log_exc(exc)
raise exc
def decode(self, *args, **kwargs):
"""Decode a raw request."""
try:
return self.endpoint.decodePacket(*args, **kwargs)
except amfast.AmFastError, exc:
# Not much we can do if packet is not decoded properly
amfast.log_exc(exc)
raise exc
def invoke(self, request):
"""Invoke an incoming request packet."""
try:
request.channel = self # so user can access channel object
return request.invoke()
except amfast.AmFastError, exc:
return request.fail(exc)
def getFlexConnection(self, flex_msg):
"""Returns a Connection object for a Flex message.
Creates a new Connection if one does not already exist.
arguments
==========
* flex_msg - FlexMessage object.
"""
# If header does not exist, connection does not exist.
if not hasattr(flex_msg, 'headers') or flex_msg.headers is None:
return self.connect()
flex_client_id = flex_msg.headers.get(flex_msg.FLEX_CLIENT_ID_HEADER, None)
if flex_client_id == 'nil' or flex_client_id is None:
return self.connect()
try:
return self.channel_set.connection_manager.getConnection(flex_msg.headers[flex_msg.FLEX_CLIENT_ID_HEADER])
except cm.NotConnectedError:
return self.connect(flex_msg.headers[flex_msg.FLEX_CLIENT_ID_HEADER])
def connect(self, connection_id=None):
"""Add a client connection to this channel.
arguments
==========
* flex_client_id - string, Flex client id.
Returns Connection
"""
if self.max_connections > -1 and \
self.channel_set.connection_manager.getConnectionCount(self.name) \
>= self.max_connections:
raise ChannelFullError("Channel '%s' is not accepting connections." % self.name)
return self.channel_set.connect(self, connection_id)
def disconnect(self, connection):
"""Remove a client connection from this Channel.
arguments
==========
* connection - Connection.
"""
self.channel_set.disconnect(connection)
class HttpChannel(Channel):
    """An individual channel that can send/receive messages over HTTP.

    attributes
    ===========
     * wait_interval - int, Number of milliseconds to wait before sending
         response to client when a polling request is received. Set to -1 to
         configure channel as a long-polling channel. Default = 0
     * poll_interval - int, Number of milliseconds between message polling operations
         when channel_set.notify == False
    """

    # Content type for amf messages
    CONTENT_TYPE = 'application/x-amf'

    # These bytes need to be sent to 'kickstart'
    # streaming connections so that the client
    # will respond to incoming message. In reality
    # this value is browser specific, but for now
    # we're just using the maximum required byte length
    # which is for IE.
    KICKSTART_BYTES = 2048

    def __init__(self, name, max_connections=-1, endpoint=None,
            wait_interval=0, poll_interval=500):
        Channel.__init__(self, name, max_connections, endpoint)
        self.wait_interval = wait_interval
        self.poll_interval = poll_interval

    def getBadMethodMsg(self):
        """HTTP body for a non-POST request."""
        return "405 Method Not Allowed\n\nAMF request must use 'POST' method."

    def getBadEncodingMsg(self):
        """HTTP body for an undecodable AMF packet."""
        return self.getBadRequestMsg('AMF packet could not be decoded.')

    def getBadRequestMsg(self, msg=''):
        """HTTP body for a malformed request."""
        return "400 Bad Request\n\n%s" % msg

    def getBadPageMsg(self, msg=''):
        """HTTP body for an unknown resource."""
        return "404 Not Found\n\n%s" % msg

    def getBadServerMsg(self, msg=''):
        """HTTP body for an internal server failure."""
        return "500 Internal Server Error\n\nAmFast server error.%s" % msg

    def waitForMessage(self, packet, message, connection):
        """Block until a new message is published to this connection.

        Returns list of messages.
        """
        if self.channel_set.notify_connections is True:
            # Connections are notified directly: wait on an event.
            return self._waitForMessage(packet, message, connection)
        # Otherwise fall back to repeated polling.
        return self._pollForMessage(packet, message, connection)

    def _waitForMessage(self, packet, message, connection):
        """Wait until notified that a new message is available, then return messages.

        This is blocking, and should only be used
        for Channels where each connection is a thread.
        Synchronous servers should override this method.
        """
        event = threading.Event()
        connection.setNotifyFunc(event.set)

        if self.wait_interval > -1:
            event.wait(float(self.wait_interval) / 1000)
        # NOTE(review): when wait_interval == -1 (documented as long-polling)
        # no wait is performed here at all -- confirm subclasses override this.

        # Either the notify event fired or the timeout was reached.
        connection.unSetNotifyFunc()
        return self.channel_set.subscription_manager.pollConnection(connection)

    def _pollForMessage(self, packet, message, connection):
        """Repeatedly poll for a new message until one arrives or wait_interval elapses.

        This is blocking, and should only be used
        for Channels where each connection is a thread.
        Synchronous servers should override this method.
        """
        # Some connection managers can skip persisting 'last_polled' on
        # every poll ('soft touch') and store it only once at the end.
        soft_touch = hasattr(self.channel_set.connection_manager, "softTouchPolled")

        poll_secs = float(self.poll_interval) / 1000
        wait_secs = float(self.wait_interval) / 1000
        elapsed = 0

        while True:
            # Sleep one poll interval, then check for messages.
            threading.Event().wait(poll_secs)

            msgs = self.channel_set.subscription_manager.pollConnection(connection, soft_touch)
            if len(msgs) > 0:
                if soft_touch is True:
                    # Store 'last_polled' value.
                    connection.touchPolled()
                return msgs

            elapsed += poll_secs
            if elapsed > wait_secs or connection.connected is False:
                if soft_touch is True:
                    # Store 'last_polled' value.
                    connection.touchPolled()
                return ()
class ChannelSet(object):
    """A collection of Channels.

    A client can access the same RPC exposed methods
    from any of the Channels contained in a ChannelSet.
    A Channel can only belong to 1 ChannelSet.

    attributes
    ===========
     * service_mapper - ServiceMapper, maps destinations to Targets.
     * connection_manager - ConnectionManager, keeps track of connected clients.
     * subscription_manager - SubscriptionManager, keeps track of subscribed clients.
     * notify_connections - boolean, set to True when using long-polling or streaming channels.
     * clean_freq - float - number of seconds to clean expired connections.
    """

    def __init__(self, service_mapper=None, connection_manager=None,
        subscription_manager=None, notify_connections=False, clean_freq=300):
        if service_mapper is None:
            # Local import, presumably to avoid a circular import at
            # module load time -- amfast.remoting imports this module.
            from amfast.remoting import ServiceMapper
            service_mapper = ServiceMapper()
        self.service_mapper = service_mapper

        if connection_manager is None:
            # Default to in-memory connection tracking.
            connection_manager = cm.MemoryConnectionManager()
        self.connection_manager = connection_manager

        if subscription_manager is None:
            # Default to in-memory subscription tracking.
            subscription_manager = sm.MemorySubscriptionManager()
        self.subscription_manager = subscription_manager

        self.notify_connections = notify_connections
        self.clean_freq = clean_freq
        self._lock = amfast.mutex_cls()
        self._channels = {}

        # Kick off the periodic expired-connection cleaning thread.
        self.scheduleClean()

    def __iter__(self):
        # To make thread safe
        # NOTE(review): on Python 2, values() returns a list copy; on
        # Python 3 it is a live view and this is NOT a safe snapshot.
        channel_vals = self._channels.values()
        return channel_vals.__iter__()

    def checkCredentials(self, user, password):
        """Determines if credentials are valid.

        arguments
        ==========
         * user - string, username.
         * password - string, password.

        Returns True if credentials are valid.

        Raises SecurityError if credentials are invalid.
        """
        # Subclasses must override this to enable authentication.
        raise SecurityError('Authentication not implemented.');

    def connect(self, channel, connection_id=None):
        """Add a client connection to this channel.

        arguments
        ==========
         * channel - Channel, the channel the client connected through.
         * connection_id - string, Client connection id.

        Returns Connection

        Raises ChannelError if the connection id is already in use.
        """
        try:
            connection = self.connection_manager.getConnection(connection_id)
        except cm.NotConnectedError:
            # Id is unused: fall through and create the connection.
            pass
        else:
            raise ChannelError("Connection ID '%s' is already connected." % connection_id)

        return self.connection_manager.createConnection(channel, connection_id)

    def disconnect(self, connection):
        """Remove a client connection from this ChannelSet.

        arguments
        ==========
         * connection - Connection being disconnected.
        """
        # Drop subscriptions first, then the connection record itself.
        self.subscription_manager.deleteConnection(connection)
        self.connection_manager.deleteConnection(connection)

    def scheduleClean(self):
        """Schedule connection cleaning procedure to run sometime in the future."""
        if amfast.use_dummy_threading is True:
            # Without real threads the repeating cleaner cannot run.
            amfast.logger.warn('Connection cleaning was NOT scheduled.')
            return

        repeater = thread_pool.RepeatingThread(self.clean_freq, self.clean)
        repeater.start()

    def clean(self):
        """Clean out expired connections."""
        if amfast.log_debug is True:
            amfast.logger.debug("Cleaning channel.")

        # Timestamps in this codebase are epoch milliseconds.
        current_time = time.time() * 1000
        # NOTE(review): if iterConnectionIds() returns a generator this
        # truthiness check is always True -- confirm it returns a list.
        if self.connection_manager.iterConnectionIds():
            for connection_id in self.connection_manager.iterConnectionIds():
                self.cleanConnection(connection_id, current_time)

        if hasattr(self.subscription_manager, 'deleteExpiredMessages'):
            # TODO: better interface for deleting expired messages.
            #
            # Work on this once async messaging support is dropped.
            #
            # Perhaps iterate through messages and delete expired
            # similar to how connection cleaning works??
            #
            # Currently, some subscriptions managers auto-delete
            # expired messages, while others require a method to be called.
            self.subscription_manager.deleteExpiredMessages(current_time)

    def cleanConnection(self, connection_id, current_time):
        """Disconnect a single connection if it has timed out.

        arguments
        ==========
         * connection_id - string, id of the connection to examine.
         * current_time - float, epoch milliseconds used for the timeout check.
        """
        if amfast.log_debug is True:
            amfast.logger.debug("Cleaning connection: %s" % connection_id)

        try:
            connection = self.connection_manager.getConnection(connection_id, False)
        except cm.NotConnectedError:
            # Already gone; nothing to clean.
            return

        if connection.last_active + connection.timeout < current_time:
            channel = self.getChannel(connection.channel_name)
            channel.disconnect(connection)

    def publishObject(self, body, topic, sub_topic=None, headers=None, ttl=10000):
        """Create a message and publish it.

        arguments:
        ===========
        body - Any Python object.
        topic - string, the topic to publish to.
        sub_topic - string, the sub topic to publish to. Default = None
        headers - dict, headers to include with this message.
        ttl - int time to live in milliseconds. Default = 10000
        """
        if sub_topic is not None:
            if headers is None:
                headers = {}
            # Sub-topic travels as a message header.
            headers[messaging.AsyncMessage.SUBTOPIC_HEADER] = sub_topic

        current_time = time.time() * 1000

        msg = messaging.AsyncMessage(headers=headers, body=body,
            clientId=None, destination=topic, timestamp=current_time,
            timeToLive=ttl)

        self.publishMessage(msg)

    def publishMessage(self, msg):
        """Publish a pre-formed message.

        arguments:
        ===========
         * msg - AbstractMessage, the Flex message to publish.
        """
        self.subscription_manager.publishMessage(msg)

        if self.notify_connections is True:
            # Wake any long-polling/streaming connections subscribed
            # to this message's topic/sub-topic.
            topic = msg.destination
            if hasattr(msg, 'headers') and \
                msg.headers is not None and \
                messaging.AsyncMessage.SUBTOPIC_HEADER in msg.headers:
                sub_topic = msg.headers[messaging.AsyncMessage.SUBTOPIC_HEADER]
            else:
                sub_topic = None

            self.notifyConnections(topic, sub_topic)

    def notifyConnections(self, topic, sub_topic):
        """Notify connections that a message has been published.

        arguments:
        ===========
         * topic - string, topic to notify
         * sub_topic - string, sub_topic to notify
        """
        # Notification runs on a background thread so publishing never blocks.
        thread_pool.GlobalThreadPool().addTask(self._notifyConnections, args=(topic, sub_topic))

    def _notifyConnections(self, topic, sub_topic):
        """Do the real work of notifyConnections."""
        for connection_id in self.subscription_manager.iterSubscribers(topic, sub_topic):
            try:
                connection = self.connection_manager.getConnection(connection_id, False)
            except cm.NotConnectedError:
                # Subscriber disconnected since subscribing; skip it.
                continue

            if connection.notify_func is not None:
                connection.notify_func()

    def mapChannel(self, channel):
        """Add a Channel to the ChannelSet

        arguments
        ==========
         * channel - Channel, the channel to add.
        """
        self._lock.acquire()
        try:
            self._channels[channel.name] = channel
            channel._channel_set = self
        finally:
            self._lock.release()

    def unMapChannel(self, channel):
        """Removes a Channel from the ChannelSet

        arguments
        ==========
         * channel - Channel, the channel to remove.
        """
        self._lock.acquire()
        try:
            if channel.name in self._channels:
                channel._channel_set = None
                del self._channels[channel.name]
        finally:
            self._lock.release()

    def getChannel(self, name):
        """Retrieves a Channel from the ChannelSet

        arguments
        ==========
         * name - string, the name of the Channel to retrieve.

        Raises ChannelError if no Channel with that name is mapped.
        """
        try:
            return self._channels[name]
        except KeyError:
            raise ChannelError("Channel '%s' is not mapped." % name)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/channel.py
|
channel.py
|
import threading
class WorkerTask(object):
    """A task to be performed by the ThreadPool.

    Wraps a callable together with the positional and keyword
    arguments to invoke it with.
    """

    def __init__(self, function, args=(), kwargs=None):
        # 'kwargs' defaults to None instead of a {} literal: a mutable
        # default argument is shared by every call, so mutating one
        # task's kwargs would have leaked into all later tasks.
        self.function = function
        self.args = args
        self.kwargs = {} if kwargs is None else kwargs

    def __call__(self):
        """Execute the wrapped callable."""
        self.function(*self.args, **self.kwargs)
class RepeatingThread(threading.Thread):
    """A daemon thread that runs the same task over and over.

    arguments
    ==========
     * duration - float, seconds to sleep between runs of the task.
     * task - callable, the work to perform on each iteration.
    """

    def __init__(self, duration=None, task=None):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.duration = duration
        self.task = task
        # Renamed from '_stop': threading.Thread defines an internal
        # _stop() method on Python 3, and shadowing it with a boolean
        # breaks thread shutdown (TypeError: 'bool' is not callable).
        self._stop_requested = False
        # Created eagerly (the original created it lazily in run());
        # this also fixes an AttributeError crash in run() when stop()
        # was called before start().
        self._event = threading.Event()

    def run(self):
        while True:
            self._event.wait(self.duration)
            if self._stop_requested:
                return
            self.task()

    def stop(self):
        """Ask the thread to exit; wakes it immediately if it is sleeping."""
        self._stop_requested = True
        self._event.set()
class WorkerThread(threading.Thread):
    """A thread managed by a thread pool.

    Sleeps on an Event while idle; work() wakes it (or starts it on
    first use) so it drains the owning pool's task queue.
    """

    def __init__(self, pool):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        # Owning ThreadPool; tasks are pulled from pool._tasks.
        self.pool = pool
        # True while the thread is actively draining the queue.
        self.busy = False
        # True once start() has been called (threads cannot be restarted).
        self._started = False
        # Created lazily by run(); used to sleep when idle.
        self._event = None

    def work(self):
        """Wake the thread, or start it if it has never run."""
        if self._started is True:
            # Already running: wake it if it is sleeping on its event.
            if self._event is not None and not self._event.isSet():
                self._event.set()
        else:
            self._started = True
            self.start()

    def run(self):
        while True:
            self.busy = True
            while len(self.pool._tasks) > 0:
                try:
                    task = self.pool._tasks.pop()
                    task()
                except IndexError:
                    # Just in case another thread grabbed the task 1st.
                    # NOTE(review): this also silently swallows IndexError
                    # raised by task() itself, and any OTHER exception from
                    # task() will kill this worker -- confirm intended.
                    pass

            # Sleep until needed again
            self.busy = False
            if self._event is None:
                self._event = threading.Event()
            else:
                self._event.clear()
            self._event.wait()
class ThreadPool(object):
    """Executes queued tasks in the background.

    arguments
    ==========
     * max_pool_size - int, maximum number of worker threads. Default = 10
    """

    def __init__(self, max_pool_size=10):
        self.max_pool_size = max_pool_size
        self._threads = []  # WorkerThread instances owned by this pool
        self._tasks = []  # queued WorkerTask callables

    def _addTask(self, task):
        """Queue a task and hand it to an idle (or newly created) worker."""
        self._tasks.append(task)

        # Re-use an idle worker when one is available.
        worker_thread = None
        for thread in self._threads:
            if thread.busy is False:
                worker_thread = thread
                break

        if worker_thread is None and len(self._threads) < self.max_pool_size:
            # '<' (not the original '<=') so the pool never grows beyond
            # max_pool_size threads; '<=' allowed max_pool_size + 1.
            worker_thread = WorkerThread(self)
            self._threads.append(worker_thread)

        if worker_thread is not None:
            worker_thread.work()
        # If every worker is busy and the pool is full, the queued task
        # is picked up when a worker finishes its current work.

    def addTask(self, function, args=(), kwargs=None):
        """Queue 'function(*args, **kwargs)' for background execution."""
        # kwargs defaults to None to avoid a shared mutable {} default.
        if kwargs is None:
            kwargs = {}
        self._addTask(WorkerTask(function, args, kwargs))
class GlobalThreadPool(object):
    """ThreadPool Singleton class.

    Every GlobalThreadPool() shares one ThreadPool; all attribute
    reads and writes are delegated to that shared instance.
    """

    # The shared ThreadPool, created on first instantiation.
    _instance = None

    def __init__(self):
        """Create singleton instance """
        if GlobalThreadPool._instance is None:
            # Create and remember instance
            GlobalThreadPool._instance = ThreadPool()

    def __getattr__(self, attr):
        """ Delegate get access to implementation """
        return getattr(self._instance, attr)

    def __setattr__(self, attr, val):
        """ Delegate set access to implementation

        Note: this means instance attributes can never be set on the
        wrapper itself; everything lands on the shared ThreadPool.
        """
        return setattr(self._instance, attr, val)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/thread_pool.py
|
thread_pool.py
|
import pyamf
import pyamf.remoting as pyamf_remoting
import pyamf.flex.messaging as pyamf_messaging
import amfast
import amfast.class_def as class_def
import amfast.remoting as amfast_remoting
import amfast.remoting.flex_messages as amfast_messaging
class PyAmfConversionError(amfast.AmFastError):
    """Raised when conversion to/from PyAmf datatype fails."""
    pass
class PyAmfVersionError(PyAmfConversionError):
    """Raised when installed version of PyAmf is not compatible."""
# Fail fast at import time on incompatible PyAmf versions.
# NOTE(review): only the minor version number (__version__[1]) is checked;
# a major-version bump (e.g. a 1.x release) would wrongly fail this test.
if pyamf.__version__[1] < 4:
    raise PyAmfVersionError('PyAmf version is not compatible.')
#---------- FROM PyAmf TO AmFast -----------#
def packet_to_amfast(pyamf_packet):
    """Converts a PyAmf Envelope to an AmFast Packet.

    arguments:
    ===========
     * pyamf_packet - pyamf.remoting.Envelope.

    Returns amfast.remoting.Packet
    """
    if pyamf_packet.clientType == pyamf.ClientTypes.Flash6:
        client_type = amfast_remoting.Packet.FLASH_8
    elif pyamf_packet.clientType == pyamf.ClientTypes.FlashCom:
        client_type = amfast_remoting.Packet.FLASH_COM
    elif pyamf_packet.clientType == pyamf.ClientTypes.Flash9:
        client_type = amfast_remoting.Packet.FLASH_9
    else:
        # BUG FIX: this branch assigned 'clientType' (a typo), leaving
        # 'client_type' unbound and raising NameError for any
        # unrecognized client type. Default to FLASH_8.
        client_type = amfast_remoting.Packet.FLASH_8

    headers = [amfast_remoting.Header(name,
        required=pyamf_packet.headers.is_required(name), value=header) \
        for name, header in pyamf_packet.headers]

    messages = [message_to_amfast(name, body) for name, body in pyamf_packet.bodies]

    return amfast_remoting.Packet(client_type=client_type, headers=headers, messages=messages)
def message_to_amfast(name, msg):
    """Convert a PyAmf Request to an AmFast Message.

    arguments:
    ===========
     * name - string, response
     * msg - pyamf.remoting.Request

    Returns amfast.remoting.Message
    """
    # Requests without a target map to an empty target string.
    target = getattr(msg, 'target', '')

    # Responses carry a status suffix; requests do not.
    if hasattr(msg, 'status'):
        response = name + msg.status
    else:
        response = name

    return amfast_remoting.Message(target=target, response=response, body=msg.body)
#--------- FROM AmFast to PyAmf -----------#
def dummy_callable(obj):
    """Stand-in attr_func for ClassDefAlias: always reports no attributes."""
    return []
def class_def_alias(class_def):
    """Create a pyamf.ClassAlias that delegates operations to a ClassDef.

    arguments:
    ==========
     * class_def - amfast.class_def.ClassDef

    Returns pyamf.ClassAlias
    """
    # Translate the ClassDef flavor into PyAmf metadata flags.
    if hasattr(class_def, 'DYNAMIC_CLASS_DEF'):
        metadata = ['dynamic']
    elif hasattr(class_def, 'EXTERNALIZABLE_CLASS_DEF'):
        metadata = ['external']
    else:
        metadata = ['static']

    if class_def.amf3 is True:
        metadata.append('amf3')

    alias = ClassDefAlias(class_def.class_, class_def.alias,
        attrs=class_def.static_attrs, attr_func=dummy_callable,
        metadata=metadata)
    # Attach the ClassDef so ClassDefAlias methods can delegate to it.
    alias.class_def = class_def
    return alias
def register_class_def(class_def):
    """Maps a ClassDef to PyAmf.

    This provides the functionality of pyamf.register_class,
    except it maps a ClassDef.

    arguments:
    ===========
     * class_def - amfast.class_def.ClassDef

    Raises PyAmfConversionError if the alias is already registered.
    """
    if class_def.alias in pyamf.CLASS_CACHE:
        raise PyAmfConversionError("Alias '%s' is already registered." % class_def.alias)

    class_alias = class_def_alias(class_def)
    # PyAmf versions newer than 0.4 also cache aliases keyed by the
    # class object itself.
    if pyamf.__version__[1] > 4:
        pyamf.CLASS_CACHE[class_alias.klass] = class_alias
    pyamf.CLASS_CACHE[class_alias.alias] = class_alias
def register_class_mapper(class_mapper):
    """Register every user-defined ClassDef in a ClassDefMapper with PyAmf.

    arguments:
    ===========
     * class_mapper - amfast.class_def.ClassDefMapper
    """
    for mapped_def in class_mapper:
        # Skip AmFast's built-in ClassDefs; only user mappings are exported.
        if mapped_def._built_in is False:
            register_class_def(mapped_def)
def packet_to_pyamf(amfast_packet):
    """Converts an AmFast Packet to a PyAmf Envelope.

    arguments:
    ==========
     * amfast_packet - amfast.remoting.Packet

    Returns pyamf.remoting.Envelope
    """
    # Map AmFast client types onto PyAmf client types,
    # defaulting to Flash6 for anything unrecognized.
    type_map = {
        amfast_remoting.Packet.FLASH_8: pyamf.ClientTypes.Flash6,
        amfast_remoting.Packet.FLASH_COM: pyamf.ClientTypes.FlashCom,
        amfast_remoting.Packet.FLASH_9: pyamf.ClientTypes.Flash9,
    }
    client_type = type_map.get(amfast_packet.client_type, pyamf.ClientTypes.Flash6)

    packet = pyamf_remoting.Envelope()
    packet.amfVersion = pyamf.AMF0
    packet.clientType = client_type

    headers = pyamf_remoting.HeaderCollection()
    for header in amfast_packet.headers:
        headers[header.name] = header.value
        headers.set_required(header.name, value=(header.required is True))
    packet.headers = headers

    for msg in amfast_packet.messages:
        # AmFast targets look like 'dest/status'; PyAmf wants them split.
        split_target = msg.target.split('/')
        pyamf_status = '/' + split_target.pop()
        pyamf_target = '/'.join(split_target)
        packet[pyamf_target] = message_to_pyamf(msg, packet, pyamf_status)

    return packet
def message_to_pyamf(msg, packet, status):
    """Converts an AmFast Message to a PyAmf Response.

    arguments:
    ===========
     * msg - amfast.remoting.Message
     * packet - pyamf.remoting.Envelope
     * status - string

    Returns pyamf.remoting.Response
    """
    response = pyamf_remoting.Response(msg.body)
    response.envelope = packet

    # Reverse-lookup the numeric code for the '/status' suffix string.
    for code, suffix in pyamf_remoting.STATUS_CODES.iteritems():
        if suffix == status:
            response.status = code
            break

    return response
#----------- PyAMF Class Extensions ------------#
# Some extra classes to smooth things along with AmFast.
class ClassDefAlias(pyamf.ClassAlias):
    """A pyamf.ClassAlias that uses an amfast.class_def.ClassDef
    on the backend. This class should be instaniated with the
    class_def_alias() function.
    """

    # NOTE(review): first parameter is named 'kls' rather than 'self';
    # it still receives the instance when called on one.
    def checkClass(kls, klass):
        # Override parent method, because
        # AmFast does not require that mapped
        # classes' __init__ methods
        # have no required arguments.
        pass

    def getAttrs(self, obj, *args, **kwargs):
        """Returns attribute names in PyAmf format:
        a (static_attr_names, dynamic_attr_names) pair."""
        if hasattr(self.class_def, 'DYNAMIC_CLASS_DEF'):
            dynamic_attrs = self.class_def.getDynamicAttrVals(obj).keys()
        else:
            dynamic_attrs = []

        return (self.class_def.static_attrs, dynamic_attrs)

    def getAttributes(self, obj, *args, **kwargs):
        """Returns attribute values in PyAmf format:
        a (static_attr_dict, dynamic_attr_dict) pair."""
        if hasattr(self.class_def, 'DYNAMIC_CLASS_DEF'):
            dynamic_attrs = self.class_def.getDynamicAttrVals(obj)
        else:
            dynamic_attrs = {}

        # getStaticAttrVals returns values positionally, in the same
        # order as static_attrs; zip them back into a dict for PyAmf.
        static_attrs = {}
        static_attr_vals = self.class_def.getStaticAttrVals(obj)
        for i in xrange(0, len(self.class_def.static_attrs)):
            static_attrs[self.class_def.static_attrs[i]] = static_attr_vals[i]

        return (static_attrs, dynamic_attrs)

    def applyAttributes(self, obj, attrs, *args, **kwargs):
        """Applies attributes to an instance."""
        self.class_def.applyAttrVals(obj, attrs)

    def createInstance(self, *args, **kwargs):
        """Returns a new instance of the mapped class."""
        return self.class_def.getInstance()
#---- Classes for dealing with ISmallMessage ----#
class DataInputReader(object):
    """Adapter exposing pyamf.amf3.DataInput through AmFast's reader API.

    Lets the existing ISmallMessage parsing code consume a PyAmf
    DataInput object unchanged.
    """

    def __init__(self, data_input):
        # data_input must expose '.stream' (raw bytes) and '.decoder'
        # (AMF element decoder), as pyamf.amf3.DataInput does.
        self.data_input = data_input

    def read(self, length):
        """Read 'length' raw bytes from the underlying stream."""
        return self.data_input.stream.read(length)

    def readElement(self):
        """Decode and return the next AMF element."""
        return self.data_input.decoder.readElement()
class PyamfAbstractSmallMsgDef(amfast_messaging.AbstractSmallMsgDef):
    """Decodes ISmallMessages with PyAmf."""

    def readExternal(self, obj, data_input):
        """Overridden to use PyAmf instead of AmFast.

        Populates 'obj' from the externalized ISmallMessage layout:
        a run of flag bytes, followed by one AMF element per set bit.
        Fields whose flag bit is clear are explicitly set to None.
        """
        flags = self._readFlags(data_input)

        for i, flag in enumerate(flags):
            if i == 0:
                # First flag byte: the core AbstractMessage fields.
                if flag & self.BODY_FLAG:
                    obj.body = data_input.readElement()
                else:
                    obj.body = None

                if flag & self.CLIENT_ID_FLAG:
                    obj.clientId = data_input.readElement()
                else:
                    obj.clientId = None

                if flag & self.DESTINATION_FLAG:
                    obj.destination = data_input.readElement()
                else:
                    obj.destination = None

                if flag & self.HEADERS_FLAG:
                    obj.headers = data_input.readElement()
                else:
                    obj.headers = None

                if flag & self.MESSAGE_ID_FLAG:
                    obj.messageId = data_input.readElement()
                else:
                    obj.messageId = None

                if flag & self.TIMESTAMP_FLAG:
                    obj.timestamp = data_input.readElement()
                else:
                    obj.timestamp = None

                if flag & self.TIME_TO_LIVE_FLAG:
                    obj.timeToLive = data_input.readElement()
                else:
                    obj.timeToLive = None

            if i == 1:
                # Second flag byte: byte-encoded UID variants of the ids.
                # These take precedence over the string forms from byte 0,
                # but must not clobber them with None when absent.
                if flag & self.CLIENT_ID_BYTES_FLAG:
                    clientIdBytes = data_input.readElement()
                    obj.clientId = self._readUid(clientIdBytes)
                else:
                    if not hasattr(obj, 'clientId'):
                        obj.clientId = None

                if flag & self.MESSAGE_ID_BYTES_FLAG:
                    messageIdBytes = data_input.readElement()
                    obj.messageId = self._readUid(messageIdBytes)
                else:
                    if not hasattr(obj, 'messageId'):
                        obj.messageId = None

    def getInstance(self):
        """
        Return a regular AmFast AbstractMessage instead of
        the class that has been mapped to this ClassDef.

        Kinda tricky. Muuuuhhahahahah
        """
        # __new__ skips __init__; all fields are filled in by readExternal.
        obj = amfast_messaging.AbstractMessage.__new__(amfast_messaging.AbstractMessage)

        # Attach a per-instance __readamf__ hook so PyAmf's externalizable
        # protocol routes decoding back through this def's readExternal().
        def readAmf(data_input):
            self.readExternal(obj, DataInputReader(data_input))
        obj.__readamf__ = readAmf

        return obj
class PyamfAsyncSmallMsgDef(amfast_messaging.AsyncSmallMsgDef, PyamfAbstractSmallMsgDef):
    """Decodes small AsyncMessages ('DSA') with PyAmf."""

    def __init__(self, *args, **kwargs):
        # Only the AsyncSmallMsgDef initializer is run;
        # PyamfAbstractSmallMsgDef contributes behavior, not state.
        amfast_messaging.AsyncSmallMsgDef.__init__(self, *args, **kwargs)

    def readExternal(self, obj, data_input):
        """Read the abstract-message fields, then this level's
        correlationId flags/fields."""
        PyamfAbstractSmallMsgDef.readExternal(self, obj, data_input)

        flags = self._readFlags(data_input)

        for i, flag in enumerate(flags):
            if i == 0:
                if flag & self.CORRELATION_ID_FLAG:
                    obj.correlationId = data_input.readElement()
                else:
                    obj.correlationId = None

                # Byte-encoded UID form takes precedence, but must not
                # clobber an already-set correlationId with None.
                if flag & self.CORRELATION_ID_BYTES_FLAG:
                    correlationIdBytes = data_input.readElement()
                    obj.correlationId = self._readUid(correlationIdBytes)
                else:
                    if not hasattr(obj, 'correlationId'):
                        obj.correlationId = None

    def getInstance(self):
        # Same trick as the parent: bare AsyncMessage with a per-instance
        # __readamf__ hook pointing back at this def.
        obj = amfast_messaging.AsyncMessage.__new__(amfast_messaging.AsyncMessage)

        def readAmf(data_input):
            self.readExternal(obj, DataInputReader(data_input))
        obj.__readamf__ = readAmf

        return obj
class PyamfCommandSmallMsgDef(amfast_messaging.CommandSmallMsgDef, PyamfAsyncSmallMsgDef):
    """Decodes small CommandMessages ('DSC') with PyAmf."""

    def __init__(self, *args, **kwargs):
        # Only the CommandSmallMsgDef initializer is run;
        # PyamfAsyncSmallMsgDef contributes behavior, not state.
        amfast_messaging.CommandSmallMsgDef.__init__(self, *args, **kwargs)

    def readExternal(self, obj, data_input):
        """Read the async-message fields, then this level's operation field."""
        PyamfAsyncSmallMsgDef.readExternal(self, obj, data_input)

        flags = self._readFlags(data_input)

        for i, flag in enumerate(flags):
            if i == 0:
                if flag & self.OPERATION_FLAG:
                    obj.operation = data_input.readElement()
                else:
                    obj.operation = None

    def getInstance(self):
        # Bare CommandMessage with a per-instance __readamf__ hook
        # pointing back at this def.
        obj = amfast_messaging.CommandMessage.__new__(amfast_messaging.CommandMessage)

        def readAmf(data_input):
            self.readExternal(obj, DataInputReader(data_input))
        obj.__readamf__ = readAmf

        return obj
# ---- Dummy classes to trick PyAmf into doing what we want. ---#
class SmallAbstractMsg(amfast_messaging.AbstractMessage):
    """Dummy externalizable AbstractMessage used only to register the
    small-message alias with PyAmf; real instances get a per-object
    __readamf__ from the ClassDef's getInstance()."""

    def __readamf__(self, data_input):
        raise pyamf.EncodeError("__readamf__ is not implemented for this class: %s." % self)

    def __writeamf__(self, data_output):
        raise pyamf.EncodeError("__writeamf__ is not implemented for this class: %s." % self)
class SmallAsyncMsg(amfast_messaging.AsyncMessage):
    """Dummy externalizable AsyncMessage used only to register the
    'DSA' alias with PyAmf; real instances get a per-object
    __readamf__ from the ClassDef's getInstance()."""

    def __readamf__(self, data_input):
        raise pyamf.EncodeError("__readamf__ is not implemented for this class: %s." % self)

    def __writeamf__(self, data_output):
        raise pyamf.EncodeError("__writeamf__ is not implemented for this class: %s." % self)
class SmallCommandMsg(amfast_messaging.CommandMessage):
    """Dummy externalizable CommandMessage used only to register the
    'DSC' alias with PyAmf; real instances get a per-object
    __readamf__ from the ClassDef's getInstance()."""

    def __readamf__(self, data_input):
        raise pyamf.EncodeError("__readamf__ is not implemented for this class: %s." % self)

    def __writeamf__(self, data_output):
        raise pyamf.EncodeError("__writeamf__ is not implemented for this class: %s." % self)
#----- Map Flex message classes with PyAmf -----#

# Clear existing message class mappings,
# then re-map with AmFast ClassDefs.
# Each unregister is wrapped in try/except because a given alias may
# or may not already be registered, depending on the PyAmf version.

#---- AbstractMessage ---#
try:
    pyamf.unregister_class('flex.messaging.messages.AbstractMessage')
except pyamf.UnknownClassAlias:
    pass
register_class_def(class_def.ClassDef(amfast_messaging.AbstractMessage))

#---- AsyncMessage ----#
try:
    pyamf.unregister_class('flex.messaging.messages.AsyncMessage')
except pyamf.UnknownClassAlias:
    pass
try:
    # 'DSA' is the ISmallMessage alias for AsyncMessage.
    pyamf.unregister_class('DSA')
except pyamf.UnknownClassAlias:
    pass
register_class_def(class_def.ClassDef(amfast_messaging.AsyncMessage))
register_class_def(PyamfAsyncSmallMsgDef(SmallAsyncMsg, 'DSA',
    ('body', 'clientId', 'destination', 'headers', 'messageId',
        'timeToLive', 'timestamp', 'correlationId')))

#---- AcknowledgeMessage --#
try:
    pyamf.unregister_class('flex.messaging.messages.AcknowledgeMessage')
except pyamf.UnknownClassAlias:
    pass
try:
    # 'DSK' is the ISmallMessage alias for AcknowledgeMessage.
    pyamf.unregister_class('DSK')
except pyamf.UnknownClassAlias:
    pass
# NOTE(review): 'DSK' is unregistered above but, unlike 'DSA'/'DSC',
# no small-message ClassDef is re-registered for it -- confirm intended.
register_class_def(class_def.ClassDef(amfast_messaging.AcknowledgeMessage))

#---- CommandMessage ----#
try:
    pyamf.unregister_class('flex.messaging.messages.CommandMessage')
except pyamf.UnknownClassAlias:
    pass
try:
    # 'DSC' is the ISmallMessage alias for CommandMessage.
    pyamf.unregister_class('DSC')
except pyamf.UnknownClassAlias:
    pass
register_class_def(class_def.ClassDef(amfast_messaging.CommandMessage))
register_class_def(PyamfCommandSmallMsgDef(SmallCommandMsg, 'DSC',
    ('body', 'clientId', 'destination', 'headers', 'messageId',
        'timeToLive', 'timestamp', 'correlationId', 'operation')))

#---- ErrorMessage ----#
try:
    pyamf.unregister_class('flex.messaging.messages.ErrorMessage')
except pyamf.UnknownClassAlias:
    pass
register_class_def(class_def.ClassDef(amfast_messaging.ErrorMessage))

#---- RemotingMessage ----#
try:
    pyamf.unregister_class('flex.messaging.messages.RemotingMessage')
except pyamf.UnknownClassAlias:
    pass
register_class_def(class_def.ClassDef(amfast_messaging.RemotingMessage))
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/pyamf_converter.py
|
pyamf_converter.py
|
import logging
import pyamf
from pyamf import util as pyamf_util
import pyamf.remoting
import amfast
from endpoint import Endpoint
import pyamf_converter as pc
class PyAmfEndpoint(Endpoint):
    """An Endpoint that can encode/decode AMF packets with PyAmf.

    How to configure custom class mapping:

    # When using the PyAmfEndpoint,
    # custom type mapping can be configured
    # either through AmFast, or through PyAmf.

    # Configure type mapping with AmFast
    class_mapper = ClassDefMapper()
    #... map classes ...#

    # Use pyamf_converter to automatically map classes
    # from a ClassDefMapper with PyAmf.
    import amfast.remoting.pyamf_converter as pyamf_converter
    pyamf_converter.register_class_mapper(class_mapper)

    # Configure type mapping directly with PyAmf.
    # Use the standard PyAmf way of mapping classes.
    pyamf.register_class(klass, 'alias', ....)
    """

    def decodePacket(self, raw_packet, *args, **kwargs):
        """Decode a raw AMF packet into an AmFast Packet via PyAmf."""
        if amfast.log_raw:
            self.logRaw('rawDecodePacket', raw_packet)

        # Decode with PyAmf, then translate into AmFast's packet model.
        context = pyamf.get_context(pyamf.AMF0)
        pyamf_packet = pyamf.remoting.decode(raw_packet, context)
        decoded = pc.packet_to_amfast(pyamf_packet)

        if amfast.log_debug:
            amfast.logger.debug("<decodedPyAmfPacket>%s</decodedPyAmfPacket>" % pyamf_packet)

        return decoded

    def encodePacket(self, packet):
        """Encode an AmFast Packet into raw AMF bytes via PyAmf."""
        # Translate into PyAmf's packet model, then encode.
        pyamf_packet = pc.packet_to_pyamf(packet)

        if amfast.log_debug:
            amfast.logger.debug("<encodedPyAmfPacket>%s</encodedPyAmfPacket>" % pyamf_packet)

        context = pyamf.get_context(pyamf.AMF0)
        raw_packet = pyamf.remoting.encode(pyamf_packet, context).getvalue()

        if amfast.log_raw:
            self.logRaw('rawEncodePacket', raw_packet)

        return raw_packet

    def decode(self, raw_obj, amf3=False):
        """Decode a single raw AMF element (AMF0 or AMF3)."""
        amf_type = pyamf.AMF3 if amf3 is True else pyamf.AMF0
        context = pyamf.get_context(amf_type)
        decoder = pyamf.get_decoder(amf_type, raw_obj, context=context)
        element = decoder.readElement()

        if amfast.log_raw:
            self.logRaw('rawDecodeObject', raw_obj)

        return element

    def encode(self, obj, amf3=False):
        """Encode a single object as a raw AMF element (AMF0 or AMF3)."""
        amf_type = pyamf.AMF3 if amf3 is True else pyamf.AMF0
        stream = pyamf_util.BufferedByteStream()
        context = pyamf.get_context(amf_type)
        encoder = pyamf.get_encoder(amf_type, stream, context=context)
        encoder.writeElement(obj)
        raw_obj = stream.getvalue()

        if amfast.log_raw:
            self.logRaw('rawEncodeObject', raw_obj)

        return raw_obj
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/pyamf_endpoint.py
|
pyamf_endpoint.py
|
import memcache_manager
from subscription_manager import Subscription, SubscriptionManager
import flex_messages as messaging
class MemcacheSubscriptionManager(SubscriptionManager, memcache_manager.MemcacheManager):
    """Stores all subscription information in memcache."""

    # Memcache key that stores the dict of all known topic names.
    TOPIC_ATTR = '_topics'
    # Key-name suffix for a topic's queued-message list.
    MSG_ATTR = '_messages'
    # Key-name suffix for a topic's subscribed-connection dict.
    CONNECTION_ATTR = '_connections'

    def __init__(self, secure=False, ttl=30000, mc_servers=['127.0.0.1:11211'], mc_debug=0):
        SubscriptionManager.__init__(self, secure=secure, ttl=ttl)
        self.mc = self.createMcClient(mc_servers, mc_debug)
        # Cross-process mutex implemented on top of memcache.
        self._lock = memcache_manager.MemcacheMutex(self.mc)

    def reset(self):
        """Remove all subscription state from memcache."""
        self._lock.releaseAll()

        lock_name = self.getLockName('subscription_reset')
        self._lock.acquire(lock_name)
        try:
            topics = self.mc.get(self.TOPIC_ATTR)
            if topics is not None:
                for topic in topics.iterkeys():
                    msg_key = self.getKeyName(topic, self.MSG_ATTR)
                    self.mc.delete(msg_key)

                    connection_key = self.getKeyName(topic, self.CONNECTION_ATTR)
                    connections = self.mc.get(connection_key)
                    for connection_id in connections:
                        key = self.getKeyName(connection_id, topic)
                        self.mc.delete(key)
                    self.mc.delete(connection_key)

            self.mc.set(self.TOPIC_ATTR, {})
        finally:
            self._lock.release(lock_name)

    def _cleanTopic(self, topic):
        """Removes un-needed subscription data for a topic."""
        msg_key = self.getKeyName(topic, self.MSG_ATTR)
        msgs = self.mc.get(msg_key)

        connection_key = self.getKeyName(topic, self.CONNECTION_ATTR)
        connections = self.mc.get(connection_key)

        # Only drop the topic when it has no messages and no subscribers.
        if (msgs is None or len(msgs) == 0) and \
            (connections is None or len(connections) == 0):
            lock_name = self.getLockName(self.TOPIC_ATTR)
            self._lock.acquire(lock_name)
            try:
                topics = self.mc.get(self.TOPIC_ATTR)
                if topic in topics:
                    del topics[topic]
                    self.mc.set(self.TOPIC_ATTR, topics)

                if msgs is not None:
                    self.mc.delete(msg_key)
                if connections is not None:
                    self.mc.delete(connection_key)
            finally:
                self._lock.release(lock_name)

    def _createTopic(self, topic):
        """Register a topic in the global topic dict, if not present."""
        lock_name = self.getLockName(self.TOPIC_ATTR)
        self._lock.acquire(lock_name)
        try:
            topics = self.mc.get(self.TOPIC_ATTR)
            if topics is None:
                topics = {}

            topic_map = topics.get(topic, None)
            if topic_map is None:
                topics[topic] = True
                self.mc.set(self.TOPIC_ATTR, topics)
        finally:
            self._lock.release(lock_name)

    def _createTopicMessageQeue(self, topic):
        """Create the (empty) message list for a topic, if not present.

        NOTE: the 'Qeue' typo in the name is kept for compatibility
        with existing callers.
        """
        key = self.getKeyName(topic, self.MSG_ATTR)
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            messages = self.mc.get(key)
            if messages is None:
                self.mc.set(key, [])
        finally:
            self._lock.release(lock_name)

    def _createConnectionList(self, topic, connection_id):
        """Add a connection id to the topic's subscriber dict."""
        key = self.getKeyName(topic, self.CONNECTION_ATTR)
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            connections = self.mc.get(key)
            if connections is None:
                connections = {}
            connections[connection_id] = True
            self.mc.set(key, connections)
        finally:
            self._lock.release(lock_name)

    def _createClientList(self, topic, connection_id, client_id, subscription):
        """Store a Subscription under the connection's per-topic dict."""
        key = self.getKeyName(connection_id, topic)
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            connection_map = self.mc.get(key)
            if connection_map is None:
                connection_map = {}
            connection_map[client_id] = subscription
            self.mc.set(key, connection_map)
        finally:
            self._lock.release(lock_name)

    def subscribe(self, connection_id, client_id, topic, sub_topic=None, selector=None):
        """Subscribe a client to a topic.

        arguments
        ==========
         * connection_id - string, id of Flash client that is subscribing.
         * client_id - string, id of messaging client that is subscribing.
         * topic - string, Topic to subscribe to.
         * sub_topic - string, Sub-Topic to subscribe to. Default = None.
        """
        topic = self.getTopicKey(topic, sub_topic)
        subscription = Subscription(connection_id=connection_id,
            client_id=client_id, topic=topic)

        self._createTopic(topic)
        self._createTopicMessageQeue(topic)
        self._createConnectionList(topic, connection_id)
        self._createClientList(topic, connection_id, client_id, subscription)

    def unSubscribe(self, connection_id, client_id, topic, sub_topic=None):
        """Un-Subscribe a client from a topic.

        arguments
        ==========
         * connection_id - string, id of Flash client that is subscribing.
         * client_id - string, id of messaging client that is subscribing.
         * topic - string, Topic to un-subscribe from.
         * sub_topic - string, Sub-Topic to un-subscribe from. Default = None.
        """
        topic = self.getTopicKey(topic, sub_topic)
        key = self.getKeyName(connection_id, topic)
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            connection_map = self.mc.get(key)
            if connection_map is None:
                # Connection has already been removed.
                return
            elif len(connection_map) == 1:
                # This is the only subscribed client.
                # Delete client list.
                self.mc.delete(key)

                # Delete connection_id from subscription list
                sub_key = self.getKeyName(topic, self.CONNECTION_ATTR)
                sub_lock_name = self.getLockName(sub_key)
                self._lock.acquire(sub_lock_name)
                try:
                    connections = self.mc.get(sub_key)
                    if connection_id in connections:
                        del connections[connection_id]
                        self.mc.set(sub_key, connections)
                finally:
                    self._lock.release(sub_lock_name)

                self._cleanTopic(topic)
            else:
                # Delete single client subscription
                if client_id in connection_map:
                    del connection_map[client_id]
                    self.mc.set(key, connection_map)
        finally:
            self._lock.release(lock_name)

    def deleteConnection(self, connection):
        """Remove all subscriptions for this connection.

        arguments
        ==========
         * connection - Connection, the connection being removed.
        """
        # Get all subscribed topics
        topics = self.mc.get(self.TOPIC_ATTR)
        for topic in topics.iterkeys():
            # Check if connection is subscribed to a topic.
            key = self.getKeyName(connection.id, topic)
            if self.mc.get(key) is not None:
                # Connection is subscribed to this topic
                self.mc.delete(key)

                # Delete connection_id from subscription list
                sub_key = self.getKeyName(topic, self.CONNECTION_ATTR)
                sub_lock_name = self.getLockName(sub_key)
                self._lock.acquire(sub_lock_name)
                try:
                    connections = self.mc.get(sub_key)
                    if connection.id in connections:
                        del connections[connection.id]
                        self.mc.set(sub_key, connections)
                    self._cleanTopic(topic)
                finally:
                    self._lock.release(sub_lock_name)

    def iterSubscribers(self, topic, sub_topic=None):
        """Iterate through Flash client ids subscribed to a specific topic."""
        # Fixed: previously this method ignored its 'topic'/'sub_topic'
        # arguments and yielded the subscribers of EVERY topic.
        topic = self.getTopicKey(topic, sub_topic)
        key = self.getKeyName(topic, self.CONNECTION_ATTR)
        connections = self.mc.get(key)
        if connections is None:
            return
        for connection_id in connections.iterkeys():
            yield connection_id

    def iterConnectionSubscriptions(self, connection):
        """Iterate through all Subscriptions that belong to a specific connection."""
        # Get all subscribed topics
        topics = self.mc.get(self.TOPIC_ATTR)
        for topic in topics.iterkeys():
            # Check if connection is subscribed to a topic.
            key = self.getKeyName(connection.id, topic)
            subscriptions = self.mc.get(key)
            if subscriptions is not None:
                for subscription in subscriptions.itervalues():
                    yield subscription

    def persistMessage(self, msg):
        """Store a message."""
        topic = msg.destination
        if hasattr(msg, 'headers') and \
            msg.headers is not None and \
            messaging.AsyncMessage.SUBTOPIC_HEADER in msg.headers:
            sub_topic = msg.headers[messaging.AsyncMessage.SUBTOPIC_HEADER]
        else:
            sub_topic = None
        topic = self.getTopicKey(topic, sub_topic)

        # Remove connection data,
        # so that it is not pickled
        tmp_connection = getattr(msg, 'connection', None)
        if tmp_connection is not None:
            msg.connection = None

        key = self.getKeyName(topic, self.MSG_ATTR)
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            messages = self.mc.get(key)
            if messages is None:
                messages = []
            messages.append(msg)
            self.mc.set(key, messages)
        finally:
            self._lock.release(lock_name)

        # Restore connection data
        if tmp_connection is not None:
            msg.connection = tmp_connection

    def pollMessages(self, topic, cutoff_time, current_time):
        """Retrieves all queued messages, and discards expired messages.

        arguments:
        ===========
         * topic - string, Topic to find messages for.
         * cutoff_time - float, epoch time, only messages published
             after this time will be returned.
         * current_time - float, epoch time, used to determine if a
             message is expired.
        """
        key = self.getKeyName(topic, self.MSG_ATTR)
        lock_name = self.getLockName(key)
        self._lock.acquire(lock_name)
        try:
            msgs = self.mc.get(key)
            if msgs is None:
                # Fixed: returning here previously skipped the lock
                # release entirely, leaving the mutex held forever.
                return

            msg_count = len(msgs)
            dirty = False  # True once an expired message has been dropped.
            idx = 0
            while idx < msg_count:
                msg = msgs[idx]
                if current_time > (msg.timestamp + msg.timeToLive):
                    # Remove expired message
                    msgs.pop(idx)
                    msg_count -= 1
                    dirty = True
                else:
                    idx += 1
                    if msg.timestamp > cutoff_time:
                        yield msg

            if dirty is True:
                self.mc.set(key, msgs)
        finally:
            # NOTE: because this is a generator, the lock is held until
            # iteration completes (or the generator is closed); consumers
            # should drain it promptly.
            self._lock.release(lock_name)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/memcache_subscription_manager.py
|
memcache_subscription_manager.py
|
import pickle
from google.appengine.ext import db
import amfast
from subscription_manager import Subscription, SubscriptionManager
class GaeSubscription(db.Model, Subscription):
    """A client's subscription to a topic, persisted in a Google Datastore."""

    @classmethod
    def getKeyName(cls, connection_id, client_id, topic):
        """Build a unique Datastore key name for a subscription."""
        return ':'.join((connection_id, client_id, topic))

    # Datastore properties used to look subscriptions up
    # by connection, client, or topic.
    connection_id = db.StringProperty(required=True)
    client_id = db.StringProperty(required=True)
    topic = db.StringProperty(required=True)
class GaeMessageBody(db.Model):
    """Flex message body persisted in a Google Datastore."""

    # Pickled message object (written by GaeSubscriptionManager.persistMessage).
    p_message = db.BlobProperty(required=True)
class GaeMessageMetadata(db.Model):
    """Flex message metadata persisted in a Google Datastore."""

    # Lifetime of the message (taken from msg.timeToLive).
    time_to_live = db.FloatProperty(required=True)
    # Publish time of the message (taken from msg.timestamp).
    timestamp = db.FloatProperty(required=True)
    # Combined topic/sub-topic key the message was published to.
    topic = db.StringProperty(required=True)
    # Reference to the pickled message payload.
    message_body = db.ReferenceProperty(reference_class=GaeMessageBody, required=True)
class GaeSubscriptionManager(SubscriptionManager):
    """Stores subscriptions in Google DataStore."""

    def reset(self):
        """Delete every persisted subscription, message metadata and body."""
        query = GaeSubscription.all(keys_only=True)
        for result in query:
            db.delete(result)

        query = GaeMessageMetadata.all(keys_only=True)
        for result in query:
            db.delete(result)

        query = GaeMessageBody.all(keys_only=True)
        for result in query:
            db.delete(result)

    def subscribe(self, connection_id, client_id, topic, sub_topic=None, selector=None):
        """Add a subscription to a topic."""
        topic = SubscriptionManager.getTopicKey(topic, sub_topic)
        key_name = GaeSubscription.getKeyName(connection_id, client_id, topic)
        # Using key_name makes re-subscribing idempotent:
        # the same entity is overwritten.
        subscription = GaeSubscription(key_name=key_name,
            connection_id=connection_id, client_id=client_id, topic=topic)
        subscription.put()

    def unSubscribe(self, connection_id, client_id, topic, sub_topic=None):
        """Remove a subscription from a topic."""
        topic = SubscriptionManager.getTopicKey(topic, sub_topic)
        key_name = GaeSubscription.getKeyName(connection_id, client_id, topic)
        subscription = GaeSubscription.get_by_key_name(key_name)
        db.delete(subscription)

    def deleteConnection(self, connection):
        """Delete connection-subscription information."""
        query = GaeSubscription.all(keys_only=True)
        query.filter('connection_id = ', connection.id)
        db.delete(query)

    def persistMessage(self, msg):
        """Save message object."""
        # Remove connection data,
        # so that it is not pickled
        tmp_connection = getattr(msg, 'connection', None)
        if tmp_connection is not None:
            msg.connection = None

        message_body = GaeMessageBody(p_message=pickle.dumps(msg))
        message_body.put()

        message_data = GaeMessageMetadata(timestamp=msg.timestamp,
            time_to_live=float(msg.timeToLive), topic=self.getMessageTopicKey(msg),
            message_body=message_body)
        message_data.put()

        # Restore connection attr.
        if tmp_connection is not None:
            msg.connection = tmp_connection

    def iterConnectionSubscriptions(self, connection):
        """Iterate through all Subscriptions that belong to a specific connection."""
        query = GaeSubscription.all()
        query.filter('connection_id = ', connection.id)
        return query

    def iterSubscribers(self, topic, sub_topic=None):
        """Iterate through all connection ids subscribed to a topic."""
        topic = SubscriptionManager.getTopicKey(topic, sub_topic)
        connection_ids = {} # Keep track of unique IDs.
        query = GaeSubscription.all()
        query.filter('topic = ', topic)
        for subscription in query:
            if subscription.connection_id in connection_ids:
                # Already yielded for another client on the same connection.
                continue
            connection_ids[subscription.connection_id] = True
            yield subscription.connection_id

    def pollMessages(self, topic, cutoff_time, current_time):
        """Retrieves all queued messages, and discards expired messages.

        arguments:
        ===========
         * topic - string, Topic to find messages for.
         * cutoff_time - float, epoch time, only messages published
             after this time will be returned.
         * current_time - float, epoch time, used to determine if a
             message is expired.
        """
        # NOTE(review): current_time is unused here; expired messages are
        # only removed by deleteExpiredMessages -- confirm that is intended.
        query = GaeMessageMetadata.all()
        query.filter('topic = ', topic)
        query.filter('timestamp > ', cutoff_time)
        query.order('timestamp')
        for message_data in query:
            yield pickle.loads(message_data.message_body.p_message)

    def deleteExpiredMessages(self, cutoff_time):
        """Remove message metadata published before cutoff_time."""
        # NOTE(review): only GaeMessageMetadata rows are deleted; the
        # referenced GaeMessageBody entities appear to be orphaned -- confirm.
        query = GaeMessageMetadata.all(keys_only=True)
        query.filter('timestamp < ', cutoff_time)
        for result in query:
            db.delete(result)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/gae_subscription_manager.py
|
gae_subscription_manager.py
|
import time
import tornado.web
from tornado.ioloop import IOLoop, PeriodicCallback
import amfast
from amfast.class_def.as_types import AsNoProxy
from channel import ChannelSet, HttpChannel
from endpoint import AmfEndpoint
import flex_messages as messaging
import connection_manager as cm
class TornadoChannelSet(ChannelSet):
    """A ChannelSet that schedules its work on Tornado's IOLoop."""

    def scheduleClean(self):
        """Schedule periodic cleaning of stale connections."""
        # clean_freq is multiplied by 1000: PeriodicCallback
        # expects milliseconds.
        cleaner = PeriodicCallback(self.clean, self.clean_freq * 1000,
            IOLoop.instance())
        cleaner.start()

    def clean(self):
        """Clean connections, one IOLoop callback per connection."""
        if amfast.log_debug is True:
            amfast.logger.debug("Cleaning channel.")

        current_time = time.time()
        # NOTE: 'iter' shadows the builtin; kept as-is.
        iter = self.connection_manager.iterConnectionIds()

        def _clean():
            try:
                connection_id = iter.next()
                def _callback():
                    self.cleanConnection(connection_id, current_time)
                # Chain each step through the IOLoop so a long connection
                # list does not block the event loop.
                IOLoop.instance().add_callback(_callback)
                IOLoop.instance().add_callback(_clean)
            except StopIteration:
                # No more connections to clean.
                pass
        _clean()

    def notifyConnections(self, topic, sub_topic):
        """Schedule a notification for every subscriber of a topic."""
        for connection_id in self.subscription_manager.iterSubscribers(topic, sub_topic):
            self.notifyConnection(connection_id)

    def notifyConnection(self, connection_id):
        """Schedule notification of a single connection on the IOLoop."""
        def _notify():
            self._notifyConnection(connection_id)
        IOLoop.instance().add_callback(_notify)

    def _notifyConnection(self, connection_id):
        # The connection may have disconnected between scheduling
        # and execution of this callback.
        try:
            connection = self.connection_manager.getConnection(connection_id, False)
        except cm.NotConnectedError:
            return

        if connection.notify_func is not None:
            IOLoop.instance().add_callback(connection.notify_func)
class CallbackChain(object):
    """Chain together Tornado callbacks.

    When a callback completes, the next in line is called with
    the output of the previous, until the chain is completed.
    """

    def __init__(self):
        # Pending callbacks; each entry is a dict with
        # 'function', 'args', and 'kwargs' keys.
        self.callbacks = []

    def addCallback(self, function, args=None, kwargs=None):
        """Append a callback to the chain.

        arguments:
        ============
         * function - callable, invoked with the previous callback's result.
         * args - sequence, extra positional arguments. Default = None.
         * kwargs - dict, extra keyword arguments. Default = None.
        """
        # Fixed: the defaults were the mutable literals [] and {},
        # which are shared across every call to addCallback.
        self.callbacks.append({
            'function': function,
            'args': [] if args is None else args,
            'kwargs': {} if kwargs is None else kwargs
        })

    def execute(self, arg=None):
        """Execute the callback chain.

        arguments:
        ============
         * arg - object, Argument to pass to 1st callback. Default = None
        """
        callback_cnt = len(self.callbacks)
        if callback_cnt < 1:
            return

        callback = self.callbacks.pop(0)

        def _execute():
            result = callback['function'](arg, *callback['args'],
                **callback['kwargs'])

            # Recurse with the result until the chain is exhausted.
            if callback_cnt > 1:
                self.execute(result)
        IOLoop.instance().add_callback(_execute)
class TornadoChannel(HttpChannel):
    """An HttpChannel implementation for the Tornado web server."""

    # This attribute is added to packets
    # that are waiting for a long-poll
    # to receive a message.
    MSG_NOT_COMPLETE = '_msg_not_complete'

    # This attribute is added to store
    # Tornado's request handler on the packet,
    # so that it can be available to targets.
    TORNADO_REQUEST = '_tornado_request'

    def __init__(self, *args, **kwargs):
        HttpChannel.__init__(self, *args, **kwargs)

        class requestHandler(tornado.web.RequestHandler):
            """The RequestHandler class for this Channel."""

            @tornado.web.asynchronous
            def post(inner_self):
                """Process an incoming request."""
                # 'self' (closed over) is the channel;
                # 'inner_self' is the RequestHandler instance.
                self.processRequest(inner_self)

        # A new handler class is created per channel instance,
        # so that it closes over this particular channel.
        self.request_handler = requestHandler

    def processRequest(self, request_handler):
        """Decode, invoke, and complete a request via a CallbackChain."""
        request_handler.set_header('Content-Type', self.CONTENT_TYPE)

        call_chain = CallbackChain()
        call_chain.addCallback(self.decode)
        call_chain.addCallback(self.invoke)
        call_chain.addCallback(self.checkComplete, (request_handler,))
        call_chain.execute(request_handler)

    def decode(self, request_handler):
        """Overridden to add Tornado's request object onto the packet."""
        decoded = HttpChannel.decode(self, request_handler.request.body)
        setattr(decoded, self.TORNADO_REQUEST, request_handler)
        return decoded

    def checkComplete(self, response, request_handler):
        """Checks to determine if the response message is ready
        to be returned to the client, and finishes the request if ready.
        """
        if hasattr(response, self.MSG_NOT_COMPLETE):
            # long-poll operation.
            # response is waiting for a message to be published.
            return

        if request_handler._finished is True:
            # Someone else already finished the request.
            return

        if request_handler.request.connection.stream.closed():
            # Client is not connected.
            return

        # Message is complete, encode and return
        request_handler.finish(self.encode(response))

    def setupPollRequest(self, packet):
        """Setup a request for a long-poll operation."""
        # Set flag so self.checkComplete
        # does not finish the message.
        setattr(packet.response, self.MSG_NOT_COMPLETE, True)

        return getattr(packet, self.TORNADO_REQUEST)

    def finishPoll(self, request, packet, message, messages):
        """Finish a request that has been waiting for messages."""
        if isinstance(packet.channel.endpoint, AmfEndpoint):
            # Make sure messages are not encoded as an ArrayCollection
            messages = AsNoProxy(messages)
        message.response_msg.body.body = messages

        if hasattr(packet.response, self.MSG_NOT_COMPLETE):
            delattr(packet.response, self.MSG_NOT_COMPLETE)
        self.checkComplete(packet.response, request)

    def _waitForMessage(self, packet, message, connection):
        """Overridden to be non-blocking."""
        request = self.setupPollRequest(packet)

        def _notify():
            # This function gets called when a message is published,
            # or wait_interval is reached.
            connection.unSetNotifyFunc()

            # Get messages and add them
            # to the response message
            messages = self.channel_set.subscription_manager.pollConnection(connection)
            self.finishPoll(request, packet, message, messages)

        # Setup timeout
        timeout_call = None
        if self.wait_interval > -1:
            # NOTE(review): wait_interval is added to time.time(), so it
            # is treated as seconds here -- confirm against HttpChannel.
            timeout_call = IOLoop.instance().add_timeout(
                time.time() + self.wait_interval, _notify)

        def _notifyTimeout():
            # Notifies, plus cancels the timeout
            if timeout_call is not None:
                IOLoop.instance().remove_timeout(timeout_call)
            _notify()
        connection.setNotifyFunc(_notifyTimeout)

        # Remove notify function if client drops connection.
        request.request.connection.stream.set_close_callback(connection.unSetNotifyFunc)

        # Messages are delivered asynchronously; return no messages now.
        return ()

    def _pollForMessage(self, packet, message, connection):
        """Overridden to be non-blocking."""
        request = self.setupPollRequest(packet)

        # Polls for messages every self.poll_interval
        poller = PeriodicCallback(None, self.poll_interval, IOLoop.instance())

        def _timeout():
            # Executed when timeout is reached.
            poller.stop()
            messages = self.channel_set.subscription_manager.pollConnection(connection)
            self.finishPoll(request, packet, message, messages)

        if self.wait_interval > -1:
            timeout_call = IOLoop.instance().add_timeout(
                time.time() + self.wait_interval, _timeout)
        else:
            timeout_call = None

        # Timeout if client drops connection.
        request.request.connection.stream.set_close_callback(_timeout)

        def _poll():
            messages = self.channel_set.subscription_manager.pollConnection(connection)
            if len(messages) > 0:
                poller.stop()
                if timeout_call is not None:
                    # Disable time out callback
                    IOLoop.instance().remove_timeout(timeout_call)
                self.finishPoll(request, packet, message, messages)
        poller.callback = _poll
        poller.start()

        # Messages are delivered asynchronously; return no messages now.
        return ()
class StreamingTornadoChannel(TornadoChannel):
    """Handles streaming http connections."""

    def __init__(self, name, max_connections=-1, endpoint=None,
        timeout=1200, wait_interval=0, heart_interval=30000):
        TornadoChannel.__init__(self, name, max_connections, endpoint,
            timeout, wait_interval)

        # Milliseconds between heart-beat bytes sent to the client.
        self.heart_interval = heart_interval

    def processRequest(self, request_handler):
        """Dispatch a request: plain AMF, or a streaming command."""
        if request_handler.request.headers['Content-Type'] == self.CONTENT_TYPE:
            # Regular AMF message
            return TornadoChannel.processRequest(self, request_handler)

        request_handler.set_header('Content-Type', self.CONTENT_TYPE)
        msg = messaging.StreamingMessage()
        msg.parseArgs(request_handler.request.arguments)

        if msg.operation == msg.OPEN_COMMAND:
            def _open():
                self.startStream(msg, request_handler)
            IOLoop.instance().add_callback(request_handler.async_callback(_open))
        elif msg.operation == msg.CLOSE_COMMAND:
            # NOTE(review): the close command is currently a no-op --
            # confirm clients do not expect a close response.
            pass

    def startStream(self, msg, request_handler):
        """Get this stream rolling!"""
        connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))

        if self.channel_set.notify_connections is True:
            # Publishes push notifications; no polling needed.
            poller = None
        else:
            # Call _notify multiple times if polling.
            poller = PeriodicCallback(None, self.poll_interval, IOLoop.instance())

        # Handle new message.
        def _notify():
            if connection.connected is False:
                # Tear down: stop polling, notify the client, and finish.
                if poller is not None:
                    poller.stop()
                connection.unSetNotifyFunc()

                if request_handler.request.connection.stream.closed() is False:
                    msg = messaging.StreamingMessage.getDisconnectMsg()
                    self.sendMsgs((msg,), request_handler)
                    request_handler.finish()
                return

            msgs = self.channel_set.subscription_manager.pollConnection(connection)
            if len(msgs) > 0:
                self.sendMsgs(msgs, request_handler)
        connection.setNotifyFunc(_notify)

        if poller is not None:
            poller.callback = _notify
            poller.start()

        # Handle dropped connection.
        def _connectionLost():
            if poller is not None:
                poller.stop()
            self.channel_set.disconnect(connection)
            connection.unSetNotifyFunc()
        request_handler.request.connection.stream.set_close_callback(_connectionLost)

        # Send acknowledge message
        response = msg.acknowledge()
        response.body = connection.id
        self.sendMsgs((response,), request_handler)

        # Padding bytes prompt the client to start
        # dispatching the stream immediately.
        request_handler.write(chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES)
        request_handler.flush()

        self.startBeat(connection, request_handler)

    def sendMsgs(self, msgs, request_handler):
        """Send messages to the client."""
        for msg in msgs:
            request_handler.write(messaging.StreamingMessage.prepareMsg(msg, self.endpoint))
        request_handler.flush()

    def startBeat(self, connection, request_handler):
        """Periodically write a NULL byte to keep the stream alive."""
        beater = PeriodicCallback(None, self.heart_interval, IOLoop.instance())

        def _beat():
            if connection.connected is False:
                beater.stop()
            else:
                request_handler.write(chr(messaging.StreamingMessage.NULL_BYTE))
                request_handler.flush()
        beater.callback = _beat
        beater.start()
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/tornado_channel.py
|
tornado_channel.py
|
import types
import threading
from django import http
import amfast
from amfast.remoting import Packet
import amfast.remoting.flex_messages as messaging
from amfast.remoting.channel import HttpChannel, ChannelError
def django_response_wrapper(func):
    '''
    A decorator which wraps a bare channel response in a Django response.
    '''
    def _wrapper(channel, django_request):
        packet = func(channel, django_request)
        if packet is None:
            # Nothing to send back: empty AMF-typed response.
            return http.HttpResponse(mimetype=channel.CONTENT_TYPE)
        if type(packet) is types.GeneratorType:
            # Streaming body: hand the generator straight to Django.
            return http.HttpResponse(content=packet, mimetype=channel.CONTENT_TYPE)
        raise ChannelError('Invalid response type.')
    return _wrapper
class DjangoChannel(HttpChannel):
    """A channel that works with Django."""

    # Attribute that holds Django's
    # request object, so that it can
    # be accessed from a target.
    DJANGO_REQUEST = '_django_request'

    def __call__(self, http_request):
        """Handle a Django request and return a Django response."""
        if http_request.method != 'POST':
            # AMF requests are always POSTs.
            return http.HttpResponseNotAllowed(['POST'])

        try:
            request_packet = self.decode(http_request.raw_post_data)
            setattr(request_packet, self.DJANGO_REQUEST, http_request)
        except amfast.AmFastError, exc:
            # Malformed AMF from the client.
            return http.HttpResponseBadRequest(mimetype='text/plain', content=self.getBadEncodingMsg())
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())

        try:
            response_packet = self.invoke(request_packet)
            raw_response = self.encode(response_packet)

            http_response = http.HttpResponse(mimetype=self.CONTENT_TYPE)
            http_response['Content-Length'] = str(len(raw_response))
            http_response.write(raw_response)
            return http_response
        except amfast.AmFastError, exc:
            # Encoding/invocation failure is reported as a server error.
            return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            return http.HttpResponseServerError(mimetype='text/plain', content=self.getBadServerMsg())
class StreamingDjangoChannel(DjangoChannel):
    """Experimental support for streaming with Django."""

    def __init__(self, name, max_connections=-1, endpoint=None,
        wait_interval=0, heart_interval=30000):
        DjangoChannel.__init__(self, name, max_connections=max_connections,
            endpoint=endpoint, wait_interval=wait_interval)

        # Milliseconds between heart-beat bytes.
        self.heart_interval = heart_interval

    def __call__(self, http_request):
        """Handle either a plain AMF request or a streaming command."""
        if http_request.META['CONTENT_TYPE'] == self.CONTENT_TYPE:
            # Regular AMF packet: defer to the standard channel.
            return DjangoChannel.__call__(self, http_request)

        try:
            body = http_request.raw_post_data
            msg = messaging.StreamingMessage()
            msg.parseBody(body)
            # Django's request object wraps all the WSGI options.
            msg.parseParams(http_request.META['QUERY_STRING'])
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            # NOTE(review): if parsing failed, 'msg' may be unbound below.
            amfast.log_exc(exc)

        if msg.operation == msg.OPEN_COMMAND:
            return self.startStream(msg)
        elif msg.operation == msg.CLOSE_COMMAND:
            return self.stopStream(msg)

        raise ChannelError('Http streaming operation unknown: %s' % msg.operation)

    @django_response_wrapper
    def startStream(self, msg):
        """Generator producing the streaming HTTP response body."""
        try:
            connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            raise ChannelError('Http streaming operation unknown: %s' % msg.operation)

        try:
            # Kick off the heart-beat timer chain (see self.beat).
            timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
            timer.daemon = True
            timer.start()

            inited = False
            event = threading.Event()
            connection.setNotifyFunc(event.set)
            poll_secs = float(self.poll_interval) / 1000

            while True:
                if connection.connected is False:
                    msg = messaging.StreamingMessage.getDisconnectMsg()
                    try:
                        yield messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
                    finally:
                        # Client may have already disconnected
                        return

                if inited is False:
                    # Send acknowledge message
                    response = msg.acknowledge()
                    response.body = connection.id
                    bytes = messaging.StreamingMessage.prepareMsg(response, self.endpoint)
                    inited = True
                    # Padding bytes prompt the client to start
                    # dispatching the stream immediately.
                    bytes += chr(messaging.StreamingMessage.NULL_BYTE) * self.KICKSTART_BYTES
                    yield bytes

                if self.channel_set.notify_connections is True:
                    # Block until notification of new message
                    event.wait()
                else:
                    # Block until poll_interval is reached
                    event.wait(poll_secs)

                # Message has been published,
                # or it's time for a heart beat

                # Remove notify_func so that
                # New messages don't trigger event.
                connection.unSetNotifyFunc()

                msgs = self.channel_set.subscription_manager.pollConnection(connection)
                if len(msgs) > 0:
                    while len(msgs) > 0:
                        # Dispatch all messages to client
                        for msg in msgs:
                            try:
                                bytes = messaging.StreamingMessage.prepareMsg(msg, self.endpoint)
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except Exception, exc:
                                amfast.log_exc(exc)
                                self.channel_set.disconnect(connection)
                                break

                            try:
                                yield bytes
                            except (KeyboardInterrupt, SystemExit):
                                raise
                            except:
                                # Client has disconnected
                                self.channel_set.disconnect(connection)
                                return

                        # Re-poll: more messages may have arrived
                        # while dispatching.
                        msgs = self.channel_set.subscription_manager.pollConnection(connection)
                else:
                    # Send heart beat
                    try:
                        yield chr(messaging.StreamingMessage.NULL_BYTE)
                    except (KeyboardInterrupt, SystemExit):
                        raise
                    except:
                        # Client has disconnected
                        self.channel_set.disconnect(connection)
                        return

                # Create new event to trigger new messages or heart beats
                event = threading.Event()
                connection.setNotifyFunc(event.set)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception, exc:
            amfast.log_exc(exc)
            self.channel_set.disconnect(connection)
            return

    @django_response_wrapper
    def stopStream(self, msg):
        """Stop a streaming connection."""
        connection = self.channel_set.connection_manager.getConnection(msg.headers.get(msg.FLEX_CLIENT_ID_HEADER))
        connection.disconnect()
        # Wake the streaming generator so it can shut down.
        if hasattr(connection, "notify_func") and connection.notify_func is not None:
            connection.notify_func()

    @django_response_wrapper
    def beat(self, connection):
        """Send a heart beat."""
        if hasattr(connection, "notify_func") and connection.notify_func is not None:
            connection.notify_func()
        else:
            # Stream is gone; stop re-scheduling beats.
            return

        # Create timer for next beat
        timer = threading.Timer(float(self.heart_interval) / 1000, self.beat, (connection, ))
        timer.daemon = True
        timer.start()
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/django_channel.py
|
django_channel.py
|
import time
import cPickle as pickle
import sqlalchemy as sa
from sqlalchemy.sql import func, and_
if sa.__version__.startswith('0.5'):
    # 0.5 is lowest supported version
    BINARY_TYPE = sa.Binary
else:
    # sa.Binary was renamed to LargeBinary in SQLAlchemy 0.6+.
    BINARY_TYPE = sa.LargeBinary
from connection import Connection
from connection_manager import NotConnectedError, ConnectionManager, SessionAttrError
class SaConnectionManager(ConnectionManager):
"""Manages connections in a database, uses SqlAlchemy to talk to the DB."""
    def __init__(self, engine, metadata, connection_class=Connection, connection_params=None,
        table_prefix=''):
        """
        arguments:
        ===========
         * engine - SqlAlchemy engine used for all DB access.
         * metadata - SqlAlchemy MetaData the tables are attached to.
         * connection_class - class used to instantiate connections.
         * connection_params - dict, extra params for connections.
         * table_prefix - string, optional prefix for table names.
        """
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        self.engine = engine
        self.metadata = metadata
        # Normalize prefix to exactly one trailing underscore ('' stays '').
        self.table_prefix = table_prefix and "%s_" % table_prefix.rstrip('_') or table_prefix

        self.mapTables()
    def reset(self):
        """Delete all connection and session-attribute rows."""
        db = self.getDb()
        # Delete child rows first: session_attrs references connections.
        db.execute(self.session_attrs.delete())
        db.execute(self.connections.delete())
        db.close()
    def mapTables(self):
        """Define the connection and session-attribute tables."""
        self.connections = sa.Table('%sconnections' % self.table_prefix, self.metadata,
            sa.Column('id', sa.String(36), primary_key=True),
            sa.Column('channel_name', sa.String(128), nullable=False),
            sa.Column('timeout', sa.Float(), nullable=False),
            sa.Column('connected', sa.Boolean(), nullable=False),
            sa.Column('last_active', sa.Float(), nullable=False),
            sa.Column('last_polled', sa.Float(), nullable=False, default=0.0),
            sa.Column('authenticated', sa.Boolean(), nullable=False, default=False),
            sa.Column('flex_user', sa.String(128), nullable=True),
            sa.Column('notify_func_id', sa.Integer(), nullable=True)
        )

        self.session_attrs = sa.Table('%ssession_attrs' % self.table_prefix, self.metadata,
            sa.Column('connection_id', sa.String(36),
                sa.ForeignKey('%sconnections.id' % self.table_prefix),
                primary_key=True, index=True),
            # Composite primary key: one row per (connection, attr name).
            sa.Column('name', sa.String(128), primary_key=True),
            sa.Column('value', BINARY_TYPE(), nullable=False)
        )
def createTables(self):
db = self.getDb()
self.connections.create(db, checkfirst=True)
self.session_attrs.create(db, checkfirst=True)
db.close()
def getDb(self):
return self.engine.connect()
def loadConnection(self, connection_id):
s = sa.select([self.connections.c.channel_name, self.connections.c.timeout],
self.connections.c.id==connection_id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
if row is None:
raise NotConnectedError("Connection '%s' is not connected." % connection_id)
return self.connection_class(self, row[self.connections.c.channel_name],
connection_id, row[self.connections.c.timeout])
def initConnection(self, connection, channel):
ins = self.connections.insert().values(
id=connection.id,
channel_name=connection.channel_name,
timeout=connection.timeout,
connected=True,
last_active=time.time() * 1000,
last_polled=0.0,
authenticated=False,
)
db = self.getDb()
db.execute(ins)
db.close()
def getConnectionCount(self, channel_name):
s = sa.select([sa.sql.func.count(self.connections.c.id)])
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
return row[0]
def iterConnectionIds(self):
s = sa.select([self.connections.c.id])
db = self.getDb()
result = db.execute(s)
for row in result:
yield row[self.connections.c.id]
db.close()
# --- proxies for connection properties --- #
def getConnected(self, connection):
s = sa.select([self.connections.c.connected], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row =result.fetchone()
db.close()
if row is None:
return False
return row[self.connections.c.connected]
def getLastActive(self, connection):
s = sa.select([self.connections.c.last_active], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
return row[self.connections.c.last_active]
def getLastPolled(self, connection):
s = sa.select([self.connections.c.last_polled], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
return row[self.connections.c.last_polled]
def getAuthenticated(self, connection):
s = sa.select([self.connections.c.authenticated], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
return row[self.connections.c.authenticated]
def getFlexUser(self, connection):
s = sa.select([self.connections.c.flex_user], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
return row[self.connections.c.flex_user]
def getNotifyFunc(self, connection):
s = sa.select([self.connections.c.notify_func_id], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
if row is None:
return None
notify_func_id = notify_func_id = row[self.connections.c.notify_func_id]
if notify_func_id is None:
return None
return connection._getNotifyFuncById(notify_func_id)
# --- proxies for connection methods --- #
def deleteConnection(self, connection):
d = self.connections.delete().\
where(self.connections.c.id==connection.id)
db = self.getDb()
db.execute(d)
db.close()
ConnectionManager.deleteConnection(self, connection)
def connectConnection(self, connection):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(connected=True)
db = self.getDb()
db.execute(u)
db.close()
def disconnectConnection(self, connection):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(connected=False)
db = self.getDb()
db.execute(u)
db.close()
def touchConnection(self, connection):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(last_active=time.time() * 1000)
db = self.getDb()
db.execute(u)
db.close()
def touchPolled(self, connection):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(last_polled=time.time() * 1000)
db = self.getDb()
db.execute(u)
db.close()
def authenticateConnection(self, connection, user):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(authenticated=True, flex_user=user)
db = self.getDb()
db.execute(u)
db.close()
def unAuthenticateConnection(self, connection):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(authenticated=False, flex_user=None)
db = self.getDb()
db.execute(u)
db.close()
def setNotifyFunc(self, connection, func):
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(notify_func_id=connection._setNotifyFunc(func))
db = self.getDb()
db.execute(u)
db.close()
def unSetNotifyFunc(self, connection):
s = sa.select([self.connections.c.notify_func_id], self.connections.c.id==connection.id)
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
if row is None:
return
connection._delNotifyFunc(row[self.connections.c.notify_func_id])
u = self.connections.update().\
where(self.connections.c.id==connection.id).\
values(notify_func_id=None)
db.execute(u)
db.close()
def getConnectionSessionAttr(self, connection, name):
s = sa.select([self.session_attrs.c.value],
and_(self.session_attrs.c.connection_id==connection.id,
self.session_attrs.c.name==name))
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
db.close()
if row is None:
raise SessionAttrError("Attribute '%s' not found." % name)
return pickle.loads(str(row[self.session_attrs.c.value]))
def setConnectionSessionAttr(self, connection, name, val):
s = sa.select([self.session_attrs.c.connection_id],
and_(self.session_attrs.c.connection_id==connection.id,
self.session_attrs.c.name==name))
db = self.getDb()
result = db.execute(s)
row = result.fetchone()
if row is None:
statement = self.session_attrs.insert().values(
connection_id=connection.id,
name=name, value=pickle.dumps(val))
else:
statement = self.connections.update().\
where(and_(self.session_attrs.c.connection_id==connection.id,
self.session_attrs.c.name==name)).\
values(value=pickle.dumps(val))
db.execute(statement)
db.close()
def delConnectionSessionAttr(self, connection, name):
d = self.session_attrs.delete().\
where(and_(self.session_attrs.c.connection_id==connection.id,
self.session_attrs.c.name==name))
db = self.getDb()
db.execute(d)
db.close()
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/sa_connection_manager.py
|
sa_connection_manager.py
|
import collections
import threading
import amfast
from amfast import AmFastError, class_def
from amfast.class_def.as_types import AsError
class RemotingError(AmFastError):
    """Base exception for errors raised by the remoting package."""
    pass
class Service(object):
    """A named collection of remoting targets exposed to AMF clients
    by an amfast.remoting.channel.Channel.

    attributes:
    ============
     * name - string, service name.
    """

    # Name of special service that handles packet header targets
    PACKET_HEADER_SERVICE = 'PACKET_HEADER_SERVICE'
    # Name of special service that handles command messages
    COMMAND_SERVICE = 'COMMAND_SERVICE'
    # Name of special service that handles targets without a service prefix
    DEFAULT_SERVICE = 'DEFAULT_SERVICE'

    SEPARATOR = '.'  # Character used to separate service names and target names

    def __init__(self, name):
        self.name = name
        self._targets = {}  # target name -> Target, internal bookkeeping
        self._lock = threading.RLock()  # guards _targets

    def __iter__(self):
        return self._targets.itervalues()

    def mapTarget(self, target):
        """Expose a target through this service."""
        with self._lock:
            self._targets[target.name] = target

    def unMapTarget(self, target):
        """Stop exposing a target through this service."""
        with self._lock:
            self._targets.pop(target.name, None)

    def getTarget(self, target_name):
        """Look up a target by name; returns None when not mapped."""
        with self._lock:
            return self._targets.get(target_name, None)
class Target(object):
    """A remoting target can be invoked by an RPC message received from a client.

    attributes:
    ============
     * name - string, name of the target.
     * secure - boolean, True to require login.
    """

    @classmethod
    def mapObject(cls, service, obj, secure=False, map_private=False,
        map_special=False):
        """Maps all instance methods of an object.

        arguments:
        ============
         * service - Service, the service to map to.
         * obj - object, the object instance to map methods from.
         * secure - boolean, True to require login. Default = False.
         * map_private - boolean, True to map methods starting with '_'.
             Default = False
         * map_special - boolean, True to map methods starting with '__'.
             Default = False
        """
        for attr_name in dir(obj):
            if attr_name.startswith('__') and map_special is False:
                continue
            if attr_name.startswith('_') and map_private is False:
                continue

            member = getattr(obj, attr_name, None)
            if isinstance(member, collections.Callable):
                service.mapTarget(cls(callable=member, name=attr_name, secure=secure))

    def __init__(self, name, secure=False):
        self.name = name
        self.secure = secure

    def _invokeStr(self, args):
        # Debug-log representation of an invocation.
        return '<targetInvocation target="%s">%s</targetInvocation>' % \
            (self.name, args)

    def invoke(self, packet, msg, args):
        """Invoke a target.

        arguments
        ==========
         * packet - Packet, Packet that is invoking the target.
         * msg - Message, the message that is invoking this target.
             This value will be None if the target is being invoked by a package header.
         * args - list, list of arguments to pass to the callable.
        """
        raise RemotingError("'invoke' must be implemented on a sub-class.")
class CallableTarget(Target):
    """Invokes a stored callable with the message's arguments.

    attributes:
    ============
     * name - string, name of the target.
     * secure - boolean, True to require login.
     * callable - callable, a callable that can be invoked.
    """

    def __init__(self, callable, name, secure=False):
        Target.__init__(self, name, secure)
        self.callable = callable

    def invoke(self, packet, msg, args):
        """Logs the invocation (when debugging) and calls self.callable(*args)."""
        if amfast.log_debug:
            amfast.logger.debug(self._invokeStr(args))

        return self.callable(*args)
class ExtCallableTarget(CallableTarget):
    """Like CallableTarget, but also forwards the packet and message
    that invoke() receives to the stored callable.

    attributes:
    ============
     * name - string, name of the target.
     * secure - boolean, True to require login.
     * callable - callable, a callable that can be invoked.
    """

    def invoke(self, packet, msg, args):
        """Logs the invocation (when debugging) and calls self.callable(packet, msg, *args)."""
        if amfast.log_debug:
            amfast.logger.debug(self._invokeStr(args))

        return self.callable(packet, msg, *args)
class Header(object):
    """A remoting message header.

    attributes:
    ============
     * name - string, header name.
     * required - bool, True if header is required.
     * value - object, header value.
    """

    def __init__(self, name, required=False, value=None):
        self.name = name
        self.required = required
        self.value = value

    def __str__(self):
        return '<header name="%s" required="%s">%s</header>' % \
            (self.name, self.required, self.value)

    def invoke(self, request):
        """Invoke an action on this header if one has been mapped."""
        header_service = request.channel.channel_set.service_mapper.packet_header_service
        target = header_service.getTarget(self.name)
        if target is None:
            return False
        return target.invoke(request, None, (self.value,))
class Message(object):
"""A remoting message body.
attributes:
============
* target - Target, the target to be invoked.
* response - string, message id.
* body - object, message body.
"""
SUCCESS_TARGET = '/onResult'
FAILED_TARGET = '/onStatus'
DEBUG_TARGET = '/onDebugEvents'
def __init__(self, target=None, response=None, body=None):
self.target = target
self.response = response
self.body = body
def _isInvokable(self):
"""If True, the message's body can invoke itself."""
if self.target == 'null':
return True
return False
is_invokable = property(_isInvokable)
def _isFlexMsg(self):
"""If True, the message's body is a Flex message."""
return hasattr(self.body, 'FLEX_CLIENT_ID_HEADER')
is_flex_msg = property(_isFlexMsg)
def invoke(self, request):
"""Invoke an action on an RPC message and return a response message."""
try:
self.response_msg = self.acknowledge(request)
if self.is_invokable:
self.body[0].invoke(request, self)
elif self.target is not None and self.target != '':
self._invoke(request)
else:
raise RemotingError("Cannot invoke message: '%s'." % self)
except Exception, exc:
amfast.log_exc(exc)
self.response_msg = self.fail(request, exc)
return self.response_msg
def _invoke(self, request):
"""Invoke an action on an AMF0 style RPC message."""
qualified_name = self.target.split(Service.SEPARATOR)
if len(qualified_name) < 2:
target_name = self.target
service_name = Service.DEFAULT_SERVICE
else:
target_name = qualified_name.pop()
service_name = Service.SEPARATOR.join(qualified_name)
target = request.channel.channel_set.service_mapper.getTarget(service_name, target_name)
if target is None:
raise RemotingError("Target '%s' not found." % self.target)
if target.secure is True:
# Make sure user is authenticated
if not hasattr(request, '_authenticated'):
raise RemotingError('Target requires authentication.');
self.response_msg.body = target.invoke(request, self, self.body)
def fail(self, request, exc):
"""Return an error response message."""
response_target = self.response + self.FAILED_TARGET
response_message = Message(target=response_target, response='')
if self.is_invokable:
error_val = self.body[0].fail(request, self, exc)
else:
error_val = AsError(exc=exc)
response_message.body = error_val
return response_message
def convertFail(self, exc):
"""Convert a successful message into a failure."""
self.target = self.target.replace(self.SUCCESS_TARGET, self.FAILED_TARGET)
if self.is_flex_msg:
self.body = self.body.convertFail(exc=exc)
else:
self.body = AsError(exc=exc)
def acknowledge(self, request):
"""Return a successful response message to acknowledge an RPC message."""
response_target = self.response + self.SUCCESS_TARGET
response_message = Message(target=response_target, response='')
if self.is_invokable:
response_message.body = self.body[0].acknowledge(request, self)
return response_message
def __str__(self):
return "<message> <target>%s</target> <response>%s</response> <body>%s</body></message>" % (self.target, self.response, self.body)
class Packet(object):
"""An AMF NetConnection packet that can be passed from client->server or server->client.
attributes:
============
* client_type - string, the type of client connected to the server.
* headers - dict, keys = header names, values = Header objects.
* messages - list, a list of messages that belong to the packet.
"""
FLASH_8 = 0x00
FLASH_COM = 0x01
FLASH_9 = 0x03
def __init__(self, client_type=None, headers=None, messages=None):
if client_type is None:
client_type = self.FLASH_8
self.client_type = client_type
if headers is None:
headers = []
self.headers = headers
if messages is None:
messages = []
self.messages = messages
def _getAmf3(self):
if self.client_type == self.FLASH_9:
return True
return False
is_amf3 = property(_getAmf3)
def invoke(self):
"""Process an RPC packet and return a response packet."""
if amfast.log_debug:
amfast.logger.debug("<requestPacket>%s</requestPacket>" % self)
self.response = self.acknowledge()
try:
# Invoke any headers
for header in self.headers:
header.invoke(self)
# Invoke any messages
for message in self.messages:
self.response.messages.append(message.invoke(self))
except Exception, exc:
# Fail all messages
amfast.log_exc(exc)
self.response = self.fail(exc)
if amfast.log_debug:
amfast.logger.debug("<responsePacket>%s</responsePacket>" % self.response)
return self.response
def fail(self, exc):
"""Return a response Packet with all messages failed."""
response = self.acknowledge()
for message in self.messages:
response.messages.append(message.fail(self, exc))
return response
def acknowledge(self):
"""Create a response to this packet."""
response = Packet()
response.client_type = self.client_type
return response
def __str__(self):
header_msg = "\n ".join(["%s" % header for header in self.headers])
message_msg = "\n ".join(["%s" % message for message in self.messages])
return """
<Packet>
<headers>
%s
</headers>
<messages>
%s
</messages>
<attributes>
<attr name="client_type">%s</attr>
</attributes>
</Packet>
""" % (header_msg, message_msg, self.client_type)
class ServiceMapper(object):
    """Maps service to service name.

    attributes
    ===========
     * packet_header_service - Service, a special service for AMF packet headers.
     * command_service - Service, a special service for Flex CommandMessages.
     * default_service - Service, a special service for targets that don't have service specifiers.

    When an AMF packet or message is processed, the object checks
    the header_service Service for a target where target.name == header.name
    for each header. If a target is found, the target will be invoked
    before any packet.messages are invoked.

    An example of how to use this functionality is adding a target named 'Credentials'
    to packet_header_service that checks credentials stored in the 'Credentials' header before
    invoking any messages in the packet.
    """

    def __init__(self):
        self._services = {}  # used internally to keep track of Service objects.
        # The lock must exist before _mapBuiltIns runs, because mapService
        # acquires it.
        self._lock = threading.RLock()
        self._mapBuiltIns()

    def __iter__(self):
        return self._services.itervalues()

    def _mapBuiltIns(self):
        """Map default Targets required for Authentication and FlexMessaging.

        Users can override the defaults, by remapping their own targets.
        """
        import amfast.remoting.flex_messages as messaging
        import targets

        # Map built in targets
        self.packet_header_service = Service(Service.PACKET_HEADER_SERVICE)
        self.mapService(self.packet_header_service)
        self.command_service = Service(Service.COMMAND_SERVICE)
        self.mapService(self.command_service)
        self.default_service = Service(Service.DEFAULT_SERVICE)
        self.mapService(self.default_service)

        # NetConnection authentication
        self.packet_header_service.mapTarget(ExtCallableTarget(targets.nc_auth,
            'Credentials'))

        # CommandMessages: each operation id is handled by its own target.
        for command_callable, operation in (
            (targets.client_ping, messaging.CommandMessage.CLIENT_PING_OPERATION),
            (targets.login_operation, messaging.CommandMessage.LOGIN_OPERATION),
            (targets.logout_operation, messaging.CommandMessage.LOGOUT_OPERATION),
            (targets.poll_operation, messaging.CommandMessage.POLL_OPERATION),
            (targets.subscribe_operation, messaging.CommandMessage.SUBSCRIBE_OPERATION),
            (targets.unsubscribe_operation, messaging.CommandMessage.UNSUBSCRIBE_OPERATION),
            (targets.disconnect_operation, messaging.CommandMessage.DISCONNECT_OPERATION)):
            self.command_service.mapTarget(
                ExtCallableTarget(command_callable, operation))

    def mapService(self, service):
        """Maps a service

        arguments
        ==========
         * service - Service, the service to map.
        """
        # Locked for consistency with unMapService/getTarget; previously
        # this method mutated _services without holding the lock.
        self._lock.acquire()
        try:
            self._services[service.name] = service
        finally:
            self._lock.release()

    def unMapService(self, service):
        """Un-maps a service

        arguments
        ==========
         * service - Service, the service to un-map.
        """
        self._lock.acquire()
        try:
            if service.name in self._services:
                del self._services[service.name]
        finally:
            self._lock.release()

    def getTarget(self, service_name, target_name):
        """Get a Target

        Returns None if the Target is not found.

        arguments
        ==========
         * service_name - string, the service name.
         * target_name - string, the target name.
        """
        self._lock.acquire()
        try:
            service = self.getService(service_name)
            if service is None:
                target = None
            else:
                target = service.getTarget(target_name)
        finally:
            self._lock.release()
        return target

    def getService(self, service_name):
        """Get a Service

        Returns None if the Service is not found.

        arguments
        ==========
         * service_name - string, the service name.
        """
        return self._services.get(service_name, None)
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/__init__.py
|
__init__.py
|
import pickle
import random
import time
from google.appengine.ext import db
from connection import Connection, ConnectionError
from connection_manager import ConnectionManager, NotConnectedError, SessionAttrError
class GaeConnection(Connection):
    """A connection stored in Google Datastore."""

    def __init__(self, *args, **kwargs):
        Connection.__init__(self, *args, **kwargs)
        # Backing GaeConnectionModel entity; set by GaeConnectionManager
        # in loadConnection/initConnection, None until then.
        self.model = None
class GaeConnectionLastActive(db.Model):
    """Time of the connection's last activity (ms since epoch)."""
    # Written from time.time() * 1000 by GaeConnectionManager.
    value = db.FloatProperty(required=True)
class GaeConnectionConnected(db.Model):
    """Connected/disconnected flag for a connection."""
    # True while the connection is considered connected.
    value = db.BooleanProperty(required=True)
class GaeConnectionLastPolled(db.Model):
    """Time the connection last polled for messages (ms since epoch)."""
    # Written from time.time() * 1000; 0.0 means never polled.
    value = db.FloatProperty(required=True)
class GaeConnectionAuthentication(db.Model):
    """Authentication state for a connection."""
    # True after a successful login.
    authenticated = db.BooleanProperty(required=True)
    # Flex user name supplied at login; cleared on un-authentication.
    flex_user = db.StringProperty(required=False)
class GaeConnectionSession(db.Model):
    """Pickled session attributes for a connection."""
    # pickle.dumps of the connection's session dict.
    value = db.BlobProperty(required=True)
class GaeConnectionModel(db.Model):
    """Connection data that is stored in a Google Datastore."""

    # These values never change
    channel_name = db.StringProperty(required=True)
    timeout = db.IntegerProperty(required=True)

    # Every changeable property is its own model,
    # so we can update each property
    # without affecting any other.

    # Referenced properties.
    last_active = db.ReferenceProperty(reference_class=GaeConnectionLastActive, required=True)
    connected = db.ReferenceProperty(reference_class=GaeConnectionConnected, required=True)
    last_polled = db.ReferenceProperty(reference_class=GaeConnectionLastPolled, required=True)
    # Optional: absent until the connection authenticates or stores session data.
    authentication = db.ReferenceProperty(reference_class=GaeConnectionAuthentication, required=False)
    session = db.ReferenceProperty(reference_class=GaeConnectionSession, required=False)
class GaeChannelModel(db.Model):
    """Channel data that is stored in a Google Datastore."""

    # Shard counter into multiple entities to avoid conflicts.
    NUM_SHARDS = 20

    # Channel this shard belongs to.
    name = db.StringProperty(required=True)
    # Partial connection count held by this shard; the real total is the
    # sum over all shards with the same name.
    count = db.IntegerProperty(required=True)
class GaeConnectionManager(ConnectionManager):
    """Manages connections stored by Google DataStore.

    attributes
    =============
     * touch_time - int, minimum time in milliseconds between writes to 'last_active' field.
    """

    def __init__(self, connection_class=GaeConnection, connection_params=None, touch_time=10000):
        ConnectionManager.__init__(self, connection_class=connection_class,
            connection_params=connection_params)

        # Reduces number of writes to 'last_active' field.
        self.touch_time = touch_time

    def reset(self):
        """Delete every entity this manager stores in the Datastore."""
        # One keys-only query per model; order is not significant because
        # the Datastore enforces no FK constraints.
        for model_class in (GaeConnectionModel, GaeConnectionLastActive,
            GaeConnectionConnected, GaeConnectionLastPolled,
            GaeConnectionAuthentication, GaeConnectionSession,
            GaeChannelModel):
            for key in model_class.all(keys_only=True):
                db.delete(key)

    def _getChannelShardName(self, channel_name):
        """Pick a random shard key name for a channel's counter."""
        index = random.randint(0, GaeChannelModel.NUM_SHARDS - 1)
        return ":".join((channel_name, str(index)))

    def _incrementChannelCount(self, channel_name):
        """Add 1 to a channel's sharded counter (run inside a transaction)."""
        shard_name = self._getChannelShardName(channel_name)
        counter = GaeChannelModel.get_by_key_name(shard_name)
        if counter is None:
            counter = GaeChannelModel(key_name=shard_name,
                name=channel_name, count=0)

        counter.count += 1
        counter.put()

    def _decrementChannelCount(self, channel_name):
        """Subtract 1 from a channel's sharded counter (run inside a transaction)."""
        shard_name = self._getChannelShardName(channel_name)
        counter = GaeChannelModel.get_by_key_name(shard_name)
        if counter is None:
            counter = GaeChannelModel(key_name=shard_name,
                name=channel_name, count=0)

        counter.count -= 1
        counter.put()

    def getConnectionCount(self, channel_name):
        """Return the channel's connection count (sum over all shards)."""
        query = GaeChannelModel.all()
        query.filter('name = ', channel_name)

        total = 0
        for result in query:
            total += result.count
        return total

    def loadConnection(self, connection_id):
        """Load a connection from the Datastore.

        Raises NotConnectedError when the id is unknown.
        """
        connection_model = GaeConnectionModel.get_by_key_name(connection_id)
        if connection_model is None:
            raise NotConnectedError("Connection '%s' is not connected." % connection_id)

        connection = self.connection_class(self, connection_model.channel_name,
            connection_id, timeout=connection_model.timeout)
        connection.model = connection_model
        return connection

    def initConnection(self, connection, channel):
        """Persist a new connection and bump the channel's counter."""
        last_active = GaeConnectionLastActive(key_name=connection.id,
            value=(time.time() * 1000))
        last_active.put()

        connected = GaeConnectionConnected(key_name=connection.id, value=True)
        connected.put()

        last_polled = GaeConnectionLastPolled(key_name=connection.id, value=0.0)
        last_polled.put()

        params = {
            'key_name': connection.id,
            'channel_name': connection.channel_name,
            'timeout': connection.timeout,
            'connected': connected,
            'last_active': last_active,
            'last_polled': last_polled
        }

        connection_model = GaeConnectionModel(**params)
        connection_model.put()

        db.run_in_transaction(self._incrementChannelCount,
            connection.channel_name)

        connection.model = connection_model

    def iterConnectionIds(self):
        """Iterate over the ids of all persisted connections."""
        query = GaeConnectionModel.all(keys_only=True)
        for key in query:
            yield key.name()

    # --- proxies for connection properties --- #

    def getConnected(self, connection):
        """True when the connection is persisted and flagged connected."""
        if connection.model is None:
            return False
        return connection.model.connected.value

    def getLastActive(self, connection):
        """Return the connection's last_active timestamp (ms)."""
        return connection.model.last_active.value

    def getLastPolled(self, connection):
        """Return the connection's last_polled timestamp (ms)."""
        return connection.model.last_polled.value

    def getAuthenticated(self, connection):
        """Return the connection's authenticated flag."""
        if connection.model.authentication is None:
            return False
        return connection.model.authentication.authenticated

    def getFlexUser(self, connection):
        """Return the Flex user name, or None when not authenticated."""
        if connection.model.authentication is None:
            return None
        return connection.model.authentication.flex_user

    def getNotifyFunc(self, connection):
        # Notify functions cannot be persisted in the Datastore.
        return None

    # --- proxies for connection methods --- #

    def deleteConnection(self, connection):
        """Remove a connection and all of its referenced entities."""
        # Delete referenced properties 1st
        db.delete(GaeConnectionModel.last_active.get_value_for_datastore(connection.model))
        db.delete(GaeConnectionModel.connected.get_value_for_datastore(connection.model))
        db.delete(GaeConnectionModel.last_polled.get_value_for_datastore(connection.model))

        # Optional referenced properties
        authentication_key = GaeConnectionModel.authentication.get_value_for_datastore(connection.model)
        if authentication_key is not None:
            db.delete(authentication_key)

        session_key = GaeConnectionModel.session.get_value_for_datastore(connection.model)
        if session_key is not None:
            db.delete(session_key)

        # Delete connection
        connection.model.delete()
        connection.model = None

        ConnectionManager.deleteConnection(self, connection)
        db.run_in_transaction(self._decrementChannelCount, connection.channel_name)

    def connectConnection(self, connection):
        """Flag the connection as connected."""
        if connection.model is not None:
            connected = GaeConnectionConnected(key_name=connection.id, value=True)
            connected.put()
            connection.model.connected = connected

    def disconnectConnection(self, connection):
        """Flag the connection as disconnected."""
        if connection.model is not None:
            connected = GaeConnectionConnected(key_name=connection.id, value=False)
            connected.put()
            connection.model.connected = connected

    def touchConnection(self, connection):
        """Update last_active, writing to the Datastore at most every touch_time ms."""
        if connection.model is not None:
            now = time.time() * 1000
            diff = now - connection.model.last_active.value
            connection.model.last_active.value = now
            if diff > self.touch_time:
                # last_active value is only written periodically
                # to save time writing to data_store.
                connection.model.last_active.put()

    def touchPolled(self, connection):
        """Update last_polled and persist it."""
        self.softTouchPolled(connection)
        if connection.model is not None:
            connection.model.last_polled.put()

    def softTouchPolled(self, connection):
        """Update last_polled in memory only; touchPolled persists it."""
        if connection.model is not None:
            connection.model.last_polled.value = time.time() * 1000

    def authenticateConnection(self, connection, user):
        """Mark the connection authenticated as the given Flex user."""
        if connection.model is not None:
            if connection.model.authentication is None:
                authentication = GaeConnectionAuthentication(
                    key_name=connection.id, authenticated=True, flex_user=user)
                authentication.put()
                connection.model.authentication = authentication
                connection.model.put()
            else:
                connection.model.authentication.authenticated = True
                connection.model.authentication.flex_user = user
                # Bug fix: put() was previously called on the boolean
                # 'authenticated' property instead of the entity.
                connection.model.authentication.put()

    def unAuthenticateConnection(self, connection):
        """Clear the connection's authentication state."""
        if connection.model is not None:
            if connection.model.authentication is not None:
                connection.model.authentication.authenticated = False
                connection.model.authentication.flex_user = None
                connection.model.authentication.put()

    def initSession(self, connection):
        """Load the pickled session dict into connection._session."""
        if connection.model is not None:
            if connection.model.session is None or \
                connection.model.session.value is None:
                connection._session = {}
            else:
                connection._session = pickle.loads(connection.model.session.value)

    def saveSession(self, connection):
        """Pickle connection._session back into the Datastore."""
        if connection.model is not None:
            value = pickle.dumps(connection._session)
            if connection.model.session is None:
                session = GaeConnectionSession(key_name=connection.id, value=value)
                session.put()
                connection.model.session = session
                connection.model.put()
            else:
                connection.model.session.value = value
                connection.model.session.put()

    def getConnectionSessionAttr(self, connection, name):
        """Return a session attribute.

        Raises SessionAttrError when the attribute does not exist.
        """
        self.initSession(connection)
        try:
            return connection._session[name]
        except KeyError:
            raise SessionAttrError("Attribute '%s' not found." % name)

    def setConnectionSessionAttr(self, connection, name, val):
        """Set a session attribute and persist the session."""
        self.initSession(connection)
        connection._session[name] = val
        self.saveSession(connection)

    def delConnectionSessionAttr(self, connection, name):
        """Delete a session attribute if present and persist the session."""
        self.initSession(connection)
        try:
            del connection._session[name]
            self.saveSession(connection)
        except KeyError:
            pass
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/gae_connection_manager.py
|
gae_connection_manager.py
|
import uuid
import time
import cgi
import amfast
from amfast import class_def, remoting
from amfast.class_def.as_types import AsError
try:
# Use decode module if available.
# Users may be using PyAmf instead.
from amfast.decode import decode
except ImportError:
pass
class FlexMessageError(remoting.RemotingError):
    """Raised for Flex-message specific failures in this module."""
    pass
class FaultError(AsError):
    """Equivalent to mx.rpc.Fault.

    arguments to __init__:
    ============
     * message - string, fault description.
     * exc - Exception, the underlying error, if any.
     * detail - string, extra detail about the fault.
     * content - object, optional payload associated with the fault.
    """

    def __init__(self, message='', exc=None, detail='', content=None):
        AsError.__init__(self, message, exc)

        # Mirror the base attributes under the names Flex clients expect.
        self.faultCode = self.name
        self.faultString = self.message
        self.faultDetail = detail
        self.rootCause = exc
        self.content = content
# Register FaultError with the AMF class mapper under the Flex alias
# 'mx.rpc.Fault', listing the attributes to serialize.
# NOTE(review): the trailing True flag's meaning is defined by
# class_def.assign_attrs — confirm against amfast.class_def.
class_def.assign_attrs(FaultError, 'mx.rpc.Fault',
    ('errorId', 'name', 'message', 'faultCode',
    'faultString', 'faultDetail', 'rootCause', 'content'), True)
class AbstractMessage(object):
    """Base class for all FlexMessages."""

    # Standard Flex message header names.
    DESTINATION_CLIENT_ID_HEADER = 'DSDstClientId'
    ENDPOINT_HEADER = 'DSEndpoint'
    FLEX_CLIENT_ID_HEADER = 'DSId'
    PRIORITY_HEADER = 'DSPriority'
    REMOTE_CREDENTIALS_CHARSET_HEADER = 'DSRemoteCredentialsCharset'
    REMOTE_CREDENTIALS_HEADER = 'DSRemoteCredentials'
    REQUEST_TIMEOUT_HEADER = 'DSRequestTimeout'
    STATUS_CODE_HEADER = 'DSStatusCode'

    def __init__(self, body=None, clientId=None, destination=None,
        headers=None, timeToLive=None, timestamp=None, messageId=None):
        self.body = body
        self.clientId = clientId
        self.destination = destination
        self.timeToLive = timeToLive

        # The 'headers' attribute only exists when headers were supplied;
        # other methods probe for it with hasattr/getattr.
        if headers is not None:
            self.headers = headers

        if timestamp is None:
            timestamp = time.time() * 1000  # ms since epoch
        self.timestamp = timestamp

        if messageId is None:
            messageId = self._getId()
        self.messageId = messageId

    def invoke(self, packet, msg):
        """Invoke all message headers."""
        if amfast.log_debug:
            amfast.logger.debug("\nInvoking FlexMessage:\n%s" % self)

    def fail(self, packet, msg, exc):
        """Return an error message."""
        error_msg = ErrorMessage(exc=FaultError(exc=exc))
        self._matchAcknowledge(packet, msg, error_msg)
        return error_msg

    def convertFail(self, exc):
        """Convert this message to an error."""
        fault = FaultError(exc=exc)
        return ErrorMessage(self, clientId=self.clientId, destination=self.destination,
            headers=getattr(self, 'headers', None), timeToLive=self.timeToLive,
            timestamp=self.timestamp, messageId=self.messageId,
            correlationId=self.correlationId, exc=fault)

    def getAcknowledgeClass(self):
        """Returns the correct class for the response message."""
        return AcknowledgeMessage

    def acknowledge(self, packet, msg):
        """Return a successful result message."""
        ack = self.getAcknowledgeClass()()
        self._matchAcknowledge(packet, msg, ack)
        return ack

    def _matchAcknowledge(self, packet, msg, response):
        """Syncs values between this message and its response acknowledgement."""
        response.correlationId = self.messageId

        if self.clientId is None:
            self.clientId = packet.channel.channel_set.connection_manager.generateId()
        response.clientId = self.clientId

    def _getId(self):
        """Get a messageId or clientId."""
        return str(uuid.uuid4())

    def __str__(self):
        header_str = ''
        if hasattr(self, 'headers') and self.headers is not None:
            header_str = '\n '.join(["<header name=\"%s\">%s</header>" % (key, val) for key, val in self.headers.iteritems()])

        attrs = {}
        for key, val in self.__dict__.iteritems():
            # body and headers are rendered in their own sections.
            if key in ('body', 'headers'):
                continue
            attrs[key] = val
        attrs_str = '\n '.join(["<attr name=\"%s\">%s</attr>" % (key, val) for key, val in attrs.iteritems()])

        # Renamed from 'str' to avoid shadowing the builtin.
        msg_str = """
<FlexMessage: %s>
 <headers>
  %s
 </headers>
 <body>
  %s
 </body>
 <attributes>
  %s
 </attributes>
</FlexMessage>
""" % (self.__class__.__name__, header_str, self.body, attrs_str)
        return msg_str
# Map AbstractMessage to its ActionScript alias with the encodable attributes.
class_def.assign_attrs(AbstractMessage, 'flex.messaging.messages.AbstractMessage',
    ('body', 'clientId', 'destination', 'headers',
    'messageId', 'timestamp', 'timeToLive'), True)
class AbstractSmallMsgDef(class_def.ExternClassDef):
    """Encodes and decodes messages using ISmallMessage.

    ISmallMessages use a more compact representation
    of mx.messaging.messages.
    """
    # High bit of a flag byte: another flag byte follows it.
    HAS_NEXT_FLAG = 0x80
    # Bits of the first flag byte: which attributes were encoded.
    BODY_FLAG = 0x01
    CLIENT_ID_FLAG = 0x02
    DESTINATION_FLAG = 0x04
    HEADERS_FLAG = 0x08
    MESSAGE_ID_FLAG = 0x10
    TIMESTAMP_FLAG = 0x20
    TIME_TO_LIVE_FLAG = 0x40
    # Bits of the second flag byte: ids encoded as packed 128-bit byte arrays.
    CLIENT_ID_BYTES_FLAG = 0x01
    MESSAGE_ID_BYTES_FLAG = 0x02
    # Character codes for '0'-'9' and 'A'-'F', indexed by a 4-bit nibble value.
    ALPHA_CHAR_CODES = (48, 49, 50, 51, 52, 53, 54,
        55, 56, 57, 65, 66, 67, 68, 69, 70)

    def _readUid(self, bytes):
        """Decode a 128bit byte array into a 36 char string representing an UID."""
        if bytes is None:
            return None
        if hasattr(bytes, 'bytes'):
            # amfast.class_def.as_types.ByteArray object
            byte_str = bytes.bytes
        else:
            # Other type
            byte_str = str(bytes)
        if len(byte_str) != 16:
            # Not a 128-bit value; callers treat None as "no uid present".
            return None
        # 32 hex chars + 4 hyphens = 36 output characters.
        uid_chars = [None] * 36
        idx = 0
        for i, byte in enumerate(byte_str):
            if i == 4 or i == 6 or i == 8 or i == 10:
                # hyphen
                uid_chars[idx] = 45
                idx += 1
            # Emit two hex characters per byte: high nibble, then low nibble.
            char_code = ord(byte)
            uid_chars[idx] = self.ALPHA_CHAR_CODES[(char_code & 0xF0) >> 4]
            idx += 1
            uid_chars[idx] = self.ALPHA_CHAR_CODES[(char_code & 0x0F)]
            idx += 1
        return ''.join([chr(byte) for byte in uid_chars])

    def _readFlags(self, context):
        """Reads flags.

        Consumes flag bytes from context until one without HAS_NEXT_FLAG
        set is seen; returns the list of flag byte values.
        """
        flags = []
        flag = self.HAS_NEXT_FLAG
        while (flag & self.HAS_NEXT_FLAG):
            flag = ord(context.read(1))
            flags.append(flag)
        return flags

    def readExternal(self, obj, context):
        """Populate obj's AbstractMessage attributes from the compact stream.

        Flag byte 0 selects which plain attributes follow; flag byte 1
        selects packed 128-bit forms of clientId/messageId, which override
        the plain values when present.
        """
        flags = self._readFlags(context)
        for i, flag in enumerate(flags):
            if i == 0:
                # Attributes are encoded in this fixed order when present.
                if flag & self.BODY_FLAG:
                    obj.body = decode(context)
                else:
                    obj.body = None
                if flag & self.CLIENT_ID_FLAG:
                    obj.clientId = decode(context)
                else:
                    obj.clientId = None
                if flag & self.DESTINATION_FLAG:
                    obj.destination = decode(context)
                else:
                    obj.destination = None
                if flag & self.HEADERS_FLAG:
                    obj.headers = decode(context)
                else:
                    obj.headers = None
                if flag & self.MESSAGE_ID_FLAG:
                    obj.messageId = decode(context)
                else:
                    obj.messageId = None
                if flag & self.TIMESTAMP_FLAG:
                    obj.timestamp = decode(context)
                else:
                    obj.timestamp = None
                if flag & self.TIME_TO_LIVE_FLAG:
                    obj.timeToLive = decode(context)
                else:
                    obj.timeToLive = None
            if i == 1:
                if flag & self.CLIENT_ID_BYTES_FLAG:
                    clientIdBytes = decode(context)
                    obj.clientId = self._readUid(clientIdBytes)
                else:
                    if not hasattr(obj, 'clientId'):
                        obj.clientId = None
                if flag & self.MESSAGE_ID_BYTES_FLAG:
                    messageIdBytes = decode(context)
                    obj.messageId = self._readUid(messageIdBytes)
                else:
                    if not hasattr(obj, 'messageId'):
                        obj.messageId = None
class RemotingMessage(AbstractMessage):
    """An RPC request aimed at a remoting destination/operation pair."""

    def __init__(self, body=None, clientId=None, destination=None,
            headers=None, timeToLive=None, timestamp=None, messageId=None,
            operation=None, source=None):
        AbstractMessage.__init__(self, body=body, clientId=clientId,
            destination=destination, headers=headers, timeToLive=timeToLive,
            timestamp=timestamp, messageId=messageId)
        self.operation = operation
        self.source = source

    def invoke(self, packet, msg):
        AbstractMessage.invoke(self, packet, msg)
        # Attach the connection object, so it is accessible in the target.
        self.connection = packet.channel.getFlexConnection(self)
        service_mapper = packet.channel.channel_set.service_mapper
        target = service_mapper.getTarget(self.destination, self.operation)
        if target is None:
            full_op_name = remoting.Service.SEPARATOR.join((self.destination, self.operation))
            raise FlexMessageError("Operation '%s' not found." % full_op_name)
        # Secure targets require an authenticated connection.
        if target.secure is True and self.connection.authenticated is False:
            from amfast.remoting.channel import SecurityError
            raise SecurityError("Operation requires authentication.")
        msg.response_msg.body.body = target.invoke(packet, msg, self.body)
# Map RemotingMessage to its ActionScript alias with the encodable attributes.
class_def.assign_attrs(RemotingMessage, 'flex.messaging.messages.RemotingMessage',
    ('body', 'clientId', 'destination', 'headers',
    'messageId', 'timestamp', 'timeToLive', 'source', 'operation'), True)
class AsyncMessage(AbstractMessage):
    """A message published to a messaging destination (topic)."""
    SUBTOPIC_HEADER = 'DSSubtopic'

    def __init__(self, body=None, clientId=None, destination=None,
            headers=None, timeToLive=None, timestamp=None, messageId=None,
            correlationId=None):
        AbstractMessage.__init__(self, body=body, clientId=clientId,
            destination=destination, headers=headers, timeToLive=timeToLive,
            timestamp=timestamp, messageId=messageId)
        # Only attach the attribute when a value was supplied; other code
        # detects its presence with hasattr().
        if correlationId is not None:
            self.correlationId = correlationId

    def invoke(self, packet, msg):
        """Publish this message."""
        AbstractMessage.invoke(self, packet, msg)
        flex_channel = packet.channel
        flex_channel_set = flex_channel.channel_set
        self.connection = flex_channel.getFlexConnection(self)
        # Secure subscription managers require an authenticated connection.
        if flex_channel_set.subscription_manager.secure is True and self.connection.authenticated is False:
            from amfast.remoting.channel import SecurityError
            raise SecurityError("Operation requires authentication.")
        flex_channel_set.publishMessage(self)
# Map AsyncMessage to its ActionScript alias with the encodable attributes.
class_def.assign_attrs(AsyncMessage, 'flex.messaging.messages.AsyncMessage',
    ('body', 'clientId', 'destination', 'headers',
    'messageId', 'timestamp', 'timeToLive', 'correlationId'), True)
class AsyncSmallMsgDef(AbstractSmallMsgDef):
    """Decodes messages that were encoded using ISmallMessage."""
    # Bits of the extra flag byte read after the AbstractMessage fields.
    CORRELATION_ID_FLAG = 0x01
    CORRELATION_ID_BYTES_FLAG = 0x02

    def readExternal(self, obj, context):
        """Read the AbstractMessage fields, then the correlationId."""
        AbstractSmallMsgDef.readExternal(self, obj, context)
        flags = self._readFlags(context)
        for i, flag in enumerate(flags):
            if i == 0:
                if flag & self.CORRELATION_ID_FLAG:
                    obj.correlationId = decode(context)
                else:
                    obj.correlationId = None
                # A packed 128-bit id, when present, overrides the plain one.
                if flag & self.CORRELATION_ID_BYTES_FLAG:
                    correlationIdBytes = decode(context)
                    obj.correlationId = self._readUid(correlationIdBytes)
                else:
                    if not hasattr(obj, 'correlationId'):
                        obj.correlationId = None
class CommandMessage(AsyncMessage):
    """A Flex CommandMessage. Operations are integers instead of strings.

    See Flex API docs for list of possible commands.
    """
    SUBSCRIBE_OPERATION = 0
    UNSUBSCRIBE_OPERATION = 1
    POLL_OPERATION = 2
    CLIENT_SYNC_OPERATION = 4
    CLIENT_PING_OPERATION = 5
    CLUSTER_REQUEST_OPERATION = 7
    LOGIN_OPERATION = 8
    LOGOUT_OPERATION = 9
    SUBSCRIPTION_INVALIDATE_OPERATION = 10
    MULTI_SUBSCRIBE_OPERATION = 11
    DISCONNECT_OPERATION = 12
    TRIGGER_CONNECT_OPERATION = 13
    ADD_SUBSCRIPTIONS = 'DSAddSub'
    CREDENTIALS_CHARSET_HEADER = 'DSCredentialsCharset'
    MAX_FREQUENCY_HEADER = 'DSMaxFrequency'
    MESSAGING_VERSION = 'DSMessagingVersion'
    NEEDS_CONFIG_HEADER = 'DSNeedsConfig'
    NO_OP_POLL_HEADER = 'DSNoOpPoll'
    POLL_WAIT_HEADER = 'DSPollWait'
    PRESERVE_DURABLE_HEADER = 'DSPreserveDurable'
    REMOVE_SUBSCRIPTIONS = 'DSRemSub'
    SELECTOR_HEADER = 'DSSelector'
    SUBTOPIC_SEPARATOR = '_;_'

    def __init__(self, body=None, clientId=None, destination=None,
            headers=None, timeToLive=None, timestamp=None, messageId=None,
            correlationId=None, operation=1000):
        """operation defaults to 1000, which is not one of the listed command ids."""
        AsyncMessage.__init__(self, body=body, clientId=clientId,
            destination=destination, headers=headers, timeToLive=timeToLive,
            timestamp=timestamp, messageId=messageId, correlationId=correlationId)
        self.operation = operation

    def invoke(self, packet, msg):
        """Dispatch this command to the mapped command-service target."""
        # Deliberately calls AbstractMessage.invoke (not AsyncMessage.invoke):
        # commands are dispatched, not published.
        AbstractMessage.invoke(self, packet, msg)
        self.connection = packet.channel.getFlexConnection(self)
        command_service = packet.channel.channel_set.service_mapper.command_service
        target = command_service.getTarget(self.operation)
        if target is None:
            raise FlexMessageError("Command '%s' not found." % self.operation)
        msg.response_msg.body.body = target.invoke(packet, msg, (self.body,))

    def getAcknowledgeClass(self):
        """Returns the correct class for the response message."""
        if self.operation != self.POLL_OPERATION:
            return AsyncMessage.getAcknowledgeClass(self)
        return CommandMessage
# Map CommandMessage to its ActionScript alias with the encodable attributes.
class_def.assign_attrs(CommandMessage, 'flex.messaging.messages.CommandMessage',
    ('body', 'clientId', 'destination', 'headers',
    'messageId', 'timestamp', 'timeToLive', 'correlationId',
    'operation'), True)
class CommandSmallMsgDef(AsyncSmallMsgDef):
    """Decodes messages that were encoded using ISmallMessage."""
    # Bit of the extra flag byte: an operation value follows.
    OPERATION_FLAG = 0x01

    def readExternal(self, obj, context):
        """Read the AsyncMessage fields, then the CommandMessage operation."""
        AsyncSmallMsgDef.readExternal(self, obj, context)
        flags = self._readFlags(context)
        for i, flag in enumerate(flags):
            if i == 0:
                if flag & self.OPERATION_FLAG:
                    obj.operation = decode(context)
                else:
                    obj.operation = None
class AcknowledgeMessage(AsyncMessage):
    """A response message sent back to the client."""
    # Header used to carry a hint about an error, per the Flex header naming.
    ERROR_HINT_HEADER = 'DSErrorHint'
# Map AcknowledgeMessage to its ActionScript alias with the encodable attributes.
class_def.assign_attrs(AcknowledgeMessage, 'flex.messaging.messages.AcknowledgeMessage',
    ('body', 'clientId', 'destination', 'headers',
    'messageId', 'timestamp', 'timeToLive', 'correlationId'), True)
class ErrorMessage(AcknowledgeMessage):
    """A response message sent back to the client after a failure."""

    def __init__(self, body=None, clientId=None, destination=None,
            headers=None, timeToLive=None, timestamp=None, messageId=None,
            correlationId=None, exc=None, faultCode='', faultString='',
            faultDetail='', rootCause=None, extendedData=None):
        """exc must be a FaultError or None."""
        AcknowledgeMessage.__init__(self, body=body, clientId=clientId,
            destination=destination, headers=headers, timeToLive=timeToLive,
            timestamp=timestamp, messageId=messageId, correlationId=correlationId)
        if exc is None:
            # No exception object: use the individually supplied fault fields.
            self.faultCode = faultCode
            self.faultString = faultString
            self.faultDetail = faultDetail
            self.rootCause = rootCause
            self.extendedData = extendedData
        else:
            # Copy the fault fields off the FaultError and ship it as the body.
            self.faultCode = exc.faultCode
            self.faultString = exc.faultString
            self.faultDetail = exc.faultDetail
            self.rootCause = exc.rootCause
            self.extendedData = exc.content
            self.body = exc
# Map ErrorMessage to its ActionScript alias with the encodable attributes.
class_def.assign_attrs(ErrorMessage, 'flex.messaging.messages.ErrorMessage',
    ('body', 'clientId', 'destination', 'headers',
    'messageId', 'timestamp', 'timeToLive', 'correlationId', 'faultCode',
    'faultString', 'faultDetail', 'rootCause', 'extendedData'), True)
class StreamingMessage(CommandMessage):
    """A command message delivered over an HTTP streaming channel."""
    # streaming command key
    COMMAND_PARAM_NAME = 'command'
    # open a streaming connection
    OPEN_COMMAND = 'open'
    # close streaming connection
    CLOSE_COMMAND = 'close'
    # stream id
    ID_PARAM_NAME = 'streamId'
    # stream version
    VERSION_PARAM_NAME = 'version'
    # Bytes for encoding message
    CR_BYTE = 13
    LF_BYTE = 10
    NULL_BYTE = 0

    @classmethod
    def getDisconnectMsg(cls):
        """Return a CommandMessage carrying the DISCONNECT operation."""
        msg = CommandMessage()
        msg.operation = CommandMessage.DISCONNECT_OPERATION
        return msg

    @classmethod
    def prepareMsg(cls, msg, endpoint):
        """Encode msg via the endpoint (AMF3) and frame it for streaming."""
        return cls.getMsgBytes(endpoint.encode(msg, amf3=True))

    @classmethod
    def getMsgBytes(cls, raw):
        """Add size information to raw AMF encoding for streaming."""
        byte_size = len(raw)
        hex_len = '%x' % byte_size # Turn length into a string of hex digits
        return ''.join((hex_len, chr(cls.CR_BYTE), chr(cls.LF_BYTE), raw)) # CR_BYTE marks end of size declaration, LF_BYTE marks beginning of data section.

    def parseArgs(self, args):
        """Set the client-id header and operation from parsed query args.

        args -- presumably a dict of lists as produced by cgi.parse_qs
        (each value is indexed with [0]) -- confirm against callers.
        """
        if not hasattr(self, 'headers') or self.headers is None:
            self.headers = {}
        if self.FLEX_CLIENT_ID_HEADER in args:
            self.headers[self.FLEX_CLIENT_ID_HEADER] = args[self.FLEX_CLIENT_ID_HEADER][0]
        if self.COMMAND_PARAM_NAME in args:
            self.operation = args[self.COMMAND_PARAM_NAME][0]

    def parseParams(self, url_params):
        """Parses and sets attributes from URL parameters."""
        params = cgi.parse_qs(url_params, True)
        # NOTE(review): raises TypeError when COMMAND_PARAM_NAME is absent
        # (params.get returns None); confirm callers guarantee its presence.
        self.operation = params.get(self.COMMAND_PARAM_NAME)[0]

    def parseBody(self, body):
        """Extract the Flex client-id header from a urlencoded request body."""
        if not hasattr(self, 'headers') or self.headers is None:
            self.headers = {}
        params = cgi.parse_qsl(body, True)
        for param in params:
            if param[0] == self.FLEX_CLIENT_ID_HEADER:
                self.headers[self.FLEX_CLIENT_ID_HEADER] = param[1]

    def acknowledge(self, *args, **kwargs):
        """Return a successful result message."""
        class_ = self.getAcknowledgeClass()
        response = class_()
        self._matchAcknowledge(response)
        return response

    def _matchAcknowledge(self, response, *args, **kwargs):
        """Syncs values between this message and its response acknowledgement."""
        # Streaming responses correlate on the operation, not the messageId.
        response.correlationId = self.operation
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/flex_messages.py
|
flex_messages.py
|
import time
import cPickle as pickle
import sqlalchemy as sa
from sqlalchemy.sql import func, and_
# Pick the binary column type for the installed SQLAlchemy:
# the 0.5 series spells it sa.Binary; later versions use sa.LargeBinary.
if sa.__version__.startswith('0.5'):
    # 0.5 is lowest supported version
    BINARY_TYPE = sa.Binary
else:
    BINARY_TYPE = sa.LargeBinary
from subscription_manager import Subscription, SubscriptionManager
import flex_messages as messaging
class SaSubscriptionManager(SubscriptionManager):
    """Manages subscriptions in a database, uses SqlAlchemy to talk to the DB."""

    def __init__(self, engine, metadata, secure=False, ttl=30000, table_prefix=''):
        """
        arguments
        ==========
         * engine - SQLAlchemy engine used for all DB access.
         * metadata - SQLAlchemy MetaData the tables are attached to.
         * secure - bool, passed through to SubscriptionManager.
         * ttl - message time-to-live, passed through to SubscriptionManager.
         * table_prefix - string, optional prefix for both table names.
        """
        SubscriptionManager.__init__(self, secure=secure, ttl=ttl)
        self.engine = engine
        self.metadata = metadata
        # Normalize a non-empty prefix so it ends with exactly one '_'
        # (an empty prefix is kept as-is).
        self.table_prefix = table_prefix and "%s_" % table_prefix.rstrip('_') or table_prefix
        self.mapTables()

    def reset(self):
        """Delete every row from both tables."""
        db = self.getDb()
        db.execute(self.subscriptions.delete())
        db.execute(self.messages.delete())
        db.close()

    def mapTables(self):
        """Define the subscriptions and messages tables on self.metadata."""
        self.subscriptions = sa.Table('%ssubscriptions' % self.table_prefix, self.metadata,
            sa.Column('connection_id', sa.String(36), primary_key=True),
            sa.Column('client_id', sa.String(36), primary_key=True),
            sa.Column('topic', sa.String(128), primary_key=True)
        )
        self.messages = sa.Table('%smessages' % self.table_prefix, self.metadata,
            sa.Column('id', sa.Integer, primary_key=True),
            sa.Column('topic', sa.String(256), index=True),
            sa.Column('clientId', sa.String(128), nullable=True),
            sa.Column('messageId', sa.String(128), nullable=True),
            sa.Column('correlationId', sa.String(128), nullable=True),
            sa.Column('destination', sa.String(128), nullable=True),
            sa.Column('timestamp', sa.Float(), nullable=True),
            sa.Column('timeToLive', sa.Float(), nullable=True),
            # headers and body are stored pickled (see persistMessage).
            sa.Column('headers', BINARY_TYPE(), nullable=True),
            sa.Column('body', BINARY_TYPE(), nullable=False)
        )

    def createTables(self):
        """Create both tables in the DB if they do not already exist."""
        db = self.getDb()
        self.subscriptions.create(db, checkfirst=True)
        self.messages.create(db, checkfirst=True)
        db.close()

    def getDb(self):
        """Return a new connection from the engine; callers must close() it."""
        return self.engine.connect()

    def subscribe(self, connection_id, client_id, topic, sub_topic=None, selector=None):
        """Subscribe a client to a topic.

        arguments
        ==========
         * connection_id - string, id of Flash client that is subscribing.
         * client_id - string, id of messaging client that is subscribing.
         * topic - string, Topic to subscribe to.
         * sub_topic - string, Sub-Topic to subscribe to. Default = None.
        """
        # getTopicKey combines topic and sub_topic into the stored key.
        topic = self.getTopicKey(topic, sub_topic)
        ins = self.subscriptions.insert().values(
            connection_id=connection_id,
            client_id=client_id,
            topic=topic
        )
        db = self.getDb()
        db.execute(ins)
        db.close()

    def unSubscribe(self, connection_id, client_id, topic, sub_topic=None):
        """Un-Subscribe a client from a topic.

        arguments
        ==========
         * connection_id - string, id of Flash client that is subscribing.
         * client_id - string, id of messaging client that is subscribing.
         * topic - string, Topic to un-subscribe from.
         * sub_topic - string, Sub-Topic to un-subscribe from. Default = None.
        """
        topic = self.getTopicKey(topic, sub_topic)
        d = self.subscriptions.delete().\
            where(and_(self.subscriptions.c.connection_id==connection_id,
                self.subscriptions.c.client_id==client_id,
                self.subscriptions.c.topic==topic))
        db = self.getDb()
        db.execute(d)
        db.close()

    def deleteConnection(self, connection):
        """Remove all subscriptions for this connection.

        arguments
        ==========
         * connection - object with an 'id' attribute identifying the
           Flash client whose subscriptions are removed.
        """
        d = self.subscriptions.delete().\
            where(self.subscriptions.c.connection_id==connection.id)
        db = self.getDb()
        db.execute(d)
        db.close()

    def iterSubscribers(self, topic, sub_topic=None):
        """Iterate through Flash client ids subscribed to a specific topic."""
        topic = self.getTopicKey(topic, sub_topic)
        s = sa.select([self.subscriptions.c.connection_id],
            self.subscriptions.c.topic==topic, distinct=True)
        db = self.getDb()
        results = db.execute(s)
        # NOTE(review): generator body -- db.close() is never called here;
        # the connection is released only when garbage collected. Confirm
        # whether that is acceptable for the configured engine/pool.
        for row in results:
            yield row[self.subscriptions.c.connection_id]

    def iterConnectionSubscriptions(self, connection):
        """Iterate through all Subscriptions that belong to a specific connection."""
        s = sa.select([self.subscriptions.c.connection_id,
            self.subscriptions.c.client_id, self.subscriptions.c.topic],
            self.subscriptions.c.connection_id==connection.id)
        db = self.getDb()
        results = db.execute(s)
        # NOTE(review): same connection-lifetime caveat as iterSubscribers.
        for row in results:
            yield Subscription(row[self.subscriptions.c.connection_id],
                row[self.subscriptions.c.client_id], row[self.subscriptions.c.topic])

    def persistMessage(self, msg):
        """Store a message."""
        # headers may be entirely absent on the message; store NULL then.
        if hasattr(msg, 'headers') and (msg.headers is not None):
            enc_headers = pickle.dumps(msg.headers)
        else:
            enc_headers = None
        if hasattr(msg, 'correlationId'):
            correlation_id = msg.correlationId
        else:
            correlation_id = None
        ins = self.messages.insert().values(
            topic=self.getMessageTopicKey(msg),
            clientId=msg.clientId,
            messageId=msg.messageId,
            correlationId=correlation_id,
            destination=msg.destination,
            timestamp=msg.timestamp,
            timeToLive=msg.timeToLive,
            headers=enc_headers,
            # body is pickled even when headers are absent.
            body=pickle.dumps(msg.body)
        )
        db = self.getDb()
        db.execute(ins)
        db.close()

    def deleteExpiredMessages(self, cutoff_time):
        """Deletes expired messages.

        A message is expired once timestamp + timeToLive falls below
        cutoff_time.
        """
        d = self.messages.delete().\
            where(self.messages.c.timestamp + self.messages.c.timeToLive < cutoff_time)
        db = self.getDb()
        db.execute(d)
        db.close()

    def pollMessages(self, topic, cutoff_time, current_time):
        """Retrieves all queued messages, and discards expired messages.

        arguments:
        ===========
         * topic - string, Topic to find messages for.
         * cutoff_time - float, epoch time, only messages published
           after this time will be returned.
         * current_time - float, epoch time, used to determine if a
           message is expired.
        """
        # Poll for new messages
        s = sa.select((self.messages,),
            and_(self.messages.c.topic == topic,
                self.messages.c.timestamp > cutoff_time)).\
            order_by(self.messages.c.timestamp)
        db = self.getDb()
        results = db.execute(s)
        # NOTE(review): this is a generator, so db.close() below runs only
        # if the caller exhausts it. Also, despite the docstring, nothing
        # here discards expired messages and current_time is unused --
        # confirm intent (expiry may be handled by deleteExpiredMessages).
        for row in results:
            if row['headers'] is None:
                headers = None
            else:
                headers = pickle.loads(str(row['headers']))
            yield messaging.AsyncMessage(body=pickle.loads(str(row['body'])),
                clientId=row['clientId'], destination=row['destination'],
                headers=headers, timeToLive=row['timeToLive'],
                timestamp=row['timestamp'], messageId=row['messageId'])
        db.close()
|
AmFast
|
/AmFast-0.5.3-r541.tar.gz/AmFast-0.5.3-r541/amfast/remoting/sa_subscription_manager.py
|
sa_subscription_manager.py
|
import pandas as pd
import numpy as np
import os
#Raise
class ParaError(Exception):
    """Raised when the parameter workbook (sales_est_para.xlsx) cannot be found."""
    pass
class CateError(Exception):
    """Raised when the requested (country, category) pair has no parameter row."""
    pass
#Init
class AmaSalesEstimator(object):
    """Amazon estimated-sales calculator (version 1.0.3).

    Estimates the current daily or monthly sales of a product from its
    best-seller rank (BSR) in most of Amazon's top-level categories.

    Attributes:
        df_temp_para: parameter rows from the most recent lookup (None until
            the first successful lookup); cached to skip re-filtering the
            parameter table for repeated queries.
        sales: the computed sales estimate.

    Usage:
        AmaSalesEstimator(<paradata_path>, 'us', 'Home & Kitchen', 95623).sales
            Estimated daily sales for BSR 95623 in the us 'Home & Kitchen'
            top-level category.
        AmaSalesEstimator(<paradata_path>, 'us', 'Home & Kitchen', 221563, True).sales
            Estimated monthly sales.
        AmaSalesEstimator(<paradata_path>, 'us', 'Home & Kitchen', 5120, daily_float=True).sales
            Daily sales with two decimal places (ignored when monthly=True).
    """
    df_temp_para = None
    sales = None

    @staticmethod
    def _curve(bsr, a, b, c, d):
        """Evaluate the fitted cubic in log2 space; return estimated daily sales.

        The parameter table stores coefficients of
        f(x) = a*x**3 + b*x**2 + c*x + d with x = log2(bsr); the estimated
        daily sales figure is 2**f(x).
        """
        x = np.log2(bsr)
        fx = a * x ** 3 + b * x ** 2 + c * x + d
        return 2 ** fx

    def __init__(self, paradata_path, cy, cate, bsr, monthly=False, daily_float=False):
        """Look up the parameter row for (cy, cate) and compute self.sales.

        Args:
            paradata_path: path of the parameter workbook (sales_est_para.xlsx).
            cy: two-letter country/region code (case-insensitive).
            cate: top-level category name, exactly as shown on Amazon.
            bsr: best-seller rank within the top-level category (int).
            monthly: if True, report monthly sales as a rounded int
                (recommended, since the underlying daily figure is a float).
            daily_float: if True (and monthly is False), report daily sales
                with two decimal places.

        Raises:
            ParaError: the parameter workbook does not exist.
            CateError: no row for (cy, cate) exists in the parameter table.
        """
        cy = cy.lower()
        cate = cate.strip()
        bsr = int(bsr)
        # Load the parameter table.
        if os.path.isfile(paradata_path):
            df_para = pd.read_excel(paradata_path)
        else:
            raise ParaError('没有找到计算参数表sales_est_para.xlsx')
        # Reuse the cached rows when they match this (cy, cate) query.
        # Fixes vs. the original: compare with `is None` (== on a DataFrame
        # is element-wise and raises on truth-testing), bind the local name
        # in the cached branch (previously a NameError), and match on the
        # category as well as the country.
        df_temp_para = self.df_temp_para
        if (df_temp_para is None
                or df_temp_para['cy'].iloc[0] != cy
                or df_temp_para['cate'].iloc[0] != cate):
            df_temp_para = df_para[(df_para['cy'] == cy) & (df_para['cate'] == cate)]
            if df_temp_para['cy'].count() == 0:
                raise CateError('没有在参数表内找到对应类目的参数,请检查类目名称或更新参数表')
            self.df_temp_para = df_temp_para
        a, b, c, d = df_temp_para[['a', 'b', 'c', 'd']].iloc[0].tolist()
        # Preliminary daily-sales estimate from the fitted curve.
        y = self._curve(bsr, a, b, c, d)
        # Past the rank where observed sales reached zero ('even' column),
        # clamp to zero so the fitted polynomial cannot curl back upward.
        if bsr >= df_temp_para['even'].iloc[0]:
            y = 0
        # Estimates below 0.8/day span too wide a BSR range to be
        # meaningful, so treat them as zero.
        if y < 0.8:
            y = 0
        # Output formatting: monthly wins over daily_float.
        if monthly:
            y = int(round(y * 30, 0))
        elif daily_float:
            y = round(y, 2)
        else:
            y = int(round(y, 0))
        self.sales = y
|
AmaSalesEstimator
|
/AmaSalesEstimator-1.0.3-py3-none-any.whl/AmaSalesEstimator.py
|
AmaSalesEstimator.py
|
import sys
import warnings
#Get the current (pre-script) version setting
try:
    # Run lib/version.py (Python 2 execfile) and capture its globals to
    # recover the previously written version tuple.
    genv = {}
    execfile('lib/version.py', genv)
    PREVERSION = genv['version_info']
except IOError:
    warnings.warn('The lib/version.py file appears to be missing or inaccessible. Please be sure you are running from the Amara source root')
    PREVERSION = None
except KeyError:
    # version.py exists but does not define version_info.
    PREVERSION = None
#PREVERSION = genv['__version__']
#Can't use Advanced formatting because that's Python 2.6+ http://www.python.org/dev/peps/pep-3101/
#VERSION_TEMPLATE = "__version__ = '{0},{1},{2}'"
VERSION_TEMPLATE = "version_info = ('%s', '%s', '%s')\n"
GIT_DESCRIBE = 'git describe --match "v[0-9]*" HEAD'
def tuple_from_git_tag():
    """Derive a version tuple from `git describe` on the latest v* tag.

    Runs the GIT_DESCRIBE command, strips the leading 'v' from the output,
    and splits the remainder on '.'.
    """
    import os
    from subprocess import Popen, CalledProcessError, PIPE
    proc = Popen([GIT_DESCRIBE], stdout=PIPE, shell=True)
    raw_tag = proc.communicate()[0].strip()
    version_str = raw_tag[1:]  # drop the leading 'v'
    return tuple(version_str.split('.'))
def set_git_version(baseversiontuple=None):
    """Write the version tuple into lib/version.py when it changed.

    baseversiontuple -- version tuple to record; when falsy it is derived
    from the latest git tag via tuple_from_git_tag(). Compares against the
    module-level PREVERSION and only rewrites the file on change.
    """
    if not baseversiontuple:
        baseversiontuple = tuple_from_git_tag()
    if PREVERSION != baseversiontuple:
        vfile = open('lib/version.py', 'w')
        vfile.write(VERSION_TEMPLATE%baseversiontuple)
        # Python 2 print-to-stderr syntax; QUIET_FLAG is set by the
        # command-line handling at module level.
        if not QUIET_FLAG: print >> sys.stderr, 'Version number changed to', '.'.join(baseversiontuple)
        if not QUIET_FLAG: print >> sys.stderr, 'Please commit and move the corresponding version VCS tag if necessary'
    return
# Command-line handling: optional '-q' (quiet) flag, optional explicit
# dotted version string as the first remaining argument.
cmdargs = sys.argv[1:]
QUIET_FLAG = False
if '-q' in cmdargs:
    QUIET_FLAG = True
    cmdargs.remove('-q')
# Python 2 print-to-stderr syntax.
if not QUIET_FLAG: print >> sys.stderr, 'Previous version number', '.'.join(PREVERSION)
version = tuple(cmdargs[0].split('.')) if len(cmdargs) > 0 else None
set_git_version(version)
#General, useful info:
NOTES = '''
Short hash of current commit:
git log -1 --format=%h
or
git log -1 --format=%h HEAD
Search for tags matching a pattern
git tag -l -n "2.*"
Like above, but just get the current:
git describe --match "2.[0-9]*"
or
git describe --match "2.[0-9]*" HEAD
Another thing to look at is simplified branching for bugfix branches:
Start with a release: 3.0.8. Then, after that release, do this:
git branch bugfixes308
This will create a branch for bugfixes. Checkout the branch:
git checkout bugfixes308
Now make any bugfix changes you want.
git commit -a
Commit them, and switch back to the master branch:
git checkout master
Then pull in those changes from the other branch:
git merge bugfixes308
That way, you have a separate release-specific bugfix branch, but you're still pulling the bugfix changes into your main dev trunk.
'''
#----- No longer used stuff follows -----
#try:
# hgversionstamp()
#except (KeyboardInterrupt, SystemExit):
# raise
#except Exception, e:
##except Exception as e: #Python 2.6+ only
# print >> sys.stderr, 'Error trying to tag with HG revision:', repr(e)
# pass
#
def hgversionstamp():
    """Record the current Mercurial revision id in lib/__hgid__.py.

    Legacy helper (no longer called -- see the commented-out block above):
    runs 'hg -q id' and writes the result as HGID into lib/__hgid__.py.
    """
    #Note: check_call is Python 2.6 or later
    #python -c "from subprocess import *; check_call(['hg -q id'], shell=True)"
    import os
    from subprocess import Popen, CalledProcessError, PIPE
    #raise RuntimeError('Foo Bar') #Just to test handling failure
    #check_call(['hg -q id'], shell=True)
    p = Popen(['hg -q id'], stdout=PIPE, shell=True)
    hgid = p.communicate()[0].strip()
    hgid_file = os.path.join('lib', '__hgid__.py')
    open(hgid_file, 'w').write('HGID = \'%s\'\n'%hgid)
    # Python 2 print-to-stderr syntax.
    print >> sys.stderr, 'Setup run from a Mercurial repository, so putting the version ID,', hgid, ', in ', hgid_file
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/setversion.py
|
setversion.py
|
.. This document is in reST, and used with Sphinx. For a reST primer see http://sphinx.pocoo.org/rest.html
****************************
Amara Manual, version 2.0a3
****************************
:Author: Uche Ogbuji
:Release: |release|
:Date: |today|
`Amara 2.0 <http://wiki.xml3k.org/Amara2>`_ is the core XML processing library for `Akara <http://wiki.xml3k.org/Akara>`_.
Core node API
=============
Amara has at its core ``amara.tree``, a simple node API that makes available the information in an XML document. It is similar to DOM (or the W3C InfoSet)[#CNA1]_, but easier to use, more expressive and more in line with Python conventions. The main quirk with the core node API is that node methods are prepended with ``xml_`` in order to support the bindery subclass which exposes XML constructs as Python attributes.
Amara also provides a couple of tree APIs derived from the core: Bindery and DOM. Everything in this section applies to those, as well.
Reading XML documents
---------------------
Parse to create a tree object as follows::
from amara import *
doc = parse(source) #source can be file, URI, stream or string
You can specialize the parse using flags::
doc = parse(source, validate=True) #Enable DTD validation
The flags are (mutually exclusive at present):
* ``validate`` - include external entities & perform DTD validation
* ``standalone`` - no parsed external entities (or external DTD subset)
You can specialize the generated node classes as follows:
.. #!code python
  from amara import tree

  class myattribute(tree.attribute):
      #Specialize any aspects of attribute here
      pass

  class myelement(tree.element):
      xml_attribute_factory = myattribute #factory callable for attributes. If omitted defaults to amara.tree.attribute
      #Specialize any other aspects of element here

  class myentity(tree.entity):
      #If you don't specify a factory for any node type it defaults to the tree.x class
      xml_element_factory = myelement #factory callable for elements
      #xml_comment_factory = tree.comment
      #xml_processing_instruction_factory = tree.processing_instruction
      #xml_text_factory = tree.text

  doc = parse(source, entity_factory=myentity)
``myentity`` is a subclass of entity with class properties that override factory behavior.
Creating nodes from scratch
---------------------------
You can construct core tree nodes as regular Python objects, using factory functions (note: Factory callables in derived classes **must** have matching signatures):
||'''base Amara tree type'''||'''factory callable signature'''||
||`tree.entity`||`entity(document_uri=None)`||
||`tree.element`||`element(ns, local)`||
||`tree.comment`||`comment(data)`||
||`tree.processing_instruction`||`processing_instruction(target, data)`||
||`tree.text`||`text(data)`||
||`tree.attribute`||`attribute(ns, local, value=u'')`||
.. If your callable is a class, then of course the `__init__` can have the usual `self`.
Use the factory callables to create documents, and other nodes::
from amara import tree
from amara import xml_print
doc = tree.entity()
doc.xml_append(tree.element(None, u'spam'))
xml_print(doc) #<?xml version="1.0" encoding="UTF-8"?>\n<spam/>
.. rubric:: Footnotes
.. [#CNA1] The data model defines Python-friendly conventions taken from XPath data model followed by XML Infoset followed by DOM Level 3.
Bindery API
=============
Input sources
=============
Amara makes it easy to manage data sources for XML and such with typical Web usage patterns in mind. The basic construct is an input source
inputsource
== Writing ==
'''Note: the methods defined in this section are not yet available for nodes. Use {{{python amara.xml_print(node)}}} until they are ready'''
Use the `xml_write()` method to re-serialize to XML to as stream (`sys.stdout` by default). Use the `xml_encode()` method to re-serialize to XML, returning string.
{{{#!code python
node.xml_write(amara.writer.html, indent=True) #Write out an XML document as pretty-printed HTML
node.xml_encode() #Return an XML string
node.xml_encode(amara.writer.html, indent=True) #Return an indented HTML string
}}}
There are special methods to look up a writer class from strings such as "xml" and "html"
{{{#!code python
from amara.writer import lookup
XML_W = lookup("xml")
HTML_W = lookup("html")
node.xml_write(XML_W) #Write out an XML document
node.xml_encode(HTML_W) #Return an HTML string
}}}
The default writer is the XML writer (i.e. `amara.writer.lookup("xml")`)
##There is a convenience `xml()` method, which handles the most common case where you just want to emit XML.
##
##{{{#!code python
##node.xml() #shortcut for node.xml_write(amara.writer.xml)
##node.xml('iso-8859-1') #shortcut for node.xml_write(amara.writer.xml, encoding='iso-8859-1')
##}}}
== Porting from domlette ==
Here are the brief details for how to port from 4Suite domlette API to Amara 2.x tree nodes.
=== All node types ===
Note: All properties discussed are mutable unless otherwise noted.
* Amara tree nodes don't have the equivalent of DOM ownerDocument.
* `node.nodeType` becomes `node.xml_type`
<!> `node.xml_type` is now a string rather than an integer.
{i} It is recommended to use `isinstance()` checks whenever possible instead of comparing `xml_type` values. Use of `xml_type` is best suited as a key for dispatch tables.
* There is no longer universal `node.nodeName`, `node.nodeValue`, `node.namespaceURI`, `node.localName`, `node.prefix` and `node.childNodes` (and all the other child-related members) on all node types. See each node type section below for more details.
* `node1.isSameNode(node2)` is now `node1 is node2`
* `node.xpath()` becomes `node.xml_select()`
{i} Note: the old Amara node.xml_xslt method is now available on all nodes as node.xml_transform
* `node.rootNode` becomes `node.xml_root` (which is almost equivalent to `node.xml_select(u"/")`)
<!> `node.xml_root` will return None for an unattached node whereas `node.xml_select(u"/")` will return empty node set
{i} `node.xml_root` is equivalent to: `while node.xml_parent: node = node.xml_parent`
<!> In general the path of least surprise is to not run XPath on unattached nodes. Note that an unattached node is not the common case because it means the element tree was constructed by hand. If you use any of Amara's parse methods or such high level APIs, you should always have a Document/Entity at the root. Otherwise, we assume you really know what you're doing.
* `node.baseURI` and `node.xmlBase` become `node.xml_base`
<!> The `doc.xml_system_id` on an Entity is different from `doc.xml_base`. It is equiv to the `systemId` property of the `DocumentType` node in DOM L3 (There is similar correspondence between `doc.xml_public_id` and publicId DOM property).
* `node.cloneNode()` becomes `from copy import copy; copy(node)`; `node.cloneNode(True)` becomes `from copy import deepcopy; deepcopy(node)`
=== Attribute nodes ===
==== Changes ====
* `attr.nodeName` becomes `attr.xml_qname` (an immutable property -- to change, update xml_local and xml_namespace)
* `attr.nodeValue` becomes `attr.xml_value`
* `attr.qualifiedName` becomes `attr.xml_qname` (an immutable property)
* `attr.value` becomes `attr.xml_value`
* `attr.namespaceURI` becomes `attr.xml_namespace`
* `attr.localName` becomes `attr.xml_local`
* `attr.prefix` becomes `attr.xml_prefix`
* `attr.specified` becomes `attr.xml_specified` (an immutable property)
==== New Features ====
* `attr.xml_name` returns a 2-item tuple of `(namespace, local)` (an immutable property)
=== CharacterData (Text and Comment) nodes ===
* `node.length`, `node.substringData()`, `node.insertData()`, `node.replaceData()`, `node.appendData()` and `node.deleteData()` are eliminated. Just use string manipulation on `node.xml_value`
{i} `node.length` becomes `len(node.xml_value)`
{i} `node.substringData(start, stop)` becomes `data = node.xml_value[start:stop]`
{i} `node.appendData(data)` becomes `node.xml_value += data`
* `node.nodeValue` becomes `node.xml_value`
* `node.data` becomes `node.xml_value`
* `node.nextSibling` becomes `node.xml_following_sibling` (an immutable property)
* `node.previousSibling` becomes `node.xml_preceding_sibling` (an immutable property)
=== Element nodes ===
==== Changes ====
* `node.nodeName`, `node.tagName` and `node.qualifiedName` become `node.xml_qname` (an immutable property)
* `node.namespaceURI` becomes `node.xml_namespace` (an immutable property)
* `node.localName` becomes `node.xml_local`
* `node.prefix` becomes `node.xml_prefix`
* `node.childNodes` becomes `node.xml_children` (an immutable property)
* `node.hasChildNodes()` becomes `bool(node.xml_children)`
* `node.firstChild` becomes `node.xml_first_child` (an immutable property)
* `node.lastChild` becomes `node.xml_last_child` (an immutable property)
* `node.normalize()` becomes `node.xml_normalize()`
* `node.nextSibling` becomes `node.xml_following_sibling` (an immutable property)
(!) Another option is node.xml_select(u'following-sibling::*')
* `node.previousSibling` becomes `node.xml_preceding_sibling` (an immutable property)
(!) Another option is node.xml_select(u'preceding-sibling::*')
* `node.getElementById(u'foo')` becomes `node.xml_lookup(u'foo')`
* `node.getElementByTagNameNS()` was never provided in 4Suite. As before just use `node.xml_select`
* `node.attributes` becomes `node.xml_attributes` (an immutable property)
* `node.getAttributeNodeNS(ns, localname)` becomes `node.xml_attributes.getnode(ns, localname)`
* `node.setAttributeNodeNS(attr_node)` becomes `node.xml_attributes[ns, localname] = attr_node` (the local name and the namespace of the given object must not conflict with ns and localname)
(!) Another option would be `node.xml_attributes.setnode(attr_node)` to remove the redundant expanded-name key
* `node.getAttributeNS(ns, localname)` becomes `node.xml_attributes[ns, localname]`
* `node.setAttributeNS(ns, qname, value)` becomes `node.xml_attributes[ns, localname] = value`
<!> To set an explicit prefix, one must retrieve the attribute node: {{{
E.xml_attributes[ns, localname] = value
E.xml_attributes.getnode(ns, localname).xml_prefix = prefix}}}
* `node.hasAttributeNS(ns, localname)` becomes `(ns, localname) in node.xml_attributes`
* `node.removeAttributeNode(attr_node)` becomes `node.parent.xml_remove(attr_node)`
(!) Another option would be `del node.xml_attributes[attr_node]`
* `node.removeAttribute(ns, localname)` becomes `del node.xml_attributes[ns, localname]`
* namespace attributes are now accessed by `E.xmlns_attributes` (as namespace nodes)
* `E.xml_namespaces` is a `NamespaceMap` (akin to NamedNodeMap, only for namespace nodes) of all inscope namespaces
* `node.appendChild(newnode)` becomes `node.xml_append(newnode)`
/!\ If newnode is an entity, this is a destructive act that will take all children of that entity and leave it empty, appending them to node. If you want to preserve newnode, copy it first: `copy.deepcopy(newnode)`.
* `node.removeChild(oldnode)` becomes `node.xml_remove(oldnode)`
* `node.insertBefore(newnode, refnode)` becomes `offset = node.xml_index(refnode); node.xml_insert(offset, newnode)`
* `node.replaceChild(newnode, oldnode)` becomes `node.xml_replace(oldnode, newnode)`
We're considering adding an equivalent to list.extend
==== New Features ====
* `element.xml_name` returns a 2-item tuple of `(namespace, local)`
* `element.xml_index(node[, start[, stop]])` which is equivalent to `list.index()`
* `iter(element.xml_attributes)` -> an iterator of `(ns, localname)`
* `element.xml_attributes.keys()` -> an iterator of `(ns, localname)`
* `element.xml_attributes.values()` -> an iterator of node values
* `element.xml_attributes.nodes()` -> an iterator of nodes
=== Processing Instructions ===
* `node.nodeName` becomes `node.xml_target`
* `node.nodeValue` becomes `node.xml_data`
* `node.target` becomes `node.xml_target`
* `node.data` becomes `node.xml_data`
* `node.nextSibling` becomes `node.xml_following_sibling` (an immutable property)
* `node.previousSibling` becomes `node.xml_preceding_sibling` (an immutable property)
=== Document, DocumentFragment, Entity ===
`DocumentFragment` nodes are no more. Whether you parse an XML document or an external parsed entity (which may have more than one root node, may have text at the root level, etc.), you get a Document. The type name is really a misnomer--it's an entity, and the name will be changed in a later version to reflect this fact. Note: Amara Entity does '''not''' correspond to DOM L3 Entity--some of the properties have different semantics.
The Document/Entity type always has xml_parent of None. Other nodes can have xml_parent of None if they are unappended node fragments. The Document/Entity is much like an XPath data model root node type (as elaborated a bit further in XSLT).
Note lexical information based on doc type definitions (such as the use of entities in the serialization of text nodes) is generally lost, as it was for 4Suite and Amara 1.x, and in most other XML processing software. We might in future add features to retain some of this information.
==== Changes ====
* `node.documentURI` becomes `node.xml_base`
* `node.documentElement` is removed as entities can have multiple element children. use e.g. node.xml_first_child or some operation on node.xml_children
* `node.childNodes` becomes `node.xml_children` (an immutable property)
* `node.hasChildNodes()` becomes `bool(node.xml_children)`
* `node.firstChild` becomes `node.xml_first_child` (an immutable property)
* `node.lastChild` becomes `node.xml_last_child` (an immutable property)
* `node.normalize` becomes `node.xml_normalize`
* `node.getElementById(u'foo')` becomes `node.xml_lookup(u'foo')`
* `node.getElementByTagNameNS()` was never provided in 4Suite. As before just use `node.xml_select`
* `node.appendChild(newnode)` becomes `node.xml_append(newnode)`
(!) Note there is a new convenience method node.xml_merge(other), which destructively appends all the children of other to node
* `node.removeChild(oldnode)` becomes `node.xml_remove(oldnode)`
* `node.insertBefore(newnode, refnode)` becomes `offset = node.xml_index(ref_node); node.xml_insert(offset, newnode)`
* `node.replaceChild(newnode, oldnode)` becomes `node.xml_replace(newnode, oldnode)`
==== New Features ====
* `entity.xml_index(node[, start[, stop]])` which is equivalent to `list.index()`
= Working with namespaces =
On each element you have the following properties related to XML namespace.
* `node.xml_prefix` ''(elements and attributes)'' -- The node's namespace prefix
* `node.xml_namespace` ''(elements and attributes)'' -- The node's namespace URI
* `node.xml_attributes` ''(elements only)'' -- All attributes on this element '''excluding namespace declarations'''
* `node.xmlns_attributes` ''(elements only)'' -- The namespace declaration attributes on this element
* `node.xml_namespaces` ''(read-only)'' ''(elements and documents)'' -- (equivalent to XPath namespace nodes) sequence of all in-scope namespaces as nodes
/!\ `GetAllNs()` has been superseded by the `xml_namespaces` property (use `dict([(n.xml_local, n.xml_value) for n in doc.xml_namespaces])`)
In general these concepts are similar to those in [[http://www.w3.org/TR/xml-infoset/#infoitem.element|XML Infoset "2.2. Element Information Items"]]
If you work with XML namespaces you will probably need to create namespace declarations at some point. You can do so as follows:
{{{#!code python
node.xmlns_attributes[prefix] = nsuri
}}}
Where `node` is an element or document node and `prefix` and `nsuri` are Unicode objects. If in modifying an element's or attribute's `xml_prefix` or `xml_namespace`, you create a mapping that does not correspond to any existing in-scope namespace declaration you will get an error. The best way to avoid this error is to make an explicit declaration first, as above.
If you manipulate a node's `xml_prefix` and `xml_namespace` property directly, Amara offers a trick to ensure consistency. Take the following document.
{{{#!code xml
<doc xmlns:a="urn:bogus:a" xmlns:b="urn:bogus:b">
<a:monty/>
</doc>
}}}
Say you want to put a:monty element into the `urn:bogus:b`. You can do so as follows:
{{{#!code python
doc.monty.xml_namespace = u"urn:bogus:b"
#Side-effect is doc.monty.xml_prefix = u"b"
}}}
Or as follows:
{{{#!code python
doc.monty.xml_prefix = u"b"
#Side-effect is doc.monty.xml_namespace = u"urn:bogus:b"
}}}
Amara is thus careful to maintain namespace integrity for you. The above is also the only way to change the parts of an element or attribute's universal name. Properties such as `xml_name` and `xml_qname` are immutable (as an implementation detail, they're computed). In effect, Amara allows you to declare namespaces as you see fit (either directly or by creating new nodes which don't conform to any existing declarations in that scope) but then it enforces those declarations upon any existing nodes within a particular scope.
= Mutation events =
Amara tree now has a system for signaling mutation of the tree.
Note: this is not really based on [[http://www.w3.org/TR/2003/NOTE-DOM-Level-3-Events-20031107/|DOM L3 events]]
* `node.xml_child_inserted(child)` -- called '''after''' the node has been added to `node.xml_children`
* `node.xml_child_removed(child)` -- called '''before''' the node is removed from `node.xml_children`
* `element.xml_attribute_added(attr_node)` -- called after the attribute node has been added to `element.xml_attributes`
* `element.xml_attribute_removed(attr_node)` -- called after the attribute node has been removed `element.xml_attributes`
* `element.xml_attribute_modified(attr_node)` -- called after the attribute node's value has been updated
Note: we are probably going to add more mutation events in future versions. We're researching related efforts as rough guidance, including:
* DOM L3 Events
* [[http://www.gnu.org/software/classpathx/jaxp/apidoc/gnu/xml/dom/DomNode.html|Mutation events on GNU's JAXP impl]]
== Exceptions in mutation events ==
<!> We propagate all exceptions from mutation event handlers, in order to allow you to catch bugs in these. In general, though you should avoid as much as possible raising exceptions from mutation event handlers, as these can lead to problems with data consistency. We do as much as we can to make mutation transactional, but there are some very hard cases that you get into when dealing with exceptions from handlers.
= DOM =
Amara 2 offers a purer analogue to W3C DOM than the old 4Suite Domlette. Amara DOM is a wrapper of Amara tree. Note that most people will want to use Amara tree. Only use Amara DOM if you really need the (rather clumsy and non-pythonic) W3C interfaces.
== Reading ==
You now parse to domlette as follows:
{{{#!code python
from amara.domlette import *
...
doc = parse(source) #source can be file, URI, stream or string
}}}
= Bindery =
Bindery is now a subclass of Domlette, so you should also review the Domlette section above.
A quick note on what's not new: you still parse using `amara.parse` (which is an alias for `amara.bindery.parse`). Amara of course now supports other types of parse e.g. domlette or saxlette, and you just use more detailed paths for these (`amara.domlette.parse`), etc.
{{{#!code python
import amara
...
doc = amara.parse(source) #source can be file, URI, stream or string
print doc.spam.eggs.xml()
}}}
== Creating a bindery document from scratch ==
Because of the special nature of bindery, it's a bit fiddly to create a document from scratch. It's probably better to just parse a document. You can use the bindery entity base class:
{{{#!code python
from amara import bindery
from amara import xml_print
doc = bindery.nodes.entity_base()
doc.xml_append(doc.xml_element_factory(None, u'spam'))
xml_print(doc) #<?xml version="1.0" encoding="UTF-8"?>\n<spam/>
}}}
You can also use xml_append_fragment
{{{#!code python
from amara import bindery
from amara import xml_print
doc = bindery.nodes.entity_base()
doc.xml_append_fragment('<a><b/></a>')
xml_print(doc) #<?xml version="1.0" encoding="UTF-8"?>\n<a><b/></a>
}}}
== Accessing elements/attributes that may not be in the XML ==
In Amara 1.x accessing an element or attribute missing from the source XML gives AttributeError so that:
{{{#!code python
X = """\
<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>
"""
import amara
doc = amara.parse(X)
print doc.monty.python.spam
print doc.monty.python.ministry
}}}
results in:
{{{#!code pytb
eggs
Traceback (most recent call last):
File "/tmp/foo.py", line 10, in <module>
print doc.monty.python.ministry
AttributeError: 'python' object has no attribute 'ministry'
}}}
The first element has a `spam` attribute, but not `ministry`. Since an index was not specified Amara returns the first python element, which is missing the attribute.
There has been some complaint about this, because it means you have to employ a "look before you leap" approach, or use a lot of exception handling to deal with common situations such as optional elements or attributes. You could also use XPath (`doc.xml_select(u'monty/python/ministry')`) which returns [] (you can use the `string` function to return Unicode) or the Pythonic approach of `getattr(doc.monty.python, 'ministry', u'')`.
Amara 2.x will offer users several conveniences. First of all, a parse rule that allows you to set defaults for missing XML nodes. This would, for example, create a value of `u''` for each `ministry` attribute missing from a `python` element. You can also use a subclass that implements create-on-get semantics. This means that `print doc.monty.python.ministry` could automatically create and return an empty element `ministry` as the last child of `python`. Alternatively it will not create anything, and will just return None in cases that lead to AttributeError. Neither approach is really recommended because the complexities of balancing XML and Python data structure semantics can be even more confusing. Explicit is better, where you use XPath for its explicitly known forgiving semantics, and Python for its normal, strict behavior. The best approach, if you need to probe entire substructures that you are not sure exists is to use XPath. If you want a shallow, safe attribute access, you can use the new xml_get (new in 2.0). `doc.monty.python.xml_get('ministry')` is similar to `getattr(doc.monty.python, 'ministry', u'')`.
=== Convenience API ===
There is a new, more consistent API for navigating the skew between XML and Python names.
##node.xml_elements #iterator of child element nodes (not the dict from Amara 1.x); equiv of Amara 1.x `( e for e in node.xml_children if e.nodeType == Node.ELEMENT_NODE)`
##node.xml_attributes #iterator of attributes nodes (not the dict from Amara 1.x)
{{{#!code python
node.xml_element_pnames #iterator of child element property names; near-replacement for Amara 1.x node.xml_elements
node.xml_element_xnames #iterator of child element XML universal names (namespace, local)
node.xml_attribute_pnames #iterator of attributes property names; near-replacement for Amara 1.x node.xml_attributes
node.xml_attribute_xnames #iterator of attributes XML universal names (namespace, local)
}}}
The Amara 1.x properties xml_elements and xml_attributes were too incongruous, and are no more.
The old node.xml_attributes behavior can be had with:
{{{#!code python
from itertools import *
dict([x, (node.namespaceURI, getattr(node, x)) for x in node.xml_attribute_pnames])
}}}
The old node.xml_elements can be had with:
{{{#!code python
from itertools import *
dict([x, getattr(node, x) for x in node.xml_element_pnames])
}}}
##This assumes the above are not much necessary with the new API. If it turns put they are we might add ##`node.xml_model.attribute_pname_dict` (from Python name to tuple of XML Universal name and value) and similar ##`node.xml_model.element_pname_dict`.
node.xml_child_text is no more. It's equivalent to:
{{{#!code python
u''.join(node.xml_select(u'text()'))
}}}
Note: as above, if you want to iterate using XML node types, use XPath
## (you'll now get an iterator rather than a list):
{{{#!code python
node.xml_select(u'*') #iterator of child elements
node.xml_select(u'@*') #iterator of attributes
node.xml_select(u'text()') #iterator of child text nodes
node.xml_select(u'comment()') #iterator of child comment nodes
node.xml_select(u'processing-instruction()') #iterator of child processing instruction nodes
}}}
Discussion:
* http://lists.fourthought.com/pipermail/4suite/2008-March/008387.html
* http://groups.google.com/group/amara-user/browse_thread/thread/602df975b12a8509
For more on this see [[Amara2/Modeling]]
== Managing the XML model ==
Related to the above issue is the question of how you can query and manage aspects of the XML model in Python. Can you discover what data members come from where in the XML (e.g. to distinguish XML elements from attributes, or these from mixed-in Python data members)? Can you work with schema or constraint information?
Amara 1.x provided a human-readable synopsis in node.xml_doc()
Amara 2.X provides formalized XML model information, opening up some powerful capabilities. The key is in the binding classes. Now binding classes are differentiated not only by name, but also by what constraints they support. If there is no guidance on the XML model every node will use generic bindings. Such guidance can come in several ways:
* Register an XML schema (RELAX NG or Schematron)
* Set up constraints in a special structure for the purpose
* Set constraints after the parse
You access the constraint information in the new class property xml_model. It's based at the core on the Schematron model of assertions, though there are many conveniences for dealing with constraints in alternate ways. Here are some of the low-level primitives:
{{{#!code python
node.xml_model.element_types #Known child element types for this node's class as a dict from XML universal name to Python name
node.xml_model.attribute_types #Known attribute types for this node's class as a dict from XML universal name to Python name
}}}
The above combine required and optional node types, derived from the constraints, and additional information about optional node types.
If you try to access an element or attribute on `node` which is in `node.xml_model.element_types` but does not exist on the present instance, you get None rather than AttributeError. This is a key change from Amara 1.x, and discussed further below.
{{{#!code python
node.xml_model.element_types #Known child element types for this node's class as a dict from XML universal name to Python name
node.xml_model.attribute_types #Known attribute types for this node's class as a dict from XML universal name to Python name
}}}
You can access and manipulate constraints as follows:
{{{#!code python
node.xml_model.constraints #A list of constraint objects
}}}
Each constraint object is essentially a Schematron assertion, with some convenience bits.
{{{#!code python
node.xml_model.constraints.append(u'@xml:id', validate=True) #Make xml:id required. Will throw a constraint violation right away if there is not one. Affects all instances of this class.
node.xml_model.validate(recurse=True) #Recursively validate constraints on node and all children
}}}
Notice that manipulating constraints affect all nodes that are instances of that class. If you want to specialize the constraints, for example add a constraint if an element occurs at top level, you have to use a new class for that instance. Amara 2.x does provide handy facilities to make this easier:
{{{#!code python
node.xml_specialize_model() #Clone the node using a new node class that's a copy of the original constraints; substitute the node in place with the clone, and return the clone.
node.xml_specialize_model(replace=False) #Clone the node using a new node class that's a copy of the original constraints; Do not replace the node in with the clone, and return the clone.
node.xml_specialize_model(newclass) #Clone the node using the provided node class. This will throw an exception if the clone does not meet the constraints defined in newclass. Return the clone.
}}}
You can get a class to pass in using the usual `node.__class__`.
=== Disambiguation rules ===
The rules for disambiguation when converting XML to Python names have been refined. First of all known names get precedence over unknown names. By default, if there is information in the model about an element or attribute, and another element or attribute is found which would result in the same Python name, the latter is mangled e.g. with a trailing `_`.
{{{#!code python
<a.1 b.1=""><b.1/></a.1>
}}}
If you parse the above document and the class for a.1 has b.1 among known attributes, and not a known element b.1, assuming it's an open model (i.e. no constraints forbid unknown elements), the resulting binding would have a_1.b_1 as the binding of the attribute rather than the element.
You can also specify how disambiguation proceeds. You could do so in Amara 1.x, with much difficulty. In Amara 2.x, it's a matter of specializing the python_name() method. You could for example prepend "xhtml_" to the Python name of all elements in the XHTML namespace.
{{{#!code python
}}}
= XPath =
Most people will now use the convenience methods on Domlette nodes.
{{{#!code python
result = node.xml_select(expr)
}}}
`node` is used for XPath context. You can tweak any other parts of the context by providing a context object. `result` is one of the objects that map the XPath data model to Python.
{{{#!code python
ctx = amara.xpath.context(prefixes={u'html': u'http://www.w3.org/1999/xhtml'})
result = node.xml_select(u'/html:html/html:head/html:title', context=ctx)
}}}
Note: if you specify context node for ctx it overrides the `node` used to call `xml_select`. You can also use the `evaluate()` method of a amara.xpath.context object. Pass it the XPath expression string (unicode) to execute. The above code is equivalent to:
{{{#!code python
ctx = amara.xpath.context(node, prefixes={u'html': u'http://www.w3.org/1999/xhtml'})
result = ctx.evaluate(u'/html:html/html:head/html:title')
}}}
There is no longer a global function to evaluate XPath (the old Ft.Xml.XPath.Evaluate).
= XSLT =
There is a similar convenience method for XSLT
{{{#!code python
result = node.xml_transform(transforms, params=None, writer=None) ##, use_pis=True
}}}
* transforms is either a single inputsource or an iterator of inputsources representing the XSLT
* params is what used to be called topLevelParams
* use_pis is the inverse of what used to be ignorePis
* the old outputStream is now replaced by creating a generic writer object on which the user only sets the stream attribute
The result is an instance of one of the subclasses of `amara.xslt.result`. These are `stringresult`, `streamresult`, and `treeresult`. Key properties:
`result.stream` :: stream buffer of the processor #Not available on stringresult and treeresult instances
`result.method` :: xsl:output method parameter
`result.encoding` :: xsl:output encoding parameter
`result.media_type` :: xsl:output mediaType parameter
`result.parameters` :: all other parameters set during transform execution
##Other xsl:output parameters are similarly available.
There is also the global transform API:
{{{#!code python
from amara.xslt import transform
result = transform(source, transforms, params=None) ##, use_pis=True
}}}
You can specialize the result, for example to set an output stream:
{{{#!code python
from amara.xslt import transform, streamresult
r = streamresult(sys.stderr, u'http://example.org/myresult') #The URI passed in is the base URI of the result, particularly used for EXSLT document element
new_result = transform(source, transforms, params=None, result=r)
}}}
<!> The result returned to you may not be the same object that you passed in
== Setting up parameters ==
There is a new function `amara.xpath.parameterize`, which takes a dictionary and turns it into a set of parameters suitable for passing into an XSLT transform. It's basically a convenience function to make it fairly easy to pass Python data into transforms.
{{{#!code python
from amara import parse
from amara.xpath.util import parameterize
doc = parse('<monty spam="1"><python/></monty>')
e1 = doc.documentElement
e2 = e1.firstChild #doc.xml_select(u'//python')
a1 = e1.getAttributeNodeNS(None, u'spam') #e1.xml_select(u'@spam')
D = {'p1': 1, 'p2': e1, 'p3': e2, 'p4': a1}
print parameterize(D)
#Result is something like
#{u'p2': <Element at 0x63cc30: name u'monty', 1 attributes, 1 children>,
# u'p3': <Element at 0x63cc70: name u'python', 0 attributes, 0 children>,
# u'p1': 1, u'p4': <Attr at 0x63e260: name u'spam', value u'1'>}
}}}
== The detailed API ==
Some users will need closer control. Usually this will be for those who want to get a performance boost by reusing processor objects.
{{{#!code python
proc = amara.xslt.processor()
proc.append_transform(transform) #transform can be file, URI, stream or string
result = proc.run(source1) #source1 can be file, URI, stream or string
print result.stream.read()
#Reuse the processor
result = proc.run(source2, params={u'name': u'Joe'})
print result.stream.read()
}}}
== XSLT on non-root nodes ==
The new API allows you to run XSLT on a non-root node.
{{{#!code python
elem.xml_transform(source, tr) # XSLT starts with elem as initial context
}}}
In this case the initial context for template dispatch will be `elem`, as the sole item in the node list, with position of 1. In plain English that's probably exactly what you'd expect. The same goes for global variables and parameters.
For the XSLT lawyers out there, we in effect generate an implied root node with that element as the only child in this case.
And yes, you can always do things the traditional way by navigating to the root node yourself:
{{{#!code python
elem.rootnode.xml_transform(source, tr) # XSLT starts with root of elem as initial context
}}}
== Extension functions and elements ==
See: [[Amara2/XSLT_Extensions]]
= Generating XML =
== Struct writer ==
...Add link here...
{{{#!code python
}}}
{{{#!code python
}}}
= XUpdate =
See [[Amara2/XUpdate]]
= See also =
* Some examples at [[Amara2/Scratchpad]]
= Notes =
* See [[Amara2/Whatsnew/Scratch]]
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/doc/manual.rst
|
manual.rst
|
#Also of interest: http://allmydata.org/trac/pyutil/browser/pyutil/pyutil/benchutil.py
# http://code.activestate.com/recipes/440700/
# by Tarek Ziadé
# let's use pystone instead of seconds here
# (from Stephan Richter idea)
import time
from subprocess import Popen, PIPE
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
class KnownFailure(Exception):
    """Raised by a test to flag a known/expected failure.

    Picked up by the KnownFailureError nose plugin below and reported
    under the 'KNOWNFAIL' label rather than as an ordinary error.
    """
    pass
class KnownFailureError(ErrorClassPlugin):
    # Nose error-class plugin: tests that raise KnownFailure are reported
    # under the 'KNOWNFAIL' label and still counted as failures
    # (isfailure=True), instead of surfacing as generic errors.
    todo = ErrorClass(KnownFailure, label='KNOWNFAIL', isfailure=True)
# TOLERANCE in Pystones
kPS = 1000  # one kilo-pystone
TOLERANCE = 0.5*kPS  # slack allowed beyond a test's declared pystone budget
class DurationError(AssertionError): pass  # raised by the timing decorator when a test exceeds its budget
def local_pystone():
    """Benchmark this machine with pystone in a subprocess.

    Returns the ``(benchtime, pystones)`` tuple produced by
    ``test.pystone.pystones()``, or None if the subprocess output cannot
    be validated.  Runs in a subprocess because importing/running pystone
    in-process interferes with nosetests.
    """
    #Can't do the simple thing here because nosetest interferes :(
    #from test import pystone
    #return pystone.pystones(loops=pystone.LOOPS)
    # print(...) works identically on Python 2 (parenthesized expression)
    # and Python 3 (function call).
    CMD = 'python -c "from test import pystone; print(pystone.pystones(loops=pystone.LOOPS))"'
    process = Popen(CMD, stdout=PIPE, stderr=PIPE, shell=True)
    output = process.communicate()[0]
    if not isinstance(output, str):
        # Python 3: pipes yield bytes; decode before text processing.
        output = output.decode('ascii', 'replace')
    result = output.strip()
    #For security purposes, make sure it's strictly in the form (N, N)
    import re
    if re.match(r'\([\d\.]+,\s*[\d\.]+\)', result):
        # literal_eval only accepts Python literals, so even a validated
        # string cannot execute arbitrary code (unlike eval).
        import ast
        return ast.literal_eval(result)
    else:
        return None
def func_not_to_exceed_pystone(max_num_pystones, current_pystone=None):
    """Decorator factory: fail the wrapped test if it runs too long.

    Elapsed wall-clock time is converted to pystones using a local
    benchmark so the budget is machine-independent.  Raises
    DurationError when the converted cost exceeds ``max_num_pystones``
    plus TOLERANCE.

    :param max_num_pystones: pystone budget for the wrapped callable
    :param current_pystone: optional ``(benchtime, stones)`` calibration
        tuple as returned by ``local_pystone()``.  When omitted it is
        benchmarked lazily at decoration time and cached, instead of the
        old behavior of spawning a benchmark subprocess at module import
        via an eagerly-evaluated default argument.
    """
    if not isinstance(max_num_pystones, float):
        max_num_pystones = float(max_num_pystones)
    if current_pystone is None:
        # Benchmark once and cache on the decorator itself.
        current_pystone = getattr(func_not_to_exceed_pystone,
                                  '_calibration', None)
        if current_pystone is None:
            current_pystone = local_pystone()
            func_not_to_exceed_pystone._calibration = current_pystone
    if current_pystone is None:
        # local_pystone() returns None when it cannot validate the
        # benchmark output; fail loudly instead of crashing later with a
        # TypeError when indexing None.
        raise DurationError('could not calibrate pystone benchmark')
    def _timedtest(function):
        def wrapper(*args, **kw):
            start_time = time.time()
            try:
                return function(*args, **kw)
            finally:
                total_time = time.time() - start_time
                if total_time == 0:
                    pystone_total_time = 0
                else:
                    # calibration is (seconds, pystones/sec), so this is
                    # seconds-per-pystone.
                    pystone_rate = current_pystone[0] / current_pystone[1]
                    pystone_total_time = total_time / pystone_rate
                if pystone_total_time > (max_num_pystones + TOLERANCE):
                    raise DurationError((('Test too long (%.2f Ps, '
                                          'need at most %.2f Ps)')
                                         % (pystone_total_time,
                                            max_num_pystones)))
        # Preserve the wrapped function's identity for test reporting.
        wrapper.__name__ = function.__name__
        wrapper.__doc__ = function.__doc__
        return wrapper
    return _timedtest
# http://www.dabeaz.com/blog/2010/02/context-manager-for-timing-benchmarks.html
# benchmark.py
class time_benchmark_block(object):
    """Context manager that prints the wall-clock duration of its body."""

    def __init__(self, name):
        # Label printed next to the elapsed time.
        self.name = name

    def __enter__(self):
        self.start = time.time()

    def __exit__(self, ty, val, tb):
        elapsed = time.time() - self.start
        print("%s : %0.3f seconds" % (self.name, elapsed))
        # Never suppress an exception raised inside the block.
        return False
class file_finder(object):
    """Callable resolving file names relative to the directory of a context path."""

    def __init__(self, context):
        # Typically the __file__ of the module that owns the data files.
        self.context = context

    def __call__(self, fname):
        # Join fname onto the directory portion of the context path.
        base = os.path.split(self.context)[0]
        return os.path.join(base, fname)
#
# The stuff below should no longer be used. It predates the move to nosetests
#
"""
Supporting definitions for the Python regression tests.
Mostly cribbed from Python core's: http://svn.python.org/view/python/trunk/Lib/test/test_support.py
(initially rev 62234)
"""
import contextlib
import errno
import socket
import sys
import os
import shutil
import warnings
import unittest
import types
import operator
import time
# Defined here as to have stack frames originating in this module removed
# from unittest reports. See `unittest.TestResult._is_relevant_tb_level()`
__unittest = True
class TestError(Exception):
    """Base class for regression test exceptions."""

    __slots__ = ('message', 'detail')

    def __init__(self, message=None, detail=None):
        # Any falsy message (None, '') falls back to the generic text.
        self.message = message if message else 'assertion failed'
        self.detail = detail

    def __str__(self):
        return self.message
class TestFailed(TestError):
    """Test failed.  Used as test_case.failureException."""
class TestSkipped(TestError):
    """Test skipped.

    This can be raised to indicate that a test was deliberately
    skipped, but not because a feature wasn't available.  For
    example, if some resource can't be used, such as the network
    appears to be unavailable, this should be raised instead of
    TestFailed.
    """
class ResourceDenied(TestSkipped):
    """Test skipped because it requested a disallowed resource.

    This is raised when a test calls requires() for a resource that
    has not been enabled.  It is used to distinguish between expected
    and unexpected skips.
    """
# Module-wide flags normally configured by regrtest.py at startup.
verbose = 1              # Flag set to 0 by regrtest.py
use_resources = None     # Flag set to [] by regrtest.py
max_memuse = 0           # Disable bigmem tests (they will still be run with
                         # small sizes, to make sure they work.)
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
    """Remember *stdout* module-wide so get_original_stdout() can return it."""
    global _original_stdout
    _original_stdout = stdout
def get_original_stdout():
    """Return the stdout recorded at startup, falling back to sys.stdout."""
    return _original_stdout or sys.stdout
def unload(name):
    """Drop module *name* from sys.modules; silently ignore it if absent."""
    # dict.pop with a default replaces the try/del/except-KeyError dance.
    sys.modules.pop(name, None)
def unlink(filename):
    """Best-effort removal of *filename*; OSError (e.g. missing file) is ignored."""
    try:
        os.unlink(filename)
    except OSError:
        pass
def rmtree(path):
    """Recursively delete *path*, tolerating an already-missing tree.

    Any OSError other than "does not exist" is re-raised.
    """
    try:
        shutil.rmtree(path)
    except OSError as e:
        # 'as' syntax works on Python 2.6+ and Python 3; the original
        # 'except OSError, e' form is Python-2-only.
        # Unix returns ENOENT, Windows returns ESRCH.
        if e.errno not in (errno.ENOENT, errno.ESRCH):
            raise
def forget(modname):
    '''"Forget" a module was ever imported by removing it from sys.modules and
    deleting any .pyc and .pyo files.'''
    unload(modname)
    ext = os.extsep
    for dirname in sys.path:
        # Remove each compiled form independently: a .pyo can exist even
        # when the matching .pyc does not (and vice versa).
        unlink(os.path.join(dirname, modname + ext + 'pyc'))
        unlink(os.path.join(dirname, modname + ext + 'pyo'))
def is_resource_enabled(resource):
    """Test whether a resource is enabled.  Known resources are set by
    regrtest.py."""
    # use_resources is a module global; None means no resources were configured.
    return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
    """Raise ResourceDenied if the specified resource is not available.
    If the caller's module is __main__ then automatically return True. The
    possibility of False being returned occurs when regrtest.py is executing."""
    # see if the caller's module is __main__ - if so, treat as if
    # the resource was set
    if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
        return
    if not is_resource_enabled(resource):
        if msg is None:
            msg = "Use of the `%s' resource not enabled" % resource
        raise ResourceDenied(msg)
HOST = 'localhost'
def find_unused_port(family=socket.AF_INET, socktype=socket.SOCK_STREAM):
    """Return a port number that was unused at the moment of the call.

    A throwaway socket of the given family/type is bound to port 0 so the
    OS hands out an ephemeral port; the socket is then closed and the port
    number returned.  Use this (or preferably bind_port()) instead of any
    hard-coded port: fixed ports break concurrent test runs on the same
    host.  Prefer bind_port() whenever the test itself creates the socket —
    by the time a caller binds the port returned here, another process may
    already have grabbed it; find_unused_port() only makes sense when the
    port must be passed to a constructor or an external program (e.g. the
    -accept argument of ``openssl s_server``).

    Historical background on why SO_REUSEADDR must be avoided on Windows
    (SO_EXCLUSIVEADDRUSE has the Unix SO_REUSEADDR semantics there):
    http://bugs.python.org/issue2550 and
    http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx
    """
    probe = socket.socket(family, socktype)
    port = bind_port(probe)
    probe.close()
    return port
def bind_port(sock, host=HOST):
    """Bind *sock* to *host* on an OS-chosen free port and return the port.

    Relies on ephemeral ports so that concurrent test runs do not collide.
    For TCP/IP sockets, raises TestFailed when SO_REUSEADDR or SO_REUSEPORT
    is already set — tests must never set those options on TCP/IP sockets.
    When SO_EXCLUSIVEADDRUSE is available (Windows) it is set, preventing
    anyone else from binding to our host/port for the test's duration.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        reuseaddr = getattr(socket, 'SO_REUSEADDR', None)
        if reuseaddr is not None:
            if sock.getsockopt(socket.SOL_SOCKET, reuseaddr) == 1:
                raise TestFailed("tests should never set the SO_REUSEADDR " \
                                 "socket option on TCP/IP sockets!")
        reuseport = getattr(socket, 'SO_REUSEPORT', None)
        if reuseport is not None:
            if sock.getsockopt(socket.SOL_SOCKET, reuseport) == 1:
                raise TestFailed("tests should never set the SO_REUSEPORT " \
                                 "socket option on TCP/IP sockets!")
        exclusive = getattr(socket, 'SO_EXCLUSIVEADDRUSE', None)
        if exclusive is not None:
            sock.setsockopt(socket.SOL_SOCKET, exclusive, 1)
    # Port 0 asks the OS for an ephemeral port.
    sock.bind((host, 0))
    return sock.getsockname()[1]
FUZZ = 1e-6
def transient_internet():
    """Return a context manager that raises ResourceDenied when various issues
    with the Internet connection manifest themselves as exceptions."""
    # NOTE(review): TransientResource is not defined in this module as seen
    # here -- presumably provided elsewhere; confirm before relying on this.
    # contextlib.nested is Python-2-only (removed in Python 3).
    time_out = TransientResource(IOError, errno=errno.ETIMEDOUT)
    socket_peer_reset = TransientResource(socket.error, errno=errno.ECONNRESET)
    ioerror_peer_reset = TransientResource(IOError, errno=errno.ECONNRESET)
    return contextlib.nested(time_out, socket_peer_reset, ioerror_peer_reset)
@contextlib.contextmanager
def captured_output(stream_name):
    """Run the 'with' statement body using a StringIO object in place of a
    specific attribute on the sys module.
    Example use (with 'stream_name=stdout')::
       with captured_stdout() as s:
           print "hello"
       assert s.getvalue() == "hello"
    """
    import StringIO
    orig_stdout = getattr(sys, stream_name)
    setattr(sys, stream_name, StringIO.StringIO())
    try:
        yield getattr(sys, stream_name)
    finally:
        # Restore the real stream even when the body raises; the original
        # version leaked the replacement stream on an exception.
        setattr(sys, stream_name, orig_stdout)
def captured_stdout():
    """Shortcut for captured_output('stdout')."""
    return captured_output("stdout")
#=======================================================================
# Big-memory-test support. Separate from 'resources' because memory use should be configurable.
# Some handy shorthands. Note that these are used for byte-limits as well
# as size-limits, in the various bigmem tests
_1M = 1024*1024
_1G = 1024 * _1M
_2G = 2 * _1G
# Hack to get at the maximum value an internal index can take.
class _Dummy:
    # __getslice__ is Python-2-only (removed in Python 3); slicing with an
    # open upper bound makes the interpreter pass the clamped maximum index.
    def __getslice__(self, i, j):
        return j
MAX_Py_ssize_t = _Dummy()[:]
def set_memlimit(limit):
    """Parse a human-readable size such as '2.5G' (optional trailing 'b')
    and store it in the module global max_memuse.

    Raises ValueError for unparsable strings or for limits below 2G-1,
    which are too small to exercise the bigmem tests.
    """
    import re
    global max_memuse
    sizes = {
        'k': 1024,
        'm': _1M,
        'g': _1G,
        't': 1024*_1G,
    }
    # re.VERBOSE makes the literal space in the pattern insignificant.
    m = re.match(r'(\d+(\.\d+)?) (K|M|G|T)b?$', limit,
                 re.IGNORECASE | re.VERBOSE)
    if m is None:
        raise ValueError('Invalid memory limit %r' % (limit,))
    memlimit = int(float(m.group(1)) * sizes[m.group(3).lower()])
    if memlimit > MAX_Py_ssize_t:
        # Clamp to the largest index the interpreter can represent.
        memlimit = MAX_Py_ssize_t
    if memlimit < _2G - 1:
        raise ValueError('Memory limit %r too low to be useful' % (limit,))
    max_memuse = memlimit
def bigmemtest(minsize, memuse, overhead=5*_1M):
    """Decorator for bigmem tests.

    'minsize' is the minimum useful size for the test (in arbitrary,
    test-interpreted units.) 'memuse' is the number of 'bytes per size' for
    the test, or a good estimate of it. 'overhead' specifies fixed overhead,
    independent of the testsize, and defaults to 5Mb.

    The decorator tries to guess a good value for 'size' and passes it to
    the decorated test function. If minsize * memuse is more than the
    allowed memory use (as defined by max_memuse), the test is skipped.
    Otherwise, minsize is adjusted upward to use up to max_memuse.
    """
    def decorator(f):
        def wrapper(self):
            if not max_memuse:
                # If max_memuse is 0 (the default),
                # we still want to run the tests with size set to a few kb,
                # to make sure they work. We still want to avoid using
                # too much memory, though, but we do that noisily.
                maxsize = 5147
                self.failIf(maxsize * memuse + overhead > 20 * _1M)
            else:
                maxsize = int((max_memuse - overhead) / memuse)
                if maxsize < minsize:
                    # Really ought to print 'test skipped' or something
                    if verbose:
                        sys.stderr.write("Skipping %s because of memory "
                                         "constraint\n" % (f.__name__,))
                    return
                # Try to keep some breathing room in memory use
                maxsize = max(maxsize - 50 * _1M, minsize)
            return f(self, maxsize)
        # Expose the sizing parameters for introspection by test runners.
        wrapper.minsize = minsize
        wrapper.memuse = memuse
        wrapper.overhead = overhead
        return wrapper
    return decorator
def bigaddrspacetest(f):
    """Decorator for tests that fill the address space."""
    def wrapper(self):
        if max_memuse < MAX_Py_ssize_t:
            # Not enough memory allowed: skip, noisily when verbose.
            if verbose:
                sys.stderr.write("Skipping %s because of memory "
                                 "constraint\n" % (f.__name__,))
        else:
            return f(self)
    return wrapper
#=======================================================================
# unittest integration.
class test_case(unittest.TestCase):
    """TestCase whose failures raise TestFailed and that honors a
    result.simulate flag to dry-run tests without executing them."""
    failureException = TestFailed

    def run(self, result):
        # Re-implementation of the (Python 2 era) unittest.TestCase.run so
        # that a result object with simulate=True replaces the real test
        # method with a no-op while still exercising setUp/tearDown.
        result.startTest(self)
        if getattr(result, "simulate", False):
            testMethod = lambda: None
        else:
            testMethod = getattr(self, self._testMethodName)
        try:
            try:
                self.setUp()
            except KeyboardInterrupt:
                raise
            except:
                # NOTE: self._exc_info is a Python-2 unittest internal.
                result.addError(self, self._exc_info())
                return
            ok = False
            try:
                testMethod()
                ok = True
            except self.failureException:
                result.addFailure(self, self._exc_info())
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self._exc_info())
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                # A tearDown error turns an otherwise passing test into an error.
                result.addError(self, self._exc_info())
                ok = False
            if ok: result.addSuccess(self)
        finally:
            result.stopTest(self)

    def assertIsInstance(self, obj, cls):
        """Assert isinstance(obj, cls); cls may be a type or tuple of types."""
        if isinstance(cls, tuple):
            expected = ' or '.join(cls.__name__ for cls in cls)
        else:
            expected = cls.__name__
        msg = "expected %s, not %s" % (expected, type(obj).__name__)
        self.assertTrue(isinstance(obj, cls), msg)
class test_loader(unittest.TestLoader):
    """
    Extends `unittest.TestLoader` to support defining TestCases as members
    of a TestSuite.
    """
    def loadTestsFromTestSuite(self, testSuiteClass):
        """Return a suite of all tests cases contained in `testSuiteClass`."""
        cases = []
        for name in dir(testSuiteClass):
            obj = getattr(testSuiteClass, name)
            # types.ClassType also catches Python 2 old-style classes.
            if (isinstance(obj, (type, types.ClassType)) and
                issubclass(obj, unittest.TestCase)):
                cases.append(obj)
        tests = []
        # Deterministic ordering: sort case classes by name.
        for case in sorted(cases, key=operator.attrgetter('__name__')):
            tests.append(self.loadTestsFromTestCase(case))
        return testSuiteClass(tests)

    def loadTestsFromModule(self, module):
        """Collect suite and case classes from `module`, skipping anything
        flagged (directly or via its module) with __unittest__/__unittest."""
        suites, cases = [], []
        for name, obj in vars(module).iteritems():
            if (isinstance(obj, (type, types.ClassType))
                and '__unittest__' not in obj.__dict__
                and '__unittest' not in sys.modules[obj.__module__].__dict__):
                if issubclass(obj, unittest.TestSuite):
                    suites.append(obj)
                elif issubclass(obj, unittest.TestCase):
                    cases.append(obj)
        tests = []
        for suite in sorted(suites, key=operator.attrgetter('__name__')):
            tests.append(self.loadTestsFromTestSuite(suite))
        for case in sorted(cases, key=operator.attrgetter('__name__')):
            tests.append(self.loadTestsFromTestCase(case))
        return self.suiteClass(tests)
# On Windows, the best timer is time.clock().
# On most other platforms the best timer is time.time().
# NOTE(review): time.clock() was removed in Python 3.8; this module targets
# Python 2, where it is still the high-resolution timer on Windows.
if sys.platform == 'win32':
    default_timer = time.clock
else:
    default_timer = time.time
class _test_result(unittest.TestResult):
    """TestResult writing colorized dot/verbose progress to a _test_stream
    and tracking total suite wall-clock time."""
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, verbosity, simulate, timer=default_timer):
        unittest.TestResult.__init__(self)
        self.stream = stream           # expected to provide write()/setcolor()
        self.dots = verbosity == 1     # one character per test
        self.verbose = verbosity > 1   # one line per test
        self.simulate = simulate       # dry-run flag consumed by test_case.run
        self.timer = timer
        self.total_time = 0
        return

    def startSuite(self):
        # Record the start timestamp; stopSuite() converts it to a duration.
        self.total_time = self.timer()
        return

    def stopSuite(self):
        stop_time = self.timer()
        self.total_time = stop_time - self.total_time
        if self.dots:
            # Terminate the row of progress dots.
            self.stream.write('\n')
        return

    def _write_errors(self, what, errors, color):
        # Dump each (test, traceback) pair under a colored heading;
        # returns the number of entries written.
        for test, err in errors:
            self.stream.write(self.separator1 + '\n')
            self.stream.setcolor(color)
            description = test.shortDescription() or str(test)
            self.stream.write('%s: %s\n' % (what, description))
            self.stream.setcolor('NORMAL')
            self.stream.write(self.separator2 + '\n')
            self.stream.write(err + '\n')
        return len(errors)

    def startTest(self, test):
        unittest.TestResult.startTest(self, test)
        if self.verbose:
            self.stream.write('%s ... ' % (test.shortDescription() or test))

    def addSuccess(self, test):
        unittest.TestResult.addSuccess(self, test)
        if self.dots:
            self.stream.setcolor('GREEN')
            self.stream.write('.')
            self.stream.setcolor('NORMAL')
        elif self.verbose:
            self.stream.setcolor('GREEN')
            self.stream.write('OK')
            self.stream.setcolor('NORMAL')
            self.stream.write('\n')

    def addError(self, test, err):
        unittest.TestResult.addError(self, test, err)
        if self.dots:
            self.stream.setcolor('WHITE')
            self.stream.write('E')
            self.stream.setcolor('NORMAL')
        elif self.verbose:
            self.stream.setcolor('WHITE')
            self.stream.write('ERROR')
            self.stream.setcolor('NORMAL')
            self.stream.write('\n')

    def addFailure(self, test, err):
        exc, val, tb = err
        if self.verbose and issubclass(exc, TestError):
            # For our own TestError, record its detail/message rather than
            # a full traceback (.message is the Python-2-style attribute).
            err = val.detail or val.message
            self.failures.append((test, err))
        else:
            unittest.TestResult.addFailure(self, test, err)
        if self.dots:
            self.stream.setcolor('RED')
            self.stream.write('F')
            self.stream.setcolor('NORMAL')
        elif self.verbose:
            self.stream.setcolor('RED')
            self.stream.write('FAIL')
            self.stream.setcolor('NORMAL')
            self.stream.write('\n')
# TERM values known to understand ANSI color escape sequences.
_ansi_terms = frozenset([
    'linux', 'console', 'con132x25', 'con132x30', 'con132x43',
    'con132x60', 'con80x25', 'con80x28', 'con80x30', 'con80x43',
    'con80x50', 'con80x60', 'xterm', 'xterm-color', 'color-xterm',
    'vt100', 'vt100-color', 'rxvt', 'ansi', 'Eterm', 'putty',
    'vt220-color', 'cygwin',
    ])
class _test_stream(object):
def __init__(self, stream):
self._stream = stream
self._encoding = getattr(stream, 'encoding', None)
if stream.isatty():
if sys.platform == 'win32':
# assume Windows console (cmd.exe or the like)
self._init_win32()
elif os.name == 'posix' and os.environ.get('TERM') in _ansi_terms:
self._colors = {
'NORMAL': '\033[0m',
'RED': '\033[1;31m',
'GREEN': '\033[1;32m',
'YELLOW': '\033[1;33m',
'WHITE': '\033[1;37m',
}
self.setcolor = self._setcolor_ansi
def _init_win32(self):
import ctypes
import msvcrt
class COORD(ctypes.Structure):
_fields_ = [('x', ctypes.c_short), ('y', ctypes.c_short)]
class SMALL_RECT(ctypes.Structure):
_fields_ = [('left', ctypes.c_short), ('top', ctypes.c_short),
('right', ctypes.c_short), ('bottom', ctypes.c_short),
]
class CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
_fields_ = [('dwSize', COORD),
('dwCursorPosition', COORD),
('wAttributes', ctypes.c_short),
('srWindow', SMALL_RECT),
('dwMaximumSize', COORD),
]
# Apparently there exists an IDE where isatty() is True, but
# the stream doesn't have a backing file descriptor.
try:
fileno = self._stream.fileno()
except AttributeError:
return
try:
self._handle = msvcrt.get_osfhandle(fileno)
except:
return
info = CONSOLE_SCREEN_BUFFER_INFO()
pinfo = ctypes.byref(info)
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(self._handle, pinfo)
self._colors = {
'NORMAL': info.wAttributes,
'RED': 12, # INTENSITY (8) | RED (4)
'GREEN': 10, # INTENSITY (8) | GREEN (2)
'YELLOW': 14, # INTENSITY (8) | GREEN (2) | RED (1)
'WHITE': 15, # INTENSITY (8) | BLUE (1) | GREEN (2) | RED (4)
}
self.setcolor = self._setcolor_win32
def _setcolor_ansi(self, color):
self._stream.write(self._colors[color])
def _setcolor_win32(self, color):
import ctypes
attr = self._colors[color]
ctypes.windll.kernel32.SetConsoleTextAttribute(self._handle, attr)
def __getattr__(self, name):
return getattr(self._stream, attr)
def write(self, data):
if isinstance(data, unicode) and self._encoding is not None:
data = data.encode(self._encoding)
self._stream.write(data)
def setcolor(self, color):
return
class test_runner(object):
    """
    A test runner that displays results in colorized textual form.
    """
    __slots__ = ('stream', 'verbosity', 'simulate')
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream=None, verbosity=1, simulate=False):
        # Wrap the raw stream (default stderr) to gain color support.
        self.stream = _test_stream(stream or sys.stderr)
        self.verbosity = verbosity
        self.simulate = simulate

    def run(self, test):
        """Run `test`, print failure/error details and a summary line, and
        return the populated result object."""
        # Run the tests
        result = _test_result(self.stream, self.verbosity, self.simulate)
        result.startSuite()
        test(result)
        result.stopSuite()
        # Display details for unsuccessful tests
        for items, what, color in [(result.failures, 'FAIL', 'RED'),
                                   (result.errors, 'ERROR', 'WHITE')]:
            for test, traceback in items:
                self.stream.write(self.separator1 + '\n')
                self.stream.setcolor(color)
                description = test.shortDescription() or str(test)
                self.stream.write('%s: %s\n' % (what, description))
                self.stream.setcolor('NORMAL')
                self.stream.write(self.separator2 + '\n')
                self.stream.write(traceback + '\n')
        # Display the summary
        failed = []
        if result.failures:
            failed.append('failures=%d' % len(result.failures))
        if result.errors:
            failed.append('errors=%d' % len(result.errors))
        if failed:
            status = 'FAILED (%s)' % ', '.join(failed)
            color = 'RED'
            self.stream.write(self.separator1 + '\n')
        else:
            status = 'OK'
            color = 'GREEN'
        if self.verbosity > 0:
            self.stream.write(self.separator1 + '\n')
            summary = 'Ran %d tests in %0.3fs' % (result.testsRun,
                                                  result.total_time)
            self.stream.write('%s ... ' % summary)
            self.stream.setcolor(color)
            self.stream.write(status)
            self.stream.setcolor('NORMAL')
            self.stream.write('\n')
        return result
def test_main(*modules):
    """Command-line driver: load tests from `modules` (default: __main__),
    honor -v/-q/-n flags, run them, and exit via SystemExit(0 or 1)."""
    if not modules:
        modules = ('__main__',)
    def load_module(module):
        # Accept either module objects or importable dotted names.
        if not isinstance(module, types.ModuleType):
            module = __import__(module, {}, {}, ['__name__'])
        return module
    def usage_exit(msg=None):
        progName = os.path.basename(sys.argv[0] or __file__)
        if msg: print msg
        print unittest.TestProgram.USAGE % locals()
        raise SystemExit(2)
    # parse args
    import getopt
    verbosity = 1
    simulate = False
    try:
        options, args = getopt.getopt(sys.argv[1:], 'hvqn',
                                      ['help', 'verbose', 'quiet', 'dry-run'])
        for option, value in options:
            if option in ('-h', '--help'):
                usage_exit()
            if option in ('-q', '--quiet'):
                verbosity -= 1
            if option in ('-v', '--verbose'):
                verbosity += 1
            if option in ('-n', '--dry-run'):
                simulate = True
    except getopt.error, msg:
        usage_exit(msg)
    # create the tests
    loader = test_loader()
    if args:
        # Explicit test names given: resolve each name in every module.
        suites = []
        for module in modules:
            if not isinstance(module, types.ModuleType):
                module = __import__(module, {}, {}, ['__name__'])
            suites.append(loader.loadTestsFromNames(args, module))
        test = loader.suiteClass(suites)
    else:
        test = loader.loadTestsFromNames(modules)
    # run the tests
    runner = test_runner(sys.stderr, verbosity, simulate)
    result = runner.run(test)
    raise SystemExit(0 if result.wasSuccessful() else 1)
# Tell nose to ignore this function
test_main.__test__ = False

if __name__ == '__main__':
    test_main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/__init__.py
|
__init__.py
|
from amara.xslt.processor import processor
from amara.lib import inputsource, treecompare
from amara.xpath import util
def _run(source_xml, transform_xml, expected, parameters,
         compare_method, source_uri=None, transform_uri=None,
         processor_kwargs={}):
    """Apply one transform to one source and assert the output matches.

    compare_method(result, expected) must yield differences; an empty
    iterable means the comparison passed.
    NOTE(review): the mutable default processor_kwargs={} is shared across
    calls; safe only while callers never mutate it.
    """
    P = processor(**processor_kwargs)
    source = inputsource(source_xml, source_uri)
    transform = inputsource(transform_xml, transform_uri)
    P.append_transform(transform)
    if parameters is not None:
        parameters = util.parameterize(parameters)
    result = str(P.run(source, parameters=parameters))
    try:
        diff = compare_method(result, expected)
        diff = list(diff)
        assert not diff, (source_xml, transform_xml, result, expected, diff)
    except Exception, err:
        # I don't have a quick way to tell which string caused
        # the error, so let the person debugging figure it out.
        print "=== RESULT ==="
        print result
        print "=== EXPECTED ==="
        print expected
        print "=== DONE ==="
        raise
def _run_html(source_xml, transform_xml, expected, parameters=None,
              source_uri=None, transform_uri=None,
              processor_kwargs={}):
    """Run a transform and compare the result using the HTML tree diff."""
    _run(source_xml, transform_xml, expected, parameters,
         treecompare.html_diff, source_uri, transform_uri,
         processor_kwargs)
def _run_xml(source_xml, transform_xml, expected, parameters=None,
             source_uri=None, transform_uri=None,
             processor_kwargs={}):
    """Run a transform and compare the result using the XML tree diff."""
    _run(source_xml, transform_xml, expected, parameters,
         treecompare.xml_diff, source_uri, transform_uri,
         processor_kwargs)
def _compare_text(s1, s2):
if s1 == s2:
return []
i = 0
for i in range(min(len(s1), len(s2))):
if s1[i] != s2[i]:
prefix = s1[:i]
break
else:
prefix = s1
i += 1
lineno = prefix.count("\n")+1
line_start = prefix.rfind("\n")+1
s1_rest = s1[i:i+20]
s2_rest = s2[i:i+20]
return ["Difference at line %d col %d" % (lineno, i-line_start+1),
"prefix: %r" % (s1[line_start:i],),
"s1 continues: %r" % (s1_rest,),
"s2 continues: %r" % (s2_rest,),]
def _run_text(source_xml, transform_xml, expected, parameters=None,
              source_uri=None, transform_uri=None):
    """Run a transform and compare the result as plain text."""
    _run(source_xml, transform_xml, expected, parameters,
         _compare_text, source_uri, transform_uri)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/xslt_support.py
|
xslt_support.py
|
import os
import gc
import sys
import uuid
import difflib
from amara import Error
from amara.lib import inputsource, iri, irihelpers, treecompare
from amara.xpath import datatypes, util
from amara.xslt import XsltError, transform
from amara.xslt.processor import processor
# Defined to ignore this module in tracebacks
__unittest = True
#from amara.xslt.functions import generate_id_function as xslt_generate_id
#g_idmap = {}
#class generate_id_function(xslt_generate_id):
# """
# Replacement for XSLT's generate-id(). Generates IDs that are
# unique, but not random, for comparisons in the test suites.
# """
# def evaluate(self, context):
# result = xslt_generate_id.evaluate(self, context)
# if result:
# result = g_idmap.setdefault(result, len(g_idmap) + 1)
# result = datatypes.string(u'id%d' % result)
# return result
class _mapping_resolver(irihelpers.resolver):
    """Resolver serving a fixed {uri: content} mapping before falling back
    to the standard resolver behavior."""
    def __init__(self, uris):
        irihelpers.resolver.__init__(self)
        self._uris = uris

    def normalize(self, uriref, baseuri):
        # Known URIs are returned untouched instead of being absolutized.
        if uriref in self._uris:
            return uriref
        return irihelpers.resolver.normalize(self, uriref, baseuri)

    def resolve(self, uri, baseuri=None):
        if uri in self._uris:
            # NOTE(review): cStringIO is not imported in this module as seen
            # here -- this line would raise NameError if reached; confirm.
            return cStringIO.StringIO(self._uris[uri])
        return irihelpers.resolver.resolve(self, uri, baseuri)
def get_mapping_factory():
    """Placeholder hook; currently returns None."""
    return
class testsource(object):
    """
    Encapsulates an inputsource given as a string or URI, so that it can be
    referenced in various ways. Used by XsltTest().
    """
    __slots__ = ('source', 'uri', 'validate', 'xinclude', 'external')

    def __init__(self, source, uri=None, validate=False, xinclude=True,
                 external=False):
        # Pure value holder: store each argument on its matching slot.
        values = (source, uri, validate, xinclude, external)
        for attr, value in zip(self.__slots__, values):
            setattr(self, attr, value)
class filesource(testsource):
    """testsource backed by a file path; relative paths resolve against the
    module that instantiates this class."""
    def __init__(self, path, validate=False, xinclude=True, external=False):
        # Same logic that exists in _4xslt.py
        # The processor only deals with URIs, not file paths
        if not os.path.isabs(path):
            # it is relative to the calling module
            module = sys._getframe(1).f_globals['__name__']
            module = sys.modules[module]
            moduledir = os.path.dirname(os.path.abspath(module.__file__))
            path = os.path.join(moduledir, path)
        assert os.path.exists(path)
        # NOTE(review): `external` is accepted but not forwarded -- confirm intent.
        testsource.__init__(self, path, None, validate, xinclude)
class stringsource(testsource):
    """testsource holding literal content; fabricates a unique file-based URI
    next to the calling module when none is given."""
    def __init__(self, source, uri=None, validate=False, xinclude=True,
                 external=False):
        if not uri:
            # it is relative to the calling module
            module = sys._getframe(1).f_globals['__name__']
            module = sys.modules[module]
            moduledir = os.path.dirname(os.path.abspath(module.__file__))
            path = str(uuid.uuid4())
            uri = iri.os_path_to_uri(os.path.join(moduledir, path))
        # NOTE(review): `external` is accepted but not forwarded -- confirm intent.
        testsource.__init__(self, source, uri, validate, xinclude)
from amara.test import test_case, TestError
class xslt_test(test_case):
    """Base XSLT test case: applies the class-level transform(s) to `source`
    and compares the serialized result against `expected`."""
    source = None        # testsource for the input document
    transform = None     # testsource, sequence of testsources, or None
    expected = None      # expected serialized output string
    parameters = None    # optional top-level stylesheet parameters
    force_method = None  # force a specific comparison method when set

    # Python 2 implicit metaclass: normalize a transform sequence into a
    # tuple at class-creation time (single testsources are left as-is).
    class __metaclass__(type):
        def __init__(cls, name, bases, namespace):
            transform = cls.transform
            if transform and not isinstance(transform, testsource):
                cls.transform = tuple(transform)

    def _format_error(self, error_class, error_code):
        # Render as Class(Class.CONSTANT) when the numeric code maps back
        # to a named constant on the error class.
        if not issubclass(error_class, Error):
            return error_class.__name__
        for name, value in error_class.__dict__.iteritems():
            if value == error_code:
                error_code = error_class.__name__ + '.' + name
                break
        return '%s(%s)' % (error_class.__name__, error_code)

    def setUp(self):
        # Convert the declarative testsource attributes into inputsources.
        self.source = inputsource(self.source.source, self.source.uri)
        if isinstance(self.transform, testsource):
            T = self.transform
            self.transform = [inputsource(T.source, T.uri)]
        elif self.transform:
            self.transform = [ inputsource(T.source, T.uri)
                               for T in self.transform ]
        else:
            self.transform = ()
        return

    def _assert_result(self, result):
        # Pick the diff strategy from the output method (or force_method),
        # then fail with the diff text when anything differs.
        method = (None, self.force_method) if self.force_method else result.parameters.method
        omit_decl = result.parameters.omit_xml_declaration
        expected, compared = self.expected, result
        #print expected, compared
        if method == (None, 'xml') and not omit_decl:
            diff = treecompare.xml_diff(expected, compared)
        elif method == (None, 'html'):
            diff = treecompare.html_diff(expected, compared)
        else:
            # Plain line diff for text (or declaration-less) output.
            expected = expected.splitlines()
            compared = compared.splitlines()
            diff = difflib.unified_diff(expected, compared,
                                        'expected', 'compared',
                                        n=2, lineterm='')
        diff = '\n'.join(diff)
        self.assertFalse(diff, msg=(None, diff))
        gc.collect()

    def test_processor(self):
        """Run the transform chain through an explicit processor instance."""
        P = processor()
        for transform in self.transform:
            P.append_transform(transform)
        parameters = self.parameters
        if parameters:
            parameters = util.parameterize(parameters)
        result = P.run(self.source, parameters=parameters)
        self._assert_result(result)

    #def test_transform(self):
    #    result = transform(self.source, self.transform, params=self.parameters)
    #    self._assert_result(result)
class xslt_error(xslt_test):
    """Variant of xslt_test asserting that running the transform raises
    `error_class` with the expected `error_code`."""
    error_class = XsltError
    error_code = None

    class __metaclass__(xslt_test.__metaclass__):
        def __init__(cls, name, bases, namespace):
            xslt_test.__metaclass__.__init__(cls, name, bases, namespace)
            # Subclasses expecting an amara Error must pin down error_code
            # unless explicitly opted out with __unittest__.
            if (issubclass(cls.error_class, Error)
                and cls.error_code is None
                and 'error_code' not in namespace
                and '__unittest__' not in namespace):
                raise ValueError("class '%s' must define 'error_code'" %
                                 name)

    def test_processor(self):
        """Expect the parent's test_processor to raise self.error_class."""
        try:
            xslt_test.test_processor(self)
        except self.error_class, error:
            expected = self._format_error(self.error_class, self.error_code)
            compared = self._format_error(self.error_class,
                                          getattr(error, 'code', None))
            self.assertEquals(expected, compared)
        else:
            expected = self._format_error(self.error_class, self.error_code)
            self.fail('%s not raised' % expected)
if __name__ == '__main__':
    from amara.test import test_main, test_case, TestError
    import glob
    # Discover every sibling test_*.py module.
    module_dir = os.path.dirname(os.path.abspath(__file__))
    pattern = os.path.join(module_dir, 'test_*.py')
    test_modules = [ os.path.basename(fn)[:-3] for fn in glob.glob(pattern) ]
    # Re-arrange the modules /slightly/ -- move these to the end, in order.
    for name in ('test_basics', 'test_exslt', 'test_borrowed'):
        try:
            test_modules.remove(name)
        except ValueError:
            pass
        else:
            test_modules.append(name)
    # test_borrowed is excluded from the run entirely.
    # NOTE(review): this raises ValueError if test_borrowed was never
    # discovered by the glob above -- confirm that is intended.
    test_modules.remove('test_borrowed')
    test_main(*test_modules)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/__init__.py
|
__init__.py
|
import os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
### dalke - added to make the imports work
def NumberValue(x):
    """Identity stand-in for the XPath number-conversion helper."""
    return x
#Extensions
# Namespace URIs mimicking Oracle's Java-binding XSLT extensions.
ORACLE_JAVA_NS = 'http://www.oracle.com/XSL/Transform/java'
JAVA_COLOR_NS = ORACLE_JAVA_NS + '/java.awt.Color'
JAVA_INTEGER_NS = ORACLE_JAVA_NS + '/java.lang.Integer'
def Java_Color_GetHSBColor(context, hue, saturation, brightness):
    """XSLT extension mirroring java.awt.Color.getHSBColor(): convert HSB
    floats into a packed 0xAARRGGBB value with alpha forced to 0xff.
    Returns a Python 2 long (the 0xff000000L literal is Python-2-only)."""
    hue = NumberValue(hue)
    saturation = NumberValue(saturation)
    brightness = NumberValue(brightness)
    if saturation == 0:
        # Achromatic: every channel equals the brightness.
        r = g = b = int(brightness * 255)
    else:
        r = g = b = 0
        h = (hue - int(hue)) * 6.0  # hue sector 0..5 of the color wheel
        f = h - int(h)              # fractional position within the sector
        p = brightness * (1.0 - saturation)
        q = brightness * (1.0 - saturation * f)
        t = brightness * (1.0 - (saturation * (1.0 - f)))
        h = int(h)
        if h == 0:
            r = int(brightness * 255)
            g = int(t * 255)
            b = int(p * 255)
        elif h == 1:
            r = int(q * 255)
            g = int(brightness * 255)
            b = int(p * 255)
        elif h == 2:
            r = int(p * 255)
            g = int(brightness * 255)
            b = int(t * 255)
        elif h == 3:
            r = int(p * 255)
            g = int(q * 255)
            b = int(brightness * 255)
        elif h == 4:
            r = int(t * 255)
            g = int(p * 255)
            b = int(brightness * 255)
        elif h == 5:
            r = int(brightness * 255)
            g = int(p * 255)
            b = int(q * 255)
    return 0xff000000L | (r << 16) | (g << 8) | (b << 0)
def Java_Color_GetRed(context, color):
    """Emulate java.awt.Color.getRed(): the red byte of an 0xAARRGGBB int."""
    color = NumberValue(color)
    # FIX: int() instead of the Python 2-only long(); on Python 2 int() of a
    # large value promotes to long automatically, so the result is unchanged.
    return (int(color) >> 16) & 0xff
def Java_Color_GetGreen(context, color):
    """Emulate java.awt.Color.getGreen(): the green byte of 0xAARRGGBB."""
    color = NumberValue(color)
    # FIX: int() instead of the Python 2-only long() (same value either way).
    return (int(color) >> 8) & 0xff
def Java_Color_GetBlue(context, color):
    """Emulate java.awt.Color.getBlue(): the blue byte of 0xAARRGGBB."""
    color = NumberValue(color)
    # FIX: int() instead of the Python 2-only long() (same value either way).
    return int(color) & 0xff
def Java_Integer_ToHexString(context, number):
    """Emulate java.lang.Integer.toHexString(): uppercase hex, no prefix."""
    value = NumberValue(number)
    return '%X' % value
# Extension-function table handed to the XSLT processor: maps
# (namespace-uri, local-name) -> Python implementation.
ExtFunctions = {
    (JAVA_COLOR_NS, 'getHSBColor') : Java_Color_GetHSBColor,
    (JAVA_COLOR_NS, 'getRed') : Java_Color_GetRed,
    (JAVA_COLOR_NS, 'getGreen') : Java_Color_GetGreen,
    (JAVA_COLOR_NS, 'getBlue') : Java_Color_GetBlue,
    (JAVA_INTEGER_NS, 'toHexString') : Java_Integer_ToHexString,
    }
class test_xslt_call_template_ed_20010101(xslt_test):
    """Borrowed Oracle XSL demo: render a salary bar chart as SVG via
    xsl:call-template, exercising the java.awt.Color / java.lang.Integer
    extension-function emulations defined above.
    """
    # Input: a simple employee/salary rowset.
    source = stringsource("""<?xml version = '1.0'?>
<ROWSET>
<ROW num="1">
<ENAME>CLARK</ENAME>
<SAL>2450</SAL>
</ROW>
<ROW num="2">
<ENAME>KING</ENAME>
<SAL>3900</SAL>
</ROW>
<ROW num="3">
<ENAME>MILLER</ENAME>
<SAL>1300</SAL>
</ROW>
</ROWSET>
""")
    # Stylesheet: one bar per ROW, coloured via the Color:/Integer: extensions.
    transform = stringsource('''<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:Color="http://www.oracle.com/XSL/Transform/java/java.awt.Color"
xmlns:Integer="http://www.oracle.com/XSL/Transform/java/java.lang.Integer"
exclude-result-prefixes="Color Integer">
<xsl:output media-type="image/svg"/>
<xsl:template match="/">
<svg xml:space="preserve" width="1000" height="1000">
<desc>Salary Chart</desc>
<g style="stroke:#000000;stroke-width:1;font-family:Arial;font-size:16">
<xsl:for-each select="ROWSET/ROW">
<xsl:call-template name="drawBar">
<xsl:with-param name="rowIndex" select="position()"/>
<xsl:with-param name="ename" select="ENAME"/>
<xsl:with-param name="sal" select="number(SAL)"/>
</xsl:call-template>
</xsl:for-each>
</g>
</svg>
</xsl:template>
<xsl:template name="drawBar">
<xsl:param name="rowIndex" select="number(0)"/>
<xsl:param name="ename"/>
<xsl:param name="sal" select="number(0)"/>
<xsl:variable name="xOffset" select="number(100)"/>
<xsl:variable name="yOffset" select="number(20)"/>
<xsl:variable name="barHeight" select="number(25)"/>
<xsl:variable name="gap" select="number(10)"/>
<xsl:variable name="x" select="$xOffset"/>
<xsl:variable name="y" select="$yOffset + $rowIndex * ($barHeight + $gap)"/>
<xsl:variable name="barWidth" select="$sal div number(10)"/>
<rect x="{$x}" y="{$y}" height="{$barHeight}" width="{$barWidth}">
<xsl:attribute name="style">
<xsl:text>fill:#</xsl:text>
<xsl:call-template name="getCoolColorStr" xml:space="default">
<xsl:with-param name="colorIndex" select="$rowIndex"/>
<xsl:with-param name="totalColors" select="number(14)"/>
</xsl:call-template>
<xsl:text> </xsl:text>
</xsl:attribute>
</rect>
<xsl:variable name="fontHeight" select="number(18)"/>
<text x="20" y="{$y + $fontHeight}">
<xsl:value-of select="$ename"/>
</text>
<xsl:variable name="x2" select="$xOffset + $barWidth + 10"/>
<text x="{$x2}" y="{$y + $fontHeight}">
<xsl:value-of select="$sal"/>
</text>
</xsl:template>
<xsl:template name="getCoolColorStr">
<xsl:param name="colorIndex"/>
<xsl:param name="totalColors"/>
<xsl:variable name="SATURATION" select="number(0.6)"/>
<xsl:variable name="BRIGHTNESS" select="number(0.9)"/>
<xsl:variable name="hue" select="$colorIndex div $totalColors"/>
<xsl:variable name="c" select="Color:getHSBColor($hue, $SATURATION, $BRIGHTNESS)"/>
<xsl:variable name="r" select="Color:getRed($c)"/>
<xsl:variable name="g" select="Color:getGreen($c)"/>
<xsl:variable name="b" select="Color:getBlue($c)"/>
<xsl:variable name="rs" select="Integer:toHexString($r)"/>
<xsl:variable name="gs" select="Integer:toHexString($g)"/>
<xsl:variable name="bs" select="Integer:toHexString($b)"/>
<xsl:if test="$r &lt; 16">0</xsl:if><xsl:value-of select="$rs"/>
<xsl:if test="$g &lt; 16">0</xsl:if><xsl:value-of select="$gs"/>
<xsl:if test="$b &lt; 16">0</xsl:if><xsl:value-of select="$bs"/>
</xsl:template>
</xsl:stylesheet>
''')
    parameters = {}
    expected = """<?xml version='1.0' encoding='UTF-8'?>
<svg height='1000' xml:space='preserve' width='1000'>
<desc>Salary Chart</desc>
<g style='stroke:#000000;stroke-width:1;font-family:Arial;font-size:16'>
<rect height='25' x='100' style='fill:#E5965B ' width='245' y='55'/><text x='20' y='73'>CLARK</text><text x='355' y='73'>2450</text>
<rect height='25' x='100' style='fill:#E5D15B ' width='390' y='90'/><text x='20' y='108'>KING</text><text x='500' y='108'>3900</text>
<rect height='25' x='100' style='fill:#BEE55B ' width='130' y='125'/><text x='20' y='143'>MILLER</text><text x='240' y='143'>1300</text>
</g>
</svg>"""
    # NOTE(review): the test body below is legacy 4Suite harness code kept
    # commented out until this test is ported to the new framework.
#    def test_transform(self):
#        import sys
#        from amara.xslt import transform
#
#        result = transform(self.source, self.transform, output=io)
#
#        #FIXME: the numerics break under Python 2.3
#        test_harness.XsltTest(tester, source, [sheet], expected_1,
#                              extensionModules=[__name__])
#
#        self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
#
#        return

# Hide the test framework from nose
del xslt_test

if __name__ == '__main__':
    test_main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/sm_20000304.py
|
sm_20000304.py
|
import os
from Xml.Xslt import test_harness
from Ft.Lib import Uri
from Ft.Xml.InputSource import DefaultFactory
from Ft.Xml.Lib import TreeCompare
from Ft.Xml.Xslt import Processor, Error
# Base URI used so the relative stylesheet hrefs in the PIs resolve next to
# this test module.
uri = Uri.OsPathToUri(os.path.abspath(__file__))

# Test table; each entry is
#   (title, source, expected[, exception-code[, media-preference]])
# where expected is None when stylesheet selection is expected to fail.
tests = []

title = 'PI after prolog'
source = """<?xml version="1.0" encoding="utf-8"?><dummy/><?xml-stylesheet href="mb_20030915.xslt"?>"""
result = None
tests.append((title, source, result, Error.NO_STYLESHEET))

title = 'PI with no type'
source = """<?xml version="1.0" encoding="utf-8"?><?xml-stylesheet href="mb_20030915.xslt"?><dummy/>"""
result = None
tests.append((title, source, result, Error.NO_STYLESHEET))

title = 'PI with type="text/xsl"'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="mb_20030915.xslt"?>
<dummy/>"""
result = None
tests.append((title, source, result, Error.NO_STYLESHEET))

title = 'PI with type="application/xslt+xml"'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915.xslt"?>
<dummy/>"""
result = """<dummy/>"""
tests.append((title, source, result))

title = 'import order when 2 PIs (1)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915.xslt"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915a.xslt"?>
<dummy/>"""
result = """<a><dummy/></a>"""
tests.append((title, source, result))

title = 'import order when 2 PIs (2)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915a.xslt"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915.xslt"?>
<dummy/>"""
result = """<dummy/>"""
tests.append((title, source, result))

title = '2 alt PIs only; no media; different types (1)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet alternate="yes" type="application/xslt+xml" href="mb_20030915a.xslt"?>
<?xml-stylesheet alternate="yes" type="application/xml" href="mb_20030915.xslt"?>
<dummy/>"""
# type differences are ignored; both are considered to be at the same level
# since both are alternate="yes" we just use first one
result = """<a><dummy/></a>"""
tests.append((title, source, result))

title = '2 alt PIs only; no media; different types (2)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet alternate="yes" type="application/xml" href="mb_20030915.xslt"?>
<?xml-stylesheet alternate="yes" type="application/xslt+xml" href="mb_20030915a.xslt"?>
<dummy/>"""
# type differences are ignored; both are considered to be at the same level
# since both are alternate="yes" we just use first one
result = """<dummy/>"""
tests.append((title, source, result))

title = '1 PI + 1 alt PI; no media; same type'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915.xslt"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915a.xslt" alternate="yes"?>
<dummy/>"""
result = """<dummy/>""" # the non-alternate one should be selected
tests.append((title, source, result))

title = '1 PI + 1 alt PI; no media; different types (1)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="application/xml" href="mb_20030915.xslt"?>
<?xml-stylesheet alternate="yes" type="application/xslt+xml" href="mb_20030915a.xslt"?>
<dummy/>"""
# type differences are ignored; both are considered to be at the same level
# but we give preference to the one that's not alternate="yes"
result = """<dummy/>"""
tests.append((title, source, result))

title = '1 PI + 1 alt PI; no media; different types (2)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet alternate="yes" type="application/xslt+xml" href="mb_20030915a.xslt"?>
<?xml-stylesheet type="application/xml" href="mb_20030915.xslt"?>
<dummy/>"""
# type differences are ignored; both are considered to be at the same level
# but we give preference to the one that's not alternate="yes"
result = """<dummy/>"""
tests.append((title, source, result))

title = '1 PI + 1 alt PI; no media; different types (3)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type="text/xsl" href="mb_20030915.xslt"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915a.xslt" alternate="yes"?>
<dummy/>"""
result = """<a><dummy/></a>""" # because text/xsl will be ignored
tests.append((title, source, result))

title = '1 PI + 2 alt PIs; different media; no preference'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet media="screen" type="application/xslt+xml" href="mb_20030915a.xslt" alternate="yes"?>
<?xml-stylesheet media="mobile" type="application/xslt+xml" href="mb_20030915b.xslt" alternate="yes"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915.xslt"?>
<dummy/>"""
result = """<dummy/>""" # the one with no media should be selected
tests.append((title, source, result))

title = '1 PI + 2 alt PIs; different media; preference (1)'
source = """<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet media="screen" type="application/xslt+xml" href="mb_20030915a.xslt" alternate="yes"?>
<?xml-stylesheet media="mobile" type="application/xslt+xml" href="mb_20030915b.xslt" alternate="yes"?>
<?xml-stylesheet type="application/xslt+xml" href="mb_20030915.xslt"?>
<dummy/>"""
result = """<b><dummy/></b>""" # the one with the matching preference should be selected
media_pref = 'mobile'
tests.append((title, source, result, None, media_pref))

title = '1 PI + 2 alt PIs; different media; preference (2)'
# NOTE: reuses the previous `source` on purpose; only the media pref changes.
result = """<a><dummy/></a>""" # the one with the matching preference should be selected
media_pref = 'screen'
tests.append((title, source, result, None, media_pref))
def Test(tester):
    """Drive every entry of the module-level `tests` table through the
    xml-stylesheet PI selection machinery."""
    tester.startGroup('pick stylesheet from xml-stylesheet PIs')
    for entry in tests:
        case_title, case_source, case_expected = entry[:3]
        errcode = None
        media = None
        if len(entry) > 3:
            errcode = entry[3]
        if len(entry) > 4:
            media = entry[4]
        expected = case_expected or ''
        source = test_harness.FileInfo(string=case_source, baseUri=uri)
        if media:
            # A media preference requires driving a Processor by hand.
            proc = Processor.Processor()
            proc.mediaPref = media
            tester.startTest(case_title)
            isrc = DefaultFactory.fromString(case_source, uri)
            result = proc.run(isrc, ignorePis=0)
            tester.compare(case_expected, result, func=TreeCompare.TreeCompare)
            tester.testDone()
            del proc, isrc, result
        else:
            test_harness.XsltTest(tester, source, [], expected,
                                  exceptionCode=errcode,
                                  title=case_title, ignorePis=0)
    tester.groupDone()
    return
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/mb_20030915.py
|
mb_20030915.py
|
import os, re
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
#from Ft.Lib.Uri import OsPathToUri
# The name of the environment variable that will indicate
# the location of DocBook XSL docbook-xsl-#.#.# directory
KEYNAME = 'DOCBOOK_HOME'
MIN_VERSION = '1.64.0'
# NOTE: there is a '%s' for the actual version of docbook used
#
# docbook-xsl-1.68.0 and up
expected_1_1680 = """\
<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-Type"><title></title><meta content="DocBook XSL Stylesheets V%s" name="generator"></head><body alink="#0000FF" bgcolor="white" vlink="#840084" link="#0000FF" text="black"><div lang="en" class="book"><div class="titlepage"><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#id3">1. Chapter</a></span></dt><dd><dl><dt><span class="sect1"><a href="#id2">Sect1</a></span></dt><dd><dl><dt><span class="sect2"><a href="#id1">Sect2</a></span></dt></dl></dd></dl></dd></dl></div><div lang="en" class="chapter"><div class="titlepage"><div><div><h2 class="title"><a name="id3"></a>Chapter 1. Chapter</h2></div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="sect1"><a href="#id2">Sect1</a></span></dt><dd><dl><dt><span class="sect2"><a href="#id1">Sect2</a></span></dt></dl></dd></dl></div><div lang="en" class="sect1"><div class="titlepage"><div><div><h2 style="clear: both" class="title"><a name="id2"></a>Sect1</h2></div></div></div><div lang="en" class="sect2"><div class="titlepage"><div><div><h3 class="title"><a name="id1"></a>Sect2</h3></div></div></div></div></div></div></div></body></html>"""
# docbook-xsl-1.64.0(?) and up;
expected_1_640 = """\
<html><head><meta content="text/html; charset=ISO-8859-1" http-equiv="Content-Type"><title></title><meta content="DocBook XSL Stylesheets V%s" name="generator"></head><body alink="#0000FF" bgcolor="white" vlink="#840084" link="#0000FF" text="black"><div lang="en" class="book"><div class="titlepage"><div></div><div></div><hr></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="chapter"><a href="#id3">1. Chapter</a></span></dt><dd><dl><dt><span class="sect1"><a href="#id2">Sect1</a></span></dt><dd><dl><dt><span class="sect2"><a href="#id1">Sect2</a></span></dt></dl></dd></dl></dd></dl></div><div lang="en" class="chapter"><div class="titlepage"><div><div><h2 class="title"><a name="id3"></a>Chapter 1. Chapter</h2></div></div><div></div></div><div class="toc"><p><b>Table of Contents</b></p><dl><dt><span class="sect1"><a href="#id2">Sect1</a></span></dt><dd><dl><dt><span class="sect2"><a href="#id1">Sect2</a></span></dt></dl></dd></dl></div><div lang="en" class="sect1"><div class="titlepage"><div><div><h2 style="clear: both" class="title"><a name="id2"></a>Sect1</h2></div></div><div></div></div><div lang="en" class="sect2"><div class="titlepage"><div><div><h3 class="title"><a name="id1"></a>Sect2</h3></div></div><div></div></div></div></div></div></div></body></html>"""
class test_xslt_docbook_ss_20010301(xslt_test):
    """Basic DocBook XSL processing test.

    NOTE(review): the actual transform is commented out below (legacy 4Suite
    harness code); as written this case runs with empty transform/expected.
    """
    source = stringsource("""\
<book>
<chapter>
<title>Chapter</title>
<sect1>
<title>Sect1</title>
<sect2>
<title>Sect2</title>
</sect2>
</sect1>
</chapter>
</book>
""")
    transform = ""
    parameters = {}
    expected = ""
    # Legacy harness code, kept for the eventual port:
#    def test_transform(self):
#        import sys
#        from amara.xslt import transform
#
#        dirs = ["/usr/share/sgml/docbook/xsl-stylesheets", #default for docbook-style-xsl RPM
#                "/usr/local/share/xsl/docbook", #default for FreeBSD textproc/docbook-xsl port
#                ]
#
#        DOCBOOK_DIR = None
#        for dir in dirs:
#            if os.path.isdir(dir):
#                DOCBOOK_DIR = dir
#                break
#
#        DOCBOOK_DIR = os.environ.get(KEYNAME, DOCBOOK_DIR)
#        if not DOCBOOK_DIR:
#            tester.warning(
#                "You need Norm Walsh's DocBook XSL stylesheet package for this test.\n"
#                "You can either ignore this, or you can install the DocBook XSL\n"
#                "stylesheets and re-run this test.  Get the docbook-xsl package from\n"
#                "http://sourceforge.net/project/showfiles.php?group_id=21935\n"
#                "Install it in anywhere, and then before running this test, set\n"
#                "the environment variable %r to the absolute path\n"
#                "of the docbook-xsl-#.#.# directory on your filesystem.\n"
#                % KEYNAME)
#            tester.testDone()
#            return
#
#        if not os.path.isdir(DOCBOOK_DIR):
#            tester.warning("Unable to find DocBook stylesheet directory %r" % DOCBOOK_DIR)
#            tester.testDone()
#            return
#
#        VERSION_FILE = os.path.join(DOCBOOK_DIR, 'VERSION')
#        if os.path.isfile(VERSION_FILE):
#            VERSION = open(VERSION_FILE).read()
#            match = re.search(r'>\s*(\d[.0-9]+)\s*<', VERSION)
#            if not match:
#                tester.warning("Unable to determine version of DocBook stylesheets\n"
#                               "Format of %r unrecognized." % VERSION_FILE)
#                tester.testDone()
#                return
#            version = match.group(1)
#            if version <= MIN_VERSION:
#                tester.warning("DocBook XSL version %s or higher needed;"
#                               " version %s found." % (MIN_VERSION, version))
#                tester.testDone()
#                return
#        else:
#            tester.warning("Unable to determine version of DocBook stylesheets\n"
#                           "Was looking for file %r." % VERSION_FILE)
#            tester.testDone()
#            return
#
#        STYLESHEET = os.path.join(DOCBOOK_DIR, 'html', 'docbook.xsl')
#        if not os.path.isfile(STYLESHEET):
#            tester.warning("Unable to find DocBook stylesheet %r" % STYLESHEET)
#            tester.testDone()
#            return
#
#        STYLESHEET_URI = OsPathToUri(STYLESHEET)
#        tester.testDone()
#
#        source = test_harness.FileInfo(string=source_1)
#        if version >= '1.68':
#            expected = expected_1_1680 % version
#        else:
#            # NOTE(review): 'expected_1' is probably meant to be
#            # 'expected_1_640' -- confirm before reviving this code.
#            expected = expected_1 % version
#        sheet = test_harness.FileInfo(uri=STYLESHEET_URI)
#        test_harness.XsltTest(tester, source, [sheet], expected,
#                              title="Basic DocBook XSL processing")
#
#
#        result = transform(self.source, self.transform, output=io)
#        self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
#        return

if __name__ == '__main__':
    test_main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/ss_20010301.py
|
ss_20010301.py
|
import os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
class test_xslt_row_num_km_20000207(xslt_test):
    """Michael Kay's recursive named-template matrix transpose.

    The 10x10 input matrix is generated programmatically in test_transform;
    the stylesheet transposes it row-by-row via recursive xsl:call-template.
    """
    source = ""
    transform = stringsource("""<xsl:transform
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0"
>
<xsl:output method='html'/>
<xsl:template match="/">
<table>
<xsl:apply-templates/>
</table>
</xsl:template>
<xsl:template match="table">
<xsl:call-template name="one-row">
<xsl:with-param name="row-num" select="1"/>
</xsl:call-template>
</xsl:template>
<!-- From Michael Kay -->
<xsl:template name="one-row">
<xsl:param name="row-num" select="1"/>
<tr>
<xsl:for-each select="row">
<td><xsl:value-of select="*[$row-num]"/></td>
</xsl:for-each>
</tr>
<xsl:if test="row/*[$row-num+1]">
<xsl:call-template name="one-row">
<xsl:with-param name="row-num" select="$row-num+1"/>
</xsl:call-template>
</xsl:if>
</xsl:template>
<!-- END From Michael Kay -->
</xsl:transform>""")
    parameters = {}
    # Transposed matrix: cell (r, c) of the output holds r + 10*c.
    expected = """<table>
<tr>
<td>1</td>
<td>11</td>
<td>21</td>
<td>31</td>
<td>41</td>
<td>51</td>
<td>61</td>
<td>71</td>
<td>81</td>
<td>91</td>
</tr>
<tr>
<td>2</td>
<td>12</td>
<td>22</td>
<td>32</td>
<td>42</td>
<td>52</td>
<td>62</td>
<td>72</td>
<td>82</td>
<td>92</td>
</tr>
<tr>
<td>3</td>
<td>13</td>
<td>23</td>
<td>33</td>
<td>43</td>
<td>53</td>
<td>63</td>
<td>73</td>
<td>83</td>
<td>93</td>
</tr>
<tr>
<td>4</td>
<td>14</td>
<td>24</td>
<td>34</td>
<td>44</td>
<td>54</td>
<td>64</td>
<td>74</td>
<td>84</td>
<td>94</td>
</tr>
<tr>
<td>5</td>
<td>15</td>
<td>25</td>
<td>35</td>
<td>45</td>
<td>55</td>
<td>65</td>
<td>75</td>
<td>85</td>
<td>95</td>
</tr>
<tr>
<td>6</td>
<td>16</td>
<td>26</td>
<td>36</td>
<td>46</td>
<td>56</td>
<td>66</td>
<td>76</td>
<td>86</td>
<td>96</td>
</tr>
<tr>
<td>7</td>
<td>17</td>
<td>27</td>
<td>37</td>
<td>47</td>
<td>57</td>
<td>67</td>
<td>77</td>
<td>87</td>
<td>97</td>
</tr>
<tr>
<td>8</td>
<td>18</td>
<td>28</td>
<td>38</td>
<td>48</td>
<td>58</td>
<td>68</td>
<td>78</td>
<td>88</td>
<td>98</td>
</tr>
<tr>
<td>9</td>
<td>19</td>
<td>29</td>
<td>39</td>
<td>49</td>
<td>59</td>
<td>69</td>
<td>79</td>
<td>89</td>
<td>99</td>
</tr>
<tr>
<td>10</td>
<td>20</td>
<td>30</td>
<td>40</td>
<td>50</td>
<td>60</td>
<td>70</td>
<td>80</td>
<td>90</td>
<td>100</td>
</tr>
</table>"""

    def test_transform(self):
        """Build the 10x10 matrix document, transform it, compare output."""
        SIZE = 10
        import sys
        from amara.xslt import transform
        #Create the matrix to be transposed
        from Ft.Xml.Domlette import implementation
        doc = implementation.createDocument(None, 'table', None)
        counter = 1
        for row in range(SIZE):
            row_elem = doc.createElementNS(None, 'row')
            doc.documentElement.appendChild(row_elem)
            for col in range(SIZE):
                col_elem = doc.createElementNS(None, 'column')
                row_elem.appendChild(col_elem)
                content = doc.createTextNode(str(counter))
                col_elem.appendChild(content)
                counter = counter + 1
        stream = cStringIO.StringIO()
        from Ft.Xml.Domlette import Print
        Print(doc, stream)
        self.source = stringsource(stream.getvalue())
        # BUG FIX: `io` was never defined in this method (it would raise
        # NameError at runtime); capture the transform output in a fresh
        # buffer, exactly as the sibling borrowed tests do.
        io = cStringIO.StringIO()
        result = transform(self.source, self.transform, output=io)
        self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
        return

if __name__ == '__main__':
    test_main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/km_20000207.py
|
km_20000207.py
|
import os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
# Stylesheet shared by both encoding tests below: emits the Title and the
# first Chapter twice as list items.
commontransform = stringsource("""<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0"
>
<xsl:output encoding="UTF-8"/>
<xsl:template match="Book">
<html>
<head>
<title><xsl:value-of select="Title" /></title>
</head>
<body>
<ul>
<li><xsl:value-of select="Chapter" /></li>
<li><xsl:value-of select="Chapter" /></li>
</ul>
</body>
</html>
</xsl:template>
</xsl:stylesheet>""")

# Shared expected output; \012 escapes are newlines, \3xx escapes are the
# UTF-8 bytes of the Chinese title/chapter text.
commonexpected = """<html>\012 <head>\012 <META HTTP-EQUIV='Content-Type' CONTENT='text/html; charset=UTF-8'>\012 <title>\344\270\200\346\234\254\346\233\270</title>\012 </head>\012 <body>\012 <ul>\012 <li>\012 \351\200\231\346\230\257\347\254\254\344\270\200\347\253\240. \345\271\263\012 </li>\012 <li>\012 \351\200\231\346\230\257\347\254\254\344\270\200\347\253\240. \345\271\263\012 </li>\012 </ul>\012 </body>\012</html>"""
class test_xslt_text_output_1_fc_20001125(xslt_test):
    """Transcode a Big5-encoded source document to UTF-8, then transform.

    NOTE(review): test_transform still references the old 4Suite harness's
    global `tester`, which is not defined in this module -- running this
    method as-is would raise NameError.  Kept verbatim pending a port.
    """
    # Big5-encoded bytes; they display as mojibake in a Latin-1 editor.
    source = stringsource("""<?xml version="1.0"?>
<?xml-stylesheet href="book.html.xsl" type="text/xsl"?>
<!DOCTYPE Book [
<!ELEMENT Book (Title, Chapter+)>
<!ATTLIST Book Author CDATA #REQUIRED>
<!ELEMENT Title (#PCDATA)>
<!ELEMENT Chapter (#PCDATA)>
<!ATTLIST Chapter id CDATA #REQUIRED>
]>
<Book Author="³¯«Ø¾±">
<Title>¤@¥»®Ñ</Title>
<Chapter id="1">
³o¬O²Ä¤@³¹. 平
</Chapter>
<Chapter id="2">
³o¬O²Ä¤G³¹. 平
</Chapter>
</Book>""")
    transform = commontransform
    parameters = {}
    expected = commonexpected

    def test_transform(self):
        # NOTE(review): `tester` is undefined here (legacy harness global).
        tester.startTest("Checking for BIG5 codec")
        try:
            import codecs
            big5_decoder = codecs.getdecoder('big5')
        except LookupError:
            try:
                from encodings import big5
            except ImportError:
                tester.warning(
                    "No BIG5 encoding support for case 1.  You can install \n"
                    "BIG5 by downloading and installing ChineseCodes from\n"
                    "ftp://python-codecs.sourceforge.net/pub/python-codecs/")
                tester.testDone()
                return
            else:
                big5_decode = big5.decode
        else:
            big5_decode = lambda s: big5_decoder(s)[0]
        tester.testDone()
        # NOTE(review): self.source is a stringsource object, not a byte
        # string -- confirm big5_decode accepts it before reviving this test.
        b5 = big5_decode(self.source)
        utf8 = b5.encode("utf-8")
        from amara.xslt import transform
        io = cStringIO.StringIO()
        result = transform(utf8, self.transform, output=io)
        self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
        return
class test_xslt_text_output_2_fc_20001125(xslt_test):
    """Same transform as case 1, but the source is already UTF-8 encoded
    (\\012 escapes are newlines, \\3xx escapes the UTF-8 bytes)."""
    source = stringsource("""<?xml version="1.0"?>\012<?xml-stylesheet href="book.html.xsl" type="text/xsl"?>\012<!DOCTYPE Book [\012<!ELEMENT Book (Title, Chapter+)>\012<!ATTLIST Book Author CDATA #REQUIRED>\012<!ELEMENT Title (#PCDATA)>\012<!ELEMENT Chapter (#PCDATA)>\012<!ATTLIST Chapter id CDATA #REQUIRED>\012]>\012\012<Book Author="\351\231\263\345\273\272\345\213\263">\012 <Title>\344\270\200\346\234\254\346\233\270</Title>\012 <Chapter id="1">\012 \351\200\231\346\230\257\347\254\254\344\270\200\347\253\240. 平\012 </Chapter>\012 <Chapter id="2">\012 \351\200\231\346\230\257\347\254\254\344\272\214\347\253\240. 平\012 </Chapter>\012</Book>""")
    transform = commontransform
    parameters = {}
    expected = commonexpected

if __name__ == '__main__':
    test_main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/fc_20001125.py
|
fc_20001125.py
|
import tempfile, os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
from amara.lib.iri import os_path_to_uri
# Scratch file observed across the three chained test classes below.
# NOTE(review): tempfile.mktemp() is race-prone/insecure; mkstemp() would be
# safer but pre-creates the file, which would change what these tests check.
fname = tempfile.mktemp()
furi = os_path_to_uri(fname)
commonsource = "<dummy/>"
# Two %s slots: the exsl:document target URL and the indent flag ("yes"/"no").
commontransform = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common"
extension-element-prefixes="exsl"
exclude-result-prefixes="exsl">
<xsl:output method="html" indent="no"/>
<xsl:param name="URL" select="'%s'"/>
<xsl:template match="/">
<exsl:document href="{$URL}"
method ="html"
version ="-//W3C//DTD XHTML 1.1//EN"
doctype-public="http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd"
indent="%s">
<html>
<head>
<title>test</title>
</head>
<body>
<p>hello world</p>
</body>
</html>
</exsl:document>
</xsl:template>
</xsl:stylesheet>"""
class test_xslt_exsl_document_jk_20050406(xslt_test):
    """First of three chained exsl:document tests: write unindented HTML to
    the shared temp file for the following classes to inspect."""
    source = commonsource
    transform = ""
    parameters = {}
    expected = ""

    def test_transform(self):
        # Preliminary, populate file with unindented HTML
        from amara.xslt import transform
        io = cStringIO.StringIO()
        self.transform = stringsource(commontransform % (furi, "no"))
        result = transform(self.source, self.transform, output=io)
        self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
        # FIX: close the handle explicitly instead of relying on refcounting
        # to flush and close the anonymous object from open(fname, 'w').
        f = open(fname, 'w')
        try:
            f.write(io.getvalue())
        finally:
            f.close()
class test_xslt_exsl_document_and_no_indent_2_jk_20050406(xslt_test):
    """Second chained test: verify the unindented file written by the first
    test, then rewrite it with indented HTML for the third test."""
    source = commonsource
    transform = commontransform
    parameters = {}
    expected = '<!DOCTYPE html PUBLIC "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n<html><head><meta content="text/html; charset=iso-8859-1" http-equiv="Content-Type"><title>test</title></head><body><p>hello world</p></body></html>'

    def test_transform(self):
        # Subsequent, read file and compare
        from amara.xslt import transform
        self.assert_(os.path.exists(fname))
        # FIX: renamed local `file` -> `fh` to stop shadowing the builtin.
        fh = open(fname, 'r')
        fcontent = fh.read()
        fh.close()
        self.assert_(treecompare.html_compare(self.expected, fcontent))
        os.unlink(fname)
        # Re-populate file, with indented HTML
        io = cStringIO.StringIO()
        self.transform = commontransform % (furi, "yes")
        result = transform(stringsource(self.source), stringsource(self.transform), output=io)
        # FIX: close the write handle explicitly instead of leaking the
        # anonymous object from open(fname, 'w').
        out = open(fname, 'w')
        try:
            out.write(io.getvalue())
        finally:
            out.close()
        self.assert_(treecompare.html_compare(self.expected, io.getvalue()))
class test_xslt_exsl_document_with_indent_jk_20050406(xslt_test):
    """Third chained test: check the indented HTML written by the second
    test, then remove the shared temp file."""
    source = commonsource
    transform = commontransform
    parameters = {}
    expected = '<!DOCTYPE html PUBLIC "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">\n<html>\n  <head>\n    <meta content="text/html; charset=iso-8859-1" http-equiv="Content-Type">\n    <title>test</title>\n  </head>\n  <body>\n    <p>hello world</p>\n  </body>\n</html>'

    def test_transform(self):
        # Subsequent, read file and compare
        self.assertEquals(True, os.path.exists(fname))
        # FIX: renamed local `file` -> `fh` to stop shadowing the builtin.
        fh = open(fname, 'r')
        fcontent = fh.read()
        fh.close()
        self.assert_(treecompare.html_compare(self.expected, fcontent))
        os.unlink(fname)

if __name__ == '__main__':
    test_main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/jk_20050406.py
|
jk_20050406.py
|
slides.xsl is a creation of Elliotte Rusty Harold, as explained in the message
below.
Note that one of the links in the message is wrong.
"http://metalab.unc.edu/xml/slides/xmlsig0899/xml.xll"
should read
"http://metalab.unc.edu/xml/slides/xmlsig0899/xll.xml"
===========
From: Elliotte Rusty Harold [mailto:[email protected]]
Sent: Wednesday, August 25, 1999 9:06 AM
To: [email protected]
Subject: PowerPoint is dead. Long live XML!
Last night I gave a talk on XLinks and XPointers to the XML SIG of the
Object Developers' Group. The slides are available here:
http://metalab.unc.edu/xml/slides/xmlsig0899/
This was adapted from Chapters 16 and 17 of The XML Bible.
http://metalab.unc.edu/xml/books/bible/updates/16.html
http://metalab.unc.edu/xml/books/bible/updates/17.html
These chapters were originally written in Microsoft Word, then
saved as HTML. Some hand editing had to be done to fix Word's
excessively presentational approach to HTML.
For this talk, I started with the HTMLized version of
those chapters as posted at the above URLs. I added various XML
markup to split them into individual slides, bullet points, and
examples while simultaneously cutting them down to a size
appropriate for a presentation. I also had to clean up the
original HTML so it would be well-formed XML. An XSL style sheet
and James Clark's XT were used to generate the actual slides in
HTML. The presentation itself was delivered from a Web browser
reading the HTML files off the local hard disk. It would have
been equally easy to do it straight from the Web.
If you're curious you can see the original XML document at
http://metalab.unc.edu/xml/slides/xmlsig0899/xml.xll and the
stylesheet I used at
http://metalab.unc.edu/xml/slides/xmlsig0899/slides.xsl
I'm not proposing this as a general tag set for presentations, though.
It's mostly just a neat hack that allowed me to prepare this one
presentation a lot more easily than would otherwise have been
the case.
The key developments that made this possible were the HTML
output method in the latest draft of XSL (so I didn't have to
worry about whether browsers could understand constructs like
<br/> or <hr></hr>) and the xt:document extension function (so I
could put each slide element in the input document into a
separate file). This also made it very straight-forward to
generate differently styled versions of the presentation for
printing transparencies, reading directly on the Web, projecting
onto a wall, and speaker's notes. For example, the online
version simply uses the browser's default fonts. However, the
versions designed for projecting onto a wall use 16-point body
text and bold monospaced fonts so they can more easily be read
from the back of the room. The print versions don't include
navigation links. The onscreen versions do.
With some additional work I think I can probably generate both
the book chapter and the slides from one XML document. The
speaker's notes already include a lot more text than what the
audience sees. I just need to mark certain parts "book only" or
"slides only", possibly using modes. I think I'm going to do all
my presentations this way in the future. PowerPoint is dead.
Long live XML!
+-----------------------+------------------------+-------------------+
| Elliotte Rusty Harold | [email protected] | Writer/Programmer |
+-----------------------+------------------------+-------------------+
| Java I/O (O'Reilly & Associates, 1999) |
| http://metalab.unc.edu/javafaq/books/javaio/ |
| http://www.amazon.com/exec/obidos/ISBN=1565924851/cafeaulaitA/ |
+----------------------------------+---------------------------------+
| Read Cafe au Lait for Java News: http://metalab.unc.edu/javafaq/ |
| Read Cafe con Leche for XML News: http://metalab.unc.edu/xml/ |
+----------------------------------+---------------------------------+
XSL-List info and archive: http://www.mulberrytech.com/xsl/xsl-list
XSL-List info and archive: http://www.mulberrytech.com/xsl/xsl-list
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xslt/borrowed/resources/README.slides.xsl
|
README.slides.xsl
|
from amara.test import test_case
from amara.xpath import datatypes
def _argstr(arg):
    """Render one test argument (or tuple/list of them) for a docstring."""
    # Deliberately an exact type check (not isinstance): subclasses of
    # tuple/list must not be unpacked here.
    if type(arg) in (tuple, list):
        return '(%s)' % ', '.join(_argstr(item) for item in arg)
    return unicode(arg)
class base_metaclass(type):
    """Metaclass that expands a declarative `test_cases` table into
    individual test_* methods on the class being constructed."""
    # The name of module where the class to be tested is defined
    module_name = None
    # The name of the class to be tested
    class_name = None
    # The return type of the expression class' evaluate method
    return_type = None
    # list of test cases to add; item is a 2-item tuple of (args, expected)
    test_cases = ()

    def __init__(cls, name, bases, namespace):
        # load the expression factory
        if cls.module_name and cls.class_name:
            module = __import__(cls.module_name, {}, {}, [cls.class_name])
            factory = getattr(module, cls.class_name)
            # create the test methods
            # zero-pad the method index so names sort in definition order
            digits = len(str(len(cls.test_cases)))
            for count, test in enumerate(cls.test_cases):
                args, expected, extra = cls.unpack_tst_case(*test)
                if cls.return_type is not None:
                    # coerce the expected value into the declared return type
                    if not isinstance(expected, cls.return_type):
                        expected = cls.return_type(expected)
                test_method = cls.new_tst_method(expected, factory, args, *extra)
                # build the docstring
                test_method.__doc__ = cls.class_name + _argstr(args)
                method_name = 'test_%s_%0*d' % (cls.class_name, digits, count)
                setattr(cls, method_name, test_method)

    def unpack_tst_case(cls, args, expected, *extras):
        """Split one `test_cases` row into (args, expected, extras)."""
        return args, expected, extras

    def new_tst_method(cls, expected, factory, args, *extras):
        """Build one test function; concrete metaclasses must override."""
        raise NotImplementedError
class base_xpath(test_case, object):
    """Base test case for XPath expression tests; test methods are generated
    by ``base_metaclass`` from the declarative class attributes."""
    __metaclass__ = base_metaclass

    def assertIsInstance(self, obj, cls):
        # cls may be a single class or a tuple of acceptable classes.
        if isinstance(cls, tuple):
            expected = ' or '.join(cls.__name__ for cls in cls)
        else:
            expected = cls.__name__
        msg = "expected %s, not %s" % (expected, type(obj).__name__)
        self.assertTrue(isinstance(obj, cls), msg)

    def assertEquals(self, first, second, msg=None):
        if msg is None:
            msg = '%r == %r' % (first, second)
        # convert nan's to strings to prevent IEEE 754-style compares
        # (NaN != NaN under IEEE 754, but two NaN results should pass)
        if isinstance(first, float) and isinstance(second, float):
            if datatypes.number(first).isnan():
                first = 'NaN'
            if datatypes.number(second).isnan():
                second = 'NaN'
        # convert nodesets to lists to prevent XPath-style nodeset compares
        elif isinstance(first, list) and isinstance(second, list):
            first, second = list(first), list(second)
        return test_case.assertEquals(self, first, second, msg)
    # update aliases as well
    assertEqual = failUnlessEqual = assertEquals
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/test/xpath/__init__.py
|
__init__.py
|
#The following won't work because EXSLT is only supported in XsltContext and we use Ft.Xml.XPath.Context
#We can probably revisit when we make bindery nodes subclasses of Domlette
#inspectxml --ns=str="http://exslt.org/strings" -d "str:replace(., 'http://', '')" http://www.w3.org/2000/07/8378/xhtml/media-types/test4.xhtml "@href"
import os
import re
import sys
import codecs
import optparse
#import cStringIO
import amara
#from amara import tree
#from xml.dom import EMPTY_NAMESPACE as NULL_NAMESPACE
#from xml.dom import EMPTY_PREFIX as NULL_PREFIX
#FIXME: Use 4Suite L10N
def _(t): return t
def run(source, xpattern, xpath, limit, sentinel, display, prefixes):
    """Parse `source`, find nodes matching `xpattern`, screen them with
    `xpath`, and print either the `display` subset or the whole node.

    limit    - stop after this many matches (-1 means unlimited)
    sentinel - xpath; when true for a matched node, stop immediately
    prefixes - sequence of "prefix=uri" strings for namespace bindings
    """
    prefixes = prefixes or {}
    try:
        prefixes = dict([ p.split('=') for p in prefixes ])
    except ValueError:
        raise ValueError("Invalid prefix declaration")
    #if hasattr(source, 'read'):
    #    if hasattr(source, 'rewind'):
    #        nss = saxtools.sniff_namespace(source)
    #        source.rewind()
    #    else:
    #        source = source.read()
    #        nss = saxtools.sniff_namespace(source)
    #else:
    #    nss = saxtools.sniff_namespace(source)
    #nss.update(prefixes)
    nss = prefixes
    doc = amara.parse(source)
    #nodes = amara.pushbind(source, xpattern, prefixes=nss)
    count = 0
    # Normalize the pattern to an absolute descendant search
    search_space = doc.xml_select(u'//' + xpattern.lstrip(u'//'))
    #FIXME: Until we have something pushbind-like trim all nodes not in the search space
    for node in search_space:
        if not xpath or node.xml_select(xpath):
            count += 1
            if display:
                #Print specified subset
                result = node.xml_select(display)
                if hasattr(result, 'next'):
                    # NOTE(review): `tree` is referenced here but the
                    # `from amara import tree` import above is commented out —
                    # this branch looks like a latent NameError; confirm.
                    print '\n'.join( (n.xml_type == tree.attribute.xml_type and n.xml_value or amara.xml_print(n) for n in result) )
                else:
                    print result
            else:
                #Print the whole thing
                try:
                    amara.xml_print(node)
                except AttributeError:
                    print unicode(node).encode('utf-8')
            if limit != -1 and count >= limit:
                break
            if sentinel and node.xml_select(sentinel):
                break
    print
    return
class Usage(Exception):
    """Command-line usage error carrying a human-readable message in ``msg``."""
    def __init__(self, msg):
        self.msg = msg
def command_line_prep():
    """Build and return the optparse.OptionParser for the inspectxml tool."""
    from optparse import OptionParser
    parser = OptionParser(usage="%prog [options] source xpattern [xpath]")
    parser.add_option(
        "-c", "--limit",
        action="store", type="int", dest="limit", default=-1,
        metavar="NUMBER",
        help="limit the number of xpattern matches retrieved; files will not be parsed beyond this number, so it serves as optimization")
    parser.add_option(
        "-d", "--display",
        action="store", type="string", dest="display",
        metavar="XPATH",
        help="xpath expression indicating what nodes to be displayed from matched and screened patterns")
    parser.add_option(
        "-n", "--ns",
        action="append", type="string", dest="ns",
        metavar="<PREFIX=URI>",
        help="prefix to namespace mapping")
    parser.add_option(
        "--sentinel",
        action="store", type="string", dest="sentinel",
        metavar="XPATH",
        help="xpath expression to be checked for each pattern match. If true it causes the reporting to stop, with no further parsing")
    # A -q/--quiet flag was sketched here in the original but never enabled.
    return parser
def main(argv=None):
    """Entry point: parse options/arguments and dispatch to run().

    Returns the SystemExit status when optparse aborts, otherwise None.
    """
    #Ideas borrowed from
    # http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
    #But with better integration of entry points
    if argv is None:
        argv = sys.argv
    # By default, optparse usage errors are terminated by SystemExit
    try:
        optparser = command_line_prep()
        options, args = optparser.parse_args(argv[1:])
        # Process mandatory arguments with IndexError try...except blocks
        try:
            source = args[0]
        except IndexError:
            optparser.error("Missing filename/URL to parse")
        try:
            xpattern = args[1]
        except IndexError:
            optparser.error("Missing main xpattern")
    except SystemExit, status:
        return status
    # Perform additional setup work here before dispatching to run()
    # Detectable errors encountered here should be handled and a status
    # code of 1 should be returned. Note, this would be the default code
    # for a SystemExit exception with a string message.
    try:
        xpath = args[2].decode('utf-8')
    except IndexError:
        xpath = None
    # Command-line arguments are byte strings (Python 2); decode to unicode
    xpattern = xpattern.decode('utf-8')
    sentinel = options.sentinel and options.sentinel.decode('utf-8')
    display = options.display and options.display.decode('utf-8')
    prefixes = options.ns
    limit = options.limit
    if source == '-':
        # Conventional "read from stdin" marker
        source = sys.stdin
    run(source, xpattern, xpath, limit, sentinel, display, prefixes)

if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/inspectxml.py
|
inspectxml.py
|
import sys
from amara.bindery import html
from amara.writers.struct import *
from amara.namespaces import *
from amara.lib.xmlstring import *
from amara.lib.iri import absolutize
#from amara.bindery.model import *
#Give Amara an example so it knows what structure to expect
#label_model = examplotron_model('data/xhtmlrdfa.html')
def absolutize(uriref, docuri):
    """Best-effort resolution of `uriref` against base URI `docuri`.

    Returns the absolutized URI, or `uriref` unchanged if resolution fails
    for any reason (e.g. an invalid base URI).

    Bug fixed: this module-level definition shadows the `absolutize` imported
    from amara.lib.iri above, so the original body called *itself* until the
    recursion limit and relied on the bare except to bail out.  Import the
    real resolver under an alias and call it directly.
    """
    try:
        from amara.lib.iri import absolutize as iri_absolutize
        return iri_absolutize(uriref, docuri)
    except Exception:
        return uriref
def expand(data, context=None):
    """Expand a CURIE/QName string (e.g. "dc:title") to a full URI using the
    in-scope namespace declarations of `context`; return `data` unchanged if
    there is no context or the prefix is not bound."""
    if context:
        nss = context.xml_namespaces.copy()
        prefix, qname = splitqname(unicode(data))
        if prefix and prefix in nss:
            return nss[prefix] + qname
    return data
def handle_statement(elem, docuri):
    """Extract one RDFa statement from `elem` as a 4-tuple
    (subject, predicate, object, datatype-or-None), or () when the element
    carries no recognizable property/rel markup."""
    # Subject: nearest ancestor @about, absolutized; else the document URI
    subject = elem.xml_select(u'ancestor::*/@about')
    subject = absolutize(subject[0].xml_value, docuri) if subject else docuri
    datatype = unicode(elem.xml_select(u'string(@datatype)'))
    if datatype: datatype = expand(datatype, elem)
    if elem.xml_select(u'@property') and elem.xml_select(u'@content'):
        return ( subject , expand(elem.property, elem), elem.content, datatype or None )
    elif elem.xml_select(u'@property'):
        # No @content: the element's text is the object
        # NOTE(review): expand() is called here without a context argument,
        # so it returns the text unchanged — possibly intentional.
        return ( subject, expand(elem.property, elem), expand(unicode(elem)), datatype or None )
    elif elem.xml_select(u'@rel') and elem.xml_select(u'@resource'):
        return ( subject, expand(elem.rel, elem), elem.resource, datatype or None )
    elif elem.xml_select(u'@rel') and elem.xml_select(u'@href'):
        return ( subject, expand(elem.rel, elem), elem.href, datatype or None )
    else:
        return ()
def rdfascrape(source):
    """Parse an (X)HTML source and return a generator of RDFa statements,
    one (possibly empty) tuple per element bearing @property/@resource/@rel."""
    from amara.lib import inputsource
    source = inputsource(source, None)
    doc = html.parse(source.stream)
    # Base URI: <base href> if present, else the source's own URI
    try:
        docuri = doc.html.head.base.href
    except:
        docuri = source.uri
    statement_elems = doc.xml_select(u'//*[@property|@resource|@rel]')
    triples = ( handle_statement(elem, docuri) for elem in statement_elems )
    return triples

if __name__ == '__main__':
    #doc = html.parse(DOCURI, model=label_model)
    for triple in rdfascrape(sys.argv[1]):
        print triple
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/rdfascrape.py
|
rdfascrape.py
|
import re
from itertools import groupby
import amara
from amara.lib import U
from amara.tree import element, text
# Demo 1: split the <authors> text on "By"/","/"and" separators and rebuild
# the element's children, wrapping each text chunk in an <a> link.
SOURCE = '''<catalog>
<book>
<title>Spam for Supper</title>
<authors>By A.X. Ham and Franco Bacon</authors>
</book>
</catalog>'''

# Three capture groups; re.split therefore yields chunks in runs of 4
# (text, By-sep, comma-sep, and-sep)
EXTRACT_AUTHORS_PAT = r'(\s*By\s*)|(\s*,\s*)|(\s*and\s*)'
EXTRACT_AUTHORS_PAT_GROUPS = 4

doc = amara.parse(SOURCE)
for author_node in doc.xml_select(u'/catalog/book/authors'):
    authors = re.split(EXTRACT_AUTHORS_PAT, U(author_node))
    # Clear the existing children before re-injecting marked-up content
    for n in author_node.xml_children: author_node.xml_remove(n)
    #Collect the regex match into the regex-defined groups
    for i, subenum in groupby(enumerate(authors), lambda i: i[0]//EXTRACT_AUTHORS_PAT_GROUPS):
        matchgroup = [ group for i, group in subenum ]
        if matchgroup[0]:
            # Text chunk: wrap it in a link element
            link = element(None, u'a')
            link.xml_attributes[None, u'href'] = 'http://example.org'
            link.xml_append(text(matchgroup[0]))
            author_node.xml_append(link)
        for match in matchgroup[1:]:
            # Separator chunks are re-appended as plain text
            if match:
                author_node.xml_append(text(match))
doc.xml_write()
print
#The following variation contributed by Luis Miguel Morillas:
# Demo 2: same splitting, but the <authors> element is removed entirely and
# the generated nodes are inserted at its former position in the parent.
SOURCE = '''<catalog>
<book>
<title>Spam for Supper</title>
By A.X. Ham and Franco Bacon
<info> Other info</info>
</book>
</catalog>'''

# NOTE(review): the reassigned SOURCE above is never re-parsed — this loop
# still operates on `doc` from Demo 1 (whose authors were already rewritten),
# and the new SOURCE has no <authors> element anyway.  Looks like a demo
# oversight; confirm intent.
for author_node in doc.xml_select(u'/catalog/book/authors'):
    authors = re.split(EXTRACT_AUTHORS_PAT, U(author_node))
    #Note: you can use author_node.xml_clear() if you use bindery
    parent = author_node.xml_parent
    pos = parent.xml_index(author_node)
    parent.xml_remove(author_node)
    #Collect the regex match into the regex-defined groups
    for i, subenum in groupby(enumerate(authors), lambda i: i[0]//EXTRACT_AUTHORS_PAT_GROUPS):
        matchgroup = [ group for i, group in subenum ]
        if matchgroup[0]:
            link = element(None, u'a')
            link.xml_attributes[None, u'href'] = 'http://example.org'
            link.xml_append(text(matchgroup[0]))
            parent.xml_insert(pos, link)
            pos += 1
        for match in matchgroup[1:]:
            if match:
                parent.xml_insert(pos, (text(match)))
                pos += 1
doc.xml_write()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/inject_markup.py
|
inject_markup.py
|
import sys
from itertools import *
from functools import *
from operator import *
from collections import defaultdict
import re, copy
from cStringIO import StringIO
from datetime import datetime
import amara
from amara.namespaces import *
from amara.bindery import html
from amara.lib import inputsource
from amara.bindery.model import *
from amara.lib.util import *
from amara.lib.xmlstring import *
from amara import bindery
from amara.writers.struct import *
from amara.bindery.util import dispatcher, node_handler
from amara.bindery.model import *
from amara.lib.util import *
# Public API of this module
__all__ = [
    'ENTRY_MODEL', 'FEED_MODEL', 'ENTRY_MODEL_XML', 'FEED_MODEL_XML',
    'ATOM_IMT', 'PREFIXES', 'DEFAULT_SKEL',
    'tidy_content_element', 'feed', 'aggregate_entries',
    'ejsonize', 'entry_metadata',
    'author', 'link', 'category',
]

#
#Basic schematic models
#
#From 1.1 of the spec
# Examplotron schema (with Akara metadata-extraction annotations) for an
# atom:entry; compiled below into ENTRY_MODEL.
ENTRY_MODEL_XML = """<atom:entry xmlns:atom="http://www.w3.org/2005/Atom" xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel" ak:resource="atom:id">
<ak:rel name="'type'" value="'atom:entry'"/>
<ak:rel name="'alternate_link'" value='atom:link[@rel="alternate"]/@href' />
<ak:rel name="'self_link'" value='atom:link[not(@rel) or @rel="self"]/@href' />
<atom:id ak:rel="local-name()"/>
<atom:title type="xhtml" ak:rel="local-name()"/>
<atom:updated ak:rel="local-name()"></atom:updated>
<atom:published ak:rel="local-name()"></atom:published>
<atom:link rel="self" eg:occurs="*" ak:rel="concat(local-name(), '_', @rel)" ak:value="@href" />
<atom:summary type="xhtml" ak:rel="local-name()"/>
<atom:category eg:occurs="*" ak:rel="local-name()"/>
<!--
<atom:author eg:occurs="*" ak:rel="local-name()" ak:resource="(atom:name|atom:uri|atom:email)[1]">
-->
<atom:author eg:occurs="*" ak:rel="local-name()">
<ak:rel name="'type'" value="'atom:author'"/>
<atom:name ak:rel="local-name()" ak:value="." />
<atom:uri ak:rel="local-name()" ak:value="." />
<atom:email ak:rel="local-name()" ak:value="." />
</atom:author>
<atom:content type="xhtml" eg:occurs="?" ak:rel="local-name()" ak:value="."/>
</atom:entry>"""

# Feed-level schema; embeds the entry schema via %s interpolation below.
FEED_MODEL_XML = """<atom:feed xmlns:atom="http://www.w3.org/2005/Atom" xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel" ak:resource="atom:id">
<ak:rel name="'type'" value="'atom:feed'"/>
<ak:rel name="'alternate_link'" value='atom:link[@rel="alternate"]/@href' />
<ak:rel name="'self_link'" value='atom:link[not(@rel) or @rel="self"]/@href' />
<atom:title ak:rel="local-name()"></atom:title>
<atom:subtitle ak:rel="local-name()"></atom:subtitle>
<atom:updated ak:rel="local-name()"></atom:updated>
<!--
<atom:author eg:occurs="*" ak:rel="local-name()" ak:resource="(atom:name|atom:uri|atom:email)[1]">
-->
<atom:author eg:occurs="*" ak:rel="local-name()">
<ak:rel name="'type'" value="'atom:author'"/>
<atom:name ak:rel="local-name()"/>
<atom:uri ak:rel="local-name()"/>
<atom:email ak:rel="local-name()"/>
</atom:author>
<atom:id ak:rel="local-name()"></atom:id>
<atom:link rel="self" eg:occurs="*" ak:rel="concat(local-name(), '_', @rel)" ak:value="@href" />
<atom:rights ak:rel="local-name()"></atom:rights>
%s
</atom:feed>
""" % ENTRY_MODEL_XML

# Compiled examplotron models used by bindery.parse throughout this module
FEED_MODEL = examplotron_model(FEED_MODEL_XML)
ENTRY_MODEL = examplotron_model(ENTRY_MODEL_XML)

# Internet media type for Atom documents
ATOM_IMT = u'application/atom+xml'
PREFIXES = {COMMON_NAMESPACES[ATOM_NAMESPACE]: ATOM_NAMESPACE, COMMON_NAMESPACES[ATOMTHR_EXT_NAMESPACE]: ATOMTHR_EXT_NAMESPACE}

# Characters allowed in URL slugs; anything else is replaced with '_'
SLUGCHARS = r'a-zA-Z0-9\-\_'
OMIT_FROM_SLUG_PAT = re.compile('[^%s]'%SLUGCHARS)

# Minimal Atom document used when no skeleton is supplied to feed()
DEFAULT_SKEL = '''<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom">
<title>Sample feed</title>
<id>http://example.org/CHANGE_ME</id>
<!--updated>2009-03-03T11:50:21Z</updated-->
</feed>
'''
def slug_from_title(title, maxlen=None):
    """Derive a URL slug from `title`: replace disallowed characters with
    '_', lowercase, decode as UTF-8, and optionally truncate to `maxlen`."""
    slug = OMIT_FROM_SLUG_PAT.sub('_', title).lower().decode('utf-8')
    if maxlen:
        return slug[:maxlen]
    return slug
# Parse an ISO 8601 UTC timestamp of the form "YYYY-MM-DDTHH:MM:SSZ"
datetime_from_iso = lambda ds: datetime.strptime(ds, "%Y-%m-%dT%H:%M:%SZ")
# Map a datetime to a "year/month" path fragment (first two utctimetuple fields)
path_from_datetime = lambda dt: '%i/%i'%dt.utctimetuple()[:2]
#
#Utility functions
#
def aggregate_entries(envelope, entries):
    '''
    envelope - input source of atom feed document to enclose entries
    if it has any entries, the new ones are appended
    entries - sequence of entry input sources

    Returns (envelope_doc, metadata): the combined bindery document with the
    parsed entries appended newest-first, plus its extracted metadata.
    '''
    envelope_doc = bindery.parse(envelope, model=FEED_MODEL)
    entrydocs = [ bindery.parse(entry, model=ENTRY_MODEL) for entry in entries ]
    #for entry in sorted(entrydocs, key=lambda x: attrgetter('updated')):
    # Sort by the entry's atom:updated text, newest first
    for entry_doc in sorted(entrydocs, key=lambda x: str(x.entry.updated), reverse=True):
        envelope_doc.feed.xml_append(entry_doc.entry)
    # NOTE(review): generate_metadata is not defined in this module;
    # presumably provided by one of the star imports (amara.bindery.model) —
    # confirm.
    metadata = generate_metadata(envelope_doc)
    return envelope_doc, metadata
def tidy_content_element(root, check=u'//atom:title|//atom:summary|//atom:content', prefixes=PREFIXES):
    """
    Takes all Atom content elements with type=html (i.e. a:title, a:summary or a:content)
    And convert them to be of type=xhtml
    This operation mutates root in place.
    Example:
    import amara; from util import tidy_content_element
    A = '<entry xmlns="http://www.w3.org/2005/Atom"><id>urn:bogus:x</id><title type="html"><div>x<p>y<p></div></title></entry>'
    doc = amara.parse(A)
    tidy_content_element(doc)
    doc.xml_write()
    """
    nodes = root.xml_select(check, prefixes)
    for node in nodes:
        # Only rewrite elements that are type=html and have some text content
        if node.xml_select(u'@type = "html"') and node.xml_select(u'string(.)'):
            # Re-parse the escaped HTML payload with the tag-soup parser
            #unsouped = html.parse('<html xmlns="http://www.w3.org/1999/xhtml">%s</html>'%node.xml_select(u'string(.)').encode('utf-8'), encoding='utf-8')
            unsouped = html.parse('<html>%s</html>'%node.xml_select(u'string(.)').encode('utf-8'), encoding='utf-8')
            unsouped.html.xml_namespaces[None] = XHTML_NAMESPACE
            subtree = element_subtree_iter(unsouped)
            #Grab body, before changing the namespaces changes how it's bound
            #After NS is changed, you'd need to remember to do unsouped.html_.body_
            body = unsouped.html.body
            # Move every element into the XHTML namespace
            for e in subtree:
                if isinstance(e, tree.element):
                    e.xml_namespace = XHTML_NAMESPACE
                    #Temporary fixup until bindery can handle namespace change better
                    e.xml_parent.xml_fixup(e)
            #amara.xml_print(unsouped, stream=sys.stderr, indent=True)
            # Replace the node's children with a single XHTML div wrapper
            while node.xml_children: node.xml_remove(node.xml_first_child)
            node.xml_append(amara.parse('<div xmlns="http://www.w3.org/1999/xhtml"/>').xml_first_child)
            #node.xml_append_fragment('<div xmlns="http://www.w3.org/1999/xhtml"/>')
            for child in body.xml_children:
                node.xml_first_child.xml_append(child)
            node.xml_attributes[None, u'type'] = u'xhtml'
    return root
#
class author(object):
    """Convenience view over an atom:author element, exposing its name, uri
    and email subelements (and the underlying node) as plain attributes."""

    def __init__(self, node):
        self.node = node
        self.name = node.name
        self.uri = node.uri
        self.email = node.email
class category(object):
    """Convenience view over an atom:category element, exposing its scheme
    and term (and the underlying node) as plain attributes."""

    def __init__(self, node):
        self.node = node
        self.scheme = node.scheme
        self.term = node.term
class link(object):
    """Convenience view over an atom:link element, exposing its rel and href
    (and the underlying node) as plain attributes."""

    def __init__(self, node):
        self.node = node
        self.rel = node.rel
        self.href = node.href
#
# Namespaces used when exporting feeds as RSS 1.0 (see feed.rss1format)
RSS_CONTENT_NAMESPACE = u"http://purl.org/rss/1.0/modules/content/"
RSS10_NAMESPACE = u"http://purl.org/rss/1.0/"
#
#An atom feed as a proper specialized version of bindery
#
class feed(bindery.nodes.entity_base):
    '''
    Class to facilitate building Atom feeds

    A specialized bindery entity: construct from a skeleton or existing feed
    document, append entries with append(), import RSS 2.0 with from_rss2(),
    and export RSS 1.0 with rss1format().
    '''
    #If you override __init__ and change the signature, you must also override __new__
    def __new__(cls, document_uri=None, feedxml=None, skel=None, title=None, updated=None, id=None):
        return bindery.nodes.entity_base.__new__(cls, document_uri)

    def __init__(self, document_uri=None, feedxml=None, skel=None, title=None, updated=None, id=None):
        '''
        skel - an input source with a starting Atom document, generally a skeleton

        feedxml takes precedence over skel; DEFAULT_SKEL is the fallback.
        Raises ValueError if no atom:id is available from the source or the
        id keyword.
        '''
        #WARNING: id global masked herein
        bindery.nodes.entity_base.__init__(self, document_uri)
        source = feedxml or skel or DEFAULT_SKEL
        source = bindery.parse(source, model=FEED_MODEL)
        #FIXME: need copy.deepcopy implemented to ease this
        for child in source.xml_children:
            self.xml_append(child)
        if title:
            self.feed.title = title
        if id:
            self.feed.id = id
        if not self.feed.id:
            raise ValueError("Must supply id in skel or id kwarg")
        #if not(hasattr)
        #self.feed.xml_append(E((ATOM_NAMESPACE, u'updated'), updated or datetime.now().isoformat()))
        self.feed.updated = updated or datetime.now().isoformat()
        return

    @staticmethod
    def from_rss2(feedxml):
        '''
        feedxml - an input source with an RSS 2.0 document

        Returns a new feed instance with one Atom entry per RSS item.
        '''
        #WARNING: Quite broken! Probably need feedparser to e.g. deal with crap rss 2 dates
        source = bindery.parse(feedxml)#, model=FEED_MODEL)
        title = html.markup_fragment(inputsource.text(str(source.rss.channel.title))).xml_encode()
        #FIXME: bindery modeling FTW!
        try:
            updated = unicode(source.rss.channel.pubDate)
        except AttributeError:
            updated = None
        link = unicode(source.rss.channel.link)
        try:
            summary = html.markup_fragment(inputsource.text(str(source.rss.channel.description))).xml_encode()
        except AttributeError:
            summary = None
        # The channel link doubles as the feed id
        f = feed(title=title, updated=updated, id=link)
        for item in source.rss.channel.item:
            title = html.markup_fragment(inputsource.text(str(item.title))).xml_encode()
            try:
                summary = html.markup_fragment(inputsource.text(str(item.description))).xml_encode()
            except AttributeError:
                summary = None
            #author is dc:creator?
            #category is category/@domain?
            #try:
            #    authors = [ (u'%s, %s, %s'%(U(metadata[a][u'LastName']), U(metadata[a].get(u'FirstName', [u''])[0]), U(metadata[a][u'Initials'])), None, None) for a in resource.get(u'Author', []) ]
            #except:
            #    authors = []
            links = [
                #FIXME: self?
                (U(item.link), u'alternate'),
            ]
            f.append(
                U(item.link),
                title,
                updated = unicode(item.pubDate),
                summary=summary,
                #authors=authors,
                links=links,
            )
        return f

    def append(self, id_, title, updated=None, summary=None, content=None, authors=None, categories=None, links=None, elements=None):
        '''
        append an entry
        author is list of (u'Uche Ogbuji', u'[email protected]', u'http://Uche.Ogbuji.net'), any of which can be None

        links      - list of (href, rel) pairs
        categories - list of terms or (term, scheme) pairs
        elements   - extra structwriter element specs appended verbatim
        '''
        authors = authors or []
        links = links or []
        categories = categories or []
        elements = elements or []
        updated = updated or datetime.now().isoformat()
        entry = self.xml_element_factory(ATOM_NAMESPACE, u'entry')
        #entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'id', content=id_))
        entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'id'))
        entry.id.xml_append(U(id_))
        entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'updated'))
        entry.updated.xml_append(U(updated))
        #Only supports text titles, for now
        entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'title'))
        entry.title.xml_append(U(title))
        for link in links:
            (href, rel) = link
            entry.xml_append(E((ATOM_NAMESPACE, u'link'), {u'href': href, u'rel': rel}))
        for category in categories:
            entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'category'))
            # Accept either a bare term or a (term, scheme) pair
            try:
                term, scheme = category
            except TypeError:
                term, scheme = category, None
            entry.category[-1].xml_attributes[u'term'] = U(term)
            if scheme: entry.category[-1].xml_attributes[u'scheme'] = U(scheme)
        for author in authors:
            entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'author'))
            (name, email, uri) = author
            entry.author[-1].xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'name'))
            entry.author[-1].name.xml_append(U(name))
            if email:
                entry.author[-1].xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'email'))
                # Bug fixed: the email text was previously appended to the
                # *name* element (copy-paste from the branch above); it
                # belongs in the atom:email element just created.
                entry.author[-1].email.xml_append(U(email))
            if uri:
                entry.author[-1].xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'uri'))
                entry.author[-1].uri.xml_append(U(uri))
        for elem in elements:
            # Serialize the element spec and re-parse it into the entry
            buf = StringIO()
            w = structwriter(indent=u"yes", stream=buf)
            w.feed(elem)
            entry.xml_append_fragment(buf.getvalue())
        #FIXME: Support other content types
        if summary:
            entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'summary'))
            entry.summary.xml_attributes[u'type'] = u'text'
            entry.summary.xml_append(U(summary))
        if content:
            entry.xml_append(self.xml_element_factory(ATOM_NAMESPACE, u'content'))
            entry.content.xml_attributes[u'type'] = u'text'
            entry.content.xml_append(U(content))
        self.feed.xml_append(entry)
        return

    #
    def rss1format(self):
        '''
        Return export as string in RSS 1.0 format
        '''
        #doc = bindery.parse(isrc, model=FEED_MODEL, prefixes={u'a', ATOM_NAMESPACE})
        converter = self.atom_2rss1()
        self.feed.xml_namespaces[u'a'] = ATOM_NAMESPACE
        self.feed.xml_namespaces[u'html'] = XHTML_NAMESPACE
        buf = StringIO()
        structwriter(indent=u"yes", stream=buf).feed(
            converter.dispatch(self.feed)
        )
        return buf.getvalue()

    #
    class atom_2rss1(dispatcher):
        '''
        A dispatcher for converting Atom to RSS 1.0
        '''
        # Item descriptions longer than this are truncated with a marker
        MAX_ITEM_DESC = 500

        @node_handler(u'a:feed')
        def feed(self, node):
            # Emit the RDF envelope: channel metadata, an rdf:Seq index of
            # item URIs, then the full item elements.
            yield E((RDF_NAMESPACE, u'rdf:RDF'),
                NS(u'dc', DC_NAMESPACE),
                NS(u'content', RSS_CONTENT_NAMESPACE),
                E((RSS10_NAMESPACE, u'channel'), {(u'rdf:about'): node.xml_avt(u"{a:link[@rel='alternate']/@href}")},
                    E((RSS10_NAMESPACE, u'title'), self.text_construct(node.title)),
                    E((RSS10_NAMESPACE, u'description'), self.text_construct(node.subtitle)),
                    E((RSS10_NAMESPACE, u'link'), node.xml_avt(u"{a:link[@rel='alternate']/@href}")),
                    E((RSS10_NAMESPACE, u'items'),
                        E((RDF_NAMESPACE, u'rdf:Seq'),
                            chain(*imap(partial(self.dispatch, mode=u'index'), node.entry))
                        )
                    )
                ),
                chain(*imap(partial(self.dispatch, mode=u'full'), node.entry))
            )

        @node_handler(u'a:entry', mode=u'index')
        def entry_index(self, node):
            # One rdf:li per entry, pointing at its alternate link
            yield E((RDF_NAMESPACE, u'rdf:li'),
                node.xml_avt(u"{a:link[@rel='alternate']/@href}")
            )

        @node_handler(u'a:entry', mode=u'full')
        def entry_full(self, node):
            yield E((RSS10_NAMESPACE, u'item'), {(u'rdf:about'): node.xml_avt(u"{a:link[@rel='alternate']/@href}")},
                E((RSS10_NAMESPACE, u'title'), self.text_construct(node.title)),
                E((RSS10_NAMESPACE, u'description'), self.description(node)),
                E((RSS_CONTENT_NAMESPACE, u'content:encoded'), self.text_construct(node.summary or node.content)),
                E((DC_NAMESPACE, u'dc:date'), node.updated),
                [ E((DC_NAMESPACE, u'dc:subject'), s.term) for s in iter(node.category or []) ],
                E((RSS10_NAMESPACE, u'link'), node.xml_avt(u"{a:link[@rel='alternate']/@href}")),
            )

        def description(self, node):
            # Plain-text description: summary if present, else content,
            # truncated to MAX_ITEM_DESC characters.
            if node.summary:
                d = unicode(node.summary)
            else:
                d = unicode(node.content)
            if len(d) > self.MAX_ITEM_DESC:
                d = d[:self.MAX_ITEM_DESC] + u'\n...[Truncated]'
            return d

        @node_handler(u'html:*')
        def html_elem(self, node):
            # Copy XHTML elements through, dispatching their children
            yield E(node.xml_local, node.xml_attributes.copy(),
                chain(*imap(self.dispatch, node.xml_children))
            )

        def text_construct(self, node):
            #FIXME: why is this a generator, anyway?
            if not node:
                yield u''
                return
            #FIXME: Need to fix a nasty bug in models before using node.type
            type_ = node.xml_avt(u"{@type}")
            #FIXME: will be None, not u'' when said bug is fixed
            if type_ in [u'', u'text']:
                yield unicode(node)
            elif type_ == u'xhtml':
                # Serialize the xhtml:div payload and hand back its text
                buf = StringIO()
                w = structwriter(indent=u"yes", stream=buf, encoding='utf-8')
                for child in node.xml_select(u'html:div/node()'):
                    w.feed(self.dispatch(child))
                encoded = buf.getvalue().decode('utf-8')
                #print (encoded,)
                yield encoded
#
def entry_metadata(isrc):
    '''
    Organize the metadata of an atom document according to its entries

    NOTE(review): this function appears to be an unfinished refactor — it
    references `first`, `delimited` and `doc`, none of which are defined
    (the code that created them is commented out below), so calling it will
    raise NameError/UnboundLocalError.  Documented as-is; needs repair or
    removal.
    '''
    #metadata = doc.xml_model.generate_metadata(doc)
    #delimited = groupby(metadata, lambda row: (row[1:] == (u'type', u'atom:entry') ))
    #first, rows = delimited.next()
    def handle_row(resource, rel, val):
        # Translate one (resource, rel, value) metadata row into key/value
        # pairs for the entry dictionary.
        #if rel in [u"id", u"title"]:
        if rel == u"author":
            yield rel, (unicode(val[0].name), unicode(val[0].email), unicode(val[0].uri))
        if rel == u"link":
            yield rel, (val[0].rel, unicode(val[0]))
        if rel in [u"title", u"updated"]:
            yield rel, unicode(val[0])
        if rel in [u"id"]:
            yield rel, unicode(val[0])
            yield u"label", unicode(val[0])
    #u"link": [ l for l in doc.feed.link if l.rel == u"alternate" ][0].href,
    #u"authors": [ unicode(a.name) for a in iter(doc.feed.author or []) ],
    #u"updated": unicode(doc.feed.updated),
    # NOTE(review): `first`/`delimited` are never assigned — see docstring.
    if not first:
        #Must be a full feed, so consume the first entry's delimiter
        first, rows = delimited.next()
    entries = []
    for isboundary, rows in delimited:
        entryinfo = {u"type": u"atom:entry"}
        if isboundary:
            #consume/skip the entry's delimiter
            continue
        for k, v in (kvpair for row in rows for kvpair in handle_row(*row)):
            print k, v
        #print isboundary, list(rows)
        entries.append(entryinfo)
    # NOTE(review): `doc` is also undefined here, and feedinfo/doc_entries
    # are computed but never returned.
    try:
        doc_entries = iter(doc.feed.entry)
        feedinfo = {
            u"label": unicode(doc.feed.id),
            u"type": u"Feed",
            u"title": unicode(doc.feed.title),
            u"link": [ l for l in doc.feed.link if l.rel == u"alternate" ][0].href,
            u"authors": [ unicode(a.name) for a in iter(doc.feed.author or []) ],
            u"updated": unicode(doc.feed.updated),
        }
    except AttributeError:
        try:
            doc_entries = iter(doc.entry)
            feedinfo = None
        except AttributeError:
            return None, []
    return
def deserialize_text_construct(node):
    """Return the value of an Atom text construct: plain unicode for
    text/html (or untyped) nodes, serialized markup of the xhtml:div wrapper
    for type=xhtml.  Returns None for any other type."""
    #FIXME: Need to fix a nasty bug in models before using node.type
    type_ = node.type
    if type_ in [None, u'text', u'html']:
        return unicode(node)
    elif type_ == u'xhtml':
        encoded = node.div.xml_encode()
        return encoded
def ejsonize(isrc):
    '''
    Convert Atom syntax to a dictionary
    Note: the conventions used are designed to simplify conversion to Exhibit JSON
    (see: http://www.ibm.com/developerworks/web/library/wa-realweb6/ ; listing 3)

    Returns a list of one dict per entry.  Raises ValueError if `isrc` is
    neither a feed nor a standalone entry document.
    '''
    doc = bindery.parse(isrc, model=FEED_MODEL)
    def process_entry(e):
        # Build the Exhibit-style dict for a single atom:entry
        known_elements = [u'id', u'title', u'link', u'author', u'category', u'updated', u'content', u'summary']
        data = {
            u"id": unicode(e.id),
            #XXX Shall we use title for label?
            u"label": unicode(e.id),
            u"type": u"Entry",
            u"title": unicode(e.title),
            u"link": first_item([ l.href for l in e.link if l.rel in [None, u"alternate"] ], []),
            #Nested list comprehension to select the alternate link,
            #then select the first result ([0]) and gets its href attribute
            u"authors": [ unicode(a.name) for a in iter(e.author or []) ],
            #Nested list comprehension to create a list of category values
            u"categories": [ unicode(c.term) for c in iter(e.category or []) ],
            u"updated": unicode(e.updated),
            u"summary": unicode(e.summary),
        }
        if not data[u"categories"]: del data[u"categories"]
        if e.summary is not None:
            data[u"summary"] = unicode(e.summary)
        if e.content is not None:
            # Prefer out-of-line content (@src); else inline text
            try:
                data[u"content_src"] = unicode(e.content.src)
            except AttributeError:
                data[u"content_text"] = deserialize_text_construct(e.content)
        # Foreign-namespace extension elements are carried over verbatim
        for child in e.xml_elements:
            if child.xml_namespace != ATOM_NAMESPACE and child.xml_local not in known_elements:
                data[child.xml_local] = unicode(child)
        return data
    try:
        doc_entries = iter(doc.feed.entry)
        # NOTE(review): feedinfo is computed but never returned — possibly
        # intended to be part of the result; confirm.
        feedinfo = {
            u"id": unicode(doc.feed.id),
            #XXX Shall we use title for label?
            u"label": unicode(doc.feed.id),
            u"type": u"Feed",
            u"title": unicode(doc.feed.title),
            u"link": first_item([ l.href for l in doc.feed.link if l.rel in [None, u"alternate"] ], []),
            u"authors": [ unicode(a.name) for a in iter(doc.feed.author or []) ],
            u"updated": unicode(doc.feed.updated),
        }
    except AttributeError:
        # Not a feed; try treating the document as a standalone entry
        try:
            doc_entries = iter(doc.entry)
            feedinfo = None
        except AttributeError:
            #FIXME L10N
            raise ValueError("Does not appear to be a valid Atom file")
    return [ process_entry(e) for e in doc_entries ]
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/atomtools.py
|
atomtools.py
|
import sys
import amara
from amara import tree
from amara.writers.struct import *
#from amara.namespaces import *
INDENT_STR = ' '
def srewrite(source, **kwargs):
    """Parse `source` and yield chunks of Python source text sketching the
    amara structwriter calls (ROOT(...)/E(...)) that would reproduce it."""
    doc = amara.parse(source)
    def handle_node(node, indent=0):
        # Recursive generator over the parsed tree; `empty` tracks whether
        # any children were emitted, to decide where the closing paren goes.
        empty = True
        if isinstance(node, tree.text):
            yield repr(node.xml_value)
        elif isinstance(node, tree.entity):
            yield INDENT_STR*indent + 'ROOT('
            for child in node.xml_children:
                empty = False
                yield '\n'
                for chunk in handle_node(child, indent+1):
                    yield chunk
            yield (not empty)*INDENT_STR*indent + ')\n'
        elif isinstance(node, tree.element):
            yield INDENT_STR*indent + 'E('
            # Qualified name as (ns, local) tuple, or bare local name
            yield repr((node.xml_namespace, node.xml_local)) if node.xml_namespace else repr(node.xml_local)
            if node.xml_attributes:
                yield repr(dict(node.xml_attributes))
            for child in node.xml_children:
                empty = False
                yield '\n'
                for chunk in handle_node(child, indent+1):
                    yield chunk
            yield (not empty)*INDENT_STR*indent + ')\n'
    for chunk in handle_node(doc): yield chunk
    return
def launch(source, **kwargs):
    """Dispatch to the requested output mode (currently only srewrite)."""
    # NOTE(review): direct subscript here (KeyError if absent) looks like a
    # leftover debug print — the guarded .get() below is the real check.
    print kwargs['srewrite']
    if kwargs.get('srewrite', True):
        print ''.join(srewrite(source, **kwargs))
    return
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
def command_line_prep():
    """Build and return the optparse.OptionParser for the kekule tool."""
    from optparse import OptionParser
    usage = ("Amara 2.x. Tool to generate code to generate XML.\n"
             "python -m 'amara.tools.kekule' [options] source")
    parser = OptionParser(usage=usage)
    parser.add_option(
        "--struct-rewrite",
        action="store_true", dest="srewrite", default=True,
        help="Output a skeleton of structwriter code corresponding to the given XML")
    parser.add_option(
        "-S", "--struct",
        action="store_true", dest="structw", default=True,
        help="Output code for Amara structwriter")
    return parser
def main(argv=None):
    """Entry point: parse options/arguments and dispatch to launch().

    Returns the SystemExit status when optparse aborts, otherwise None.
    """
    #But with better integration of entry points
    if argv is None:
        argv = sys.argv
    # By default, optparse usage errors are terminated by SystemExit
    try:
        optparser = command_line_prep()
        options, args = optparser.parse_args(argv[1:])
        # Process mandatory arguments with IndexError try...except blocks
        try:
            source = args[0]
        except IndexError:
            optparser.error("Missing source kekule")
    except SystemExit, status:
        return status
    # Perform additional setup work here before dispatching to run()
    # Detectable errors encountered here should be handled and a status
    # code of 1 should be returned. Note, this would be the default code
    # for a SystemExit exception with a string message.
    if source == '-':
        # Conventional "read from stdin" marker
        source = sys.stdin
    launch(source, structw=options.structw, srewrite=options.srewrite)
    return

if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/kekule.py
|
kekule.py
|
#parse(unichr(10).join([u'|x|y|z|', u'|a|b|c|', u'|d|e|f|', u'test']))
#A roundabout way, for purposes of doctest, to write: parse(u'|x|y|z|\n|a|b|c|\n|d|e|f|\ntest')
import re
import creole
from amara.bindery import html
from amara import tree
class text(tree.text):
    """Text node exposing xml_value under the creole-style name 'content'.

    The creole parser reads/writes a 'content' attribute on its nodes;
    this alias lets amara text nodes stand in for creole DocNodes.
    """
    @property
    def content(self):
        # Read alias for the underlying amara text value
        return self.xml_value
    @content.setter
    def content(self, x):
        # Write alias: mutate the node's text in place
        self.xml_value = x
def DocNode(kind='', parent=None, content=None):
    """Factory for creole document nodes built as amara tree nodes.

    Monkeypatched over creole.DocNode (see module bottom) so the creole
    parser constructs an amara tree instead of its own node class.

    :param kind: node kind, e.g. 'text', 'item', 'definition_list'
    :param parent: parent element; the new node is appended to it
    :param content: optional text content for the node
    :return: the new node when parent is given; implicitly None otherwise
        (the creole parser appears to always supply a parent)
    """
    #import sys; print >> sys.stderr, (kind, parent, content)
    if parent is not None:
        if kind == 'text':
            e = text(content or u'')
        else:
            # Non-text kinds become elements named after the kind
            e = parent.factory_entity.xml_element_factory(None, unicode(kind))
            if content is not None:
                e.xml_append(content)
        parent.xml_append(e)
        # Mirror the attributes creole's parser expects on its DocNodes
        e.kind = kind
        e.content = content
        return e
class Parser(creole.Parser):
    """creole.Parser subclass that builds an amara bindery HTML tree and
    adds Moin-style definition-list handling (term:: definition)."""
    def __init__(self, raw, rules=None):
        '''
        Initialize with the raw wiki text and an optional Rules instance
        (defaults to this module's extended Rules).
        '''
        rules = rules or Rules()
        creole.Parser.__init__(self, raw, rules)
        self.raw = raw
        self.root = html.entity()
        self.cur = self.root        # The most recent document node
        self.root.kind = 'document'
        self.text = None            # The node to add inline characters to
    def _defitem_repl(self, groups):
        # Build one definition-list item from a "term:: definition" match.
        term = groups.get('defterm', u'')
        defn = groups.get('defdef', u'')
        kind = 'definition_list'
        lst = self.cur
        # Find a list of the same kind up the tree
        #FIXME: check/test how this works for defn lists in Moin
        #print groups, repr(lst)
        while (lst and
               not lst.kind == kind and
               not lst.kind in ('document', 'section', 'blockquote')):
            lst = lst.parent
        if lst and lst.kind == kind:
            # Reuse the enclosing definition list
            self.cur = lst
        else:
            # Create a new level of list
            self.cur = self._upto(self.cur,
                ('item', 'document', 'section', 'blockquote'))
            self.cur = DocNode(kind, self.cur)
        # item -> term + defn children; inline markup is parsed into each
        item = DocNode('item', self.cur)
        self.cur = item
        self.cur = DocNode('term', self.cur)
        self.text = None
        self.parse_inline(term)
        self.cur = item
        self.cur = DocNode('defn', self.cur)
        self.text = None
        self.parse_inline(defn)
        self.text = None
    # The creole regex machinery dispatches on group names; all three
    # group names route to the same item handler.
    _defterm_repl = _defitem_repl
    _defdef_repl = _defitem_repl
    def _deflist_repl(self, groups):
        # Split a whole definition-list block into individual items
        text = groups.get('deflist', u'')
        self.rules.defitem_re.sub(self._replace, text)
class Rules(creole.Rules):
    """creole.Rules extended with Moin-style definition-list syntax."""
    # A block of one or more consecutive "term:: definition" lines
    deflist = r'''(?P<deflist>
        ^ [ \t]*[\w-].*?::[ \t].* $
        ( \n[ \t]*[\w-].*?::[ \t].* $ )*
    )'''
    # A single "term:: definition" line within a deflist block
    defitem = r'''(?P<defitem>
        ^ \s*
        (?P<defterm> [\w-]+.*?)::[ \t](?P<defdef> .*?)
        $
    )'''
    def __init__(self, bloglike_lines=False, url_protocols=None, wiki_words=False):
        creole.Rules.__init__(self, bloglike_lines, url_protocols, wiki_words)
        self.defitem_re = re.compile(self.defitem, re.X | re.U | re.M)
        # Rebuild the top-level block regex with deflist inserted before table
        self.block_re = re.compile('|'.join([self.line, self.head, self.separator,
            self.pre, self.list, self.deflist, self.table,
            self.text]), re.X | re.U | re.M)
from amara.lib import inputsource
def parse(source):
    """Parse creole wiki text into an amara document tree.

    :param source: creole markup as a byte string, unicode object, or any
        object acceptable to amara.lib.inputsource (stream, path, URI)
    :return: the parsed document built by Parser
    """
    if isinstance(source, str):
        doc = Parser(source).parse()
    elif isinstance(source, unicode):
        # The creole parser works on encoded bytes (Python 2)
        doc = Parser(source.encode('utf-8')).parse()
    else:
        # Anything else: resolve through amara's inputsource machinery
        doc = Parser(inputsource.text(source).stream.read()).parse()
    return doc
#Monkeypatching
creole.DocNode = DocNode
#--------------
#Unit test
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/creoletools.py
|
creoletools.py
|
from amara import writer
w = writer(indent=u"yes") #Operates in streaming mode
w.start_document()
w.start_element(u'xsa')
w.start_element(u'vendor')
#Element with simple text (#PCDATA) content
w.simple_element(u'name', content=u'Centigrade systems')
#Note writer.text(content) still works
w.simple_element(u'email', content=u"[email protected]")
w.end_element(u'vendor')
#Element with an attribute
w.start_element(u'product', attributes={u'id': u"100\u00B0"})
#Note w.attribute(name, value, namespace=None) still works
w.simple_element(u'name', content=u"100\u00B0 Server")
#XML fragment
#w.xml_fragment('<version>1.0</version><last-release>20030401</last-release>')
#Empty element
w.simple_element(u'changes')
w.end_element(u'product')
w.end_element(u'xsa')
w.end_document()
print
#Now an HTML example
w = writer(method=u"html") #indent=u"yes" is default in this mode
w.start_document()
w.start_element(u'html')
w.start_element(u'head')
w.simple_element(u'title', content=u'Hello')
w.end_element(u'head')
w.start_element(u'body')
#w.start_element(u'body', attributes={u'id': u"100\u00B0"})
w.simple_element(u'p', content=u"World")
#XML fragment
#w.xml_fragment('<version>1.0</version><last-release>20030401</last-release>')
#Empty element
w.simple_element(u'br')
w.end_element(u'html')
w.end_document()
print
from amara.writers.struct import *
w = structwriter(indent=u"yes").feed(
ROOT(
E(u'doc',
E(u'a', u'hello'),
#E('float-content', 3.14),
E((None, u'b'), u'this is unicode: \u221e'),
#E(u'list-content', [E('child', 'a'), RAW('<raw-node message="hello"/>'), E('child', 'b')]),
E(u'c', {u'parrot': u'dead', u'spam': u'eggs'}),
E((None, u'c'), {u'parrot': u'dead', (None, u'spam'): u'eggs'}, u'again'),
E(u'gen-content', (E('node', x) for x in range(6))),
E(u'monty', E('spam', 'eggs')),
E(u'empty'),
E(u'func', lambda: u'this is a func'),
#E(u'raw-xml-content', RAW('<a>b</a>', '<c>d</c>')) #The multiple raw text bits are just concatenated
)
))
print
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/writing_xml.py
|
writing_xml.py
|
MORE_DOC = """
Zthes relation types explained at http://www.loc.gov/z3950/agency/profiles/zthes-02.html
``NT'' Narrower term: that is, the related term is more specific than the current one. -> skos:narrower
``BT'' Broader term: that is, the related term is more general than the current one. -> skos:broader
``USE'' Use instead: that is, the related term should be used in preference to the current one. -> z:useInstead
``UF'' Use for: that is, the current term should be used in preference to the related one -> z:useFor
``RT'' Related term. -> skos:related
See also:
* http://www.w3.org/2001/sw/Europe/reports/thes/1.0/migrate/
"""
import sys
import amara
from amara.writers.struct import *
from amara.namespaces import *
ZTHES_NAMESPACE = u"http://zthes.z3950.org/model/index.html"
#http://www.loc.gov/z3950/agency/profiles/zthes-02.html
RELATION_LOOKUP = {
u'RT': (SKOS_NAMESPACE, u'skos:related'),
u'NT': (SKOS_NAMESPACE, u'skos:narrower'),
u'BT': (SKOS_NAMESPACE, u'skos:broader'),
u'USE': (ZTHES_NAMESPACE, u'z:useInstead'),
u'UF': (ZTHES_NAMESPACE, u'z:useFor'),
}
doc = amara.parse(sys.argv[1])
w = structwriter(indent=u"yes").feed(
ROOT(
E((RDF_NAMESPACE, u'rdf:RDF'),
NS(u'skos', SKOS_NAMESPACE),
NS(u'z', ZTHES_NAMESPACE),
(
E(
(SKOS_NAMESPACE, u'skos:Concept'),
{(RDF_NAMESPACE, u'rdf:ID'): term.xml_select(u'string(termId)')},
E((SKOS_NAMESPACE, u'skos:prefLabel'), term.xml_select(u'string(termName)')),
(E((SKOS_NAMESPACE, u'skos:note'),
E((SKOS_NAMESPACE, u'skos:Note'),
E((RDF_NAMESPACE, u'rdf:label'), note.xml_select(u'string(@label)')),
E((RDF_NAMESPACE, u'rdf:value'), note.xml_select(u'string(.)'))
)
) for note in term.xml_select(u'termNote') ),
(E(RELATION_LOOKUP.get(rel.xml_select(u'string(relationType)'), (ZTHES_NAMESPACE, u'z:'+rel.xml_local)),
{(RDF_NAMESPACE, u'rdf:resource'): rel.xml_select(u'concat("#", termId)')}
) for rel in term.xml_select(u'relation') )
)
#E((SKOS_NAMESPACE, u'skos:note'), term.xml_select(u'string(termName)')),
for term in doc.xml_select(u'/Zthes/term'))
)
))
print
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/zthes2skos.py
|
zthes2skos.py
|
import sys
print '---------' 'Grouping'
import itertools
import operator
import amara
from amara.writers.struct import *
XML="""\
<env>
<a id="1"/>
<b id="1.1"/>
<c id="1.2"/>
<a id="2"/>
<b id="2.1"/>
<c id="2.2"/>
<a id="3"/>
<b id="3.1"/>
<c id="3.2"/>
</env>
"""
doc = amara.parse(XML)
leaves = sorted(doc.xml_select(u'/env/*'), key=operator.attrgetter('xml_name'))
w = structwriter(indent=u"yes")
w.feed(
ROOT(
E(u'env',
( E(ename + u'-wrapper',
( E(ename, e.xml_attributes.copy(), e.xml_children) for e in elems )
) for ename, elems in itertools.groupby(leaves, lambda x: x.xml_qname) ),
)
))
print
print '---------' 'Bindery and operator'
RSS = 'http://feeds.feedburner.com/ClevelandClinicHealthTalkPodcast'
ATOM1 = 'http://uche.ogbuji.net/tech/publications.atom'
import operator
import itertools
from amara import bindery
ATOM1 = 'http://zepheira.com/news/atom/entries/'
ATOM2 = 'http://ma.gnolia.com/atom/full/people/Uche'
doc1 = bindery.parse(ATOM1)
doc2 = bindery.parse(ATOM2)
combined = itertools.chain(*[doc.feed.entry for doc in (doc1, doc2)])
for node in sorted(combined, key=operator.attrgetter('updated')):
print node.title
print '---------' 'Merge XBEL'
BM1 = 'bm1.xbel'
BM2 = 'bm2.xbel'
from amara import bindery, xml_print
def merge_folders(folder1, folder2):
    """Merge XBEL <folder> element folder2 into folder1, in place.

    Child folders with matching titles are merged recursively; every other
    child (except <title>, which folder1 already has) is moved over.
    """
    #Yes, the list must be copied to avoid mutate-while-iterate bugs
    for child in folder2.xml_select('*'):
        #No need to copy title element
        if child.xml_qname == u'title': continue
        elif child.xml_qname == u'folder':
            # Recurse into the first same-titled folder, if any
            for a_folder in folder1.folder:
                if unicode(child.title) == unicode(a_folder.title):
                    merge_folders(a_folder, child)
                    break
            else:
                # for/else: no title matched, so adopt the whole subtree
                folder1.xml_append(child)
        else:
            folder1.xml_append(child)
    return
def xbel_merge(xbel1, xbel2):
    """Merge top-level folders and bookmarks of xbel2 into xbel1, in place.

    Folders are matched by title and merged via merge_folders; bookmarks
    are simply appended. Other children of xbel2 are ignored.
    """
    for child in xbel2.xml_select('*'):
        if child.xml_qname == u'folder':
            for a_folder in xbel1.folder:
                if unicode(child.title) == unicode(a_folder.title):
                    merge_folders(a_folder, child)
                    break
            else:
                # for/else: no same-titled folder in xbel1
                xbel1.xml_append(child)
        elif child.xml_qname == u'bookmark':
            xbel1.xml_append(child)
    return
doc1 = bindery.parse(BM1)
doc2 = bindery.parse(BM2)
xbel_merge(doc1.xbel, doc2.xbel)
xml_print(doc1, indent=True)
print
print '---------' 'Merge XBEL by grouping iterators'
BM1 = 'bm1.xbel'
BM2 = 'bm2.xbel'
import itertools
import functools
from amara import bindery, xml_print
from amara.bindery.util import property_str_getter
title_getter = functools.partial(property_str_getter, 'title')
def merge(f1, f2):
    """Merge XBEL container f2 into f1 using grouping iterators, in place.

    Folders from both containers are sorted and grouped by title; each
    group collapses into its first member (recursively merged), then all
    non-folder, non-title children of f2 are appended to f1.
    """
    folders = sorted(itertools.chain(f1.folder or [], f2.folder or []),
                     key=title_getter)
    folder_groups = itertools.groupby(folders, title_getter)
    for ftitle, folders in folder_groups:
        # First member of the group absorbs the rest
        main = folders.next()
        rest = list(folders)
        for f in rest:
            merge(main, f)
        if main.xml_parent != f1:
            # main originally lived in f2; re-home it under f1
            f1.xml_append(main)
    #All non-folder, non-title elements
    for e in f2.xml_select(u'*[not(self::folder or self::title)]'):
        f1.xml_append(e)
    return
doc1 = bindery.parse(BM1)
doc2 = bindery.parse(BM2)
merge(doc1.xbel, doc2.xbel)
xml_print(doc1, indent=True)
print
print '---------' 'Merge XBEL by grouping iterators, with model'
XBEL_DTDECL = '''<!DOCTYPE xbel PUBLIC
"+//IDN python.org//DTD XML Bookmark Exchange Language 1.0//EN//XML"
"http://www.python.org/topics/xml/dtds/xbel-1.0.dtd">'''
XBEL_MODEL = '''<?xml version="1.0"?>
<xbel version="1.0" xmlns:eg="http://examplotron.org/0/" xmlns:ak="http://purl.org/xml3k/akara/xmlmodel">
<info eg:occurs="?">
<metadata owner="http://example.com" eg:occurs="?">MD1</metadata>
</info>
<folder eg:occurs="*">
<info eg:occurs="?">
<metadata owner="http://example.com" eg:occurs="?">MD1</metadata>
</info>
<title>F1</title>
<bookmark href="http://www.example.com" eg:occurs="*">
<info eg:occurs="?">
<metadata owner="http://example.com" eg:occurs="?">MD1</metadata>
</info>
<title eg:occurs="?">B1</title>
<desc eg:occurs="?">DESC-B1</desc>
</bookmark>
</folder>
<bookmark href="http://www.example.com">
<info eg:occurs="?">
<metadata owner="http://example.com" eg:occurs="?">MD1</metadata>
</info>
<title eg:occurs="?">B1</title>
<desc eg:occurs="?">DESC-B1</desc>
</bookmark>
</xbel>
'''
#...
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/7days/day3.py
|
day3.py
|
import sys
print '---------' 'Basic constraints'
from amara import bindery, xml_print
from amara.bindery.model import *
MONTY_XML = """<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>"""
doc = bindery.parse(MONTY_XML)
c = constraint(u'@ministry')
try:
doc.monty.python.xml_model.add_constraint(c, validate=True)
except bindery.BinderyError, e:
print e
doc.monty.python.xml_attributes[None, u'ministry'] = u'argument'
doc.monty.python.xml_model.add_constraint(c, validate=True)
#print dir(doc)
#print dir(doc.monty.python)
#print doc.monty.python.xml_namespaces[None]
#print doc.xml_namespaces
print
print '---------' 'Attribute constraint class'
doc = bindery.parse(MONTY_XML)
c = attribute_constraint(None, u'ministry', u'nonesuch')
doc.monty.python.xml_model.add_constraint(c, validate=True)
xml_print(doc)
print
print '---------' 'Child element constraint class'
SVG = """<?xml version="1.0" encoding="utf-8"?>
<svg version="1.1" baseProfile="full"
xmlns="http://www.w3.org/2000/svg">
<title>A pair of lines and a pair of ellipses</title>
<g>
<ellipse cx="150" cy="100" rx="100" ry="50"/>
<line x1="450" y1="50" x2="550" y2="150"/>
</g>
<g>
<title>Rotated shapes</title>
<ellipse cx="150" cy="300" rx="100" ry="50"
transform="rotate(20)"/>
<line x1="350" y1="200" x2="450" y2="300"
transform="rotate(20)"/>
</g>
</svg>
"""
from amara.namespaces import *
doc = bindery.parse(SVG)
c = child_element_constraint(SVG_NAMESPACE, u'title', u'[NO TITLE]')
doc.svg.g.xml_model.add_constraint(c, validate=True)
xml_print(doc)
print
print '---------' 'Examplotron model def'
LABEL_MODEL = '''<?xml version="1.0" encoding="utf-8"?>
<labels>
<label>
<name>[Addressee name]</name>
<address>
<street>[Address street info]</street>
<city>[City]</city>
<state>[State abbreviation]</state>
</address>
</label>
</labels>
'''
VALID_LABEL_XML = '''<?xml version="1.0" encoding="utf-8"?>
<labels>
<label>
<name>Thomas Eliot</name>
<address>
<street>3 Prufrock Lane</street>
<city>Stamford</city>
<state>CT</state>
</address>
</label>
</labels>
'''
INVALID_LABEL_XML = '''<?xml version="1.0" encoding="utf-8"?>
<labels>
<label>
<quote>What thou lovest well remains, the rest is dross</quote>
<name>Ezra Pound</name>
<address>
<street>45 Usura Place</street>
<city>Hailey</city>
<state>ID</state>
</address>
</label>
</labels>
'''
from amara.bindery.model import *
label_model = examplotron_model(LABEL_MODEL)
doc = bindery.parse(VALID_LABEL_XML, model=label_model)
doc.xml_validate()
doc = bindery.parse(INVALID_LABEL_XML, model=label_model)
try:
doc.xml_validate()
except bindery.BinderyError, e:
print e
xml_print(doc)
print
print '---------' 'Binding defaults'
LABEL_MODEL = '''<?xml version="1.0" encoding="utf-8"?>
<labels>
<label>
<quote>What thou lovest well remains, the rest is dross</quote>
<name>Ezra Pound</name>
<address>
<street>45 Usura Place</street>
<city>Hailey</city>
<state>ID</state>
</address>
</label>
</labels>
'''
TEST_LABEL_XML = '''<?xml version="1.0" encoding="utf-8"?>
<labels>
<label>
<name>Thomas Eliot</name>
<address>
<street>3 Prufrock Lane</street>
<city>Stamford</city>
<state>CT</state>
</address>
</label>
</labels>
'''
#An alternative to fixup that updates the binding object behavior without updating the XML itself
#Note: you can choose to combine binding defaults with fixup, if you like, but many usage patterns will tend to use one or the other: either you want a mechanism to normalize the XML itself, or a mechanism for smooth handling of non-normalized XML
#doc = bindery.parse(LABEL_XML)
#doc.labels.label.xml_model.set_default_value(None, u'quote', None)
from amara.bindery.model import *
label_model = examplotron_model(LABEL_MODEL)
doc = bindery.parse(TEST_LABEL_XML, model=label_model)
print doc.labels.label.quote #None, rather than raising AttributeError
#doc.xml_validate()
sys.exit(0)
doc = bindery.parse(INVALID_LABEL_XML, model=label_model)
doc.labels.label.xml_model.debug(doc.labels.label)
doc.labels.label.xml_model.validate()
doc.xml_model.validate()
print
print '---------' 'Bindery and operator'
RSS = 'http://feeds.feedburner.com/ClevelandClinicHealthTalkPodcast'
ATOM1 = 'http://uche.ogbuji.net/tech/publications.atom'
import operator
import itertools
from amara import bindery
ATOM1 = 'http://zepheira.com/news/atom/entries/'
ATOM2 = 'http://ma.gnolia.com/atom/full/people/Uche'
doc1 = bindery.parse(ATOM1)
doc2 = bindery.parse(ATOM2)
combined = itertools.chain(*[doc.feed.entry for doc in (doc1, doc2)])
for node in sorted(combined, key=operator.attrgetter('updated')):
print node.title
print '---------' 'Merge XBEL'
BM1 = 'bm1.xbel'
BM2 = 'bm2.xbel'
from amara import bindery, xml_print
def merge_folders(folder1, folder2):
    """Merge XBEL <folder> element folder2 into folder1, in place.

    Child folders with matching titles are merged recursively; every other
    child (except <title>, which folder1 already has) is moved over.
    """
    #Yes, the list must be copied to avoid mutate-while-iterate bugs
    for child in folder2.xml_select('*'):
        #No need to copy title element
        if child.xml_qname == u'title': continue
        elif child.xml_qname == u'folder':
            # Recurse into the first same-titled folder, if any
            for a_folder in folder1.folder:
                if unicode(child.title) == unicode(a_folder.title):
                    merge_folders(a_folder, child)
                    break
            else:
                # for/else: no title matched, so adopt the whole subtree
                folder1.xml_append(child)
        else:
            folder1.xml_append(child)
    return
def xbel_merge(xbel1, xbel2):
    """Merge top-level folders and bookmarks of xbel2 into xbel1, in place.

    Folders are matched by title and merged via merge_folders; bookmarks
    are simply appended. Other children of xbel2 are ignored.
    """
    for child in xbel2.xml_select('*'):
        if child.xml_qname == u'folder':
            for a_folder in xbel1.folder:
                if unicode(child.title) == unicode(a_folder.title):
                    merge_folders(a_folder, child)
                    break
            else:
                # for/else: no same-titled folder in xbel1
                xbel1.xml_append(child)
        elif child.xml_qname == u'bookmark':
            xbel1.xml_append(child)
    return
doc1 = bindery.parse(BM1)
doc2 = bindery.parse(BM2)
xbel_merge(doc1.xbel, doc2.xbel)
xml_print(doc1, indent=True)
print
print '---------' 'Merge XBEL by grouping iterators'
BM1 = 'bm1.xbel'
BM2 = 'bm2.xbel'
import itertools
import functools
from amara import bindery, xml_print
from amara.bindery.util import property_str_getter
title_getter = functools.partial(property_str_getter, 'title')
def merge(f1, f2):
    """Merge XBEL container f2 into f1 using grouping iterators, in place.

    Folders from both containers are sorted and grouped by title; each
    group collapses into its first member (recursively merged), then all
    non-folder, non-title children of f2 are appended to f1.
    """
    folders = sorted(itertools.chain(f1.folder or [], f2.folder or []),
                     key=title_getter)
    folder_groups = itertools.groupby(folders, title_getter)
    for ftitle, folders in folder_groups:
        # First member of the group absorbs the rest
        main = folders.next()
        rest = list(folders)
        for f in rest:
            merge(main, f)
        if main.xml_parent != f1:
            # main originally lived in f2; re-home it under f1
            f1.xml_append(main)
    #All non-folder, non-title elements
    for e in f2.xml_select(u'*[not(self::folder or self::title)]'):
        f1.xml_append(e)
    return
doc1 = bindery.parse(BM1)
doc2 = bindery.parse(BM2)
merge(doc1.xbel, doc2.xbel)
xml_print(doc1, indent=True)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/7days/day4.py
|
day4.py
|
print '---------' 'Core tree'
import amara
from amara import tree
MONTY_XML = """<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>"""
doc = amara.parse(MONTY_XML)
#Node types use string rather than numerical constants now
#The root node type is called entity
assert doc.xml_type == tree.entity.xml_type
m = doc.xml_children[0] #xml_children is a sequence of child nodes
assert m.xml_local == u'monty' #local name, i.e. without any prefix
assert m.xml_qname == u'monty' #qualified name, e.g. includes prefix
assert m.xml_prefix == None
assert m.xml_qname == u'monty' #qualified name, e.g. includes prefix
assert m.xml_namespace == None
assert m.xml_name == (None, u'monty') #The "universal name" or "expanded name"
assert m.xml_parent == doc
p1 = m.xml_children[0]
from amara import xml_print
#<python spam="eggs">What do you mean "bleh"</python>
xml_print(p1)
print
print p1.xml_attributes[(None, u'spam')]
#Some manipulation
p1.xml_attributes[(None, u'spam')] = u"greeneggs"
p1.xml_children[0].xml_value = u"Close to the edit"
xml_print(p1)
print
print
print '---------' 'Bindery'
from amara import bindery
from amara import xml_print
MONTY_XML = """<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>"""
doc = bindery.parse(MONTY_XML)
m = doc.monty
p1 = doc.monty.python #or m.python; p1 is just the first python element
print
print p1.xml_attributes[(None, u'spam')]
print p1.spam
for p in doc.monty.python: #The loop will pick up both python elements
xml_print(p)
print
print
print '---------' 'DOM'
from amara import dom
from amara import xml_print
MONTY_XML = """<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>"""
doc = dom.parse(MONTY_XML)
for p in doc.getElementsByTagNameNS(None, u"python"): #A generator
xml_print(p)
print
p1 = doc.getElementsByTagNameNS(None, u"python").next()
print p1.getAttributeNS(None, u'spam')
print
print '---------' 'XPath'
from amara import bindery
from amara import xml_print
MONTY_XML = """<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>"""
doc = bindery.parse(MONTY_XML)
m = doc.monty
p1 = doc.monty.python
print p1.xml_select(u'string(@spam)')
for p in doc.xml_select(u'//python'):
xml_print(p)
print
print
print '---------' 'HTML'
import html5lib
from html5lib import treebuilders
from amara.bindery import html
from amara import xml_print
f = open("eg.html")
parser = html5lib.HTMLParser(tree=html.treebuilder)
doc = parser.parse(f)
print unicode(doc.html.head.title)
xml_print(doc.html.head.title)
print
print doc.xml_select(u"string(/html/head/title)")
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/demo/7days/day2.py
|
day2.py
|
def process_filters(event_type, event_data, depth=0):
    """Dispatch a SAX-like event through the chain of filters.

    Active filters receive the event via their handler table (or their
    default handler); the first one returning ExpatFilter.HANDLED stops
    propagation. The first inactive filter whose criteria match becomes
    active and gets a START_FILTER event instead.

    NOTE(review): 'filters' and 'criteria' are globals not visible in
    this chunk -- confirm where they are bound (sandbox sketch code).
    """
    for filter in filters:
        if filter.active:
            # Track element nesting depth for this filter
            filter.depth += depth
            if filter.handlers[event_type]:
                status = filter.handlers[event_type](*event_data)
            else:
                status = filter.default
            if status == ExpatFilter.HANDLED:
                # Event consumed; stop propagating down the chain
                break
        elif criteria.matches(event_type, event_data):
            filter.active = True
            if filter.handlers[START_FILTER]:
                filter.handlers[START_FILTER](*event_data)
            break
    return
def startDocument():
    """Hand-expanded variant of process_filters for the start-document event.

    NOTE(review): 'event_type' and 'event_data' are referenced in the
    elif branch but never defined in this scope -- that branch would
    raise NameError if reached. Unfinished sandbox sketch; compare with
    process_filters above.
    """
    for filter in filters:
        if filter.active:
            filter.depth += 1
            if filter.startDocument:
                status = filter.startDocument()
            else:
                status = filter.defaultHandler()
            if status == ExpatFilter.HANDLED:
                break
        elif criteria.matches(event_type, event_data):
            filter.active = True
            if filter.startFilter:
                filter.startFilter(*event_data)
            break
def endDocument():
    # End of document: no payload, nesting depth delta of -1
    process_filters(END_DOCUMENT, (), -1)
def startElement(*args):
    # Element open: forward the event, nesting depth delta of +1
    process_filters(START_ELEMENT, args, 1)
def endElement(*args):
    # Element close: forward the event, nesting depth delta of -1
    process_filters(END_ELEMENT, args, -1)
def characters(*args):
    """Forward a character-data event to the filter chain (depth unchanged).

    Fix: the original definition was missing the colon after the
    parameter list, which made this module a SyntaxError.
    """
    process_filters(CHARACTERS, args)
def ignorableWhitespace(*args):
    # Ignorable whitespace: forward the event, depth unchanged
    process_filters(IGNORABLE_WHITESPACE, args)
class simple_string_element(HandlerType):
    """Buffers an element's text content, then reports the joined string
    as an attribute to the next handler in the chain."""
    def start_filter(self, expandedName, tagName):
        # Start collecting character data for this element
        self.value = []
    def end_filter(self, expandedName, tagName):
        # It's possible that the parentNode may already have an attribute of
        # the same name (via a child element).
        value = u''.join(self.value)
        self.chain_next.attribute(expandedName, tagName, value)
    def start_element(self, expandedName, tagName, attributes):
        # Child elements are unexpected inside a "simple string" element
        warn()
    def characters(self, data):
        self.value.append(data)
class omit_element(HandlerType):
    """Drops all descendant events"""
    # No handlers defined: every event inside the matched element is ignored.
class element_skeleton(FilterType):
    """Drops all character data for the matching element and descendants"""
    def characters(self, data):
        # Swallow regular character data
        pass
    def whitespace(self, data):
        # Swallow ignorable whitespace as well
        pass
class ws_strip_element(FilterType):
    """Drops all ignorable whitespace for the matching element and descendants"""
    def whitespace(self, data):
        # Discard the whitespace event entirely
        pass
class ws_preserve_element(FilterType):
    """Converts ignorable whitespace into regular character data"""
    def whitespace(self, data):
        # Re-emit as ordinary characters to the next handler in the chain
        self.chain_next.characters(data)
class type_inference(FilterType):
    """Stores attributes as a more specific type"""
    def start_element(self, expandedName, tagName, attributes):
        # Coerce each attribute string to a richer type in place where possible;
        # infer_data_from_string returns None when no better type applies.
        for key, value in attributes.items():
            value = infer_data_from_string(value)
            if value is not None:
                attributes[key] = value
        self.chain_next.start_element(expandedName, tagName, attributes)
    def attribute(self, expandedName, name, value):
        # Same coercion for attributes reported individually
        typed_value = infer_data_from_string(value)
        if typed_value is not None:
            value = typed_value
        self.chain_next.attribute(expandedName, name, value)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/sandbox/filters.py
|
filters.py
|
import amara
from itertools import chain
from amara import bindery
import time
import datetime
import sys
N = 5000
SIMPLEDOC = ''.join(chain(['<a>'], [ '<b/>' for i in xrange(N) ], ['</a>']))
ATTRDOC = ''.join(chain(['<a>'], [ "<b c='%i'/>"%i for i in xrange(N) ], ['</a>']))
def timeit(f, *args):
    """Time f(*args): autorange the repeat count, report best of 3 batches.

    Returns a tuple (result, milliseconds_per_call) where result is the
    value of the final call to f.
    """
    count = 1
    # Grow the repeat count 10x until one timed batch takes >= 0.1s
    while True:
        started = time.time()
        for _ in range(count):
            result = f(*args)
        elapsed = time.time() - started
        if elapsed >= 0.1:
            break
        count *= 10
    timings = [elapsed]
    # Two more batches at the calibrated count; keep the fastest
    for _ in range(2):
        started = time.time()
        for _ in range(count):
            result = f(*args)
        timings.append(time.time() - started)
    return result, min(timings) / count * 1000  # in ms
#EXERCISE 1: Testing speed of parse
def amara_parse1():
    """Time amara.parse on the attribute-free document; return ms/call."""
    result, dt = timeit(amara.parse, SIMPLEDOC)
    return dt
#EXERCISE 2: Parse once and test speed of XPath using descendant-or-self, with large result
def amara_parse2():
    """Time the //b select (N matches) over a pre-parsed doc; return ms/call."""
    doc = amara.parse(SIMPLEDOC)
    result, dt = timeit(doc.xml_select, u'//b')
    assert len(result) == N
    return dt
#EXERCISE 3: Parse once and test speed of XPath using descendant-or-self, with empty result
def amara_parse3():
    """Time the //c select (no matches) over a pre-parsed doc; return ms/call."""
    doc = amara.parse(SIMPLEDOC)
    result, dt = timeit(doc.xml_select, u'//c')
    assert len(result) == 0
    return dt
#EXERCISE 4: Testing speed of parse, part 2
def amara_parse4():
    """Time amara.parse on the attribute-bearing document; return ms/call."""
    result, dt = timeit(amara.parse, ATTRDOC)
    return dt
#EXERCISE 5: Parse once and test speed of XPath using descendant-or-self with attribute predicate (small result)
def amara_parse5():
    """Time the //b[@c='10'] select (one match); return ms/call."""
    doc = amara.parse(ATTRDOC)
    result, dt = timeit(doc.xml_select, u"//b[@c='10']")
    assert len(result) == 1
    return dt
#EXERCISE 1: Testing speed of parse
def bindery_parse1():
    """Bindery counterpart of amara_parse1; return ms/call."""
    result, dt = timeit(bindery.parse, SIMPLEDOC)
    return dt
#EXERCISE 2: Parse once and test speed of XPath using descendant-or-self, with large result
def bindery_parse2():
    """Bindery counterpart of amara_parse2; return ms/call."""
    doc = bindery.parse(SIMPLEDOC)
    result, dt = timeit(doc.xml_select, u'//b')
    assert len(result) == N
    return dt
#EXERCISE 3: Parse once and test speed of XPath using descendant-or-self, with empty result
def bindery_parse3():
    """Bindery counterpart of amara_parse3; return ms/call."""
    doc = bindery.parse(SIMPLEDOC)
    result, dt = timeit(doc.xml_select, u'//c')
    assert len(result) == 0
    return dt
#EXERCISE 4: Testing speed of parse, part 2
def bindery_parse4():
    """Bindery counterpart of amara_parse4; return ms/call."""
    result, dt = timeit(bindery.parse, ATTRDOC)
    return dt
#EXERCISE 5: Parse once and test speed of XPath using descendant-or-self with attribute predicate (small result)
def bindery_parse5():
    """Bindery counterpart of amara_parse5; return ms/call."""
    doc = bindery.parse(ATTRDOC)
    result, dt = timeit(doc.xml_select, u"//b[@c='10']")
    assert len(result) == 1
    return dt
row_names = [
"Parse once (no attributes)",
" descendant-or-self, many results",
" descendant-or-self, no results",
"Parse once (with attributes)",
" descendant-or-self w/ attribute, 1 result",
]
colwidth = max(len(name) for name in row_names)
header_format = "%" + str(colwidth) + "s %10s %10s"
row_format = "%-" + str(colwidth) + "s:" + " %8.2f ms %8.2f ms"
amara_parse_tests = [amara_parse1, amara_parse2, amara_parse3, amara_parse4, amara_parse5]
bindery_parse_tests = [bindery_parse1, bindery_parse2, bindery_parse3, bindery_parse4,
bindery_parse5]
now = datetime.datetime.now().isoformat().split("T")[0]
class TextReporter(object):
    """Writes benchmark results as aligned plain text (Python 2 print).

    Uses the module-level header_format/row_format templates and `now`.
    """
    def start(self):
        print "Parse and select timings for Amara", amara.__version__
        print "Started on %s. Reporting best of 3 tries" % (now,)
    def header(self):
        # Column headings for the two implementations being compared
        print header_format % ("", "core tree", "bindery")
    def row(self, cols):
        # cols is (label, core_tree_ms, bindery_ms)
        print row_format % cols
class MarkupReporter(object):
    """Writes benchmark results as a MoinMoin-style wiki table."""
    def __init__(self):
        # Running exercise number for the table's first column
        self.exercise = 1
    def start(self):
        print "== Amara", amara.__version__, "on", now, "=="
    def header(self):
        print "||Exercise||N||Amara 2.x core tree||Amara 2.x bindery||"
    def row(self, cols):
        # The label column is dropped; rows are numbered instead
        label, dt1, dt2 = cols
        print "||%d||%d||%.2f msec||%.2f msec||" % (self.exercise, N,
                                                    dt1, dt2)
        self.exercise += 1
def report(reporter):
    """Run every core-tree/bindery benchmark pair through the reporter.

    :param reporter: object with start()/header()/row() methods
        (TextReporter or MarkupReporter)
    """
    reporter.start()
    reporter.header()
    # Pair up the matching core-tree and bindery exercises by position
    for (label, f1, f2) in zip(row_names, amara_parse_tests, bindery_parse_tests):
        reporter.row( (label, f1(), f2()) )
def main():
    """CLI: run the benchmark suite, or profile one benchmark function.

    Options:
        --markup          emit a wiki-style table instead of plain text
        --profile NAME    profile the named benchmark function, save
                          stats to 'profile.out', and exit
    """
    import optparse
    parser = optparse.OptionParser()
    parser.add_option("--markup", dest="markup", action="store_true")
    parser.add_option("--profile", dest="profile")
    options, args = parser.parse_args()
    if options.profile:
        # See if I can find the function.
        func = globals()[options.profile]
        #
        import profile, pstats
        profile.run(options.profile + "()", "profile.out")
        p = pstats.Stats("profile.out")
        # sort_stats(-1) sorts by standard name (the legacy numeric API)
        p.strip_dirs().sort_stats(-1).print_stats()
        print "Profile saved in 'profile.out'"
        return
    if options.markup:
        reporter = MarkupReporter()
    else:
        reporter = TextReporter()
    report(reporter)
if __name__ == "__main__":
main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/sandbox/parsebench.py
|
parsebench.py
|
__all__ = ["parse", 'node', 'entity', 'element', 'attribute', 'comment', 'processing_instruction', 'text']
from amara._domlette import *
from amara._domlette import parse as _parse
from amara.lib import inputsource
#node = Node
#document = Document
#entity = Document
#element = Element
#namespace = Namespace
#attribute = Attr
#comment = Comment
#processing_instruction = ProcessingInstruction
#text = Text
#character_data = CharacterData
#FIXME: and so on
def parse(obj, uri=None, entity_factory=None, standalone=False, validate=False, rule_handler=None):
    '''
    Parse an XML input source and return a tree

    :param obj: object with "text" to parse
    :type obj: string, Unicode object (only if you really
        know what you're doing), file-like object (stream), file path, URI or
        `amara.inputsource` object
    :param uri: optional document URI. You really should provide this if the input source is a
        text string or stream
    :type uri: string
    :return: Parsed tree object
    :rtype: `amara.tree.entity` instance
    :raises `amara.ReaderError`: If the XML is not well formed, or there are other core parsing errors

    entity_factory - optional factory callable for creating entity nodes. This is the
        main lever for customizing the classes used to construct tree nodes
    standalone - similar to the standalone declaration for XML. Asserts that the XML
        being parsed does not require any resources outside the given input source
        (e.g. on the network). In this case has the side-effect of ignoring such
        external resources if they are encountered (which is where it diverges
        from XML core. In XML core that would be a fatal error)
    validate - whether or not to apply DTD validation
    rule_handler - Handler object used to perform rule matching in incremental processing.

    Examples:

    >>> import amara
    >>> MONTY_XML = """<monty>
    ... <python spam="eggs">What do you mean "bleh"</python>
    ... <python ministry="abuse">But I was looking for argument</python>
    ... </monty>"""
    >>> doc = amara.parse(MONTY_XML)
    >>> len(doc.xml_children)
    1
    '''
    # Map the boolean switches onto the C-level parse flag constants;
    # standalone takes precedence over validate when both are set.
    if standalone:
        flags = PARSE_FLAGS_STANDALONE
    elif validate:
        flags = PARSE_FLAGS_VALIDATE
    else:
        flags = PARSE_FLAGS_EXTERNAL_ENTITIES
    return _parse(inputsource(obj, uri), flags, entity_factory=entity_factory,rule_handler=rule_handler)
#Rest of the functions are deprecated, and will be removed soon
def NonvalParse(isrc, readExtDtd=True, nodeFactories=None):
    '''
    DEPRECATED. Please instead use parse(isrc, validate=False)

    :param isrc: input source, as accepted by parse()
    :param readExtDtd: when True, external entities/DTD may be read;
        when False, parse in standalone mode
    :param nodeFactories: forwarded to parse() as entity_factory
    '''
    import warnings
    if readExtDtd:
        warnings.warn("use parse(source, PARSE_FLAGS_EXTERNAL_ENTITIES"
                      "[, node_factories]) instead",
                      DeprecationWarning, stacklevel=2)
        standalone = False
    else:
        warnings.warn("use parse(source, PARSE_FLAGS_STANDALONE"
                      "[, node_factories]) instead",
                      DeprecationWarning, stacklevel=2)
        standalone = True
    # Fix: the old shim passed the flag constant positionally, which landed
    # in the `uri` parameter of the new parse() signature (so the flags were
    # silently ignored).  Map to the keyword arguments instead.  Also made
    # the second warning use stacklevel=2 like the first, so it points at
    # the caller.
    return parse(isrc, standalone=standalone, entity_factory=nodeFactories)
def ValParse(isrc, nodeFactories=None):
    '''
    DEPRECATED. Please instead use parse(isrc)

    :param isrc: input source, as accepted by parse()
    :param nodeFactories: forwarded to parse() as entity_factory
    '''
    import warnings
    warnings.warn("use parse(source, PARSE_FLAGS_VALIDATE[, node_factories]) "
                  "instead",
                  DeprecationWarning, stacklevel=2)
    # Fix: the old shim passed PARSE_FLAGS_VALIDATE positionally, which
    # landed in the `uri` parameter of the new parse() signature, so DTD
    # validation was never actually requested.  Use the validate keyword.
    return parse(isrc, validate=True, entity_factory=nodeFactories)
def ParseFragment(*args, **kwds):
    '''
    DEPRECATED. Please instead use parse(isrc)

    Thin shim: emits a DeprecationWarning and forwards all arguments.
    '''
    import warnings
    warnings.warn("use parse_fragment(source[, namespaces[, node_factories]]) "
                  "instead",
                  DeprecationWarning)
    # NOTE(review): parse_fragment is not defined in this chunk --
    # presumably provided by `from amara._domlette import *`; confirm.
    return parse_fragment(*args, **kwds)
# ------------------------------------------------------------
# Experimental sendtree() support. This should not be used
# for any kind of production. It is an experimental prototype
# ------------------------------------------------------------
def sendtree(obj, pattern, target, uri=None, entity_factory=None, standalone=False, validate=False):
    """Experimental: parse obj and send() each matching node to target.

    :param pattern: element local name to match (simple equality only)
    :param target: object with a send() method (e.g. a coroutine)
    Remaining parameters are forwarded to parse().
    """
    # This callback does "pattern matching". Nothing really implemented now--just a simple check
    # for an element name match. Eventually build into a pattern matching system.
    def callback(node):
        if node.xml_local == pattern:
            target.send(node)
    # Run the parser
    return parse(obj,uri,entity_factory,standalone,validate,callback)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/tree.py
|
tree.py
|
# Well-known namespace URI constants used throughout Amara.
NULL_NAMESPACE = None
AKARA_NAMESPACE = u"http://purl.org/xml3k/akara/xmlmodel"
AKARA_XSLT_NAMESPACE = u"http://purl.org/xml3k/akara/xslt"
XML_NAMESPACE = u"http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = u"http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = u"http://www.w3.org/1999/xhtml"
XSL_NAMESPACE = u'http://www.w3.org/1999/XSL/Transform'
RNG_NAMESPACE = u"http://relaxng.org/ns/structure/1.0"
EG_NAMESPACE = u"http://examplotron.org/0/"
# Schematron, old and current namespaces.  Made unicode literals for
# consistency: every other constant here is unicode (these were bytes).
OLD_STRON_NAMESPACE = u'http://www.ascc.net/xml/schematron'
STRON_NAMESPACE = u'http://purl.oclc.org/dsdl/schematron'
#XML Linking Language: http://www.w3.org/TR/xlink/
XLINK_NAMESPACE = u"http://www.w3.org/1999/xlink"
XINCLUDE_NAMESPACE = u'http://www.w3.org/2001/XInclude'
SVG_NAMESPACE = u"http://www.w3.org/2000/svg"
#RDF
RDF_NAMESPACE = u"http://www.w3.org/1999/02/22-rdf-syntax-ns#"
RDFS_NAMESPACE = u"http://www.w3.org/2000/01/rdf-schema#"
DC_NAMESPACE = u"http://purl.org/dc/elements/1.1/"
#Note: some people use the old namespace: <http://www.w3.org/2004/02/skos/core#>
SKOS_NAMESPACE = u"http://www.w3.org/2008/05/skos#"
OWL_NAMESPACE = u"http://www.w3.org/2002/07/owl#"
RDF_GROUP = [RDF_NAMESPACE, RDFS_NAMESPACE, DC_NAMESPACE, OWL_NAMESPACE, SKOS_NAMESPACE]
ATOM_NAMESPACE = u'http://www.w3.org/2005/Atom'
ATOMPUB_NAMESPACE = u'http://www.w3.org/2007/app'
ATOMTHR_EXT_NAMESPACE = u'http://purl.org/syndication/thread/1.0'
#obsolete
EXTENSION_NAMESPACE = u'http://xmlns.4suite.org/ext'

# Registry of conventional prefixes for well-known namespaces.  As in the
# original table, XML/XMLNS, the Schematron namespaces and the obsolete
# EXTENSION_NAMESPACE have no registered prefix.
COMMON_NAMESPACES = {
    AKARA_NAMESPACE: u'ak',
    AKARA_XSLT_NAMESPACE: u'ax',
    XHTML_NAMESPACE: u'html',
    XSL_NAMESPACE: u'xsl',
    RNG_NAMESPACE: u'rng',
    EG_NAMESPACE: u'eg',
    XLINK_NAMESPACE: u'xlink',
    XINCLUDE_NAMESPACE: u'xinclude',
    SVG_NAMESPACE: u'svg',
    RDF_NAMESPACE: u'rdf',
    RDFS_NAMESPACE: u'rdfs',
    DC_NAMESPACE: u'dc',
    SKOS_NAMESPACE: u'skos',
    OWL_NAMESPACE: u'owl',
    ATOM_NAMESPACE: u'atom',
    ATOMPUB_NAMESPACE: u'app',
    ATOMTHR_EXT_NAMESPACE: u'thr',
}
# Reverse map: prefix -> namespace URI.  Uses .items() rather than
# .iteritems(): identical result on Python 2, and works on Python 3.
COMMON_PREFIXES = dict((v, k) for (k, v) in COMMON_NAMESPACES.items())
"""
Probably out of date (from pyxml)
#DSIG, XML-Signature Syntax and Processing: http://www.w3.org/TR/xmldsig-core/
DSIG_BASE = u"http://www.w3.org/2000/09/xmldsig#" #basic namespace defined by the specification
DIGEST_SHA1 = BASE + u"sha1" #The SHA-1 digest method
DIGEST_MD2 = BASE + u"md2" #The MD2 digest method
DIGEST_MD5 = BASE + u"md5" #The MD5 digest method
SIG_DSA_SHA1 = BASE + u"dsa-sha1" #The DSA/DHA-1 signature method
SIG_RSA_SHA1 = BASE + u"rsa-sha1" #The RSA/DHA-1 signature method
HMAC_SHA1 = BASE + u"hmac-sha1" #The SHA-1 HMAC method
ENC_BASE64 = BASE + u"base64" #The Base64 encoding method
ENVELOPED = BASE + u"enveloped-signature" #an enveloped XML signature
#C14N
C14N_NAMESPACE = u"http://www.w3.org/TR/2000/CR-xml-c14n-20010315" #XML canonicalization
C14N_COMM_NAMESPACE = C14N + u"#WithComments" #XML canonicalization, retaining comments
C14N_EXCL_NAMESPACE = u"http://www.w3.org/2001/10/xml-exc-c14n#" #XML exclusive canonicalization
XPATH_NAMESPACE = u"http://www.w3.org/TR/1999/REC-xpath-19991116"
XSLT_NAMESPACE = u"http://www.w3.org/TR/1999/REC-xslt-19991116"
SOAPENV_NAMESPACE = u"http://schemas.xmlsoap.org/soap/envelope/"
SOAPENC_NAMESPACE = u"http://schemas.xmlsoap.org/soap/encoding/"
class ENCRYPTION:
\"""ENCRYPTION, XML-Encryption Syntax and Processing
ENCRYPTION (26-Jun-2001) is a W3C Working Draft. It is specified in
http://www.w3.org/TR/xmlenc-core/
BASE -- the basic namespace defined by the specification
BLOCK_3DES -- The triple-DES symmetric encryption method
BLOCK_AES128 -- The 128-bit AES symmetric encryption method
BLOCK_AES256 -- The 256-bit AES symmetric encryption method
BLOCK_AES192 -- The 192-bit AES symmetric encryption method
STREAM_ARCFOUR -- The ARCFOUR symmetric encryption method
KT_RSA_1_5 -- The RSA v1.5 key transport method
KT_RSA_OAEP -- The RSA OAEP key transport method
KA_DH -- The Diffie-Hellman key agreement method
WRAP_3DES -- The triple-DES symmetric key wrap method
WRAP_AES128 -- The 128-bit AES symmetric key wrap method
WRAP_AES256 -- The 256-bit AES symmetric key wrap method
WRAP_AES192 -- The 192-bit AES symmetric key wrap method
DIGEST_SHA256 -- The SHA-256 digest method
DIGEST_SHA512 -- The SHA-512 digest method
DIGEST_RIPEMD160 -- The RIPEMD-160 digest method
\"""
BASE = "http://www.w3.org/2001/04/xmlenc#"
BLOCK_3DES = BASE + "des-cbc"
BLOCK_AES128 = BASE + "aes128-cbc"
BLOCK_AES256 = BASE + "aes256-cbc"
BLOCK_AES192 = BASE + "aes192-cbc"
STREAM_ARCFOUR = BASE + "arcfour"
KT_RSA_1_5 = BASE + "rsa-1_5"
KT_RSA_OAEP = BASE + "rsa-oaep-mgf1p"
KA_DH = BASE + "dh"
WRAP_3DES = BASE + "kw-3des"
WRAP_AES128 = BASE + "kw-aes128"
WRAP_AES256 = BASE + "kw-aes256"
WRAP_AES192 = BASE + "kw-aes192"
DIGEST_SHA256 = BASE + "sha256"
DIGEST_SHA512 = BASE + "sha512"
DIGEST_RIPEMD160 = BASE + "ripemd160"
class SCHEMA:
\"""SCHEMA, XML Schema
XML Schema (30-Mar-2001) is a W3C candidate recommendation. It is
specified in http://www.w3.org/TR/xmlschema-1 (Structures) and
http://www.w3.org/TR/xmlschema-2 (Datatypes). Schema has been under
development for a comparatively long time, and other standards have
at times used earlier drafts. This class defines the most-used, and
sets BASE to the latest.
BASE -- the basic namespace (2001)
XSD1, XSI1 -- schema and schema-instance for 1999
XSD2, XSI2 -- schema and schema-instance for October 2000
XSD3, XSI3 -- schema and schema-instance for 2001
XSD_LIST -- a sequence of the XSDn values
XSI_LIST -- a sequence of the XSIn values
\"""
XSD1 = "http://www.w3.org/1999/XMLSchema"
XSD2 = "http://www.w3.org/2000/10/XMLSchema"
XSD3 = "http://www.w3.org/2001/XMLSchema"
XSD_LIST = [ XSD1, XSD2, XSD3 ]
XSI1 = "http://www.w3.org/1999/XMLSchema-instance"
XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
XSI_LIST = [ XSI1, XSI2, XSI3 ]
BASE = XSD3
class WSDL:
\"""WSDL, Web Services Description Language
WSDL (V1.1, 15-Mar-2001) is a W3C Note. It is specified in
http://www.w3.org/TR/wsdl
BASE -- the basic namespace defined by this specification
BIND_SOAP -- SOAP binding for WSDL
BIND_HTTP -- HTTP GET and POST binding for WSDL
BIND_MIME -- MIME binding for WSDL
\"""
BASE = "http://schemas.xmlsoap.org/wsdl/"
BIND_SOAP = BASE + "soap/"
BIND_HTTP = BASE + "http/"
BIND_MIME = BASE + "mime/"
class RNG:
\"""RELAX NG, schema language for XML
RELAX NG (03-Dec-2001) is a simple schema languge for XML,
published under the auspices of OASIS. The specification, tutorial,
and other information are available from http://www.relaxng.org.
\"""
BASE = "http://relaxng.org/ns/structure/1.0"
class DCMI:
\"""Dublin Core Metadata Initiative
The DCMI defines a commonly-used set of general metadata elements.
There is a base set of elements, a variety of refinements of
those, a set of value encodings, and a 'type vocabulary' used to
describe what something described in metadata actually is (a text,
a physical object, a collection, etc.).
Documentation on the Dublin Core, including recommendations for
encoding Dublin Core metadata in XML and HTML/XHTML can be found
at http://dublincore.org/.
\"""
# not used directly:
BASE = "http://purl.org/dc/"
# the core element set:
DCMES_1_1 = BASE + "elements/1.1/"
DCMES = DCMES_1_1
# standardized additions and refinements:
TERMS = BASE + "terms/"
# type vocabulary:
TYPE = BASE + "dcmitype/"
"""
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/namespaces.py
|
namespaces.py
|
import sys
from amara._expat import SaxReader #Should this be renamed to _saxreader?
# Support xml.sax.make_parser()
# To create a parser using this method, use the following:
# parser = xml.sax.make_parser(['amara.reader'])
create_parser = SaxReader
#Rename to match standards
saxreader = SaxReader
# Amara-specific SAX features
from amara._expat import FEATURE_PROCESS_XINCLUDES
from amara._expat import FEATURE_GENERATOR
# Amara-specific SAX properties
from amara._expat import PROPERTY_WHITESPACE_RULES
from amara._expat import PROPERTY_YIELD_RESULT
from amara import XMLNS_NAMESPACE
#from Ft.Xml.Lib.XmlPrinter import XmlPrinter
class ContentHandler:
    """Interface for receiving logical document content events.
    This is the main callback interface for the Parser. The order of
    events in this interface mirrors the order of the information in the
    document."""
    # NOTE: this is a stub interface (methods intentionally have no bodies
    # beyond their docstrings).  The *ElementNS methods use Python 2
    # tuple-unpacking parameters: the first argument is an
    # (expanded-namespace-uri, local-name) pair.
    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.
        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.
        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
    def startDocument(self):
        """Receive notification of the beginning of a document.
        The parser will invoke this method only once, before any
        other methods in this interface."""
    def endDocument(self):
        """Receive notification of the end of a document.
        The parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""
    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.
        The information from this event is not necessary for normal
        Namespace processing: the XmlParser will automatically replace
        prefixes for element and attribute names.
        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.
        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElementNS event, and all endPrefixMapping events will occur
        after the corresponding endElementNS event, but their order is
        not guaranteed."""
    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.
        See startPrefixMapping for details. This event will always
        occur after the corresponding endElementNS event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""
    def startElementNS(self, (uri, localName), qualifiedName, atts):
        """Signals the start of an element.
        The uri parameter is None for elements which have no namespace,
        the qualifiedName parameter is the raw XML name used in the source
        document, and the atts parameter holds an instance of the
        Attributes class containing the attributes of the element.
        """
    def endElementNS(self, (uri, localName), qualifiedName):
        """Signals the end of an element.
        The uri parameter is None for elements which have no namespace,
        the qualifiedName parameter is the raw XML name used in the source
        document."""
    def characters(self, content):
        """Receive notification of character data.
        The parser will call this method to report each chunk of
        character data. The parser will return all contiguous
        character data in a single chunk."""
class Locator:
    """Interface for associating a parse event with a document
    location. A locator object will return valid results only during
    calls to ContentHandler methods; at any other time, the results are
    unpredictable."""
    # Stub interface: concrete locator objects come from the parser.
    def getColumnNumber(self):
        """Return the column number where the current event ends."""
    def getLineNumber(self):
        """Return the line number where the current event ends."""
    def getSystemId(self):
        """Return the system identifier for the current event."""
class Attributes:
    """Interface for a set of XML attributes.
    Contains a set of XML attributes, accessible by expanded name."""
    # NOTE(review): only __len__ has an implementation; it reads
    # self._values, which nothing in this class sets -- presumably a
    # concrete subclass (e.g. from amara._expat) supplies it.  Confirm.
    def getValue(self, name):
        """Returns the value of the attribute with the given name."""
    def getQNameByName(self, name):
        """Returns the qualified name of the attribute with the given name."""
    def __len__(self):
        """Returns the number of attributes in the list."""
        return len(self._values)
    def __getitem__(self, name):
        """Alias for getValue."""
    def __delitem__(self, name):
        """Removes the attribute with the given name."""
    def __contains__(self, name):
        """Alias for has_key."""
    def has_key(self, name):
        """Returns True if the attribute name is in the list,
        False otherwise."""
    def get(self, name, alternative=None):
        """Return the value associated with attribute name; if it is not
        available, then return the alternative."""
    def keys(self):
        """Returns a list of the names of all attribute in the list."""
    def items(self):
        """Return a list of (attribute_name, value) pairs."""
    def values(self):
        """Return a list of all attribute values."""
class SaxPrinter(ContentHandler):
    """
    A ContentHandler that serializes the result using a 4Suite printer
    """
    #def __init__(self, printer=XmlPrinter(sys.stdout, 'utf-8')):
    def __init__(self, printer=None): #Restore above line once XmlPrinter is ported
        # NOTE(review): with the placeholder default of None, the event
        # methods below will raise AttributeError; callers must currently
        # pass a printer explicitly until XmlPrinter is ported.
        self._printer = printer
        try:
            # Reset is best-effort: printers without reset() are accepted.
            self._printer.reset()
        except AttributeError:
            pass
        # Prefix->URI mappings accumulated since the last startElementNS.
        self._namespaces = {}
        return
    def startDocument(self):
        self._printer.startDocument()
        return
    def endDocument(self):
        self._printer.endDocument()
        return
    def startPrefixMapping(self, prefix, uri):
        # Buffer the mapping; it is flushed with the next element start.
        self._namespaces[prefix] = uri
        return
    # Python 2 tuple-unpacking signature (see ContentHandler).
    def startElementNS(self, (namespaceURI, localName), qualifiedName,
                       attributes):
        # Re-key the attributes by qualified name for the printer.
        attributes = dict([ (attributes.getQNameByName(name), value)
                            for name, value in attributes.items() ])
        self._printer.startElement(namespaceURI, qualifiedName,
                                   self._namespaces, attributes)
        # Mappings delivered; start a fresh batch for the next element.
        self._namespaces = {}
        return
    def endElementNS(self, (namespaceURI, localName), qualifiedName):
        self._printer.endElement(namespaceURI, qualifiedName)
        return
    def characters(self, data):
        self._printer.text(data)
        return
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/reader.py
|
reader.py
|
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE
from version import version_info
__version__ = '.'.join(version_info)
#FIXME: Proper i18n soon
def _(text):
    """Identity stand-in for gettext translation until proper i18n lands."""
    return text
class Error(Exception):
    """Base class for code-keyed Amara exceptions.

    Subclasses provide a {code: message-template} table via
    _load_messages(); the table is loaded lazily, on first instantiation.
    Message templates may contain named %-substitutions filled from the
    constructor's keyword arguments.
    """
    message = ''
    _message_table = None
    # defer localization of the messages until needed
    def __new__(cls, code, *args, **kwds):
        if cls._message_table is None:
            cls._message_table = cls._load_messages()
            # change `cls.__new__` to default __new__ as loading is complete
            # and will just waste cycles.
            cls.__new__ = Exception.__new__
        return Exception.__new__(cls)
    def __init__(self, code, **kwds):
        # __new__ guarantees the table is populated before we get here.
        assert self._message_table is not None
        message = self._message_table[code]
        # Fill named %-substitutions (e.g. %(attr)s) from the keywords.
        if kwds:
            message %= kwds
        Exception.__init__(self, code, message)
        self.code = code
        self.message = message
        # map keywords into attributes
        for name, value in kwds.iteritems():
            setattr(self, name, value)
    def __str__(self):
        return self.message
    @classmethod
    def _load_messages(cls):
        """Subclass hook: return the {code: message-template} mapping."""
        raise NotImplementedError("subclass %s must override" % cls.__name__)
class ReaderError(Error):
    """
    Exception class for errors specific to XML reading
    (at a level above standard, non-namespace-aware parsing)

    Instances carry the error `code` plus the source location
    (systemId, lineNumber, columnNumber) at which the error occurred.
    """
    # Fatal errors
    # Note: These are actual Expat error codes redefined here to allow for
    # translation of the error messages.
    #NO_MEMORY = 1                     # mapped to MemoryError
    SYNTAX_ERROR = 2
    NO_ELEMENTS = 3
    INVALID_TOKEN = 4
    UNCLOSED_TOKEN = 5
    PARTIAL_CHAR = 6
    TAG_MISMATCH = 7
    DUPLICATE_ATTRIBUTE = 8
    JUNK_AFTER_DOCUMENT_ELEMENT = 9
    ILLEGAL_PARAM_ENTITY_REF = 10
    UNDEFINED_ENTITY = 11
    RECURSIVE_ENTITY_REF = 12
    ASYNC_ENTITY = 13
    BAD_CHAR_REF = 14
    BINARY_ENTITY_REF = 15
    ATTRIBUTE_EXTERNAL_ENTITY_REF = 16
    MISPLACED_XML_PI = 17
    UNKNOWN_ENCODING = 18
    INCORRECT_ENCODING = 19
    UNCLOSED_CDATA_SECTION = 20
    EXTERNAL_ENTITY_HANDLING = 21
    NOT_STANDALONE = 22
    #UNEXPECTED_STATE = 23             # mapped to SystemError
    ENTITY_DECLARED_IN_PE = 24
    #FEATURE_REQUIRES_XML_DTD = 25     # mapped to SystemError
    #CANT_CHANGE_FEATURE_ONCE_PARSING = 26  # mapped to SystemError
    UNBOUND_PREFIX = 27
    UNDECLARED_PREFIX = 28
    INCOMPLETE_PE = 29
    INVALID_XML_DECL = 30
    INVALID_TEXT_DECL = 31
    INVALID_PUBLICID = 32
    #SUSPENDED = 33                    # mapped to SystemError
    #NOT_SUSPENDED = 34                # mapped to RuntimeError
    #ABORTED = 35                      # mapped to SystemError
    #FINISHED = 36                     # mapped to SystemError
    #SUSPEND_PE = 37                   # mapped to SystemError
    RESERVED_PREFIX_XML = 38
    RESERVED_PREFIX_XMLNS = 39
    RESERVED_NAMESPACE_URI = 40
    # Validity errors
    MISSING_DOCTYPE = 1000
    INVALID_ELEMENT = 1001
    ROOT_ELEMENT_MISMATCH = 1002
    UNDECLARED_ELEMENT = 1003
    INCOMPLETE_ELEMENT = 1004
    INVALID_TEXT = 1005
    UNDECLARED_ATTRIBUTE = 1006
    DUPLICATE_ID = 1007
    UNDECLARED_ENTITY = 1008
    INVALID_ENTITY = 1009
    UNDECLARED_NOTATION = 1010
    MISSING_ATTRIBUTE = 1011
    UNDEFINED_ID = 1012               # FIXME: implement
    DUPLICATE_ELEMENT_DECL = 1013
    DUPLICATE_ID_DECL = 1014
    ID_ATTRIBUTE_DEFAULT = 1015
    XML_SPACE_DECL = 1016
    XML_SPACE_VALUES = 1017
    INVALID_NAME_VALUE = 1018
    INVALID_NAME_SEQ_VALUE = 1019
    INVALID_NMTOKEN_VALUE = 1020
    INVALID_NMTOKEN_SEQ_VALUE = 1021
    INVALID_ENUM_VALUE = 1022
    ATTRIBUTE_UNDECLARED_NOTATION = 1023
    ENTITY_UNDECLARED_NOTATION = 1024  # FIXME: implement
    # Warnings
    ATTRIBUTES_WITHOUT_ELEMENT = 2000
    ATTRIBUTE_DECLARED = 2001
    ENTITY_DECLARED = 2002

    def __init__(self, code, systemId, lineNumber, columnNumber, **kwords):
        """Record the error `code` and the document position it occurred at.

        Extra keywords are substituted into the message template and
        mapped onto instance attributes by the base class.
        """
        Error.__init__(self, code, **kwords)
        self.systemId = systemId
        self.lineNumber = lineNumber
        self.columnNumber = columnNumber
        return

    def __str__(self):
        from gettext import gettext as _
        systemId = self.systemId
        # Keep the rendered location ASCII-safe (Python 2 `unicode`).
        if isinstance(systemId, unicode):
            systemId = systemId.encode('unicode_escape')
        return _("In %s, line %s, column %s: %s") % (systemId,
                                                     self.lineNumber,
                                                     self.columnNumber,
                                                     self.message)

    @classmethod
    def _load_messages(cls):
        """Return the localized {code: message-template} table."""
        from gettext import gettext as _
        return {
            # Fatal errors
            ReaderError.SYNTAX_ERROR: _(
                "syntax error"),
            ReaderError.NO_ELEMENTS: _(
                "no element found"),
            ReaderError.INVALID_TOKEN: _(
                "not well-formed (invalid token)"),
            ReaderError.UNCLOSED_TOKEN: _(
                "unclosed token"),
            ReaderError.PARTIAL_CHAR: _(
                "partial character"),
            ReaderError.TAG_MISMATCH: _(
                "mismatched tag"),
            ReaderError.DUPLICATE_ATTRIBUTE: _(
                "duplicate attribute"),
            ReaderError.JUNK_AFTER_DOCUMENT_ELEMENT: _(
                "junk after document element"),
            ReaderError.ILLEGAL_PARAM_ENTITY_REF: _(
                "illegal parameter entity reference"),
            ReaderError.UNDEFINED_ENTITY: _(
                "undefined entity"),
            ReaderError.RECURSIVE_ENTITY_REF: _(
                "recursive entity reference"),
            ReaderError.ASYNC_ENTITY: _(
                "asynchronous entity"),
            ReaderError.BAD_CHAR_REF: _(
                "reference to invalid character number"),
            ReaderError.BINARY_ENTITY_REF: _(
                "reference to binary entity"),
            ReaderError.ATTRIBUTE_EXTERNAL_ENTITY_REF: _(
                "reference to external entity in attribute"),
            ReaderError.MISPLACED_XML_PI: _(
                "XML or text declaration not at start of entity"),
            ReaderError.UNKNOWN_ENCODING: _(
                "unknown encoding"),
            ReaderError.INCORRECT_ENCODING: _(
                "encoding specified in XML declaration is incorrect"),
            ReaderError.UNCLOSED_CDATA_SECTION: _(
                "unclosed CDATA section"),
            ReaderError.EXTERNAL_ENTITY_HANDLING: _(
                "error in processing external entity reference"),
            ReaderError.NOT_STANDALONE: _(
                "document is not standalone"),
            ReaderError.ENTITY_DECLARED_IN_PE: _(
                "entity declared in parameter entity"),
            ReaderError.UNBOUND_PREFIX: _(
                "unbound prefix"),
            ReaderError.UNDECLARED_PREFIX: _(
                "must not undeclare prefix"),
            ReaderError.INCOMPLETE_PE: _(
                "incomplete markup in parameter entity"),
            ReaderError.INVALID_XML_DECL: _(
                "XML declaration not well-formed"),
            ReaderError.INVALID_TEXT_DECL: _(
                "text declaration not well-formed"),
            ReaderError.INVALID_PUBLICID: _(
                "illegal character(s) in public id"),
            ReaderError.RESERVED_PREFIX_XML: _(
                "reserved prefix (xml) must not be undeclared or bound to "
                "another namespace name"),
            ReaderError.RESERVED_PREFIX_XMLNS: _(
                "reserved prefix (xmlns) must not be declared or undeclared"),
            ReaderError.RESERVED_NAMESPACE_URI: _(
                "prefix must not be bound to one of the reserved namespace "
                "names"),
            # Validity Errors
            ReaderError.MISSING_DOCTYPE: _(
                "Missing document type declaration"),
            ReaderError.INVALID_ELEMENT: _(
                "Element '%(element)s' not allowed here"),
            ReaderError.ROOT_ELEMENT_MISMATCH: _(
                "Document root element '%(element)s' does not match declared "
                "root element"),
            ReaderError.UNDECLARED_ELEMENT: _(
                "Element '%(element)s' not declared"),
            ReaderError.INCOMPLETE_ELEMENT: _(
                "Element '%(element)s' ended before all required elements "
                "found"),
            ReaderError.INVALID_TEXT: _(
                "Character data not allowed in the content of element "
                "'%(element)s'"),
            ReaderError.UNDECLARED_ATTRIBUTE: _(
                "Attribute '%(attr)s' not declared"),
            ReaderError.DUPLICATE_ID: _(
                "ID '%(id)s' appears more than once"),
            ReaderError.UNDECLARED_ENTITY: _(
                "Entity '%(entity)s' not declared"),
            ReaderError.INVALID_ENTITY: _(
                "Entity '%(entity)s' is not an unparsed entity"),
            ReaderError.UNDECLARED_NOTATION: _(
                "Notation '%(notation)s' not declared"),
            ReaderError.MISSING_ATTRIBUTE: _(
                "Missing required attribute '%(attr)s'"),
            ReaderError.UNDEFINED_ID: _(
                "IDREF referred to non-existent ID '%(id)s'"),
            ReaderError.DUPLICATE_ELEMENT_DECL: _(
                "Element '%(element)s' declared more than once"),
            ReaderError.DUPLICATE_ID_DECL: _(
                "Only one ID attribute allowed on each element type"),
            ReaderError.ID_ATTRIBUTE_DEFAULT: _(
                "ID attributes cannot have a default value"),
            ReaderError.XML_SPACE_DECL: _(
                "xml:space must be declared an enumeration type"),
            ReaderError.XML_SPACE_VALUES: _(
                "xml:space must have exactly one or both of the values "
                "'default' and 'preserve'"),
            ReaderError.INVALID_NAME_VALUE: _(
                "Value of '%(attr)s' attribute is not a valid name"),
            ReaderError.INVALID_NAME_SEQ_VALUE: _(
                "Value of '%(attr)s' attribute is not a valid name sequence"),
            ReaderError.INVALID_NMTOKEN_VALUE: _(
                "Value of '%(attr)s' attribute is not a valid name token"),
            ReaderError.INVALID_NMTOKEN_SEQ_VALUE: _(
                "Value of '%(attr)s' attribute is not a valid name token "
                "sequence"),
            # Fixed message: was "'%(value)s in not an allowed value..."
            # (missing closing quote, "in" for "is").
            ReaderError.INVALID_ENUM_VALUE: _(
                "'%(value)s' is not an allowed value for the '%(attr)s' "
                "attribute"),
            ReaderError.ATTRIBUTE_UNDECLARED_NOTATION: _(
                "Notation attribute '%(attr)s' uses undeclared notation "
                "'%(notation)s'"),
            ReaderError.ENTITY_UNDECLARED_NOTATION: _(""),
            # Warnings
            ReaderError.ATTRIBUTES_WITHOUT_ELEMENT: _(
                "Attribute list for undeclared element '%(element)s'"),
            ReaderError.ATTRIBUTE_DECLARED: _(
                "Attribute '%(attr)s' already declared"),
            ReaderError.ENTITY_DECLARED: _(
                "Entity '%(entity)s' already declared"),
            }
class XIncludeError(ReaderError):
    """ReaderError subtype for errors raised during XInclude processing."""
    pass
from amara.tree import parse
from amara.lib import xmlstring as string
#FIXME: Remove this function when amara goes beta
def xml_print(*args, **kwargs):
    """Deprecated pass-through to amara.writers.xml_print.

    Scheduled for removal once Amara reaches beta; prefer the
    xml_write()/xml_encode() node methods.
    """
    import warnings
    from amara.writers import xml_print as _writers_xml_print
    warnings.warn("xml_print() function is deprecated; use xml_write() or xml_encode() method instead")
    return _writers_xml_print(*args, **kwargs)
##from amara.writers._treevisitor import xml_print
import sys
def writer(stream=sys.stdout, **kwargs):
    """Return a user writer bound to *stream*.

    The "method" keyword ("xml" by default) selects the XML writer;
    any other value selects the HTML writer.  Remaining keywords become
    output parameters.
    """
    from amara.writers.outputparameters import outputparameters
    oparams = outputparameters(**kwargs)
    if kwargs.get("method", "xml") == "xml":
        from amara.writers.xmlwriter import _xmluserwriter as writer_class
    else:
        from amara.writers.htmlwriter import _htmluserwriter as writer_class
    return writer_class(oparams, stream)
def launch(source, **kwargs):
    """Parse *source* and serialize the resulting document to output.

    Recognized keywords (all optional, default False):
        validate   -- apply DTD validation
        standalone -- parse with standalone rules
        pretty     -- indent the serialized output
    """
    doc = parse(source, validate=kwargs.get('validate', False),
                standalone=kwargs.get('standalone', False))
    # Bug fix: this used to test `'pretty' in kwargs`, i.e. mere key
    # presence -- but main() always passes pretty=..., so output was
    # indented even for pretty=False.  Test the value instead.
    if kwargs.get('pretty'):
        doc.xml_write('xml-indent')
    else:
        doc.xml_write()
    return
#Ideas borrowed from
# http://www.artima.com/forums/flat.jsp?forum=106&thread=4829
#FIXME: A lot of this is copied boilerplate that neds to be cleaned up
def command_line_prep():
    """Build the OptionParser for Amara's command-line front end.

    All options are boolean flags defaulting to False.
    """
    from optparse import OptionParser
    usage = "\n".join([
        "Amara 2.x. Command line support for basic parsing.",
        "python -m 'amara' [options] source cmd",
    ])
    parser = OptionParser(usage=usage)
    # (short, long, dest, help) for each boolean flag.
    flag_specs = [
        ("-p", "--pretty", "pretty", "Pretty-print the XML output"),
        ("-v", "--validate", "validate", "Apply DTD validation"),
        ("-V", "--version", "version", "Print the Amara version and exit"),
        ("-s", "--standalone", "standalone",
         "Parse the XML with standalone rules"),
    ]
    for short_opt, long_opt, dest, help_text in flag_specs:
        parser.add_option(short_opt, long_opt, action="store_true",
                          dest=dest, default=False, help=help_text)
    return parser
def main(argv=None):
    """Command-line entry point: parse options, then dispatch to launch().

    Returns None on success, or the SystemExit status raised by optparse
    on a usage error.  (Python 2 only: uses `except E, v` and `print`.)
    """
    #But with better integration of entry points
    if argv is None:
        argv = sys.argv
    # By default, optparse usage errors are terminated by SystemExit
    try:
        optparser = command_line_prep()
        options, args = optparser.parse_args(argv[1:])
        # Process mandatory arguments with IndexError try...except blocks
        try:
            source = args[0]
        except IndexError:
            optparser.error("Missing source for XML")
        #try:
        #    xpattern = args[1]
        #except IndexError:
        #    optparser.error("Missing main xpattern")
    except SystemExit, status:
        # optparser.error() raises SystemExit; surface its status code.
        return status
    # Perform additional setup work here before dispatching to run()
    # Detectable errors encountered here should be handled and a status
    # code of 1 should be returned. Note, this would be the default code
    # for a SystemExit exception with a string message.
    #try:
    #    xpath = args[2].decode('utf-8')
    #except IndexError:
    #    xpath = None
    #xpattern = xpattern.decode('utf-8')
    #sentinel = options.sentinel and options.sentinel.decode('utf-8')
    if options.version:
        # -V short-circuits: print the version and exit successfully.
        print __version__
        return
    pretty = options.pretty
    validate = options.validate
    standalone = options.standalone
    if source == '-':
        # Conventional "-" argument means read the document from stdin.
        source = sys.stdin
    #if options.test:
    #    test()
    #else:
    launch(source, pretty=pretty, validate=validate, standalone=standalone)
    return
#from amara.thirdparty import httplib2
# Script entry point: exit with main()'s return value (None on success).
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/__init__.py
|
__init__.py
|
Httplib2
--------------------------------------------------------------------
Introduction
A comprehensive HTTP client library, httplib2.py supports many
features left out of other HTTP libraries.
HTTP and HTTPS
HTTPS support is only available if the socket module was
compiled with SSL support.
Keep-Alive
Supports HTTP 1.1 Keep-Alive, keeping the socket open and
performing multiple requests over the same connection if
possible.
Authentication
The following three types of HTTP Authentication are
supported. These can be used over both HTTP and HTTPS.
* Digest
* Basic
* WSSE
Caching
The module can optionally operate with a private cache that
understands the Cache-Control: header and uses both the ETag
and Last-Modified cache validators.
All Methods
The module can handle any HTTP request method, not just GET
and POST.
Redirects
Automatically follows 3XX redirects on GETs.
Compression
Handles both 'deflate' and 'gzip' types of compression.
Lost update support
Automatically adds back ETags into PUT requests to resources
we have already cached. This implements Section 3.2 of
Detecting the Lost Update Problem Using Unreserved Checkout.
Unit Tested
A large and growing set of unit tests.
For more information on this module, see:
http://bitworking.org/projects/httplib2/
--------------------------------------------------------------------
Installation
The httplib2 module is shipped as a distutils package. To install
the library, unpack the distribution archive, and issue the following
command:
$ python setup.py install
--------------------------------------------------------------------
Usage
A simple retrieval:
import httplib2
h = httplib2.Http(".cache")
(resp_headers, content) = h.request("http://example.org/", "GET")
The 'content' is the content retrieved from the URL. The content
is already decompressed or unzipped if necessary.
To PUT some content to a server that uses SSL and Basic authentication:
import httplib2
h = httplib2.Http(".cache")
h.add_credentials('name', 'password')
(resp, content) = h.request("https://example.org/chapter/2",
"PUT", body="This is text",
headers={'content-type':'text/plain'} )
Use the Cache-Control: header to control how the caching operates.
import httplib2
h = httplib2.Http(".cache")
(resp, content) = h.request("http://bitworking.org/", "GET")
...
(resp, content) = h.request("http://bitworking.org/", "GET",
headers={'cache-control':'no-cache'})
The first request will be cached and since this is a request
to bitworking.org it will be set to be cached for two hours,
because that is how I have my server configured. Any subsequent
GET to that URI will return the value from the on-disk cache
and no request will be made to the server. You can use the
Cache-Control: header to change the cache's behavior and in
this example the second request adds the Cache-Control:
header with a value of 'no-cache' which tells the library
that the cached copy must not be used when handling this request.
--------------------------------------------------------------------
Httplib2 Software License
Copyright (c) 2006 by Joe Gregorio
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/httplib2/README.httplib2
|
README.httplib2
|
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = []
__version__ = "1.0.0"
__license__ = "MIT"
__history__ = """
"""
import urlparse
# Convert an IRI to a URI following the rules in RFC 3987
#
# The characters we need to encode and escape are defined in the spec:
#
# iprivate = %xE000-F8FF / %xF0000-FFFFD / %x100000-10FFFD
# ucschar = %xA0-D7FF / %xF900-FDCF / %xFDF0-FFEF
# / %x10000-1FFFD / %x20000-2FFFD / %x30000-3FFFD
# / %x40000-4FFFD / %x50000-5FFFD / %x60000-6FFFD
# / %x70000-7FFFD / %x80000-8FFFD / %x90000-9FFFD
# / %xA0000-AFFFD / %xB0000-BFFFD / %xC0000-CFFFD
# / %xD0000-DFFFD / %xE1000-EFFFD
# Codepoint ranges (inclusive) of ucschar and iprivate characters that
# must be %-escaped when converting an IRI to a URI (RFC 3987).
# Sorted ascending, which encode() relies on to stop searching early.
escape_range = [
    (0xA0, 0xD7FF),
    (0xE000, 0xF8FF),
    (0xF900, 0xFDCF),
    (0xFDF0, 0xFFEF),
    (0x10000, 0x1FFFD),
    (0x20000, 0x2FFFD),
    (0x30000, 0x3FFFD),
    (0x40000, 0x4FFFD),
    (0x50000, 0x5FFFD),
    (0x60000, 0x6FFFD),
    (0x70000, 0x7FFFD),
    (0x80000, 0x8FFFD),
    (0x90000, 0x9FFFD),
    (0xA0000, 0xAFFFD),
    (0xB0000, 0xBFFFD),
    (0xC0000, 0xCFFFD),
    (0xD0000, 0xDFFFD),
    (0xE1000, 0xEFFFD),
    (0xF0000, 0xFFFFD),
    (0x100000, 0x10FFFD)
]
def encode(c):
    """Return *c* %-escaped if it falls in an IRI escape range, else *c*.

    A matching character is encoded as UTF-8 and each octet is emitted
    as %XX.  Characters outside every range pass through unchanged.
    """
    retval = c
    i = ord(c)
    for low, high in escape_range:
        if i < low:
            # Ranges are sorted ascending: no later range can match.
            break
        if i >= low and i <= high:
            # Iterating the UTF-8 encoding yields ints on Python 3 and
            # one-char strs on Python 2; normalize before formatting.
            # %02X zero-pads (the original "%2X" space-padded; harmless
            # here only because these octets are always >= 0x80, but
            # zero-padding is the correct percent-encoding form).
            retval = "".join(["%%%02X" % (o if isinstance(o, int) else ord(o))
                              for o in c.encode('utf-8')])
            break
    return retval
def iri2uri(uri):
    """Convert an IRI to a URI. Note that IRIs must be
    passed in a unicode strings. That is, do not utf-8 encode
    the IRI before passing it into the function."""
    if not isinstance(uri, unicode):
        # Already a byte string: assume it is a plain URI and pass it through.
        return uri
    scheme, authority, path, query, fragment = urlparse.urlsplit(uri)
    # The authority (host) is IDNA-encoded rather than %-escaped.
    authority = authority.encode('idna')
    # For each character in 'ucschar' or 'iprivate':
    # 1. encode as utf-8, 2. then %-encode each octet of that utf-8.
    recombined = urlparse.urlunsplit((scheme, authority, path, query, fragment))
    return "".join([encode(ch) for ch in recombined])
# Minimal self-test harness: run "python iri2uri.py" directly to execute.
if __name__ == "__main__":
    import unittest
    class Test(unittest.TestCase):
        def test_uris(self):
            """Test that URIs are invariant under the transformation."""
            # Pure-ASCII URIs contain no ucschar/iprivate characters and so
            # must pass through iri2uri unchanged.
            invariant = [
                u"ftp://ftp.is.co.za/rfc/rfc1808.txt",
                u"http://www.ietf.org/rfc/rfc2396.txt",
                u"ldap://[2001:db8::7]/c=GB?objectClass?one",
                u"mailto:[email protected]",
                u"news:comp.infosystems.www.servers.unix",
                u"tel:+1-816-555-1212",
                u"telnet://192.0.2.16:80/",
                u"urn:oasis:names:specification:docbook:dtd:xml:4.1.2" ]
            for uri in invariant:
                self.assertEqual(uri, iri2uri(uri))
        def test_iri(self):
            """ Test that the right type of escaping is done for each part of the URI."""
            # U+2604 (COMET) exercises host (IDNA), path, query and fragment
            # escaping; the final case checks that an already utf-8 encoded
            # byte string is NOT treated as a unicode IRI.
            self.assertEqual("http://xn--o3h.com/%E2%98%84", iri2uri(u"http://\N{COMET}.com/\N{COMET}"))
            self.assertEqual("http://bitworking.org/?fred=%E2%98%84", iri2uri(u"http://bitworking.org/?fred=\N{COMET}"))
            self.assertEqual("http://bitworking.org/#%E2%98%84", iri2uri(u"http://bitworking.org/#\N{COMET}"))
            self.assertEqual("#%E2%98%84", iri2uri(u"#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}"))
            self.assertEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}")))
            self.assertNotEqual("/fred?bar=%E2%98%9A#%E2%98%84", iri2uri(u"/fred?bar=\N{BLACK LEFT POINTING INDEX}#\N{COMET}".encode('utf-8')))
    unittest.main()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/httplib2/iri2uri.py
|
iri2uri.py
|
from __future__ import generators
"""
httplib2
A caching http interface that supports ETags and gzip
to conserve bandwidth.
Requires Python 2.3 or later
Changelog:
2007-08-18, Rick: Modified so it's able to use a socks proxy if needed.
"""
__author__ = "Joe Gregorio ([email protected])"
__copyright__ = "Copyright 2006, Joe Gregorio"
__contributors__ = ["Thomas Broyer ([email protected])",
"James Antill",
"Xavier Verges Farrero",
"Jonathan Feinberg",
"Blair Zajac",
"Sam Ruby",
"Louis Nyffenegger"]
__license__ = "MIT"
__version__ = "0.7.0"
import re
import sys
import email
import email.Utils
import email.Message
import email.FeedParser
import StringIO
import gzip
import zlib
import httplib
import urlparse
import base64
import os
import copy
import calendar
import time
import random
import errno
# remove depracated warning in python2.6
try:
from hashlib import sha1 as _sha, md5 as _md5
except ImportError:
import sha
import md5
_sha = sha.new
_md5 = md5.new
import hmac
from gettext import gettext as _
import socket
# Build the appropriate socket wrapper for ssl
try:
    import ssl # python 2.6
    ssl_SSLError = ssl.SSLError
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        """Wrap *sock* with the stdlib ssl module, optionally validating
        the server certificate against the *ca_certs* bundle."""
        if disable_validation:
            cert_reqs = ssl.CERT_NONE
        else:
            cert_reqs = ssl.CERT_REQUIRED
        # We should be specifying SSL version 3 or TLS v1, but the ssl module
        # doesn't expose the necessary knobs. So we need to go with the default
        # of SSLv23.
        return ssl.wrap_socket(sock, keyfile=key_file, certfile=cert_file,
                cert_reqs=cert_reqs, ca_certs=ca_certs)
except (AttributeError, ImportError):
    # Pre-2.6 fallback: the legacy socket.ssl() API cannot validate
    # certificates at all, so validation must be explicitly disabled.
    ssl_SSLError = None
    def _ssl_wrap_socket(sock, key_file, cert_file,
                         disable_validation, ca_certs):
        """Wrap *sock* with socket.ssl() (no certificate validation)."""
        if not disable_validation:
            raise CertificateValidationUnsupported(
                    "SSL certificate validation is not supported without "
                    "the ssl module installed. To avoid this error, install "
                    "the ssl module, or explicity disable validation.")
        ssl_sock = socket.ssl(sock, key_file, cert_file)
        return httplib.FakeSocket(sock, ssl_sock)
# Use the real IRI->URI converter where available (needs Python >= 2.3);
# otherwise fall back to the identity function, in which case non-ASCII
# IRIs pass through unescaped.
if sys.version_info >= (2,3):
    from iri2uri import iri2uri
else:
    def iri2uri(uri):
        # Identity fallback: assume the caller already passes a plain URI.
        return uri
def has_timeout(timeout): # python 2.6
    """Return True when *timeout* is a real timeout value, i.e. neither
    None nor the socket module's "use the global default" sentinel."""
    sentinel = getattr(socket, '_GLOBAL_DEFAULT_TIMEOUT', None)
    return (timeout is not None) and (timeout is not sentinel)
__all__ = ['Http', 'Response', 'ProxyInfo', 'HttpLib2Error',
'RedirectMissingLocation', 'RedirectLimit', 'FailedToDecompressContent',
'UnimplementedDigestAuthOptionError', 'UnimplementedHmacDigestAuthOptionError',
'debuglevel', 'ProxiesUnavailableError']
# The httplib debug level, set to a non-zero value to get debug output
debuglevel = 0
# Python 2.3 support
if sys.version_info < (2,4):
    def sorted(seq):
        # NOTE: shadows the 2.4+ builtin. Sorts in place and returns the
        # same list object (the builtin returns a new list); callers in
        # this module only rely on the returned value.
        seq.sort()
        return seq
# Python 2.3 support
def HTTPResponse__getheaders(self):
    """Return list of (header, value) tuples."""
    if self.msg is None:
        raise httplib.ResponseNotReady()
    return self.msg.items()
if not hasattr(httplib.HTTPResponse, 'getheaders'):
    # Monkey-patch pre-2.4 httplib so the rest of this module can rely on
    # HTTPResponse.getheaders() existing.
    httplib.HTTPResponse.getheaders = HTTPResponse__getheaders
# All exceptions raised here derive from HttpLib2Error
class HttpLib2Error(Exception): pass
# Some exceptions can be caught and optionally
# be turned back into responses. Only subclasses of
# HttpLib2ErrorWithResponse carry the (response, content) pair needed for that.
class HttpLib2ErrorWithResponse(HttpLib2Error):
    def __init__(self, desc, response, content):
        self.response = response
        self.content = content
        HttpLib2Error.__init__(self, desc)
# Redirect-handling and content-decoding failures (carry the response):
class RedirectMissingLocation(HttpLib2ErrorWithResponse): pass
class RedirectLimit(HttpLib2ErrorWithResponse): pass
class FailedToDecompressContent(HttpLib2ErrorWithResponse): pass
# Digest/HMACDigest challenge options this client does not implement:
class UnimplementedDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class UnimplementedHmacDigestAuthOptionError(HttpLib2ErrorWithResponse): pass
class MalformedHeader(HttpLib2Error): pass
class RelativeURIError(HttpLib2Error): pass
class ServerNotFoundError(HttpLib2Error): pass
class ProxiesUnavailableError(HttpLib2Error): pass
# SSL-related failures:
class CertificateValidationUnsupported(HttpLib2Error): pass
class SSLHandshakeError(HttpLib2Error): pass
class CertificateHostnameMismatch(SSLHandshakeError):
    def __init__(self, desc, host, cert):
        HttpLib2Error.__init__(self, desc)
        self.host = host
        self.cert = cert
# Open Items:
# -----------
# Proxy support
# Are we removing the cached content too soon on PUT (only delete on 200 Maybe?)
# Pluggable cache storage (supports storing the cache in
# flat files by default. We need a plug-in architecture
# that can support Berkeley DB and Squid)
# == Known Issues ==
# Does not handle a resource that uses conneg and Last-Modified but no ETag as a cache validator.
# Does not handle Cache-Control: max-stale
# Does not use Age: headers when calculating cache freshness.
# The number of redirections to follow before giving up.
# Note that only GET redirects are automatically followed.
# Will also honor 301 requests by saving that info and never
# requesting that URI again.
DEFAULT_MAX_REDIRECTS = 5
# Default CA certificates file bundled with httplib2.
CA_CERTS = os.path.join(
os.path.dirname(os.path.abspath(__file__ )), "cacerts.txt")
# Which headers are hop-by-hop headers by default
HOP_BY_HOP = ['connection', 'keep-alive', 'proxy-authenticate', 'proxy-authorization', 'te', 'trailers', 'transfer-encoding', 'upgrade']
def _get_end2end_headers(response):
hopbyhop = list(HOP_BY_HOP)
hopbyhop.extend([x.strip() for x in response.get('connection', '').split(',')])
return [header for header in response.keys() if header not in hopbyhop]
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
    """Parses a URI using the regex given in Appendix B of RFC 3986.
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    Components that are absent come back as None.
    """
    groups = URI.match(uri).groups()
    return (groups[1], groups[3], groups[4], groups[6], groups[8])
def urlnorm(uri):
    """Normalize an absolute URI.

    Lower-cases the scheme and authority, defaults an empty path to "/",
    and returns (scheme, authority, request_uri, defrag_uri) where
    request_uri is the path plus any query string, and defrag_uri is the
    full URI with the fragment dropped.

    Raises RelativeURIError when scheme or authority is missing.
    """
    (scheme, authority, path, query, fragment) = parse_uri(uri)
    if not scheme or not authority:
        raise RelativeURIError("Only absolute URIs are allowed. uri = %s" % uri)
    # Scheme and host are case-insensitive (RFC 3986 section 6.2.2.1).
    # Fix: the original lower-cased the scheme twice; once is enough.
    authority = authority.lower()
    scheme = scheme.lower()
    if not path:
        path = "/"
    # Could do syntax based normalization of the URI before
    # computing the digest. See Section 6.2.2 of Std 66.
    request_uri = query and "?".join([path, query]) or path
    defrag_uri = scheme + "://" + authority + request_uri
    return scheme, authority, request_uri, defrag_uri
# Cache filename construction (original borrowed from Venus http://intertwingly.net/code/venus/)
re_url_scheme = re.compile(r'^\w+://')
re_slash = re.compile(r'[?/:|]+')
def safename(filename):
    """Return a filename suitable for the cache.
    Strips dangerous and common characters to create a filename we
    can use to store the cache in.

    The result is "<sanitized-prefix>,<md5-of-full-key>", with the prefix
    capped at 200 characters so the md5 suffix keeps keys unique.
    """
    try:
        if re_url_scheme.match(filename):
            if isinstance(filename,str):
                filename = filename.decode('utf-8')
                filename = filename.encode('idna')
            else:
                filename = filename.encode('idna')
    except UnicodeError:
        pass
    # NOTE(review): the 'idna' codec is applied to the ENTIRE URL, which
    # raises UnicodeError for most full URLs (caught above), so IDNA
    # effectively applies only to bare hostnames — confirm this is intended.
    if isinstance(filename,unicode):
        filename=filename.encode('utf-8')
    # Hash the full (byte) key before mangling it, so distinct URLs that
    # sanitize to the same prefix still get distinct cache files.
    filemd5 = _md5(filename).hexdigest()
    filename = re_url_scheme.sub("", filename)
    filename = re_slash.sub(",", filename)
    # limit length of filename
    if len(filename)>200:
        filename=filename[:200]
    return ",".join((filename, filemd5))
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
return dict([ (key.lower(), NORMALIZE_SPACE.sub(value, ' ').strip()) for (key, value) in headers.iteritems()])
def _parse_cache_control(headers):
retval = {}
if headers.has_key('cache-control'):
parts = headers['cache-control'].split(',')
parts_with_args = [tuple([x.strip().lower() for x in part.split("=", 1)]) for part in parts if -1 != part.find("=")]
parts_wo_args = [(name.strip().lower(), 1) for name in parts if -1 == name.find("=")]
retval = dict(parts_with_args + parts_wo_args)
return retval
# Whether to use a strict mode to parse WWW-Authenticate headers
# Might lead to bad results in case of ill-formed header value,
# so disabled by default, falling back to relaxed parsing.
# Set to true to turn on, usefull for testing servers.
USE_WWW_AUTH_STRICT_PARSING = 0
# In regex below:
# [^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+ matches a "token" as defined by HTTP
# "(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?" matches a "quoted-string" as defined by HTTP, when LWS have already been replaced by a single space
# Actually, as an auth-param value can be either a token or a quoted-string, they are combined in a single pattern which matches both:
# \"?((?<=\")(?:[^\0-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x08\x0A-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?
WWW_AUTH_STRICT = re.compile(r"^(?:\s*(?:,\s*)?([^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+)\s*=\s*\"?((?<=\")(?:[^\0-\x08\x0A-\x1f\x7f-\xff\\\"]|\\[\0-\x7f])*?(?=\")|(?<!\")[^\0-\x1f\x7f-\xff()<>@,;:\\\"/[\]?={} \t]+(?!\"))\"?)(.*)$")
WWW_AUTH_RELAXED = re.compile(r"^(?:\s*(?:,\s*)?([^ \t\r\n=]+)\s*=\s*\"?((?<=\")(?:[^\\\"]|\\.)*?(?=\")|(?<!\")[^ \t\r\n,]+(?!\"))\"?)(.*)$")
UNQUOTE_PAIRS = re.compile(r'\\(.)')
def _parse_www_authenticate(headers, headername='www-authenticate'):
"""Returns a dictionary of dictionaries, one dict
per auth_scheme."""
retval = {}
if headers.has_key(headername):
try:
authenticate = headers[headername].strip()
www_auth = USE_WWW_AUTH_STRICT_PARSING and WWW_AUTH_STRICT or WWW_AUTH_RELAXED
while authenticate:
# Break off the scheme at the beginning of the line
if headername == 'authentication-info':
(auth_scheme, the_rest) = ('digest', authenticate)
else:
(auth_scheme, the_rest) = authenticate.split(" ", 1)
# Now loop over all the key value pairs that come after the scheme,
# being careful not to roll into the next scheme
match = www_auth.search(the_rest)
auth_params = {}
while match:
if match and len(match.groups()) == 3:
(key, value, the_rest) = match.groups()
auth_params[key.lower()] = UNQUOTE_PAIRS.sub(r'\1', value) # '\\'.join([x.replace('\\', '') for x in value.split('\\\\')])
match = www_auth.search(the_rest)
retval[auth_scheme.lower()] = auth_params
authenticate = the_rest.strip()
except ValueError:
raise MalformedHeader("WWW-Authenticate")
return retval
def _entry_disposition(response_headers, request_headers):
    """Determine freshness from the Date, Expires and Cache-Control headers.
    We don't handle the following:
    1. Cache-Control: max-stale
    2. Age: headers are not used in the calculations.
    Not that this algorithm is simpler than you might think
    because we are operating as a private (non-shared) cache.
    This lets us ignore 's-maxage'. We can also ignore
    'proxy-invalidate' since we aren't a proxy.
    We will never return a stale document as
    fresh as a design decision, and thus the non-implementation
    of 'max-stale'. This also lets us safely ignore 'must-revalidate'
    since we operate as if every server has sent 'must-revalidate'.
    Since we are private we get to ignore both 'public' and
    'private' parameters. We also ignore 'no-transform' since
    we don't do any transformations.
    The 'no-store' parameter is handled at a higher level.
    So the only Cache-Control parameters we look at are:
    no-cache
    only-if-cached
    max-age
    min-fresh

    Returns one of "FRESH", "STALE" or "TRANSPARENT".
    """
    retval = "STALE"
    cc = _parse_cache_control(request_headers)
    cc_response = _parse_cache_control(response_headers)
    # HTTP/1.0 "Pragma: no-cache" forces an end-to-end reload; also inject
    # an equivalent Cache-Control so downstream logic sees it.
    if request_headers.has_key('pragma') and request_headers['pragma'].lower().find('no-cache') != -1:
        retval = "TRANSPARENT"
        if 'cache-control' not in request_headers:
            request_headers['cache-control'] = 'no-cache'
    elif cc.has_key('no-cache'):
        retval = "TRANSPARENT"
    elif cc_response.has_key('no-cache'):
        retval = "STALE"
    elif cc.has_key('only-if-cached'):
        retval = "FRESH"
    elif response_headers.has_key('date'):
        # Freshness arithmetic per RFC 2616 section 13.2: compare the
        # entry's current age against its freshness lifetime.
        date = calendar.timegm(email.Utils.parsedate_tz(response_headers['date']))
        now = time.time()
        current_age = max(0, now - date)
        if cc_response.has_key('max-age'):
            try:
                freshness_lifetime = int(cc_response['max-age'])
            except ValueError:
                freshness_lifetime = 0
        elif response_headers.has_key('expires'):
            expires = email.Utils.parsedate_tz(response_headers['expires'])
            if None == expires:
                freshness_lifetime = 0
            else:
                freshness_lifetime = max(0, calendar.timegm(expires) - date)
        else:
            freshness_lifetime = 0
        # The request's own max-age overrides the response's lifetime.
        if cc.has_key('max-age'):
            try:
                freshness_lifetime = int(cc['max-age'])
            except ValueError:
                freshness_lifetime = 0
        if cc.has_key('min-fresh'):
            try:
                min_fresh = int(cc['min-fresh'])
            except ValueError:
                min_fresh = 0
            # Treat the entry as min-fresh seconds older than it is, so it
            # is only FRESH if it will still be fresh that far in the future.
            current_age += min_fresh
        if freshness_lifetime > current_age:
            retval = "FRESH"
    return retval
def _decompressContent(response, new_content):
    """Decompress *new_content* according to the response's
    Content-Encoding (gzip or deflate), update content-length, and rename
    the header to '-content-encoding' so the decoded entry can be cached
    without looking compressed. Raises FailedToDecompressContent when
    gzip decoding fails."""
    content = new_content
    try:
        encoding = response.get('content-encoding', None)
        if encoding in ['gzip', 'deflate']:
            if encoding == 'gzip':
                content = gzip.GzipFile(fileobj=StringIO.StringIO(new_content)).read()
            if encoding == 'deflate':
                # NOTE(review): zlib.decompress raises zlib.error, which is
                # not an IOError, so a corrupt deflate body propagates
                # uncaught instead of raising FailedToDecompressContent —
                # confirm whether that is intended.
                content = zlib.decompress(content)
            response['content-length'] = str(len(content))
            # Record the historical presence of the encoding in a way the won't interfere.
            response['-content-encoding'] = response['content-encoding']
            del response['content-encoding']
    except IOError:
        content = ""
        raise FailedToDecompressContent(_("Content purported to be compressed with %s but failed to decompress.") % response.get('content-encoding'), response, content)
    return content
def _updateCache(request_headers, response_headers, content, cache, cachekey):
    """Store (or purge) a response in *cache* under *cachekey*.

    Honors 'no-store' from either side by deleting the entry. Otherwise
    serializes the response headers as an RFC 822 message followed by the
    body, recording each Vary-listed request header value as a
    '-varied-<name>' annotation so later requests can be matched.
    """
    if cachekey:
        cc = _parse_cache_control(request_headers)
        cc_response = _parse_cache_control(response_headers)
        if cc.has_key('no-store') or cc_response.has_key('no-store'):
            cache.delete(cachekey)
        else:
            info = email.Message.Message()
            # Skip headers that describe the wire transfer, not the entity.
            for key, value in response_headers.iteritems():
                if key not in ['status','content-encoding','transfer-encoding']:
                    info[key] = value
            # Add annotations to the cache to indicate what headers
            # are variant for this request.
            vary = response_headers.get('vary', None)
            if vary:
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    try:
                        info[key] = request_headers[header]
                    except KeyError:
                        pass
            # NOTE(review): response_headers is expected to be a Response
            # object (a dict subclass with a .status attribute) — confirm
            # with callers outside this chunk.
            status = response_headers.status
            if status == 304:
                # A 304 means the cached entity is still valid, so store the
                # refreshed entry as the original 200.
                status = 200
            status_header = 'status: %d\r\n' % status
            header_str = info.as_string()
            # Normalize bare CR or LF to CRLF so the stored entry re-parses
            # cleanly as an RFC 822 message.
            header_str = re.sub("\r(?!\n)|(?<!\r)\n", "\r\n", header_str)
            text = "".join([status_header, header_str, content])
            cache.set(cachekey, text)
def _cnonce():
dig = _md5("%s:%s" % (time.ctime(), ["0123456789"[random.randrange(0, 9)] for i in range(20)])).hexdigest()
return dig[:16]
def _wsse_username_token(cnonce, iso_now, password):
return base64.b64encode(_sha("%s%s%s" % (cnonce, iso_now, password)).digest()).strip()
# Credential handling works from a pool of credentials (not necessarily
# tied to Basic, Digest, etc.) plus a record of which URIs have already
# demanded authentication. That record is tricky: sub-URIs may share the
# same auth, or the scheme may change as you descend the tree, so each
# Authentication instance must be able to say how close to the "top" it is.
class Authentication(object):
    """Base class for one authentication scheme scoped to a (host, path) prefix."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        parsed = parse_uri(request_uri)
        # Only the path participates in scoping; the host is passed separately.
        self.path = parsed[2]
        self.host = host
        self.credentials = credentials
        self.http = http

    def depth(self, request_uri):
        """Number of path segments of *request_uri* below this auth scope."""
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        return request_uri[len(self.path):].count("/")

    def inscope(self, host, request_uri):
        # XXX Should we normalize the request_uri?
        (scheme, authority, path, query, fragment) = parse_uri(request_uri)
        if host != self.host:
            return False
        return path.startswith(self.path)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header. Over-rise this in sub-classes."""
        pass

    def response(self, response, content):
        """Gives us a chance to update with new nonces
        or such returned from the last authorized response.
        Over-rise this in sub-classes if necessary.
        Return TRUE is the request is to be retried, for
        example Digest may return stale=true.
        """
        return False
class BasicAuthentication(Authentication):
    """HTTP Basic auth: send base64("user:password") with every request."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        userpass = "%s:%s" % self.credentials
        headers['authorization'] = 'Basic ' + base64.b64encode(userpass).strip()
class DigestAuthentication(Authentication):
    """Only do qop='auth' and MD5, since that
    is all Apache currently implements"""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['digest']
        qop = self.challenge.get('qop', 'auth')
        # Only qop="auth" is supported. NOTE(review): split() is on
        # whitespace, so a comma-separated list like "auth,auth-int" is one
        # token and would be rejected — confirm intent.
        self.challenge['qop'] = ('auth' in [x.strip() for x in qop.split()]) and 'auth' or None
        if self.challenge['qop'] is None:
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for qop: %s." % qop))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'MD5').upper()
        if self.challenge['algorithm'] != 'MD5':
            raise UnimplementedDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        # A1 = username ":" realm ":" password (RFC 2617 section 3.2.2.2).
        self.A1 = "".join([self.credentials[0], ":", self.challenge['realm'], ":", self.credentials[1]])
        # nonce-count: incremented after every request made under this nonce.
        self.challenge['nc'] = 1
    def request(self, method, request_uri, headers, content, cnonce = None):
        """Modify the request headers"""
        # H (hash) and KD (keyed digest) as defined in RFC 2617 section 3.2.1.
        H = lambda x: _md5(x).hexdigest()
        KD = lambda s, d: H("%s:%s" % (s, d))
        A2 = "".join([method, ":", request_uri])
        # cnonce parameter exists so tests can supply a fixed value.
        self.challenge['cnonce'] = cnonce or _cnonce()
        request_digest = '"%s"' % KD(H(self.A1), "%s:%s:%s:%s:%s" % (self.challenge['nonce'],
                    '%08x' % self.challenge['nc'],
                    self.challenge['cnonce'],
                    self.challenge['qop'], H(A2)
                    ))
        headers['authorization'] = 'Digest username="%s", realm="%s", nonce="%s", uri="%s", algorithm=%s, response=%s, qop=%s, nc=%08x, cnonce="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['nonce'],
                request_uri,
                self.challenge['algorithm'],
                request_digest,
                self.challenge['qop'],
                self.challenge['nc'],
                self.challenge['cnonce'],
                )
        self.challenge['nc'] += 1
    def response(self, response, content):
        # Per RFC 2617: a stale=true challenge means retry with the fresh
        # nonce and the same credentials (return True = retry).
        if not response.has_key('authentication-info'):
            challenge = _parse_www_authenticate(response, 'www-authenticate').get('digest', {})
            if 'true' == challenge.get('stale'):
                self.challenge['nonce'] = challenge['nonce']
                self.challenge['nc'] = 1
                return True
        else:
            # Authentication-Info may rotate the nonce via nextnonce.
            updated_challenge = _parse_www_authenticate(response, 'authentication-info').get('digest', {})
            if updated_challenge.has_key('nextnonce'):
                self.challenge['nonce'] = updated_challenge['nextnonce']
                self.challenge['nc'] = 1
        return False
class HmacDigestAuthentication(Authentication):
    """Adapted from Robert Sayre's code and DigestAuthentication above."""
    __author__ = "Thomas Broyer ([email protected])"
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        self.challenge = challenge['hmacdigest']
        # TODO: self.challenge['domain']
        self.challenge['reason'] = self.challenge.get('reason', 'unauthorized')
        if self.challenge['reason'] not in ['unauthorized', 'integrity']:
            self.challenge['reason'] = 'unauthorized'
        self.challenge['salt'] = self.challenge.get('salt', '')
        if not self.challenge.get('snonce'):
            raise UnimplementedHmacDigestAuthOptionError( _("The challenge doesn't contain a server nonce, or this one is empty."))
        self.challenge['algorithm'] = self.challenge.get('algorithm', 'HMAC-SHA-1')
        if self.challenge['algorithm'] not in ['HMAC-SHA-1', 'HMAC-MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for algorithm: %s." % self.challenge['algorithm']))
        self.challenge['pw-algorithm'] = self.challenge.get('pw-algorithm', 'SHA-1')
        if self.challenge['pw-algorithm'] not in ['SHA-1', 'MD5']:
            raise UnimplementedHmacDigestAuthOptionError( _("Unsupported value for pw-algorithm: %s." % self.challenge['pw-algorithm']))
        # hashmod signs requests; pwhashmod derives the key from the password.
        if self.challenge['algorithm'] == 'HMAC-MD5':
            self.hashmod = _md5
        else:
            self.hashmod = _sha
        if self.challenge['pw-algorithm'] == 'MD5':
            self.pwhashmod = _md5
        else:
            self.pwhashmod = _sha
        # Derived key: hash(username ":" hash(password + salt) ":" realm).
        # NOTE(review): .new(...) assumes the legacy md5/sha module
        # interface; when _md5/_sha come from hashlib (the try-branch of the
        # module's import), they are constructors without a .new attribute —
        # confirm which import branch is active in supported deployments.
        self.key = "".join([self.credentials[0], ":",
                    self.pwhashmod.new("".join([self.credentials[1], self.challenge['salt']])).hexdigest().lower(),
                    ":", self.challenge['realm']
                    ])
        self.key = self.pwhashmod.new(self.key).hexdigest().lower()
    def request(self, method, request_uri, headers, content):
        """Modify the request headers"""
        # The signature covers method, URI, nonces and all end-to-end
        # header values (in key order); keylist advertises which headers.
        keys = _get_end2end_headers(headers)
        keylist = "".join(["%s " % k for k in keys])
        headers_val = "".join([headers[k] for k in keys])
        created = time.strftime('%Y-%m-%dT%H:%M:%SZ',time.gmtime())
        cnonce = _cnonce()
        request_digest = "%s:%s:%s:%s:%s" % (method, request_uri, cnonce, self.challenge['snonce'], headers_val)
        request_digest = hmac.new(self.key, request_digest, self.hashmod).hexdigest().lower()
        headers['authorization'] = 'HMACDigest username="%s", realm="%s", snonce="%s", cnonce="%s", uri="%s", created="%s", response="%s", headers="%s"' % (
                self.credentials[0],
                self.challenge['realm'],
                self.challenge['snonce'],
                cnonce,
                request_uri,
                created,
                request_digest,
                keylist,
                )
    def response(self, response, content):
        # Retry when the server reports an integrity failure or stale nonce.
        challenge = _parse_www_authenticate(response, 'www-authenticate').get('hmacdigest', {})
        if challenge.get('reason') in ['integrity', 'stale']:
            return True
        return False
class WsseAuthentication(Authentication):
    """This is thinly tested and should not be relied upon.
    At this time there isn't any third party server to test against.
    Blogger and TypePad implemented this algorithm at one point
    but Blogger has since switched to Basic over HTTPS and
    TypePad has implemented it wrong, by never issuing a 401
    challenge but instead requiring your client to telepathically know that
    their endpoint is expecting WSSE profile="UsernameToken"."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)

    def request(self, method, request_uri, headers, content):
        """Add the Authorization and X-WSSE UsernameToken headers."""
        headers['authorization'] = 'WSSE profile="UsernameToken"'
        # The digest binds the password to a fresh nonce and timestamp.
        iso_now = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())
        cnonce = _cnonce()
        password_digest = _wsse_username_token(cnonce, iso_now, self.credentials[1])
        token_fields = (self.credentials[0], password_digest, cnonce, iso_now)
        headers['X-WSSE'] = 'UsernameToken Username="%s", PasswordDigest="%s", Nonce="%s", Created="%s"' % token_fields
class GoogleLoginAuthentication(Authentication):
    """ClientLogin ("GoogleLogin") auth: trades the user's credentials for
    an Auth token via a POST to www.google.com/accounts/ClientLogin,
    performed eagerly in __init__."""
    def __init__(self, credentials, host, request_uri, headers, response, content, http):
        from urllib import urlencode
        Authentication.__init__(self, credentials, host, request_uri, headers, response, content, http)
        challenge = _parse_www_authenticate(response, 'www-authenticate')
        service = challenge['googlelogin'].get('service', 'xapi')
        # Bloggger actually returns the service in the challenge
        # For the rest we guess based on the URI
        if service == 'xapi' and request_uri.find("calendar") > 0:
            service = "cl"
        # No point in guessing Base or Spreadsheet
        #elif request_uri.find("spreadsheets") > 0:
        #    service = "wise"
        auth = dict(Email=credentials[0], Passwd=credentials[1], service=service, source=headers['user-agent'])
        resp, content = self.http.request("https://www.google.com/accounts/ClientLogin", method="POST", body=urlencode(auth), headers={'Content-Type': 'application/x-www-form-urlencoded'})
        # ClientLogin responds with "key=value" lines; we need the Auth line.
        lines = content.split('\n')
        d = dict([tuple(line.split("=", 1)) for line in lines if line])
        if resp.status == 403:
            # Login refused: keep an empty token; subsequent requests send
            # "GoogleLogin Auth=" and will fail upstream.
            self.Auth = ""
        else:
            self.Auth = d['Auth']
    def request(self, method, request_uri, headers, content):
        """Modify the request headers to add the appropriate
        Authorization header."""
        headers['authorization'] = 'GoogleLogin Auth=' + self.Auth
# Registry mapping the scheme token parsed from WWW-Authenticate to the
# Authentication subclass implementing it.
AUTH_SCHEME_CLASSES = {
    "basic": BasicAuthentication,
    "wsse": WsseAuthentication,
    "digest": DigestAuthentication,
    "hmacdigest": HmacDigestAuthentication,
    "googlelogin": GoogleLoginAuthentication
}
# Preference order when a server offers several schemes (strongest first).
AUTH_SCHEME_ORDER = ["hmacdigest", "googlelogin", "digest", "wsse", "basic"]
class FileCache(object):
    """Uses a local directory as a store for cached files.
    Not really safe to use if multiple threads or processes are going to
    be running on the same cache.
    """
    def __init__(self, cache, safe=safename): # use safe=lambda x: md5.new(x).hexdigest() for the old behavior
        """cache: directory path; safe: callable mapping a key to a filename."""
        self.cache = cache
        self.safe = safe
        if not os.path.exists(cache):
            os.makedirs(self.cache)
    def get(self, key):
        """Return the cached value for *key*, or None on a miss."""
        retval = None
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        try:
            # open() instead of the file() builtin (file() was removed in
            # Python 3; open() is identical on Python 2). try/finally
            # guarantees the handle is closed even if read() fails.
            f = open(cacheFullPath, "rb")
            try:
                retval = f.read()
            finally:
                f.close()
        except IOError:
            # Missing entry == cache miss.
            pass
        return retval
    def set(self, key, value):
        """Store *value* under *key*, overwriting any previous entry."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        f = open(cacheFullPath, "wb")
        try:
            f.write(value)
        finally:
            f.close()
    def delete(self, key):
        """Remove the entry for *key* if it exists."""
        cacheFullPath = os.path.join(self.cache, self.safe(key))
        if os.path.exists(cacheFullPath):
            os.remove(cacheFullPath)
class Credentials(object):
    """A pool of (name, password) pairs, each optionally restricted to a
    single domain. An empty domain ("") means the pair applies everywhere."""
    def __init__(self):
        self.credentials = []

    def add(self, name, password, domain=""):
        """Register a (name, password) pair for *domain* ("" = any host)."""
        entry = (domain.lower(), name, password)
        self.credentials.append(entry)

    def clear(self):
        """Forget every stored credential."""
        self.credentials = []

    def iter(self, domain):
        """Yield the (name, password) pairs applicable to *domain*."""
        for stored_domain, name, password in self.credentials:
            if stored_domain != "" and stored_domain != domain:
                continue
            yield (name, password)
class KeyCerts(Credentials):
    """Identical to Credentials except that
    name/password are mapped to key/cert."""
    # No new behavior: reuses Credentials' (domain, name, password)
    # storage, with name holding the key and password holding the cert.
    pass
class ProxyInfo(object):
    """Collect information required to use a proxy."""
    def __init__(self, proxy_type, proxy_host, proxy_port, proxy_rdns=None, proxy_user=None, proxy_pass=None):
        """The parameter proxy_type must be set to one of socks.PROXY_TYPE_XXX
        constants. For example:
        p = ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, proxy_host='localhost', proxy_port=8000)
        """
        self.proxy_type = proxy_type
        self.proxy_host = proxy_host
        self.proxy_port = proxy_port
        self.proxy_rdns = proxy_rdns
        self.proxy_user = proxy_user
        self.proxy_pass = proxy_pass

    def astuple(self):
        """Return all six fields in the order socksocket.setproxy() expects."""
        return (self.proxy_type, self.proxy_host, self.proxy_port,
                self.proxy_rdns, self.proxy_user, self.proxy_pass)

    def isgood(self):
        """A proxy is usable once both host and port are known."""
        has_host = self.proxy_host != None
        has_port = self.proxy_port != None
        return has_host and has_port
class HTTPConnectionWithTimeout(httplib.HTTPConnection):
    """
    HTTPConnection subclass that supports timeouts
    All timeouts are in seconds. If None is passed for timeout then
    Python's default timeout for sockets will be used. See for example
    the docs of socket.setdefaulttimeout():
    http://docs.python.org/library/socket.html#socket.setdefaulttimeout
    """
    def __init__(self, host, port=None, strict=None, timeout=None, proxy_info=None):
        # proxy_info, if set, must be a ProxyInfo with a usable host/port.
        httplib.HTTPConnection.__init__(self, host, port, strict)
        self.timeout = timeout
        self.proxy_info = proxy_info
    def connect(self):
        """Connect to the host and port specified in __init__."""
        # Mostly verbatim from httplib.py.
        # NOTE(review): 'socks' is not imported anywhere in this chunk;
        # presumably a guarded "import socks" exists elsewhere in the file —
        # confirm, otherwise this check raises NameError.
        if self.proxy_info and socks is None:
            raise ProxiesUnavailableError(
                'Proxy support missing but proxy use was requested!')
        msg = "getaddrinfo returns an empty list"
        # Try each resolved address (IPv4/IPv6) until one connects.
        for res in socket.getaddrinfo(self.host, self.port, 0,
                socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                if self.proxy_info and self.proxy_info.isgood():
                    self.sock = socks.socksocket(af, socktype, proto)
                    self.sock.setproxy(*self.proxy_info.astuple())
                else:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
                # Different from httplib: support timeouts.
                if has_timeout(self.timeout):
                    self.sock.settimeout(self.timeout)
                # End of difference from httplib.
                if self.debuglevel > 0:
                    print "connect: (%s, %s)" % (self.host, self.port)
                self.sock.connect(sa)
            except socket.error, msg:
                if self.debuglevel > 0:
                    print 'connect fail:', (self.host, self.port)
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; re-raise the last socket error.
            raise socket.error, msg
class HTTPSConnectionWithTimeout(httplib.HTTPSConnection):
"""
This class allows communication via SSL.
All timeouts are in seconds. If None is passed for timeout then
Python's default timeout for sockets will be used. See for example
the docs of socket.setdefaulttimeout():
http://docs.python.org/library/socket.html#socket.setdefaulttimeout
"""
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 strict=None, timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        # key_file/cert_file enable client-side certificates; ca_certs is
        # the CA bundle used for server certificate validation, defaulting
        # to the cacerts.txt shipped next to this module (CA_CERTS).
        httplib.HTTPSConnection.__init__(self, host, port=port, key_file=key_file,
                cert_file=cert_file, strict=strict)
        self.timeout = timeout
        self.proxy_info = proxy_info
        if ca_certs is None:
            ca_certs = CA_CERTS
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation
# The following two methods were adapted from https_wrapper.py, released
# with the Google Appengine SDK at
# http://googleappengine.googlecode.com/svn-history/r136/trunk/python/google/appengine/tools/https_wrapper.py
# under the following license:
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def _GetValidHostsForCert(self, cert):
"""Returns a list of valid host globs for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
Returns:
list: A list of valid host globs.
"""
if 'subjectAltName' in cert:
return [x[1] for x in cert['subjectAltName']
if x[0].lower() == 'dns']
else:
return [x[0][1] for x in cert['subject']
if x[0][0].lower() == 'commonname']
def _ValidateCertificateHostname(self, cert, hostname):
"""Validates that a given hostname is valid for an SSL certificate.
Args:
cert: A dictionary representing an SSL certificate.
hostname: The hostname to test.
Returns:
bool: Whether or not the hostname is valid for this certificate.
"""
hosts = self._GetValidHostsForCert(cert)
for host in hosts:
host_re = host.replace('.', '\.').replace('*', '[^.]*')
if re.search('^%s$' % (host_re,), hostname, re.I):
return True
return False
def connect(self):
"Connect to a host on a given (SSL) port."
msg = "getaddrinfo returns an empty list"
for family, socktype, proto, canonname, sockaddr in socket.getaddrinfo(
self.host, self.port, 0, socket.SOCK_STREAM):
try:
if self.proxy_info and self.proxy_info.isgood():
sock = socks.socksocket(family, socktype, proto)
sock.setproxy(*self.proxy_info.astuple())
else:
sock = socket.socket(family, socktype, proto)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if has_timeout(self.timeout):
sock.settimeout(self.timeout)
sock.connect((self.host, self.port))
self.sock =_ssl_wrap_socket(
sock, self.key_file, self.cert_file,
self.disable_ssl_certificate_validation, self.ca_certs)
if self.debuglevel > 0:
print "connect: (%s, %s)" % (self.host, self.port)
if not self.disable_ssl_certificate_validation:
cert = self.sock.getpeercert()
hostname = self.host.split(':', 0)[0]
if not self._ValidateCertificateHostname(cert, hostname):
raise CertificateHostnameMismatch(
'Server presented certificate that does not match '
'host %s: %s' % (hostname, cert), hostname, cert)
except ssl_SSLError, e:
if sock:
sock.close()
if self.sock:
self.sock.close()
self.sock = None
# Unfortunately the ssl module doesn't seem to provide any way
# to get at more detailed error information, in particular
# whether the error is due to certificate validation or
# something else (such as SSL protocol mismatch).
if e.errno == ssl.SSL_ERROR_SSL:
raise SSLHandshakeError(e)
else:
raise
except (socket.timeout, socket.gaierror):
raise
except socket.error, msg:
if self.debuglevel > 0:
print 'connect fail:', (self.host, self.port)
if self.sock:
self.sock.close()
self.sock = None
continue
break
if not self.sock:
raise socket.error, msg
# Map each supported URI scheme to the connection class that fetches it.
# (May be overridden below when running on Google App Engine.)
SCHEME_TO_CONNECTION = {
    'http': HTTPConnectionWithTimeout,
    'https': HTTPSConnectionWithTimeout
}
# Use a different connection object for Google App Engine
try:
    from google.appengine.api.urlfetch import fetch
    from google.appengine.api.urlfetch import InvalidURLError
    from google.appengine.api.urlfetch import DownloadError
    from google.appengine.api.urlfetch import ResponseTooLargeError
    from google.appengine.api.urlfetch import SSLCertificateError
    class ResponseDict(dict):
        """Is a dictionary that also has a read() method, so
        that it can pass itself off as an httplib.HTTPResponse()."""
        def read(self):
            pass
    class AppEngineHttpConnection(object):
        """Emulates an httplib.HTTPConnection object, but actually uses the Google
        App Engine urlfetch library. This allows the timeout to be properly used on
        Google App Engine, and avoids using httplib, which on Google App Engine is
        just another wrapper around urlfetch.
        """
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None, ca_certs=None,
                     disable_certificate_validation=False):
            self.host = host
            self.port = port
            self.timeout = timeout
            # urlfetch supports neither client certificates nor proxies.
            if key_file or cert_file or proxy_info or ca_certs:
                raise NotSupportedOnThisPlatform()
            self.response = None
            self.scheme = 'http'
            self.validate_certificate = not disable_certificate_validation
            # Pretend a socket is already open so Http._conn_request never
            # tries to call connect() on a real socket.
            self.sock = True
        def request(self, method, url, body, headers):
            # Calculate the absolute URI, which fetch requires
            netloc = self.host
            if self.port:
                netloc = '%s:%s' % (self.host, self.port)
            absolute_uri = '%s://%s%s' % (self.scheme, netloc, url)
            try:
                response = fetch(absolute_uri, payload=body, method=method,
                    headers=headers, allow_truncated=False, follow_redirects=False,
                    deadline=self.timeout,
                    validate_certificate=self.validate_certificate)
                self.response = ResponseDict(response.headers)
                self.response['status'] = response.status_code
                setattr(self.response, 'read', lambda : response.content)
            # Make sure the exceptions raised match the exceptions expected.
            except InvalidURLError:
                raise socket.gaierror('')
            except (DownloadError, ResponseTooLargeError, SSLCertificateError):
                raise httplib.HTTPException()
        def getresponse(self):
            return self.response
        def set_debuglevel(self, level):
            pass
        def connect(self):
            pass
        def close(self):
            pass
    class AppEngineHttpsConnection(AppEngineHttpConnection):
        """Same as AppEngineHttpConnection, but for HTTPS URIs."""
        def __init__(self, host, port=None, key_file=None, cert_file=None,
                     strict=None, timeout=None, proxy_info=None):
            AppEngineHttpConnection.__init__(self, host, port, key_file, cert_file,
                strict, timeout, proxy_info)
            self.scheme = 'https'
    # Update the connection classes to use the Google App Engine specific ones.
    SCHEME_TO_CONNECTION = {
        'http': AppEngineHttpConnection,
        'https': AppEngineHttpsConnection
    }
except ImportError:
    pass
class Http(object):
    """An HTTP client that handles:
    - all methods
    - caching
    - ETags
    - compression
    - HTTPS
    - Basic, Digest and WSSE authentication
    and more.
    """
    def __init__(self, cache=None, timeout=None, proxy_info=None,
                 ca_certs=None, disable_ssl_certificate_validation=False):
        """
        The value of proxy_info is a ProxyInfo instance.
        If 'cache' is a string then it is used as a directory name for
        a disk cache. Otherwise it must be an object that supports the
        same interface as FileCache.
        All timeouts are in seconds. If None is passed for timeout
        then Python's default timeout for sockets will be used. See
        for example the docs of socket.setdefaulttimeout():
        http://docs.python.org/library/socket.html#socket.setdefaulttimeout
        ca_certs is the path of a file containing root CA certificates for SSL
        server certificate validation. By default, a CA cert file bundled with
        httplib2 is used.
        If disable_ssl_certificate_validation is true, SSL cert validation will
        not be performed.
        """
        self.proxy_info = proxy_info
        self.ca_certs = ca_certs
        self.disable_ssl_certificate_validation = \
                disable_ssl_certificate_validation
        # Map domain name to an httplib connection
        self.connections = {}
        # The location of the cache, for now a directory
        # where cached responses are held.
        if cache and isinstance(cache, basestring):
            self.cache = FileCache(cache)
        else:
            self.cache = cache
        # Name/password
        self.credentials = Credentials()
        # Key/cert
        self.certificates = KeyCerts()
        # authorization objects
        self.authorizations = []
        # If set to False then no redirects are followed, even safe ones.
        self.follow_redirects = True
        # Which HTTP methods do we apply optimistic concurrency to, i.e.
        # which methods get an "if-match:" etag header added to them.
        self.optimistic_concurrency_methods = ["PUT", "PATCH"]
        # If 'follow_redirects' is True, and this is set to True then
        # all redirects are followed, including unsafe ones.
        self.follow_all_redirects = False
        self.ignore_etag = False
        self.force_exception_to_status_code = False
        self.timeout = timeout
    def _auth_from_challenge(self, host, request_uri, headers, response, content):
        """A generator that creates Authorization objects
        that can be applied to requests.
        """
        challenges = _parse_www_authenticate(response, 'www-authenticate')
        for cred in self.credentials.iter(host):
            for scheme in AUTH_SCHEME_ORDER:
                if challenges.has_key(scheme):
                    yield AUTH_SCHEME_CLASSES[scheme](cred, host, request_uri, headers, response, content, self)
    def add_credentials(self, name, password, domain=""):
        """Add a name and password that will be used
        any time a request requires authentication."""
        self.credentials.add(name, password, domain)
    def add_certificate(self, key, cert, domain):
        """Add a key and cert that will be used
        any time a request requires authentication."""
        self.certificates.add(key, cert, domain)
    def clear_credentials(self):
        """Remove all the names and passwords
        that are used for authentication"""
        self.credentials.clear()
        self.authorizations = []
    def _conn_request(self, conn, request_uri, method, body, headers):
        """Issue one request on 'conn' and return (response, content),
        retrying once if the server dropped a keep-alive connection."""
        for i in range(2):
            try:
                if conn.sock is None:
                    conn.connect()
                conn.request(method, request_uri, body, headers)
            except socket.timeout:
                raise
            except socket.gaierror:
                conn.close()
                raise ServerNotFoundError("Unable to find the server at %s" % conn.host)
            except ssl_SSLError:
                conn.close()
                raise
            except socket.error, e:
                err = 0
                if hasattr(e, 'args'):
                    err = getattr(e, 'args')[0]
                else:
                    err = e.errno
                if err == errno.ECONNREFUSED: # Connection refused
                    raise
            except httplib.HTTPException:
                # Just because the server closed the connection doesn't apparently mean
                # that the server didn't send a response.
                if conn.sock is None:
                    if i == 0:
                        conn.close()
                        conn.connect()
                        continue
                    else:
                        conn.close()
                        raise
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                pass
            try:
                response = conn.getresponse()
            except (socket.error, httplib.HTTPException):
                if i == 0:
                    conn.close()
                    conn.connect()
                    continue
                else:
                    raise
            else:
                content = ""
                if method == "HEAD":
                    response.close()
                else:
                    content = response.read()
                response = Response(response)
                if method != "HEAD":
                    content = _decompressContent(response, content)
            break
        return (response, content)
    def _request(self, conn, host, absolute_uri, request_uri, method, body, headers, redirections, cachekey):
        """Do the actual request using the connection object
        and also follow one level of redirects if necessary"""
        # Pick the most specific (deepest-scoped) authorization we hold.
        auths = [(auth.depth(request_uri), auth) for auth in self.authorizations if auth.inscope(host, request_uri)]
        auth = auths and sorted(auths)[0][1] or None
        if auth:
            auth.request(method, request_uri, headers, body)
        (response, content) = self._conn_request(conn, request_uri, method, body, headers)
        if auth:
            if auth.response(response, body):
                # The auth scheme asked for a retry (e.g. stale digest nonce).
                auth.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers )
                response._stale_digest = 1
        if response.status == 401:
            for authorization in self._auth_from_challenge(host, request_uri, headers, response, content):
                authorization.request(method, request_uri, headers, body)
                (response, content) = self._conn_request(conn, request_uri, method, body, headers, )
                if response.status != 401:
                    self.authorizations.append(authorization)
                    authorization.response(response, body)
                    break
        if (self.follow_all_redirects or (method in ["GET", "HEAD"]) or response.status == 303):
            if self.follow_redirects and response.status in [300, 301, 302, 303, 307]:
                # Pick out the location header and basically start from the beginning
                # remembering first to strip the ETag header and decrement our 'depth'
                if redirections:
                    if not response.has_key('location') and response.status != 300:
                        raise RedirectMissingLocation( _("Redirected but the response is missing a Location: header."), response, content)
                    # Fix-up relative redirects (which violate an RFC 2616 MUST)
                    if response.has_key('location'):
                        location = response['location']
                        (scheme, authority, path, query, fragment) = parse_uri(location)
                        if authority == None:
                            response['location'] = urlparse.urljoin(absolute_uri, location)
                    if response.status == 301 and method in ["GET", "HEAD"]:
                        response['-x-permanent-redirect-url'] = response['location']
                        if not response.has_key('content-location'):
                            response['content-location'] = absolute_uri
                        _updateCache(headers, response, content, self.cache, cachekey)
                    if headers.has_key('if-none-match'):
                        del headers['if-none-match']
                    if headers.has_key('if-modified-since'):
                        del headers['if-modified-since']
                    if response.has_key('location'):
                        location = response['location']
                        old_response = copy.deepcopy(response)
                        if not old_response.has_key('content-location'):
                            old_response['content-location'] = absolute_uri
                        redirect_method = method
                        if response.status in [302, 303]:
                            # RFC-mandated downgrade to GET for 302/303.
                            redirect_method = "GET"
                            body = None
                        (response, content) = self.request(location, redirect_method, body=body, headers = headers, redirections = redirections - 1)
                        response.previous = old_response
                else:
                    raise RedirectLimit("Redirected more times than rediection_limit allows.", response, content)
            elif response.status in [200, 203] and method in ["GET", "HEAD"]:
                # Don't cache 206's since we aren't going to handle byte range requests
                if not response.has_key('content-location'):
                    response['content-location'] = absolute_uri
                _updateCache(headers, response, content, self.cache, cachekey)
        return (response, content)
    def _normalize_headers(self, headers):
        """Return 'headers' with lower-cased names (module-level helper)."""
        return _normalize_headers(headers)
    # Need to catch and rebrand some exceptions
    # Then need to optionally turn all exceptions into status codes
    # including all socket.* and httplib.* exceptions.
    def request(self, uri, method="GET", body=None, headers=None, redirections=DEFAULT_MAX_REDIRECTS, connection_type=None):
        """ Performs a single HTTP request.
        The 'uri' is the URI of the HTTP resource and can begin
        with either 'http' or 'https'. The value of 'uri' must be an absolute URI.
        The 'method' is the HTTP method to perform, such as GET, POST, DELETE, etc.
        There is no restriction on the methods allowed.
        The 'body' is the entity body to be sent with the request. It is a string
        object.
        Any extra headers that are to be sent with the request should be provided in the
        'headers' dictionary.
        The maximum number of redirects to follow before raising an
        exception is 'redirections'. The default is 5.
        The return value is a tuple of (response, content), the first
        being an instance of the 'Response' class, the second being
        a string that contains the response entity body.
        """
        try:
            if headers is None:
                headers = {}
            else:
                headers = self._normalize_headers(headers)
            if not headers.has_key('user-agent'):
                headers['user-agent'] = "Python-httplib2/%s (gzip)" % __version__
            uri = iri2uri(uri)
            (scheme, authority, request_uri, defrag_uri) = urlnorm(uri)
            # http://host:443 is really https.
            domain_port = authority.split(":")[0:2]
            if len(domain_port) == 2 and domain_port[1] == '443' and scheme == 'http':
                scheme = 'https'
                authority = domain_port[0]
            # Reuse a pooled connection per scheme+authority when possible.
            conn_key = scheme+":"+authority
            if conn_key in self.connections:
                conn = self.connections[conn_key]
            else:
                if not connection_type:
                    connection_type = SCHEME_TO_CONNECTION[scheme]
                certs = list(self.certificates.iter(authority))
                if issubclass(connection_type, HTTPSConnectionWithTimeout):
                    if certs:
                        conn = self.connections[conn_key] = connection_type(
                                authority, key_file=certs[0][0],
                                cert_file=certs[0][1], timeout=self.timeout,
                                proxy_info=self.proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                    else:
                        conn = self.connections[conn_key] = connection_type(
                                authority, timeout=self.timeout,
                                proxy_info=self.proxy_info,
                                ca_certs=self.ca_certs,
                                disable_ssl_certificate_validation=
                                        self.disable_ssl_certificate_validation)
                else:
                    conn = self.connections[conn_key] = connection_type(
                            authority, timeout=self.timeout,
                            proxy_info=self.proxy_info)
                conn.set_debuglevel(debuglevel)
            if 'range' not in headers and 'accept-encoding' not in headers:
                headers['accept-encoding'] = 'gzip, deflate'
            info = email.Message.Message()
            cached_value = None
            if self.cache:
                cachekey = defrag_uri
                cached_value = self.cache.get(cachekey)
                if cached_value:
                    # info = email.message_from_string(cached_value)
                    #
                    # Need to replace the line above with the kludge below
                    # to fix the non-existent bug not fixed in this
                    # bug report: http://mail.python.org/pipermail/python-bugs-list/2005-September/030289.html
                    try:
                        # NOTE(review): if the cached entry lacks '\r\n\r\n',
                        # this unpacking raises ValueError, which the
                        # 'except IndexError' below does NOT catch -- confirm
                        # cache entries always contain the blank-line separator.
                        info, content = cached_value.split('\r\n\r\n', 1)
                        feedparser = email.FeedParser.FeedParser()
                        feedparser.feed(info)
                        info = feedparser.close()
                        feedparser._parse = None
                    except IndexError:
                        self.cache.delete(cachekey)
                        cachekey = None
                        cached_value = None
            else:
                cachekey = None
            if method in self.optimistic_concurrency_methods and self.cache and info.has_key('etag') and not self.ignore_etag and 'if-match' not in headers:
                # http://www.w3.org/1999/04/Editing/
                headers['if-match'] = info['etag']
            if method not in ["GET", "HEAD"] and self.cache and cachekey:
                # RFC 2616 Section 13.10
                self.cache.delete(cachekey)
            # Check the vary header in the cache to see if this request
            # matches what varies in the cache.
            if method in ['GET', 'HEAD'] and 'vary' in info:
                vary = info['vary']
                vary_headers = vary.lower().replace(' ', '').split(',')
                for header in vary_headers:
                    key = '-varied-%s' % header
                    value = info[key]
                    if headers.get(header, None) != value:
                        cached_value = None
                        break
            if cached_value and method in ["GET", "HEAD"] and self.cache and 'range' not in headers:
                if info.has_key('-x-permanent-redirect-url'):
                    # Should cached permanent redirects be counted in our redirection count? For now, yes.
                    if redirections <= 0:
                        raise RedirectLimit("Redirected more times than rediection_limit allows.", {}, "")
                    (response, new_content) = self.request(info['-x-permanent-redirect-url'], "GET", headers = headers, redirections = redirections - 1)
                    response.previous = Response(info)
                    response.previous.fromcache = True
                else:
                    # Determine our course of action:
                    #   Is the cached entry fresh or stale?
                    #   Has the client requested a non-cached response?
                    #
                    # There seems to be three possible answers:
                    # 1. [FRESH] Return the cache entry w/o doing a GET
                    # 2. [STALE] Do the GET (but add in cache validators if available)
                    # 3. [TRANSPARENT] Do a GET w/o any cache validators (Cache-Control: no-cache) on the request
                    entry_disposition = _entry_disposition(info, headers)
                    if entry_disposition == "FRESH":
                        if not cached_value:
                            info['status'] = '504'
                            content = ""
                        response = Response(info)
                        if cached_value:
                            response.fromcache = True
                        return (response, content)
                    if entry_disposition == "STALE":
                        if info.has_key('etag') and not self.ignore_etag and not 'if-none-match' in headers:
                            headers['if-none-match'] = info['etag']
                        if info.has_key('last-modified') and not 'last-modified' in headers:
                            headers['if-modified-since'] = info['last-modified']
                    elif entry_disposition == "TRANSPARENT":
                        pass
                    (response, new_content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
                if response.status == 304 and method == "GET":
                    # Rewrite the cache entry with the new end-to-end headers
                    # Take all headers that are in response
                    # and overwrite their values in info.
                    # unless they are hop-by-hop, or are listed in the connection header.
                    for key in _get_end2end_headers(response):
                        info[key] = response[key]
                    merged_response = Response(info)
                    if hasattr(response, "_stale_digest"):
                        merged_response._stale_digest = response._stale_digest
                    _updateCache(headers, merged_response, content, self.cache, cachekey)
                    response = merged_response
                    response.status = 200
                    response.fromcache = True
                elif response.status == 200:
                    content = new_content
                else:
                    self.cache.delete(cachekey)
                    content = new_content
            else:
                cc = _parse_cache_control(headers)
                if cc.has_key('only-if-cached'):
                    info['status'] = '504'
                    response = Response(info)
                    content = ""
                else:
                    (response, content) = self._request(conn, authority, uri, request_uri, method, body, headers, redirections, cachekey)
        except Exception, e:
            if self.force_exception_to_status_code:
                # Convert any failure into a synthetic Response instead of
                # letting the exception propagate to the caller.
                if isinstance(e, HttpLib2ErrorWithResponse):
                    response = e.response
                    content = e.content
                    response.status = 500
                    response.reason = str(e)
                elif isinstance(e, socket.timeout):
                    content = "Request Timeout"
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "408",
                            "content-length": len(content)
                            })
                    response.reason = "Request Timeout"
                else:
                    content = str(e)
                    response = Response( {
                            "content-type": "text/plain",
                            "status": "400",
                            "content-length": len(content)
                            })
                    response.reason = "Bad Request"
            else:
                raise
        return (response, content)
class Response(dict):
    """An object more like email.Message than httplib.HTTPResponse."""

    # Was this response served from the local cache?
    fromcache = False
    # HTTP protocol version used by the server: 10 = HTTP/1.0, 11 = HTTP/1.1.
    version = 11
    # Numeric status code returned by the server.
    status = 200
    # Reason phrase returned by the server.
    reason = "Ok"
    # Previous Response in a redirect chain, if any.
    previous = None

    def __init__(self, info):
        """Populate the header mapping from 'info', which may be an
        httplib.HTTPResponse, an email.Message, or a plain mapping."""
        if isinstance(info, httplib.HTTPResponse):
            # Header names from httplib are normalized to lower case.
            for header, value in info.getheaders():
                self[header.lower()] = value
            self.status = info.status
            self['status'] = str(self.status)
            self.reason = info.reason
            self.version = info.version
        elif isinstance(info, email.Message.Message):
            for header, value in info.items():
                self[header] = value
            self.status = int(self['status'])
        else:
            for header, value in info.iteritems():
                self[header] = value
            self.status = int(self.get('status', self.status))

    def __getattr__(self, name):
        """Expose 'dict' as an attribute alias for the response itself."""
        if name != 'dict':
            raise AttributeError(name)
        return self
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/httplib2/__init__.py
|
__init__.py
|
Minor modifications made by Christopher Gilbert (http://motomastyle.com/)
for use in PyLoris (http://pyloris.sourceforge.net/)
Minor modifications made by Mario Vilas (http://breakingcode.wordpress.com/)
mainly to merge bug fixes found in Sourceforge
"""
import socket
import struct
import sys
# Fail fast if the real socket class is unavailable (e.g. stripped by a
# restricted runtime) -- this module subclasses and monkey-patches it.
if getattr(socket, 'socket', None) is None:
    raise ImportError('socket.socket missing, proxy support unusable')
# Supported proxy protocol identifiers, passed to setproxy()/setdefaultproxy().
PROXY_TYPE_SOCKS4 = 1
PROXY_TYPE_SOCKS5 = 2
PROXY_TYPE_HTTP = 3
# Module-wide default proxy settings tuple, set by setdefaultproxy().
_defaultproxy = None
# Reference to the original socket class, kept so socksocket and
# wrapmodule() still work after socket.socket has been patched.
_orgsocket = socket.socket
class ProxyError(Exception):
    """Base class for every error raised by this SOCKS module."""

class GeneralProxyError(ProxyError):
    """Generic failure while negotiating with the proxy."""

class Socks5AuthError(ProxyError):
    """SOCKS5 authentication was required, rejected, or failed."""

class Socks5Error(ProxyError):
    """The SOCKS5 server reported a connection error."""

class Socks4Error(ProxyError):
    """The SOCKS4 server rejected or failed the request."""

class HTTPError(ProxyError):
    """The HTTP proxy returned a non-200 status for CONNECT."""
_generalerrors = ("success",
"invalid data",
"not connected",
"not available",
"bad proxy type",
"bad input")
_socks5errors = ("succeeded",
"general SOCKS server failure",
"connection not allowed by ruleset",
"Network unreachable",
"Host unreachable",
"Connection refused",
"TTL expired",
"Command not supported",
"Address type not supported",
"Unknown error")
_socks5autherrors = ("succeeded",
"authentication is required",
"all offered authentication methods were rejected",
"unknown username or invalid password",
"unknown error")
_socks4errors = ("request granted",
"request rejected or failed",
"request rejected because SOCKS server cannot connect to identd on the client",
"request rejected because the client program and identd report different user-ids",
"unknown error")
def setdefaultproxy(proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
    """setdefaultproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
    Install module-wide default proxy settings.

    Every socksocket created afterwards starts out with these settings
    unless setproxy() is called on it explicitly.
    """
    global _defaultproxy
    _defaultproxy = (proxytype, addr, port, rdns, username, password)
def wrapmodule(module):
    """wrapmodule(module)
    Swap the given module's socket class for the SOCKS-aware socksocket.

    A default proxy must have been configured with setdefaultproxy()
    beforehand.  Only modules that import socket directly into their
    namespace (as most of the standard library does) can be wrapped.
    """
    if _defaultproxy is None:
        raise GeneralProxyError((4, "no proxy specified"))
    module.socket.socket = socksocket
class socksocket(socket.socket):
    """socksocket([family[, type[, proto]]]) -> socket object
    Open a SOCKS enabled socket. The parameters are the same as
    those of the standard socket init. In order for SOCKS to work,
    you must specify family=AF_INET, type=SOCK_STREAM and proto=0.
    """
    def __init__(self, family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0, _sock=None):
        _orgsocket.__init__(self, family, type, proto, _sock)
        # Start from the module-wide default proxy (set via
        # setdefaultproxy), otherwise unconfigured.
        if _defaultproxy != None:
            self.__proxy = _defaultproxy
        else:
            self.__proxy = (None, None, None, None, None, None)
        self.__proxysockname = None
        self.__proxypeername = None
    def __recvall(self, count):
        """__recvall(count) -> data
        Receive EXACTLY the number of bytes requested from the socket.
        Blocks until the required number of bytes have been received.
        """
        data = self.recv(count)
        while len(data) < count:
            d = self.recv(count-len(data))
            if not d: raise GeneralProxyError((0, "connection closed unexpectedly"))
            data = data + d
        return data
    def setproxy(self, proxytype=None, addr=None, port=None, rdns=True, username=None, password=None):
        """setproxy(proxytype, addr[, port[, rdns[, username[, password]]]])
        Sets the proxy to be used.
        proxytype - The type of the proxy to be used. Three types
            are supported: PROXY_TYPE_SOCKS4 (including socks4a),
            PROXY_TYPE_SOCKS5 and PROXY_TYPE_HTTP
        addr - The address of the server (IP or DNS).
        port - The port of the server. Defaults to 1080 for SOCKS
            servers and 8080 for HTTP proxy servers.
        rdns - Should DNS queries be performed on the remote side
            (rather than the local side). The default is True.
            Note: This has no effect with SOCKS4 servers.
        username - Username to authenticate with to the server.
            The default is no authentication.
        password - Password to authenticate with to the server.
            Only relevant when username is also provided.
        """
        self.__proxy = (proxytype, addr, port, rdns, username, password)
    def __negotiatesocks5(self, destaddr, destport):
        """__negotiatesocks5(self,destaddr,destport)
        Negotiates a connection through a SOCKS5 server.
        """
        # First we'll send the authentication packages we support.
        if (self.__proxy[4]!=None) and (self.__proxy[5]!=None):
            # The username/password details were supplied to the
            # setproxy method so we support the USERNAME/PASSWORD
            # authentication (in addition to the standard none).
            self.sendall(struct.pack('BBBB', 0x05, 0x02, 0x00, 0x02))
        else:
            # No username/password were entered, therefore we
            # only support connections with no authentication.
            self.sendall(struct.pack('BBB', 0x05, 0x01, 0x00))
        # We'll receive the server's response to determine which
        # method was selected
        chosenauth = self.__recvall(2)
        if chosenauth[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        # Check the chosen authentication method
        if chosenauth[1:2] == chr(0x00).encode():
            # No authentication is required
            pass
        elif chosenauth[1:2] == chr(0x02).encode():
            # Okay, we need to perform a basic username/password
            # authentication.
            self.sendall(chr(0x01).encode() + chr(len(self.__proxy[4])) + self.__proxy[4] + chr(len(self.__proxy[5])) + self.__proxy[5])
            authstat = self.__recvall(2)
            if authstat[0:1] != chr(0x01).encode():
                # Bad response
                self.close()
                raise GeneralProxyError((1, _generalerrors[1]))
            if authstat[1:2] != chr(0x00).encode():
                # Authentication failed
                self.close()
                raise Socks5AuthError((3, _socks5autherrors[3]))
            # Authentication succeeded
        else:
            # Reaching here is always bad
            self.close()
            # NOTE(review): chosenauth[1] is indexed without a slice here
            # (unlike every other comparison in this method) -- confirm the
            # comparison with chr(0xFF).encode() behaves as intended on the
            # Python version in use.
            if chosenauth[1] == chr(0xFF).encode():
                raise Socks5AuthError((2, _socks5autherrors[2]))
            else:
                raise GeneralProxyError((1, _generalerrors[1]))
        # Now we can request the actual connection
        req = struct.pack('BBB', 0x05, 0x01, 0x00)
        # If the given destination address is an IP address, we'll
        # use the IPv4 address request even if remote resolving was specified.
        try:
            ipaddr = socket.inet_aton(destaddr)
            req = req + chr(0x01).encode() + ipaddr
        except socket.error:
            # Well it's not an IP number, so it's probably a DNS name.
            if self.__proxy[3]:
                # Resolve remotely
                ipaddr = None
                req = req + chr(0x03).encode() + chr(len(destaddr)).encode() + destaddr
            else:
                # Resolve locally
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
                req = req + chr(0x01).encode() + ipaddr
        req = req + struct.pack(">H", destport)
        self.sendall(req)
        # Get the response
        resp = self.__recvall(4)
        if resp[0:1] != chr(0x05).encode():
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        elif resp[1:2] != chr(0x00).encode():
            # Connection failed
            self.close()
            if ord(resp[1:2])<=8:
                raise Socks5Error((ord(resp[1:2]), _socks5errors[ord(resp[1:2])]))
            else:
                raise Socks5Error((9, _socks5errors[9]))
        # Get the bound address/port
        elif resp[3:4] == chr(0x01).encode():
            boundaddr = self.__recvall(4)
        elif resp[3:4] == chr(0x03).encode():
            # Domain-name address type: one length byte then the name.
            resp = resp + self.recv(1)
            boundaddr = self.__recvall(ord(resp[4:5]))
        else:
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        boundport = struct.unpack(">H", self.__recvall(2))[0]
        self.__proxysockname = (boundaddr, boundport)
        if ipaddr != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def getproxysockname(self):
        """getsockname() -> address info
        Returns the bound IP address and port number at the proxy.
        """
        return self.__proxysockname
    def getproxypeername(self):
        """getproxypeername() -> address info
        Returns the IP and port number of the proxy.
        """
        return _orgsocket.getpeername(self)
    def getpeername(self):
        """getpeername() -> address info
        Returns the IP address and port number of the destination
        machine (note: getproxypeername returns the proxy)
        """
        return self.__proxypeername
    def __negotiatesocks4(self,destaddr,destport):
        """__negotiatesocks4(self,destaddr,destport)
        Negotiates a connection through a SOCKS4 server.
        """
        # Check if the destination address provided is an IP address
        rmtrslv = False
        try:
            ipaddr = socket.inet_aton(destaddr)
        except socket.error:
            # It's a DNS name. Check where it should be resolved.
            if self.__proxy[3]:
                # SOCKS4A: send the invalid IP 0.0.0.1 and append the
                # hostname so the server resolves it remotely.
                ipaddr = struct.pack("BBBB", 0x00, 0x00, 0x00, 0x01)
                rmtrslv = True
            else:
                ipaddr = socket.inet_aton(socket.gethostbyname(destaddr))
        # Construct the request packet
        req = struct.pack(">BBH", 0x04, 0x01, destport) + ipaddr
        # The username parameter is considered userid for SOCKS4
        if self.__proxy[4] != None:
            req = req + self.__proxy[4]
        req = req + chr(0x00).encode()
        # DNS name if remote resolving is required
        # NOTE: This is actually an extension to the SOCKS4 protocol
        # called SOCKS4A and may not be supported in all cases.
        if rmtrslv:
            req = req + destaddr + chr(0x00).encode()
        self.sendall(req)
        # Get the response from the server
        resp = self.__recvall(8)
        if resp[0:1] != chr(0x00).encode():
            # Bad data
            self.close()
            raise GeneralProxyError((1,_generalerrors[1]))
        if resp[1:2] != chr(0x5A).encode():
            # Server returned an error
            self.close()
            if ord(resp[1:2]) in (91, 92, 93):
                self.close()
                raise Socks4Error((ord(resp[1:2]), _socks4errors[ord(resp[1:2]) - 90]))
            else:
                raise Socks4Error((94, _socks4errors[4]))
        # Get the bound address/port
        self.__proxysockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
        # NOTE(review): rmtrslv is a bool, so `rmtrslv != None` is always
        # true and the else branch is unreachable; the intent looks like
        # `if not rmtrslv` -- confirm before relying on getpeername()
        # after SOCKS4A remote resolution.
        if rmtrslv != None:
            self.__proxypeername = (socket.inet_ntoa(ipaddr), destport)
        else:
            self.__proxypeername = (destaddr, destport)
    def __negotiatehttp(self, destaddr, destport):
        """__negotiatehttp(self,destaddr,destport)
        Negotiates a connection through an HTTP server.
        """
        # If we need to resolve locally, we do this now
        if not self.__proxy[3]:
            addr = socket.gethostbyname(destaddr)
        else:
            addr = destaddr
        self.sendall(("CONNECT " + addr + ":" + str(destport) + " HTTP/1.1\r\n" + "Host: " + destaddr + "\r\n\r\n").encode())
        # We read the response until we get the string "\r\n\r\n"
        resp = self.recv(1)
        while resp.find("\r\n\r\n".encode()) == -1:
            resp = resp + self.recv(1)
        # We just need the first line to check if the connection
        # was successful
        statusline = resp.splitlines()[0].split(" ".encode(), 2)
        if statusline[0] not in ("HTTP/1.0".encode(), "HTTP/1.1".encode()):
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        try:
            statuscode = int(statusline[1])
        except ValueError:
            self.close()
            raise GeneralProxyError((1, _generalerrors[1]))
        if statuscode != 200:
            self.close()
            raise HTTPError((statuscode, statusline[2]))
        # The proxy does not report a bound address for CONNECT tunnels.
        self.__proxysockname = ("0.0.0.0", 0)
        self.__proxypeername = (addr, destport)
    def connect(self, destpair):
        """connect(self, destpair)
        Connects to the specified destination through a proxy.
        destpair - A tuple of the IP/DNS address and the port number.
        (identical to socket's connect).
        To select the proxy server use setproxy().
        """
        # Do a minimal input check first
        if (not type(destpair) in (list,tuple)) or (len(destpair) < 2) or (type(destpair[0]) != type('')) or (type(destpair[1]) != int):
            raise GeneralProxyError((5, _generalerrors[5]))
        # Connect to the proxy itself first, then negotiate the tunnel
        # using the protocol configured in self.__proxy[0].
        if self.__proxy[0] == PROXY_TYPE_SOCKS5:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self, (self.__proxy[1], portnum))
            self.__negotiatesocks5(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_SOCKS4:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 1080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatesocks4(destpair[0], destpair[1])
        elif self.__proxy[0] == PROXY_TYPE_HTTP:
            if self.__proxy[2] != None:
                portnum = self.__proxy[2]
            else:
                portnum = 8080
            _orgsocket.connect(self,(self.__proxy[1], portnum))
            self.__negotiatehttp(destpair[0], destpair[1])
        elif self.__proxy[0] == None:
            # No proxy configured: plain direct connection.
            _orgsocket.connect(self, (destpair[0], destpair[1]))
        else:
            raise GeneralProxyError((4, _generalerrors[4]))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/httplib2/socks.py
|
socks.py
|
import string, gettext
_ = gettext.gettext
# Compatibility shim: Python 2.3 has no builtin set/frozenset, so fall back
# to the (deprecated) sets module when the builtin name is missing.
try:
    frozenset
except NameError:
    # Import from the sets module for python 2.3
    from sets import Set as set
    from sets import ImmutableSet as frozenset
# Sentinel used to signal end of input to the tokenizer.
EOF = None
# Parse-error codes mapped to human-readable, translatable message templates.
# Every value is passed through gettext's _() so it can be localised; the
# %(...)s placeholders are filled in by the tokenizer/parser when reporting.
# Typos inside the message strings (marked "sic") are preserved byte-for-byte
# because callers/tests may match on the exact text.
E = {
    "null-character": _(u"Null character in input stream, replaced with U+FFFD."),
    "invalid-character": _(u"Invalid codepoint in stream."),
    "incorrectly-placed-solidus": _(u"Solidus (/) incorrectly placed in tag."),
    "incorrect-cr-newline-entity": _(u"Incorrect CR newline entity, replaced with LF."),
    "illegal-windows-1252-entity": _(u"Entity used with illegal number (windows-1252 reference)."),
    "cant-convert-numeric-entity": _(u"Numeric entity couldn't be converted to character (codepoint U+%(charAsInt)08x)."),
    "illegal-codepoint-for-numeric-entity": _(u"Numeric entity represents an illegal codepoint: U+%(charAsInt)08x."),
    "numeric-entity-without-semicolon": _(u"Numeric entity didn't end with ';'."),
    "expected-numeric-entity-but-got-eof": _(u"Numeric entity expected. Got end of file instead."),
    "expected-numeric-entity": _(u"Numeric entity expected but none found."),
    "named-entity-without-semicolon": _(u"Named entity didn't end with ';'."),
    "expected-named-entity": _(u"Named entity expected. Got none."),
    "attributes-in-end-tag": _(u"End tag contains unexpected attributes."),
    "expected-tag-name-but-got-right-bracket": _(u"Expected tag name. Got '>' instead."),
    "expected-tag-name-but-got-question-mark": _(u"Expected tag name. Got '?' instead. (HTML doesn't support processing instructions.)"),
    "expected-tag-name": _(u"Expected tag name. Got something else instead"),
    "expected-closing-tag-but-got-right-bracket": _(u"Expected closing tag. Got '>' instead. Ignoring '</>'."),
    "expected-closing-tag-but-got-eof": _(u"Expected closing tag. Unexpected end of file."),
    "expected-closing-tag-but-got-char": _(u"Expected closing tag. Unexpected character '%(data)s' found."),
    "eof-in-tag-name": _(u"Unexpected end of file in the tag name."),
    "expected-attribute-name-but-got-eof": _(u"Unexpected end of file. Expected attribute name instead."),
    "eof-in-attribute-name": _(u"Unexpected end of file in attribute name."),
    "invalid-character-in-attribute-name": _(u"Invalid chracter in attribute name"),  # sic
    "duplicate-attribute": _(u"Dropped duplicate attribute on tag."),
    "expected-end-of-tag-name-but-got-eof": _(u"Unexpected end of file. Expected = or end of tag."),
    "expected-attribute-value-but-got-eof": _(u"Unexpected end of file. Expected attribute value."),
    "expected-attribute-value-but-got-right-bracket": _(u"Expected attribute value. Got '>' instead."),
    "eof-in-attribute-value-double-quote": _(u"Unexpected end of file in attribute value (\")."),
    "eof-in-attribute-value-single-quote": _(u"Unexpected end of file in attribute value (')."),
    "eof-in-attribute-value-no-quotes": _(u"Unexpected end of file in attribute value."),
    "unexpected-EOF-after-solidus-in-tag": _(u"Unexpected end of file in tag. Expected >"),
    "unexpected-character-after-soldius-in-tag": _(u"Unexpected character after / in tag. Expected >"),  # sic (key)
    "expected-dashes-or-doctype": _(u"Expected '--' or 'DOCTYPE'. Not found."),
    "incorrect-comment": _(u"Incorrect comment."),
    "eof-in-comment": _(u"Unexpected end of file in comment."),
    "eof-in-comment-end-dash": _(u"Unexpected end of file in comment (-)"),
    "unexpected-dash-after-double-dash-in-comment": _(u"Unexpected '-' after '--' found in comment."),
    "eof-in-comment-double-dash": _(u"Unexpected end of file in comment (--)."),
    "unexpected-char-in-comment": _(u"Unexpected character in comment found."),
    "need-space-after-doctype": _(u"No space after literal string 'DOCTYPE'."),
    "expected-doctype-name-but-got-right-bracket": _(u"Unexpected > character. Expected DOCTYPE name."),
    "expected-doctype-name-but-got-eof": _(u"Unexpected end of file. Expected DOCTYPE name."),
    "eof-in-doctype-name": _(u"Unexpected end of file in DOCTYPE name."),
    "eof-in-doctype": _(u"Unexpected end of file in DOCTYPE."),
    "expected-space-or-right-bracket-in-doctype": _(u"Expected space or '>'. Got '%(data)s'"),
    "unexpected-end-of-doctype": _(u"Unexpected end of DOCTYPE."),
    "unexpected-char-in-doctype": _(u"Unexpected character in DOCTYPE."),
    "eof-in-innerhtml": _(u"XXX innerHTML EOF"),
    "unexpected-doctype": _(u"Unexpected DOCTYPE. Ignored."),
    "non-html-root": _(u"html needs to be the first start tag."),
    "expected-doctype-but-got-eof": _(u"Unexpected End of file. Expected DOCTYPE."),
    "unknown-doctype": _(u"Erroneous DOCTYPE."),
    "expected-doctype-but-got-chars": _(u"Unexpected non-space characters. Expected DOCTYPE."),
    "expected-doctype-but-got-start-tag": _(u"Unexpected start tag (%(name)s). Expected DOCTYPE."),
    "expected-doctype-but-got-end-tag": _(u"Unexpected end tag (%(name)s). Expected DOCTYPE."),
    "end-tag-after-implied-root": _(u"Unexpected end tag (%(name)s) after the (implied) root element."),
    "expected-named-closing-tag-but-got-eof": _(u"Unexpected end of file. Expected end tag (%(name)s)."),
    "two-heads-are-not-better-than-one": _(u"Unexpected start tag head in existing head. Ignored."),
    "unexpected-end-tag": _(u"Unexpected end tag (%(name)s). Ignored."),
    "unexpected-start-tag-out-of-my-head": _(u"Unexpected start tag (%(name)s) that can be in head. Moved."),
    "unexpected-start-tag": _(u"Unexpected start tag (%(name)s)."),
    "missing-end-tag": _(u"Missing end tag (%(name)s)."),
    "missing-end-tags": _(u"Missing end tags (%(name)s)."),
    "unexpected-start-tag-implies-end-tag": _(u"Unexpected start tag (%(startName)s) implies end tag (%(endName)s)."),
    "unexpected-start-tag-treated-as": _(u"Unexpected start tag (%(originalName)s). Treated as %(newName)s."),
    "deprecated-tag": _(u"Unexpected start tag %(name)s. Don't use it!"),
    "unexpected-start-tag-ignored": _(u"Unexpected start tag %(name)s. Ignored."),
    "expected-one-end-tag-but-got-another": _(u"Unexpected end tag (%(gotName)s). Missing end tag (%(expectedName)s)."),
    "end-tag-too-early": _(u"End tag (%(name)s) seen too early. Expected other end tag."),
    "end-tag-too-early-named": _(u"Unexpected end tag (%(gotName)s). Expected end tag (%(expectedName)s)."),
    "end-tag-too-early-ignored": _(u"End tag (%(name)s) seen too early. Ignored."),
    "adoption-agency-1.1": _(u"End tag (%(name)s) violates step 1, paragraph 1 of the adoption agency algorithm."),
    "adoption-agency-1.2": _(u"End tag (%(name)s) violates step 1, paragraph 2 of the adoption agency algorithm."),
    "adoption-agency-1.3": _(u"End tag (%(name)s) violates step 1, paragraph 3 of the adoption agency algorithm."),
    "unexpected-end-tag-treated-as": _(u"Unexpected end tag (%(originalName)s). Treated as %(newName)s."),
    "no-end-tag": _(u"This element (%(name)s) has no end tag."),
    "unexpected-implied-end-tag-in-table": _(u"Unexpected implied end tag (%(name)s) in the table phase."),
    "unexpected-implied-end-tag-in-table-body": _(u"Unexpected implied end tag (%(name)s) in the table body phase."),
    "unexpected-char-implies-table-voodoo": _(u"Unexpected non-space characters in table context caused voodoo mode."),
    "unexpected-hidden-input-in-table": _(u"Unexpected input with type hidden in table context."),
    "unexpected-form-in-table": _(u"Unexpected form in table context."),
    "unexpected-start-tag-implies-table-voodoo": _(u"Unexpected start tag (%(name)s) in table context caused voodoo mode."),
    "unexpected-end-tag-implies-table-voodoo": _(u"Unexpected end tag (%(name)s) in table context caused voodoo mode."),
    "unexpected-cell-in-table-body": _(u"Unexpected table cell start tag (%(name)s) in the table body phase."),
    "unexpected-cell-end-tag": _(u"Got table cell end tag (%(name)s) while required end tags are missing."),
    "unexpected-end-tag-in-table-body": _(u"Unexpected end tag (%(name)s) in the table body phase. Ignored."),
    "unexpected-implied-end-tag-in-table-row": _(u"Unexpected implied end tag (%(name)s) in the table row phase."),
    "unexpected-end-tag-in-table-row": _(u"Unexpected end tag (%(name)s) in the table row phase. Ignored."),
    "unexpected-select-in-select": _(u"Unexpected select start tag in the select phase treated as select end tag."),
    "unexpected-input-in-select": _(u"Unexpected input start tag in the select phase."),
    "unexpected-start-tag-in-select": _(u"Unexpected start tag token (%(name)s in the select phase. Ignored."),  # sic: missing ")"
    "unexpected-end-tag-in-select": _(u"Unexpected end tag (%(name)s) in the select phase. Ignored."),
    "unexpected-table-element-start-tag-in-select-in-table": _(u"Unexpected table element start tag (%(name)s) in the select in table phase."),
    "unexpected-table-element-end-tag-in-select-in-table": _(u"Unexpected table element end tag (%(name)s) in the select in table phase."),
    "unexpected-char-after-body": _(u"Unexpected non-space characters in the after body phase."),
    "unexpected-start-tag-after-body": _(u"Unexpected start tag token (%(name)s) in the after body phase."),
    "unexpected-end-tag-after-body": _(u"Unexpected end tag token (%(name)s) in the after body phase."),
    "unexpected-char-in-frameset": _(u"Unepxected characters in the frameset phase. Characters ignored."),  # sic
    "unexpected-start-tag-in-frameset": _(u"Unexpected start tag token (%(name)s) in the frameset phase. Ignored."),
    "unexpected-frameset-in-frameset-innerhtml": _(u"Unexpected end tag token (frameset) in the frameset phase (innerHTML)."),
    "unexpected-end-tag-in-frameset": _(u"Unexpected end tag token (%(name)s) in the frameset phase. Ignored."),
    "unexpected-char-after-frameset": _(u"Unexpected non-space characters in the after frameset phase. Ignored."),
    "unexpected-start-tag-after-frameset": _(u"Unexpected start tag (%(name)s) in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-frameset": _(u"Unexpected end tag (%(name)s) in the after frameset phase. Ignored."),
    "unexpected-end-tag-after-body-innerhtml": _(u"Unexpected end tag after body(innerHtml)"),
    "expected-eof-but-got-char": _(u"Unexpected non-space characters. Expected end of file."),
    "expected-eof-but-got-start-tag": _(u"Unexpected start tag (%(name)s). Expected end of file."),
    "expected-eof-but-got-end-tag": _(u"Unexpected end tag (%(name)s). Expected end of file."),
    "eof-in-table": _(u"Unexpected end of file. Expected table content."),
    "eof-in-select": _(u"Unexpected end of file. Expected select content."),
    "eof-in-frameset": _(u"Unexpected end of file. Expected frameset content."),
    "eof-in-script-in-script": _(u"Unexpected end of file. Expected script content."),
    "non-void-element-with-trailing-solidus": _(u"Trailing solidus not allowed on element %(name)s"),
    "unexpected-html-element-in-foreign-content": _(u"Element %(name)s not allowed in a non-html context"),
    "unexpected-end-tag-before-html": _(u"Unexpected end tag (%(name)s) before html."),
    # Fixed: this was the only entry not wrapped in _(), so it silently
    # bypassed translation; wrap it like every other message.
    "XXX-undefined-error": _(u"Undefined error (this sucks and should be fixed)"),
}
# XML namespace prefixes mapped to their canonical namespace URIs.
namespaces = {
    "html":"http://www.w3.org/1999/xhtml",
    "mathml":"http://www.w3.org/1998/Math/MathML",
    "svg":"http://www.w3.org/2000/svg",
    "xlink":"http://www.w3.org/1999/xlink",
    "xml":"http://www.w3.org/XML/1998/namespace",
    "xmlns":"http://www.w3.org/2000/xmlns/"
}
# (namespace, tag-name) pairs that delimit an element "scope" for the
# tree-construction algorithm.
scopingElements = frozenset((
    (namespaces["html"], "applet"),
    (namespaces["html"], "button"),
    (namespaces["html"], "caption"),
    (namespaces["html"], "html"),
    (namespaces["html"], "marquee"),
    (namespaces["html"], "object"),
    (namespaces["html"], "table"),
    (namespaces["html"], "td"),
    (namespaces["html"], "th"),
    (namespaces["svg"], "foreignObject")
))
# (namespace, tag-name) pairs for the formatting elements tracked by the
# "list of active formatting elements".
formattingElements = frozenset((
    (namespaces["html"], "a"),
    (namespaces["html"], "b"),
    (namespaces["html"], "big"),
    (namespaces["html"], "code"),
    (namespaces["html"], "em"),
    (namespaces["html"], "font"),
    (namespaces["html"], "i"),
    (namespaces["html"], "nobr"),
    (namespaces["html"], "s"),
    (namespaces["html"], "small"),
    (namespaces["html"], "strike"),
    (namespaces["html"], "strong"),
    (namespaces["html"], "tt"),
    (namespaces["html"], "u")
))
# (namespace, tag-name) pairs treated as "special" by the tree-construction
# algorithm.
specialElements = frozenset((
    (namespaces["html"], "address"),
    (namespaces["html"], "area"),
    (namespaces["html"], "article"),
    (namespaces["html"], "aside"),
    (namespaces["html"], "base"),
    (namespaces["html"], "basefont"),
    (namespaces["html"], "bgsound"),
    (namespaces["html"], "blockquote"),
    (namespaces["html"], "body"),
    (namespaces["html"], "br"),
    (namespaces["html"], "center"),
    (namespaces["html"], "col"),
    (namespaces["html"], "colgroup"),
    (namespaces["html"], "command"),
    (namespaces["html"], "datagrid"),
    (namespaces["html"], "dd"),
    (namespaces["html"], "details"),
    (namespaces["html"], "dialog"),
    (namespaces["html"], "dir"),
    (namespaces["html"], "div"),
    (namespaces["html"], "dl"),
    (namespaces["html"], "dt"),
    (namespaces["html"], "embed"),
    (namespaces["html"], "event-source"),
    (namespaces["html"], "fieldset"),
    (namespaces["html"], "figure"),
    (namespaces["html"], "footer"),
    (namespaces["html"], "form"),
    (namespaces["html"], "frame"),
    (namespaces["html"], "frameset"),
    (namespaces["html"], "h1"),
    (namespaces["html"], "h2"),
    (namespaces["html"], "h3"),
    (namespaces["html"], "h4"),
    (namespaces["html"], "h5"),
    (namespaces["html"], "h6"),
    (namespaces["html"], "head"),
    (namespaces["html"], "header"),
    (namespaces["html"], "hr"),
    (namespaces["html"], "iframe"),
    # Note that image is commented out in the spec as "this isn't an
    # element that can end up on the stack, so it doesn't matter,"
    (namespaces["html"], "image"),
    (namespaces["html"], "img"),
    (namespaces["html"], "input"),
    (namespaces["html"], "isindex"),
    (namespaces["html"], "li"),
    (namespaces["html"], "link"),
    (namespaces["html"], "listing"),
    (namespaces["html"], "menu"),
    (namespaces["html"], "meta"),
    (namespaces["html"], "nav"),
    (namespaces["html"], "noembed"),
    (namespaces["html"], "noframes"),
    (namespaces["html"], "noscript"),
    (namespaces["html"], "ol"),
    (namespaces["html"], "optgroup"),
    (namespaces["html"], "option"),
    (namespaces["html"], "p"),
    (namespaces["html"], "param"),
    (namespaces["html"], "plaintext"),
    (namespaces["html"], "pre"),
    (namespaces["html"], "script"),
    (namespaces["html"], "section"),
    (namespaces["html"], "select"),
    (namespaces["html"], "spacer"),
    (namespaces["html"], "style"),
    (namespaces["html"], "tbody"),
    (namespaces["html"], "textarea"),
    (namespaces["html"], "tfoot"),
    (namespaces["html"], "thead"),
    (namespaces["html"], "title"),
    (namespaces["html"], "tr"),
    (namespaces["html"], "ul"),
    (namespaces["html"], "wbr")
))
# The HTML5 "space characters": tab, LF, FF, space and CR.
spaceCharacters = frozenset((
    u"\t",
    u"\n",
    u"\u000C",
    u" ",
    u"\r"
))
# Elements whose presence on the stack triggers table insertion modes.
tableInsertModeElements = frozenset((
    "table",
    "tbody",
    "tfoot",
    "thead",
    "tr"
))
# ASCII character classes as frozensets for O(1) membership tests.
asciiLowercase = frozenset(string.ascii_lowercase)
asciiUppercase = frozenset(string.ascii_uppercase)
asciiLetters = frozenset(string.ascii_letters)
digits = frozenset(string.digits)
hexDigits = frozenset(string.hexdigits)
# Translation table (codepoint -> codepoint) for lower-casing ASCII only,
# suitable for unicode.translate().
asciiUpper2Lower = dict([(ord(c),ord(c.lower()))
                         for c in string.ascii_uppercase])
# Heading elements need to be ordered (a tuple, not a set): code elsewhere
# relies on the h1..h6 ordering.
headingElements = (
    "h1",
    "h2",
    "h3",
    "h4",
    "h5",
    "h6"
)
# Void elements: never have content and take no end tag.
voidElements = frozenset((
    "base",
    "command",
    "event-source",
    "link",
    "meta",
    "hr",
    "br",
    "img",
    "embed",
    "param",
    "area",
    "col",
    "input",
    "source"
))
# Elements whose content is tokenized in CDATA / RCDATA modes respectively
# (markup inside them is not parsed as tags).
cdataElements = frozenset(('title', 'textarea'))
rcdataElements = frozenset((
    'style',
    'script',
    'xmp',
    'iframe',
    'noembed',
    'noframes',
    'noscript'
))
# Per-tag sets of attributes that are boolean (presence-only) in HTML; the
# "" key applies to every tag.
booleanAttributes = {
    "": frozenset(("irrelevant",)),
    "style": frozenset(("scoped",)),
    "img": frozenset(("ismap",)),
    "audio": frozenset(("autoplay","controls")),
    "video": frozenset(("autoplay","controls")),
    "script": frozenset(("defer", "async")),
    "details": frozenset(("open",)),
    "datagrid": frozenset(("multiple", "disabled")),
    "command": frozenset(("hidden", "disabled", "checked", "default")),
    "menu": frozenset(("autosubmit",)),
    "fieldset": frozenset(("disabled", "readonly")),
    "option": frozenset(("disabled", "readonly", "selected")),
    "optgroup": frozenset(("disabled", "readonly")),
    "button": frozenset(("disabled", "autofocus")),
    "input": frozenset(("disabled", "readonly", "required", "autofocus", "checked", "ismap")),
    "select": frozenset(("disabled", "readonly", "autofocus", "multiple")),
    "output": frozenset(("disabled", "readonly")),
}
# entitiesWindows1252 has to be _ordered_ and needs to have an index. It
# therefore can't be a frozenset.
# Index i holds the Unicode codepoint that windows-1252 byte 0x80+i maps to;
# 65533 (U+FFFD REPLACEMENT CHARACTER) marks undefined bytes.
entitiesWindows1252 = (
    8364, # 0x80 0x20AC EURO SIGN
    65533, # 0x81 UNDEFINED
    8218, # 0x82 0x201A SINGLE LOW-9 QUOTATION MARK
    402, # 0x83 0x0192 LATIN SMALL LETTER F WITH HOOK
    8222, # 0x84 0x201E DOUBLE LOW-9 QUOTATION MARK
    8230, # 0x85 0x2026 HORIZONTAL ELLIPSIS
    8224, # 0x86 0x2020 DAGGER
    8225, # 0x87 0x2021 DOUBLE DAGGER
    710, # 0x88 0x02C6 MODIFIER LETTER CIRCUMFLEX ACCENT
    8240, # 0x89 0x2030 PER MILLE SIGN
    352, # 0x8A 0x0160 LATIN CAPITAL LETTER S WITH CARON
    8249, # 0x8B 0x2039 SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    338, # 0x8C 0x0152 LATIN CAPITAL LIGATURE OE
    65533, # 0x8D UNDEFINED
    381, # 0x8E 0x017D LATIN CAPITAL LETTER Z WITH CARON
    65533, # 0x8F UNDEFINED
    65533, # 0x90 UNDEFINED
    8216, # 0x91 0x2018 LEFT SINGLE QUOTATION MARK
    8217, # 0x92 0x2019 RIGHT SINGLE QUOTATION MARK
    8220, # 0x93 0x201C LEFT DOUBLE QUOTATION MARK
    8221, # 0x94 0x201D RIGHT DOUBLE QUOTATION MARK
    8226, # 0x95 0x2022 BULLET
    8211, # 0x96 0x2013 EN DASH
    8212, # 0x97 0x2014 EM DASH
    732, # 0x98 0x02DC SMALL TILDE
    8482, # 0x99 0x2122 TRADE MARK SIGN
    353, # 0x9A 0x0161 LATIN SMALL LETTER S WITH CARON
    8250, # 0x9B 0x203A SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
    339, # 0x9C 0x0153 LATIN SMALL LIGATURE OE
    65533, # 0x9D UNDEFINED
    382, # 0x9E 0x017E LATIN SMALL LETTER Z WITH CARON
    376 # 0x9F 0x0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
)
# The five entities that are predefined in XML (with trailing semicolon).
xmlEntities = frozenset(('lt;', 'gt;', 'amp;', 'apos;', 'quot;'))
# HTML named character references mapped to their replacement text.  Keys
# appear both with and without the trailing ";" where HTML grandfathers the
# legacy semicolon-less form.
entities = {
    "AElig;": u"\u00C6",
    "AElig": u"\u00C6",
    "AMP;": u"\u0026",
    "AMP": u"\u0026",
    "Aacute;": u"\u00C1",
    "Aacute": u"\u00C1",
    "Acirc;": u"\u00C2",
    "Acirc": u"\u00C2",
    "Agrave;": u"\u00C0",
    "Agrave": u"\u00C0",
    "Alpha;": u"\u0391",
    "Aring;": u"\u00C5",
    "Aring": u"\u00C5",
    "Atilde;": u"\u00C3",
    "Atilde": u"\u00C3",
    "Auml;": u"\u00C4",
    "Auml": u"\u00C4",
    "Beta;": u"\u0392",
    "COPY;": u"\u00A9",
    "COPY": u"\u00A9",
    "Ccedil;": u"\u00C7",
    "Ccedil": u"\u00C7",
    "Chi;": u"\u03A7",
    "Dagger;": u"\u2021",
    "Delta;": u"\u0394",
    "ETH;": u"\u00D0",
    "ETH": u"\u00D0",
    "Eacute;": u"\u00C9",
    "Eacute": u"\u00C9",
    "Ecirc;": u"\u00CA",
    "Ecirc": u"\u00CA",
    "Egrave;": u"\u00C8",
    "Egrave": u"\u00C8",
    "Epsilon;": u"\u0395",
    "Eta;": u"\u0397",
    "Euml;": u"\u00CB",
    "Euml": u"\u00CB",
    "GT;": u"\u003E",
    "GT": u"\u003E",
    "Gamma;": u"\u0393",
    "Iacute;": u"\u00CD",
    "Iacute": u"\u00CD",
    "Icirc;": u"\u00CE",
    "Icirc": u"\u00CE",
    "Igrave;": u"\u00CC",
    "Igrave": u"\u00CC",
    "Iota;": u"\u0399",
    "Iuml;": u"\u00CF",
    "Iuml": u"\u00CF",
    "Kappa;": u"\u039A",
    "LT;": u"\u003C",
    "LT": u"\u003C",
    "Lambda;": u"\u039B",
    "Mu;": u"\u039C",
    "Ntilde;": u"\u00D1",
    "Ntilde": u"\u00D1",
    "Nu;": u"\u039D",
    "OElig;": u"\u0152",
    "Oacute;": u"\u00D3",
    "Oacute": u"\u00D3",
    "Ocirc;": u"\u00D4",
    "Ocirc": u"\u00D4",
    "Ograve;": u"\u00D2",
    "Ograve": u"\u00D2",
    "Omega;": u"\u03A9",
    "Omicron;": u"\u039F",
    "Oslash;": u"\u00D8",
    "Oslash": u"\u00D8",
    "Otilde;": u"\u00D5",
    "Otilde": u"\u00D5",
    "Ouml;": u"\u00D6",
    "Ouml": u"\u00D6",
    "Phi;": u"\u03A6",
    "Pi;": u"\u03A0",
    "Prime;": u"\u2033",
    "Psi;": u"\u03A8",
    "QUOT;": u"\u0022",
    "QUOT": u"\u0022",
    "REG;": u"\u00AE",
    "REG": u"\u00AE",
    "Rho;": u"\u03A1",
    "Scaron;": u"\u0160",
    "Sigma;": u"\u03A3",
    "THORN;": u"\u00DE",
    "THORN": u"\u00DE",
    "TRADE;": u"\u2122",
    "Tau;": u"\u03A4",
    "Theta;": u"\u0398",
    "Uacute;": u"\u00DA",
    "Uacute": u"\u00DA",
    "Ucirc;": u"\u00DB",
    "Ucirc": u"\u00DB",
    "Ugrave;": u"\u00D9",
    "Ugrave": u"\u00D9",
    "Upsilon;": u"\u03A5",
    "Uuml;": u"\u00DC",
    "Uuml": u"\u00DC",
    "Xi;": u"\u039E",
    "Yacute;": u"\u00DD",
    "Yacute": u"\u00DD",
    "Yuml;": u"\u0178",
    "Zeta;": u"\u0396",
    "aacute;": u"\u00E1",
    "aacute": u"\u00E1",
    "acirc;": u"\u00E2",
    "acirc": u"\u00E2",
    "acute;": u"\u00B4",
    "acute": u"\u00B4",
    "aelig;": u"\u00E6",
    "aelig": u"\u00E6",
    "agrave;": u"\u00E0",
    "agrave": u"\u00E0",
    "alefsym;": u"\u2135",
    "alpha;": u"\u03B1",
    "amp;": u"\u0026",
    "amp": u"\u0026",
    "and;": u"\u2227",
    "ang;": u"\u2220",
    "apos;": u"\u0027",
    "aring;": u"\u00E5",
    "aring": u"\u00E5",
    "asymp;": u"\u2248",
    "atilde;": u"\u00E3",
    "atilde": u"\u00E3",
    "auml;": u"\u00E4",
    "auml": u"\u00E4",
    "bdquo;": u"\u201E",
    "beta;": u"\u03B2",
    "brvbar;": u"\u00A6",
    "brvbar": u"\u00A6",
    "bull;": u"\u2022",
    "cap;": u"\u2229",
    "ccedil;": u"\u00E7",
    "ccedil": u"\u00E7",
    "cedil;": u"\u00B8",
    "cedil": u"\u00B8",
    "cent;": u"\u00A2",
    "cent": u"\u00A2",
    "chi;": u"\u03C7",
    "circ;": u"\u02C6",
    "clubs;": u"\u2663",
    "cong;": u"\u2245",
    "copy;": u"\u00A9",
    "copy": u"\u00A9",
    "crarr;": u"\u21B5",
    "cup;": u"\u222A",
    "curren;": u"\u00A4",
    "curren": u"\u00A4",
    "dArr;": u"\u21D3",
    "dagger;": u"\u2020",
    "darr;": u"\u2193",
    "deg;": u"\u00B0",
    "deg": u"\u00B0",
    "delta;": u"\u03B4",
    "diams;": u"\u2666",
    "divide;": u"\u00F7",
    "divide": u"\u00F7",
    "eacute;": u"\u00E9",
    "eacute": u"\u00E9",
    "ecirc;": u"\u00EA",
    "ecirc": u"\u00EA",
    "egrave;": u"\u00E8",
    "egrave": u"\u00E8",
    "empty;": u"\u2205",
    "emsp;": u"\u2003",
    "ensp;": u"\u2002",
    "epsilon;": u"\u03B5",
    "equiv;": u"\u2261",
    "eta;": u"\u03B7",
    "eth;": u"\u00F0",
    "eth": u"\u00F0",
    "euml;": u"\u00EB",
    "euml": u"\u00EB",
    "euro;": u"\u20AC",
    "exist;": u"\u2203",
    "fnof;": u"\u0192",
    "forall;": u"\u2200",
    "frac12;": u"\u00BD",
    "frac12": u"\u00BD",
    "frac14;": u"\u00BC",
    "frac14": u"\u00BC",
    "frac34;": u"\u00BE",
    "frac34": u"\u00BE",
    "frasl;": u"\u2044",
    "gamma;": u"\u03B3",
    "ge;": u"\u2265",
    "gt;": u"\u003E",
    "gt": u"\u003E",
    "hArr;": u"\u21D4",
    "harr;": u"\u2194",
    "hearts;": u"\u2665",
    "hellip;": u"\u2026",
    "iacute;": u"\u00ED",
    "iacute": u"\u00ED",
    "icirc;": u"\u00EE",
    "icirc": u"\u00EE",
    "iexcl;": u"\u00A1",
    "iexcl": u"\u00A1",
    "igrave;": u"\u00EC",
    "igrave": u"\u00EC",
    "image;": u"\u2111",
    "infin;": u"\u221E",
    "int;": u"\u222B",
    "iota;": u"\u03B9",
    "iquest;": u"\u00BF",
    "iquest": u"\u00BF",
    "isin;": u"\u2208",
    "iuml;": u"\u00EF",
    "iuml": u"\u00EF",
    "kappa;": u"\u03BA",
    "lArr;": u"\u21D0",
    "lambda;": u"\u03BB",
    "lang;": u"\u27E8",
    "laquo;": u"\u00AB",
    "laquo": u"\u00AB",
    "larr;": u"\u2190",
    "lceil;": u"\u2308",
    "ldquo;": u"\u201C",
    "le;": u"\u2264",
    "lfloor;": u"\u230A",
    "lowast;": u"\u2217",
    "loz;": u"\u25CA",
    "lrm;": u"\u200E",
    "lsaquo;": u"\u2039",
    "lsquo;": u"\u2018",
    "lt;": u"\u003C",
    "lt": u"\u003C",
    "macr;": u"\u00AF",
    "macr": u"\u00AF",
    "mdash;": u"\u2014",
    "micro;": u"\u00B5",
    "micro": u"\u00B5",
    "middot;": u"\u00B7",
    "middot": u"\u00B7",
    "minus;": u"\u2212",
    "mu;": u"\u03BC",
    "nabla;": u"\u2207",
    "nbsp;": u"\u00A0",
    "nbsp": u"\u00A0",
    "ndash;": u"\u2013",
    "ne;": u"\u2260",
    "ni;": u"\u220B",
    "not;": u"\u00AC",
    "not": u"\u00AC",
    "notin;": u"\u2209",
    "nsub;": u"\u2284",
    "ntilde;": u"\u00F1",
    "ntilde": u"\u00F1",
    "nu;": u"\u03BD",
    "oacute;": u"\u00F3",
    "oacute": u"\u00F3",
    "ocirc;": u"\u00F4",
    "ocirc": u"\u00F4",
    "oelig;": u"\u0153",
    "ograve;": u"\u00F2",
    "ograve": u"\u00F2",
    "oline;": u"\u203E",
    "omega;": u"\u03C9",
    "omicron;": u"\u03BF",
    "oplus;": u"\u2295",
    "or;": u"\u2228",
    "ordf;": u"\u00AA",
    "ordf": u"\u00AA",
    "ordm;": u"\u00BA",
    "ordm": u"\u00BA",
    "oslash;": u"\u00F8",
    "oslash": u"\u00F8",
    "otilde;": u"\u00F5",
    "otilde": u"\u00F5",
    "otimes;": u"\u2297",
    "ouml;": u"\u00F6",
    "ouml": u"\u00F6",
    "para;": u"\u00B6",
    "para": u"\u00B6",
    "part;": u"\u2202",
    "permil;": u"\u2030",
    "perp;": u"\u22A5",
    "phi;": u"\u03C6",
    "pi;": u"\u03C0",
    "piv;": u"\u03D6",
    "plusmn;": u"\u00B1",
    "plusmn": u"\u00B1",
    "pound;": u"\u00A3",
    "pound": u"\u00A3",
    "prime;": u"\u2032",
    "prod;": u"\u220F",
    "prop;": u"\u221D",
    "psi;": u"\u03C8",
    "quot;": u"\u0022",
    "quot": u"\u0022",
    "rArr;": u"\u21D2",
    "radic;": u"\u221A",
    "rang;": u"\u27E9",
    "raquo;": u"\u00BB",
    "raquo": u"\u00BB",
    "rarr;": u"\u2192",
    "rceil;": u"\u2309",
    "rdquo;": u"\u201D",
    "real;": u"\u211C",
    "reg;": u"\u00AE",
    "reg": u"\u00AE",
    "rfloor;": u"\u230B",
    "rho;": u"\u03C1",
    "rlm;": u"\u200F",
    "rsaquo;": u"\u203A",
    "rsquo;": u"\u2019",
    "sbquo;": u"\u201A",
    "scaron;": u"\u0161",
    "sdot;": u"\u22C5",
    "sect;": u"\u00A7",
    "sect": u"\u00A7",
    "shy;": u"\u00AD",
    "shy": u"\u00AD",
    "sigma;": u"\u03C3",
    "sigmaf;": u"\u03C2",
    "sim;": u"\u223C",
    "spades;": u"\u2660",
    "sub;": u"\u2282",
    "sube;": u"\u2286",
    "sum;": u"\u2211",
    "sup1;": u"\u00B9",
    "sup1": u"\u00B9",
    "sup2;": u"\u00B2",
    "sup2": u"\u00B2",
    "sup3;": u"\u00B3",
    "sup3": u"\u00B3",
    "sup;": u"\u2283",
    "supe;": u"\u2287",
    "szlig;": u"\u00DF",
    "szlig": u"\u00DF",
    "tau;": u"\u03C4",
    "there4;": u"\u2234",
    "theta;": u"\u03B8",
    "thetasym;": u"\u03D1",
    "thinsp;": u"\u2009",
    "thorn;": u"\u00FE",
    "thorn": u"\u00FE",
    "tilde;": u"\u02DC",
    "times;": u"\u00D7",
    "times": u"\u00D7",
    "trade;": u"\u2122",
    "uArr;": u"\u21D1",
    "uacute;": u"\u00FA",
    "uacute": u"\u00FA",
    "uarr;": u"\u2191",
    "ucirc;": u"\u00FB",
    "ucirc": u"\u00FB",
    "ugrave;": u"\u00F9",
    "ugrave": u"\u00F9",
    "uml;": u"\u00A8",
    "uml": u"\u00A8",
    "upsih;": u"\u03D2",
    "upsilon;": u"\u03C5",
    "uuml;": u"\u00FC",
    "uuml": u"\u00FC",
    "weierp;": u"\u2118",
    "xi;": u"\u03BE",
    "yacute;": u"\u00FD",
    "yacute": u"\u00FD",
    "yen;": u"\u00A5",
    "yen": u"\u00A5",
    "yuml;": u"\u00FF",
    "yuml": u"\u00FF",
    "zeta;": u"\u03B6",
    "zwj;": u"\u200D",
    "zwnj;": u"\u200C"
}
# Replacements for numeric character references that name NUL, CR, or the
# windows-1252 C1 control range 0x80-0x9F.  NUL becomes U+FFFD, CR becomes
# LF, and the C1 range gets the windows-1252 "compatibility" characters.
# Fixed: the original literal listed the 0x81 key twice; the duplicate was
# redundant (same value) and has been removed.
replacementCharacters = {
    0x0: u"\uFFFD",
    0x0d: u"\u000A",
    0x80: u"\u20AC",
    0x81: u"\u0081",
    0x82: u"\u201A",
    0x83: u"\u0192",
    0x84: u"\u201E",
    0x85: u"\u2026",
    0x86: u"\u2020",
    0x87: u"\u2021",
    0x88: u"\u02C6",
    0x89: u"\u2030",
    0x8A: u"\u0160",
    0x8B: u"\u2039",
    0x8C: u"\u0152",
    0x8D: u"\u008D",
    0x8E: u"\u017D",
    0x8F: u"\u008F",
    0x90: u"\u0090",
    0x91: u"\u2018",
    0x92: u"\u2019",
    0x93: u"\u201C",
    0x94: u"\u201D",
    0x95: u"\u2022",
    0x96: u"\u2013",
    0x97: u"\u2014",
    0x98: u"\u02DC",
    0x99: u"\u2122",
    0x9A: u"\u0161",
    0x9B: u"\u203A",
    0x9C: u"\u0153",
    0x9D: u"\u009D",
    0x9E: u"\u017E",
    0x9F: u"\u0178",
}
# Normalised character-set labels (lower-cased, punctuation stripped) mapped
# to the Python codec name to decode with.  Several labels deliberately map
# to a superset codec (e.g. latin1 -> windows-1252) per browser practice.
encodings = {
    '437': 'cp437',
    '850': 'cp850',
    '852': 'cp852',
    '855': 'cp855',
    '857': 'cp857',
    '860': 'cp860',
    '861': 'cp861',
    '862': 'cp862',
    '863': 'cp863',
    '865': 'cp865',
    '866': 'cp866',
    '869': 'cp869',
    'ansix341968': 'ascii',
    'ansix341986': 'ascii',
    'arabic': 'iso8859-6',
    'ascii': 'ascii',
    'asmo708': 'iso8859-6',
    'big5': 'big5',
    'big5hkscs': 'big5hkscs',
    'chinese': 'gbk',
    'cp037': 'cp037',
    'cp1026': 'cp1026',
    'cp154': 'ptcp154',
    'cp367': 'ascii',
    'cp424': 'cp424',
    'cp437': 'cp437',
    'cp500': 'cp500',
    'cp775': 'cp775',
    'cp819': 'windows-1252',
    'cp850': 'cp850',
    'cp852': 'cp852',
    'cp855': 'cp855',
    'cp857': 'cp857',
    'cp860': 'cp860',
    'cp861': 'cp861',
    'cp862': 'cp862',
    'cp863': 'cp863',
    'cp864': 'cp864',
    'cp865': 'cp865',
    'cp866': 'cp866',
    'cp869': 'cp869',
    'cp936': 'gbk',
    'cpgr': 'cp869',
    'cpis': 'cp861',
    'csascii': 'ascii',
    'csbig5': 'big5',
    'cseuckr': 'cp949',
    'cseucpkdfmtjapanese': 'euc_jp',
    'csgb2312': 'gbk',
    'cshproman8': 'hp-roman8',
    'csibm037': 'cp037',
    'csibm1026': 'cp1026',
    'csibm424': 'cp424',
    'csibm500': 'cp500',
    'csibm855': 'cp855',
    'csibm857': 'cp857',
    'csibm860': 'cp860',
    'csibm861': 'cp861',
    'csibm863': 'cp863',
    'csibm864': 'cp864',
    'csibm865': 'cp865',
    'csibm866': 'cp866',
    'csibm869': 'cp869',
    'csiso2022jp': 'iso2022_jp',
    'csiso2022jp2': 'iso2022_jp_2',
    'csiso2022kr': 'iso2022_kr',
    'csiso58gb231280': 'gbk',
    'csisolatin1': 'windows-1252',
    'csisolatin2': 'iso8859-2',
    'csisolatin3': 'iso8859-3',
    'csisolatin4': 'iso8859-4',
    'csisolatin5': 'windows-1254',
    'csisolatin6': 'iso8859-10',
    'csisolatinarabic': 'iso8859-6',
    'csisolatincyrillic': 'iso8859-5',
    'csisolatingreek': 'iso8859-7',
    'csisolatinhebrew': 'iso8859-8',
    'cskoi8r': 'koi8-r',
    'csksc56011987': 'cp949',
    'cspc775baltic': 'cp775',
    'cspc850multilingual': 'cp850',
    'cspc862latinhebrew': 'cp862',
    'cspc8codepage437': 'cp437',
    'cspcp852': 'cp852',
    'csptcp154': 'ptcp154',
    'csshiftjis': 'shift_jis',
    'csunicode11utf7': 'utf-7',
    'cyrillic': 'iso8859-5',
    'cyrillicasian': 'ptcp154',
    'ebcdiccpbe': 'cp500',
    'ebcdiccpca': 'cp037',
    'ebcdiccpch': 'cp500',
    'ebcdiccphe': 'cp424',
    'ebcdiccpnl': 'cp037',
    'ebcdiccpus': 'cp037',
    'ebcdiccpwt': 'cp037',
    'ecma114': 'iso8859-6',
    'ecma118': 'iso8859-7',
    'elot928': 'iso8859-7',
    'eucjp': 'euc_jp',
    'euckr': 'cp949',
    'extendedunixcodepackedformatforjapanese': 'euc_jp',
    'gb18030': 'gb18030',
    'gb2312': 'gbk',
    'gb231280': 'gbk',
    'gbk': 'gbk',
    'greek': 'iso8859-7',
    'greek8': 'iso8859-7',
    'hebrew': 'iso8859-8',
    'hproman8': 'hp-roman8',
    'hzgb2312': 'hz',
    'ibm037': 'cp037',
    'ibm1026': 'cp1026',
    'ibm367': 'ascii',
    'ibm424': 'cp424',
    'ibm437': 'cp437',
    'ibm500': 'cp500',
    'ibm775': 'cp775',
    'ibm819': 'windows-1252',
    'ibm850': 'cp850',
    'ibm852': 'cp852',
    'ibm855': 'cp855',
    'ibm857': 'cp857',
    'ibm860': 'cp860',
    'ibm861': 'cp861',
    'ibm862': 'cp862',
    'ibm863': 'cp863',
    'ibm864': 'cp864',
    'ibm865': 'cp865',
    'ibm866': 'cp866',
    'ibm869': 'cp869',
    'iso2022jp': 'iso2022_jp',
    'iso2022jp2': 'iso2022_jp_2',
    'iso2022kr': 'iso2022_kr',
    'iso646irv1991': 'ascii',
    'iso646us': 'ascii',
    'iso88591': 'windows-1252',
    'iso885910': 'iso8859-10',
    'iso8859101992': 'iso8859-10',
    'iso885911987': 'windows-1252',
    'iso885913': 'iso8859-13',
    'iso885914': 'iso8859-14',
    'iso8859141998': 'iso8859-14',
    'iso885915': 'iso8859-15',
    'iso885916': 'iso8859-16',
    'iso8859162001': 'iso8859-16',
    'iso88592': 'iso8859-2',
    'iso885921987': 'iso8859-2',
    'iso88593': 'iso8859-3',
    'iso885931988': 'iso8859-3',
    'iso88594': 'iso8859-4',
    'iso885941988': 'iso8859-4',
    'iso88595': 'iso8859-5',
    'iso885951988': 'iso8859-5',
    'iso88596': 'iso8859-6',
    'iso885961987': 'iso8859-6',
    'iso88597': 'iso8859-7',
    'iso885971987': 'iso8859-7',
    'iso88598': 'iso8859-8',
    'iso885981988': 'iso8859-8',
    'iso88599': 'windows-1254',
    'iso885991989': 'windows-1254',
    'isoceltic': 'iso8859-14',
    'isoir100': 'windows-1252',
    'isoir101': 'iso8859-2',
    'isoir109': 'iso8859-3',
    'isoir110': 'iso8859-4',
    'isoir126': 'iso8859-7',
    'isoir127': 'iso8859-6',
    'isoir138': 'iso8859-8',
    'isoir144': 'iso8859-5',
    'isoir148': 'windows-1254',
    'isoir149': 'cp949',
    'isoir157': 'iso8859-10',
    'isoir199': 'iso8859-14',
    'isoir226': 'iso8859-16',
    'isoir58': 'gbk',
    'isoir6': 'ascii',
    'koi8r': 'koi8-r',
    'koi8u': 'koi8-u',
    'korean': 'cp949',
    'ksc5601': 'cp949',
    'ksc56011987': 'cp949',
    'ksc56011989': 'cp949',
    'l1': 'windows-1252',
    'l10': 'iso8859-16',
    'l2': 'iso8859-2',
    'l3': 'iso8859-3',
    'l4': 'iso8859-4',
    'l5': 'windows-1254',
    'l6': 'iso8859-10',
    'l8': 'iso8859-14',
    'latin1': 'windows-1252',
    'latin10': 'iso8859-16',
    'latin2': 'iso8859-2',
    'latin3': 'iso8859-3',
    'latin4': 'iso8859-4',
    'latin5': 'windows-1254',
    'latin6': 'iso8859-10',
    'latin8': 'iso8859-14',
    'latin9': 'iso8859-15',
    'ms936': 'gbk',
    'mskanji': 'shift_jis',
    'pt154': 'ptcp154',
    'ptcp154': 'ptcp154',
    'r8': 'hp-roman8',
    'roman8': 'hp-roman8',
    'shiftjis': 'shift_jis',
    'tis620': 'cp874',
    'unicode11utf7': 'utf-7',
    'us': 'ascii',
    'usascii': 'ascii',
    'utf16': 'utf-16',
    'utf16be': 'utf-16-be',
    'utf16le': 'utf-16-le',
    'utf8': 'utf-8',
    'windows1250': 'cp1250',
    'windows1251': 'cp1251',
    'windows1252': 'cp1252',
    'windows1253': 'cp1253',
    'windows1254': 'cp1254',
    'windows1255': 'cp1255',
    'windows1256': 'cp1256',
    'windows1257': 'cp1257',
    'windows1258': 'cp1258',
    'windows936': 'gbk',
    'x-x-big5': 'big5'}
# Integer codes for tokenizer token kinds.
tokenTypes = {
    "Doctype":0,
    "Characters":1,
    "SpaceCharacters":2,
    "StartTag":3,
    "EndTag":4,
    "EmptyTag":5,
    "Comment":6,
    "ParseError":7
}
# The tag-like token kinds, grouped for fast membership tests.
tagTokenTypes = frozenset((tokenTypes["StartTag"], tokenTypes["EndTag"],
                           tokenTypes["EmptyTag"]))
# Reverse of namespaces: URI -> prefix; the MathML prefix is overridden to
# the shorter "math".  NOTE: dict.iteritems() is Python 2 only.
prefixes = dict([(v,k) for k,v in namespaces.iteritems()])
prefixes["http://www.w3.org/1998/Math/MathML"] = "math"
class DataLossWarning(UserWarning):
    """Warning category used to flag operations that may lose data."""
class ReparseException(Exception):
    """Raised to signal that the input must be parsed again from the start."""
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/constants.py
|
constants.py
|
import codecs
import re
import types
import sys
from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from constants import encodings, ReparseException
import utils
#Non-unicode versions of constants for use in the pre-parser
# (the encoding pre-scan operates on raw bytes, which under Python 2 are
# plain str objects, hence the str() conversion of each character).
spaceCharactersBytes = frozenset([str(item) for item in spaceCharacters])
asciiLettersBytes = frozenset([str(item) for item in asciiLetters])
asciiUppercaseBytes = frozenset([str(item) for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([">", "<"])

# Matches every code point HTML treats as invalid in character data:
# C0/C1 controls (minus whitespace), surrogates, and the Unicode
# non-characters U+FDD0..U+FDEF and every U+xFFFE/U+xFFFF pair.
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")

# The non-BMP non-characters from the regex above, as integers, for use by
# the UCS-2 error checker which must assemble them from surrogate pairs.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])

# ASCII whitespace and punctuation stripped from encoding labels before they
# are looked up in the encodings table (Python 2 raw-unicode literal).
ascii_punctuation_re = re.compile(ur"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
    """Buffering for streams that do not have buffering of their own

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """

    def __init__(self, stream):
        # stream: the underlying, unbuffered file-like object
        self.stream = stream
        self.buffer = []            # chunks read so far, in order
        self.position = [-1, 0]     # [chunk number, offset within that chunk]

    def tell(self):
        """Return the absolute position within the buffered stream."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos

    def seek(self, pos):
        """Seek to absolute position ``pos`` within the already-buffered data."""
        # Seeking to the exact end of the buffered data is a valid position,
        # hence <= rather than <.
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        # Walk forward chunk by chunk, reducing the remaining offset by the
        # length of each chunk we pass over.  (The original subtracted ``pos``
        # here, which computed the wrong chunk/offset whenever the target lay
        # beyond the first chunk.)
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]

    def read(self, bytes):
        """Read up to ``bytes`` bytes, serving from the buffer when possible."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        # Total number of bytes currently held in the buffer.
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        # Read a fresh chunk from the underlying stream, remember it, and
        # leave the cursor at the end of the new chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        # Serve a read from buffered chunks, falling back to the underlying
        # stream for any remainder not yet buffered.
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                # The current chunk satisfies the rest of the request.
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                # Consume the rest of this chunk and move to the next one.
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            # (The original assigned the None result of list.append to a
            # throwaway variable; the assignment served no purpose.)
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        return "".join(rv)
class HTMLInputStream:
    """Provides a unicode stream of characters to the HTMLTokenizer.

    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """

    # Number of characters fetched from the decoded stream per readChunk().
    _defaultChunkSize = 10240

    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        """
        #Craziness
        # Wide (UCS-4) Python builds represent astral characters as a single
        # code unit; narrow (UCS-2) builds use surrogate pairs and need the
        # slower, pair-aware error checker.
        if len(u"\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2

        # List of where new lines occur
        self.newLines = [0]

        # (encoding name, confidence) pair; confidence is "certain" or
        # "tentative" per the HTML encoding-sniffing algorithm.
        self.charEncoding = (codecName(encoding), "certain")

        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)

        # Encoding Information
        #Number of bytes to use when looking for a meta element with
        #encoding information
        self.numBytesMeta = 512
        #Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        #Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"

        #Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)

        self.reset()

    def reset(self):
        """(Re)create the decoding reader and clear all chunk/position state."""
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        self.chunk = u""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []

        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0

        #Flag to indicate we may have a CR LF broken across a data chunk
        self._lastChunkEndsWithCR = False

    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            # Otherwise treat source as a string and convert to a file object
            if isinstance(source, unicode):
                source = source.encode('utf-8')
                self.charEncoding = ("utf-8", "certain")
            import cStringIO
            stream = cStringIO.StringIO(str(source))

        # Encoding detection needs a seekable stream; wrap anything that is
        # not (including stdin, whose tell/seek do not behave usefully).
        if (not(hasattr(stream, "tell") and hasattr(stream, "seek")) or
            stream is sys.stdin):
            stream = BufferedStream(stream)

        return stream

    def detectEncoding(self, parseMeta=True, chardet=True):
        """Run the encoding-sniffing algorithm: BOM, then <meta> pre-scan,
        then (optionally) chardet, then the default encoding."""
        #First look for a BOM
        #This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        #If there is no BOM need to look for meta elements with encoding
        #information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        #Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence="tentative"
            encoding = self.defaultEncoding

        #Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1":"windows-1252"}

        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]

        return encoding, confidence

    def changeEncoding(self, newEncoding):
        """Switch to a new encoding discovered mid-parse; raises
        ReparseException when the document must be re-parsed."""
        newEncoding = codecName(newEncoding)
        # A mid-document claim of UTF-16 cannot be honoured (the bytes were
        # clearly decodable as ASCII-compatible); treat it as UTF-8.
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.reset()
            # Capture the old encoding *before* overwriting it: the original
            # code read self.charEncoding[0] after the assignment, so the
            # exception message reported the new encoding twice.
            oldEncoding = self.charEncoding[0]
            self.charEncoding = (newEncoding, "certain")
            raise ReparseException(
                "Encoding changed from %s to %s" % (oldEncoding, newEncoding))

    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }

        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)

        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])         # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)         # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2]) # UTF-16
                seek = 2

        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)

        return encoding

    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()

        # A <meta> cannot truthfully declare UTF-16 (it was readable as
        # ASCII-compatible bytes); interpret it as UTF-8 per the spec.
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"

        return encoding

    def _position(self, offset):
        # Translate an offset within the current chunk into an absolute
        # (line, column) pair using the per-chunk line/column bookkeeping.
        chunk = self.chunk
        nLines = chunk.count(u'\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind(u'\n', 0, offset)
        if lastLinePos == -1:
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)

    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        return (line+1, col)

    def char(self):
        """ Read one character from the stream or queue if available. Return
            EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF

        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1

        return char

    def readChunk(self, chunkSize=None):
        """Pull the next chunk from the decoded stream, normalising nulls and
        newlines.  Returns False at EOF."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize

        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)

        self.chunk = u""
        self.chunkSize = 0
        self.chunkOffset = 0

        data = self.dataStream.read(chunkSize)

        if not data:
            return False

        self.reportCharacterErrors(data)

        # U+0000 is not allowed in the document; replace with U+FFFD.
        data = data.replace(u"\u0000", u"\ufffd")
        #Check for CR LF broken across chunks
        if (self._lastChunkEndsWithCR and data[0] == u"\n"):
            data = data[1:]
            # Stop if the chunk is now empty
            if not data:
                return False
        self._lastChunkEndsWithCR = data[-1] == u"\r"
        data = data.replace(u"\r\n", u"\n")
        data = data.replace(u"\r", u"\n")

        self.chunk = data
        self.chunkSize = len(data)

        return True

    def characterErrorsUCS4(self, data):
        # On wide builds a single regex pass finds all invalid code points.
        for i in xrange(data.count(u"\u0000")):
            self.errors.append("null-character")
        for i in xrange(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")

    def characterErrorsUCS2(self, data):
        #Someone picked the wrong compile option
        #You lose
        for i in xrange(data.count(u"\u0000")):
            self.errors.append("null-character")
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            #Pretty sure there should be endianness issues here
            if utils.isSurrogatePair(data[pos:pos+2]):
                #We have a surrogate pair!
                char_val = utils.surrogatePairToCodepoint(data[pos:pos+2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                # Skip the low surrogate of the pair on the next iteration.
                # NOTE(review): ``skip`` is never reset after being set, so
                # only the first pair's trailing surrogate is ever skipped —
                # preserved as-is from the original.
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone surrogate at the end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
        #This is still wrong if it is possible for a surrogate pair to break a
        #chunk boundary

    def charsUntil(self, characters, opposite = False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """

        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = u"^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)

        rv = []

        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = u"".join(rv)
        return r

    def charsUntilEOF(self):
        """ Returns a string of characters from the stream up to EOF."""

        rv = []

        while True:
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break

        r = u"".join(rv)
        return r

    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class EncodingBytes(str):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    # NOTE: under Python 2, str is the byte-string type, so this models a
    # byte buffer with a movable cursor.  End-of-data is signalled by
    # StopIteration so that callers inside the pre-scan can abort cleanly.
    def __new__(self, value):
        # Lower-case the data once so that all subsequent matching is
        # case-insensitive.
        return str.__new__(self, value.lower())
    def __init__(self, value):
        # Cursor starts one position before the first byte; the first call
        # to next() advances it to index 0.
        self._position=-1
    def __iter__(self):
        return self
    def next(self):
        # Advance the cursor and return the byte there (Python 2 iterator
        # protocol).  Raises StopIteration past the end, TypeError if the
        # cursor somehow went negative.
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p]
    def previous(self):
        # Return the byte at the current position, then step backwards.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p]
    def setPosition(self, position):
        # Setting the position past the end is only detected lazily, on the
        # next access that goes through the property.
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        return self[self.position]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        # Returns the first byte NOT in ``chars`` (leaving the cursor on it),
        # or None if the end of the data is reached.
        p = self.position # use property for the error-checking
        while p < len(self):
            c = self[p]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Mirror image of skip(): advance to the first byte IN ``chars``.
        p = self.position
        while p < len(self):
            c = self[p]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p+len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes)-1)
            return True
        else:
            # Not found: signal end-of-scan to the caller.
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    # Implements the <meta charset> pre-scan: a crude, byte-level tag walk
    # over the first bytes of the document, looking only for an encoding
    # declaration.  Step numbers in comments refer to the HTML specification's
    # "attribute-getting" algorithm.

    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None

    def getEncoding(self):
        # Dispatch table is ordered: longer/more specific prefixes first so
        # e.g. "<!--" wins over "<!".
        methodDispatch = (
            ("<!--",self.handleComment),
            ("<meta",self.handleMeta),
            ("</",self.handlePossibleEndTag),
            ("<!",self.handleOther),
            ("<?",self.handleOther),
            ("<",self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Ran off the end of the buffered bytes.
                        keepParsing=False
                        break
            if not keepParsing:
                break
        return self.encoding

    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo("-->")

    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            #if we have <meta not followed by a space so just keep going
            return True
        #We have a valid meta element we want to search for attributes
        while True:
            #Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == "charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        # False stops the outer pre-scan loop.
                        return False
                elif attr[0] == "content":
                    # content="text/html; charset=..." form.
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False

    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)

    def handlePossibleEndTag(self):
        # Consume the "/" that matchBytes left us on, then treat the rest
        # like a tag.
        self.data.next()
        return self.handlePossibleTag(True)

    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            #If the next byte is not an ascii letter either ignore this
            #fragment (possible start tag case) or treat it according to
            #handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == "<":
            #return to the first step in the overall "two step" algorithm
            #reprocessing the < byte
            data.previous()
        else:
            #Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True

    def handleOther(self):
        return self.data.jumpTo(">")

    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset("/"))
        # Step 2
        if c in (">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        #Step 4 attribute name
        while True:
            if c == "=" and attrName:
                break
            elif c in spaceCharactersBytes:
                #Step 6!
                c = data.skip()
                c = data.next()
                break
            elif c in ("/", ">"):
                return "".join(attrName), ""
            elif c in asciiUppercaseBytes:
                # Attribute names are compared case-insensitively.
                attrName.append(c.lower())
            elif c == None:
                return None
            else:
                attrName.append(c)
            #Step 5
            c = data.next()
        #Step 7
        if c != "=":
            data.previous()
            return "".join(attrName), ""
        #Step 8
        data.next()
        #Step 9
        c = data.skip()
        #Step 10
        if c in ("'", '"'):
            #10.1
            quoteChar = c
            while True:
                #10.2
                c = data.next()
                #10.3
                if c == quoteChar:
                    data.next()
                    return "".join(attrName), "".join(attrValue)
                #10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                #10.5
                else:
                    attrValue.append(c)
        elif c == ">":
            return "".join(attrName), ""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11: unquoted attribute value, terminated by whitespace or
        # an angle bracket.
        while True:
            c = data.next()
            if c in spacesAngleBrackets:
                return "".join(attrName), "".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset=... value from a meta ``content`` attribute
    (e.g. "text/html; charset=utf-8")."""
    def __init__(self, data):
        # data: an EncodingBytes instance over the attribute value.
        self.data = data
    def parse(self):
        # Returns the encoding label as a byte string, or None if no
        # well-formed charset parameter is present.  StopIteration raised by
        # the EncodingBytes cursor means we ran off the end of the value.
        try:
            #Check if the attr name is charset
            #otherwise return
            self.data.jumpTo("charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == "=":
                #If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            #Look for an encoding between matching quote marks
            if self.data.currentByte in ('"', "'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                #Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    #Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            return None
def codecName(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    # Guard clause: anything that is not a (byte or unicode) string cannot
    # name an encoding.
    if encoding is None or type(encoding) not in types.StringTypes:
        return None
    # Strip ASCII whitespace/punctuation and lower-case the label so that
    # variants such as "UTF-8" and "utf8" resolve to the same table entry.
    canonicalName = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonicalName, None)
# ==== source boundary (corpus artifact): end of
#      Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/inputstream.py ====
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
try:
from collections import deque
except ImportError:
from utils import deque
from constants import spaceCharacters
from constants import entitiesWindows1252, entities
from constants import asciiLowercase, asciiLetters, asciiUpper2Lower
from constants import digits, hexDigits, EOF
from constants import tokenTypes, tagTokenTypes
from constants import replacementCharacters
from inputstream import HTMLInputStream
# Group entities by their first character, for faster lookups
entitiesByFirstChar = {}
for e in entities:
    firstChar = e[0]
    if firstChar not in entitiesByFirstChar:
        entitiesByFirstChar[firstChar] = []
    entitiesByFirstChar[firstChar].append(e)
class HTMLTokenizer:
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
Holds a reference to the method to be invoked... XXX
* self.stream
Points to HTMLInputStream object.
"""
# XXX need to fix documentation
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=True, lowercaseAttrName=True):
        # Wrap the raw input in an HTMLInputStream, which handles encoding
        # detection and newline/null normalisation.
        self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)

        #Perform case conversions?
        self.lowercaseElementName = lowercaseElementName
        self.lowercaseAttrName = lowercaseAttrName

        # Setup the initial tokenizer state
        self.escapeFlag = False
        self.lastFourChars = []
        # self.state is always a bound state method; calling it processes
        # some input and returns False only at EOF.
        self.state = self.dataState
        self.escape = False

        # The current token being created
        self.currentToken = None
    def __iter__(self):
        """ This is where the magic happens.

        We do our usual processing through the states and when we have a token
        to return we yield the token which pauses processing until the next token
        is requested.
        """
        self.tokenQueue = deque([])
        # Start processing. When EOF is reached self.state will return False
        # instead of True and the loop will terminate.
        while self.state():
            # Surface any stream-level (encoding) errors before real tokens.
            while self.stream.errors:
                yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
            while self.tokenQueue:
                yield self.tokenQueue.popleft()
    def consumeNumberEntity(self, isHex):
        """This function returns either U+FFFD or the character based on the
        decimal or hexadecimal representation. It also discards ";" if present.
        If not present self.tokenQueue.append({"type": tokenTypes["ParseError"]}) is invoked.
        """
        allowed = digits
        radix = 10
        if isHex:
            allowed = hexDigits
            radix = 16

        charStack = []

        # Consume all the characters that are in range while making sure we
        # don't hit an EOF.
        c = self.stream.char()
        while c in allowed and c is not EOF:
            charStack.append(c)
            c = self.stream.char()

        # Convert the set of characters consumed to an int.
        charAsInt = int("".join(charStack), radix)

        # Certain characters get replaced with others
        if charAsInt in replacementCharacters:
            char = replacementCharacters[charAsInt]
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "illegal-codepoint-for-numeric-entity",
              "datavars": {"charAsInt": charAsInt}})
        elif ((0xD800 <= charAsInt <= 0xDFFF) or
              (charAsInt > 0x10FFFF)):
            # Surrogates and out-of-range code points become U+FFFD.
            char = u"\uFFFD"
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "illegal-codepoint-for-numeric-entity",
              "datavars": {"charAsInt": charAsInt}})
        else:
            # Valid code point, but controls and non-characters still get a
            # parse error (while being emitted unchanged).
            #Should speed up this check somehow (e.g. move the set to a constant)
            if ((0x0001 <= charAsInt <= 0x0008) or
                (0x000E <= charAsInt <= 0x001F) or
                (0x007F <= charAsInt <= 0x009F) or
                (0xFDD0 <= charAsInt <= 0xFDEF) or
                charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
                                        0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                        0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
                                        0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
                                        0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
                                        0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
                                        0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                        0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
                                        0xFFFFF, 0x10FFFE, 0x10FFFF])):
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                        "data":
                                        "illegal-codepoint-for-numeric-entity",
                                        "datavars": {"charAsInt": charAsInt}})
            try:
                # Try/except needed as UCS-2 Python builds' unichar only works
                # within the BMP.
                char = unichr(charAsInt)
            except ValueError:
                char = eval("u'\\U%08x'" % charAsInt)

        # Discard the ; if present. Otherwise, put it back on the queue and
        # invoke parseError on parser.
        if c != u";":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "numeric-entity-without-semicolon"})
            self.stream.unget(c)

        return char
    def consumeEntity(self, allowedChar=None, fromAttribute=False):
        """Consume an entity reference following an "&"; appends the result
        either to the current attribute value (fromAttribute) or to the token
        queue as a Characters token."""
        # Initialise to the default output for when no entity is matched
        output = u"&"

        charStack = [self.stream.char()]
        if (charStack[0] in spaceCharacters or charStack[0] in (EOF, u"<", u"&")
            or (allowedChar is not None and allowedChar == charStack[0])):
            # Not an entity at all: emit the bare "&" and reprocess the char.
            self.stream.unget(charStack[0])
        elif charStack[0] == u"#":
            # Read the next character to see if it's hex or decimal
            hex = False
            charStack.append(self.stream.char())
            if charStack[-1] in (u"x", u"X"):
                hex = True
                charStack.append(self.stream.char())

            # charStack[-1] should be the first digit
            if (hex and charStack[-1] in hexDigits) \
             or (not hex and charStack[-1] in digits):
                # At least one digit found, so consume the whole number
                self.stream.unget(charStack[-1])
                output = self.consumeNumberEntity(hex)
            else:
                # No digits found
                self.tokenQueue.append({"type": tokenTypes["ParseError"],
                    "data": "expected-numeric-entity"})
                self.stream.unget(charStack.pop())
                output = u"&" + u"".join(charStack)
        else:
            # At this point in the process might have named entity. Entities
            # are stored in the global variable "entities".
            #
            # Consume characters and compare to these to a substring of the
            # entity names in the list until the substring no longer matches.
            filteredEntityList = entitiesByFirstChar.get(charStack[0], [])

            def entitiesStartingWith(name):
                return [e for e in filteredEntityList if e.startswith(name)]

            while charStack[-1] is not EOF and\
                  entitiesStartingWith("".join(charStack)):
                charStack.append(self.stream.char())

            # At this point we have a string that starts with some characters
            # that may match an entity
            entityName = None

            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
            for entityLength in xrange(len(charStack)-1, 1, -1):
                possibleEntityName = "".join(charStack[:entityLength])
                if possibleEntityName in entities:
                    entityName = possibleEntityName
                    break

            if entityName is not None:
                if entityName[-1] != ";":
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                      "named-entity-without-semicolon"})
                # In attributes, a semicolon-less entity followed by an
                # alphanumeric is NOT treated as an entity (spec rule).
                if entityName[-1] != ";" and fromAttribute and \
                  (charStack[entityLength] in asciiLetters
                   or charStack[entityLength] in digits):
                    self.stream.unget(charStack.pop())
                    output = u"&" + u"".join(charStack)
                else:
                    output = entities[entityName]
                    self.stream.unget(charStack.pop())
                    output += u"".join(charStack[entityLength:])
            else:
                self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                  "expected-named-entity"})
                self.stream.unget(charStack.pop())
                output = u"&" + u"".join(charStack)

        if fromAttribute:
            self.currentToken["data"][-1][1] += output
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
    def emitCurrentToken(self):
        """This method is a generic handler for emitting the tags. It also sets
        the state to "data" because that's what's needed after a token has been
        emitted.
        """
        token = self.currentToken
        # Add token to the queue to be yielded
        if (token["type"] in tagTokenTypes):
            if self.lowercaseElementName:
                token["name"] = token["name"].translate(asciiUpper2Lower)
            if token["type"] == tokenTypes["EndTag"]:
                # End tags may carry neither attributes nor a self-closing
                # flag; report both as parse errors.
                if token["data"]:
                    self.tokenQueue.append({"type":tokenTypes["ParseError"],
                                            "data":"attributes-in-end-tag"})
                if token["selfClosing"]:
                    self.tokenQueue.append({"type":tokenTypes["ParseError"],
                                            "data":"self-closing-flag-on-end-tag"})
        self.tokenQueue.append(token)
        self.state = self.dataState
# Below are the various tokenizer states worked out.
    def dataState(self):
        """The default tokenizer state: ordinary character data."""
        data = self.stream.char()
        if data == "&":
            self.state = self.entityDataState
        elif data == "<":
            self.state = self.tagOpenState
        elif data is EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
              data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            # Batch up a run of plain characters into a single token.
            chars = self.stream.charsUntil((u"&", u"<"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
              data + chars})
        return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
    def rcdataState(self):
        """RCDATA content (e.g. <title>, <textarea>): entities are honoured
        but no tags other than the matching end tag."""
        data = self.stream.char()
        if data == "&":
            self.state = self.characterReferenceInRcdata
        elif data == "<":
            self.state = self.rcdataLessThanSignState
        elif data == EOF:
            # Tokenization ends.
            return False
        elif data in spaceCharacters:
            # Directly after emitting a token you switch back to the "data
            # state". At that point spaceCharacters are important so they are
            # emitted separately.
            self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
              data + self.stream.charsUntil(spaceCharacters, True)})
            # No need to update lastFourChars here, since the first space will
            # have already been appended to lastFourChars and will have broken
            # any <!-- or --> sequences
        else:
            # Batch up a run of plain characters into a single token.
            chars = self.stream.charsUntil((u"&", u"<"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
              data + chars})
        return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil((u"<"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil((u"<"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntilEOF()})
return True
    def tagOpenState(self):
        """"<" was seen in the data state: decide what kind of markup (if
        any) it begins."""
        data = self.stream.char()
        if data == u"!":
            self.state = self.markupDeclarationOpenState
        elif data == u"/":
            self.state = self.closeTagOpenState
        elif data in asciiLetters:
            # Start of a start tag; begin accumulating its name.
            self.currentToken = {"type": tokenTypes["StartTag"],
                                 "name": data, "data": [],
                                 "selfClosing": False,
                                 "selfClosingAcknowledged": False}
            self.state = self.tagNameState
        elif data == u">":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-tag-name-but-got-right-bracket"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<>"})
            self.state = self.dataState
        elif data == u"?":
            # XXX In theory it could be something besides a tag name. But
            # do we really care?
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-tag-name-but-got-question-mark"})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        else:
            # XXX
            # Not markup after all: emit the "<" literally and reprocess.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
              "expected-tag-name"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
            self.stream.unget(data)
            self.state = self.dataState
        return True
def closeTagOpenState(self):
        """After "</": start an EndTag token on a letter, otherwise recover
        from "</>", EOF, or a stray character (which becomes a bogus comment).
        """
        data = self.stream.char()
        if data in asciiLetters:
            self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
                                 "data": [], "selfClosing":False}
            self.state = self.tagNameState
        elif data == u">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-right-bracket"})
            self.state = self.dataState
        elif data is EOF:
            # Emit the "</" we already consumed as literal text.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-eof"})
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
            self.state = self.dataState
        else:
            # XXX data can be _'_...
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-closing-tag-but-got-char",
                                    "datavars": {"data": data}})
            self.stream.unget(data)
            self.state = self.bogusCommentState
        return True
def tagNameState(self):
        """Accumulate the current tag's name one character at a time until
        whitespace (attributes follow), ">", "/", or EOF."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == u">":
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-tag-name"})
            self.state = self.dataState
        elif data == u"/":
            self.state = self.selfClosingStartTagState
        else:
            self.currentToken["name"] += data
            # (Don't use charsUntil here, because tag names are
            # very short and it's faster to not do anything fancy)
        return True
def rcdataLessThanSignState(self):
        """Decide whether "<" in RCDATA begins an end tag or is literal text."""
        char = self.stream.char()
        if char == "/":
            # Possible end tag: start buffering a candidate tag name.
            self.temporaryBuffer = ""
            self.state = self.rcdataEndTagOpenState
            return True
        # Not an end tag: emit "<" literally and reprocess the character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
        self.stream.unget(char)
        self.state = self.rcdataState
        return True
def rcdataEndTagOpenState(self):
        """After "</" in RCDATA: a letter begins a tag name, else literal text."""
        char = self.stream.char()
        if char in asciiLetters:
            self.temporaryBuffer += char
            self.state = self.rcdataEndTagNameState
            return True
        # Not a tag name: emit "</" literally and reprocess the character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
        self.stream.unget(char)
        self.state = self.rcdataState
        return True
def rcdataEndTagNameState(self):
        """Accumulate a candidate RCDATA end-tag name.

        The buffered name only closes the element when it matches the
        current open tag's name ("appropriate" per the HTML5 spec);
        otherwise the buffered text is re-emitted as literal characters.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            # Not an appropriate end tag: flush "</" + buffer as characters.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": u"</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rcdataState
        return True
def rawtextLessThanSignState(self):
        """Decide whether "<" in RAWTEXT begins an end tag or is literal text."""
        char = self.stream.char()
        if char == "/":
            # Possible end tag: start buffering a candidate tag name.
            self.temporaryBuffer = ""
            self.state = self.rawtextEndTagOpenState
            return True
        # Not an end tag: emit "<" literally and reprocess the character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
        self.stream.unget(char)
        self.state = self.rawtextState
        return True
def rawtextEndTagOpenState(self):
        """After "</" in RAWTEXT: a letter begins a tag name, else literal text."""
        char = self.stream.char()
        if char in asciiLetters:
            self.temporaryBuffer += char
            self.state = self.rawtextEndTagNameState
            return True
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
        self.stream.unget(char)
        self.state = self.rawtextState
        return True
def rawtextEndTagNameState(self):
        """Accumulate a candidate RAWTEXT end-tag name; only an "appropriate"
        name (matching the open tag) closes the element, otherwise the
        buffered text is re-emitted as characters.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": u"</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.rawtextState
        return True
def scriptDataLessThanSignState(self):
        """Handle "<" inside script data: end tag, "<!" escape start, or literal."""
        char = self.stream.char()
        if char == "/":
            # Possible </script>-style end tag.
            self.temporaryBuffer = ""
            self.state = self.scriptDataEndTagOpenState
            return True
        if char == "!":
            # "<!" may begin an escaped (commented-out) script section.
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": u"<!"})
            self.state = self.scriptDataEscapeStartState
            return True
        # Plain "<": emit it literally and reprocess the character.
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
def scriptDataEndTagOpenState(self):
        """After "</" in script data: a letter begins a tag name, else literal."""
        char = self.stream.char()
        if char in asciiLetters:
            self.temporaryBuffer += char
            self.state = self.scriptDataEndTagNameState
            return True
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
def scriptDataEndTagNameState(self):
        """Accumulate a candidate script-data end-tag name; only an
        "appropriate" name (matching the open tag) closes the element.
        """
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": u"</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataState
        return True
def scriptDataEscapeStartState(self):
        """After "<!" in script data: a "-" moves toward the escaped state."""
        char = self.stream.char()
        if char == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
            self.state = self.scriptDataEscapeStartDashState
            return True
        # Not "<!-": fall back to plain script data, reprocessing the char.
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
def scriptDataEscapeStartDashState(self):
        """After "<!-": a second "-" completes "<!--" and enters the
        escaped dash-dash state; anything else returns to plain script data."""
        char = self.stream.char()
        if char == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
            self.state = self.scriptDataEscapedDashDashState
            return True
        self.stream.unget(char)
        self.state = self.scriptDataState
        return True
def scriptDataEscapedState(self):
        """Tokenize script data inside an escaped ("<!--") section, emitting
        character runs until "-", "<", or EOF."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
            self.state = self.scriptDataEscapedDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == EOF:
            self.state = self.dataState
        else:
            # NOTE: charsUntil is called with the two-char string u"<-",
            # which behaves as a container of both stop characters.
            chars = self.stream.charsUntil((u"<-"))
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
                                    data + chars})
        return True
def scriptDataEscapedDashState(self):
        """After one "-" in escaped script data: a second "-" moves to the
        dash-dash state; otherwise emit the char and stay escaped."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
            self.state = self.scriptDataEscapedDashDashState
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedDashDashState(self):
        """After "--" in escaped script data: ">" closes the escape ("-->"),
        further "-" stays here, anything else returns to the escaped state."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
        elif data == "<":
            self.state = self.scriptDataEscapedLessThanSignState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u">"})
            self.state = self.scriptDataState
        elif data == EOF:
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedLessThanSignState(self):
        """Handle "<" in escaped script data: "/" may begin an end tag, a
        letter may begin a nested "<script" (double-escape), else literal."""
        data = self.stream.char()
        if data == "/":
            self.temporaryBuffer = ""
            self.state = self.scriptDataEscapedEndTagOpenState
        elif data in asciiLetters:
            # Possible start of a nested <script> inside the escape.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<" + data})
            self.temporaryBuffer = data
            self.state = self.scriptDataDoubleEscapeStartState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedEndTagOpenState(self):
        """After "</" in escaped script data: a letter begins an end-tag name."""
        char = self.stream.char()
        if char in asciiLetters:
            # Restart the buffer with this first name character.
            self.temporaryBuffer = char
            self.state = self.scriptDataEscapedEndTagNameState
            return True
        self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"</"})
        self.stream.unget(char)
        self.state = self.scriptDataEscapedState
        return True
def scriptDataEscapedEndTagNameState(self):
        """Accumulate a candidate end-tag name inside escaped script data;
        only an "appropriate" name (matching the open tag) closes it."""
        appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
        data = self.stream.char()
        if data in spaceCharacters and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.beforeAttributeNameState
        elif data == "/" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.state = self.selfClosingStartTagState
        elif data == ">" and appropriate:
            self.currentToken = {"type": tokenTypes["EndTag"],
                                 "name": self.temporaryBuffer,
                                 "data": [], "selfClosing":False}
            self.emitCurrentToken()
            self.state = self.dataState
        elif data in asciiLetters:
            self.temporaryBuffer += data
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"],
                                    "data": u"</" + self.temporaryBuffer})
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataDoubleEscapeStartState(self):
        """Check whether "<script" was typed inside an escaped section; if the
        buffered name is "script", enter the double-escaped state."""
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataDoubleEscapedState
            else:
                self.state = self.scriptDataEscapedState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataEscapedState
        return True
def scriptDataDoubleEscapedState(self):
        """Tokenize a nested <script> inside an escaped section ("double
        escaped"); all markup is emitted as characters."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
            self.state = self.scriptDataDoubleEscapedDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
        return True
def scriptDataDoubleEscapedDashState(self):
        """After one "-" in double-escaped script data: a second "-" moves to
        the dash-dash state, else emit and stay double-escaped."""
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
            self.state = self.scriptDataDoubleEscapedDashDashState
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True
def scriptDataDoubleEscapedDashDashState(self):
        """After "--" in double-escaped script data: ">" ends the double
        escape ("-->" seen), further "-" stays here, else return to the
        double-escaped state.

        BUG FIX: this method was misnamed scriptDataDoubleEscapedDashState,
        which (a) shadowed the real dash state defined just above and
        (b) left scriptDataDoubleEscapedDashDashState -- the state that
        the dash state transitions into -- undefined, so hitting "--"
        inside a double-escaped script raised AttributeError at runtime.
        The body already implements the spec's "script data double escaped
        dash dash state"; only the name was wrong.
        """
        data = self.stream.char()
        if data == "-":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"-"})
        elif data == "<":
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"<"})
            self.state = self.scriptDataDoubleEscapedLessThanSignState
        elif data == ">":
            # "-->" closes the escape; back to plain script data.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u">"})
            self.state = self.scriptDataState
        elif data == EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-script-in-script"})
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.state = self.scriptDataDoubleEscapedState
        return True
def scriptDataDoubleEscapedLessThanSignState(self):
        """Handle "<" in double-escaped script data; "/" may end the escape."""
        char = self.stream.char()
        if char == "/":
            # Possible "</script" that would terminate the double escape.
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": u"/"})
            self.temporaryBuffer = ""
            self.state = self.scriptDataDoubleEscapeEndState
            return True
        self.stream.unget(char)
        self.state = self.scriptDataDoubleEscapedState
        return True
def scriptDataDoubleEscapeEndState(self):
        """Check whether "</script" ends the double escape; if the buffered
        name is "script", drop back to the (single) escaped state."""
        data = self.stream.char()
        if data in (spaceCharacters | frozenset(("/", ">"))):
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            if self.temporaryBuffer.lower() == "script":
                self.state = self.scriptDataEscapedState
            else:
                self.state = self.scriptDataDoubleEscapedState
        elif data in asciiLetters:
            self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
            self.temporaryBuffer += data
        else:
            self.stream.unget(data)
            self.state = self.scriptDataDoubleEscapedState
        return True
def beforeAttributeNameState(self):
        """Between a tag name (or previous attribute) and the next attribute:
        skip whitespace, then start a new [name, value] attribute pair."""
        data = self.stream.char()
        if data in spaceCharacters:
            # Skip the whole whitespace run in one call.
            self.stream.charsUntil(spaceCharacters, True)
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == u">":
            self.emitCurrentToken()
        elif data == u"/":
            self.state = self.selfClosingStartTagState
        elif data in (u"'", u'"', u"=", u"<"):
            # Invalid but recoverable: report and treat as a name character.
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-name-but-got-eof"})
            self.state = self.dataState
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
def attributeNameState(self):
        """Accumulate the current attribute's name.

        On leaving this state the name is optionally lowercased and checked
        against earlier attributes for duplicates (reported as a parse error
        but not dropped here); token emission for ">"/EOF is deferred until
        after that check via the emitToken flag.
        """
        data = self.stream.char()
        leavingThisState = True
        emitToken = False
        if data == u"=":
            self.state = self.beforeAttributeValueState
        elif data in asciiLetters:
            self.currentToken["data"][-1][0] += data +\
              self.stream.charsUntil(asciiLetters, True)
            leavingThisState = False
        elif data == u">":
            # XXX If we emit here the attributes are converted to a dict
            # without being checked and when the code below runs we error
            # because data is a dict not a list
            emitToken = True
        elif data in spaceCharacters:
            self.state = self.afterAttributeNameState
        elif data == u"/":
            self.state = self.selfClosingStartTagState
        elif data in (u"'", u'"', u"<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-in-attribute-name"})
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-name"})
            self.state = self.dataState
            emitToken = True
        else:
            self.currentToken["data"][-1][0] += data
            leavingThisState = False
        if leavingThisState:
            # Attributes are not dropped at this stage. That happens when the
            # start tag token is emitted so values can still be safely appended
            # to attributes, but we do want to report the parse error in time.
            if self.lowercaseAttrName:
                self.currentToken["data"][-1][0] = (
                    self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
            for name, value in self.currentToken["data"][:-1]:
                if self.currentToken["data"][-1][0] == name:
                    self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                            "duplicate-attribute"})
                    break
            # XXX Fix for above XXX
            if emitToken:
                self.emitCurrentToken()
        return True
def afterAttributeNameState(self):
        """After an attribute name and whitespace: "=" starts a value, a
        non-special character starts a new attribute."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == u"=":
            self.state = self.beforeAttributeValueState
        elif data == u">":
            self.emitCurrentToken()
        elif data in asciiLetters:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data == u"/":
            self.state = self.selfClosingStartTagState
        elif data in (u"'", u'"', u"<"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "invalid-character-after-attribute-name"})
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-end-of-tag-but-got-eof"})
            self.emitCurrentToken()
        else:
            self.currentToken["data"].append([data, ""])
            self.state = self.attributeNameState
        return True
def beforeAttributeValueState(self):
        """After "=": choose quoted, single-quoted, or unquoted value state."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.stream.charsUntil(spaceCharacters, True)
        elif data == u"\"":
            self.state = self.attributeValueDoubleQuotedState
        elif data == u"&":
            # "&" starts an unquoted value; reprocess so the entity is
            # handled by the unquoted-value state.
            self.state = self.attributeValueUnQuotedState
            self.stream.unget(data);
        elif data == u"'":
            self.state = self.attributeValueSingleQuotedState
        elif data == u">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-right-bracket"})
            self.emitCurrentToken()
        elif data in (u"=", u"<", u"`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "equals-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-attribute-value-but-got-eof"})
            self.emitCurrentToken()
        else:
            self.currentToken["data"][-1][1] += data
            self.state = self.attributeValueUnQuotedState
        return True
def attributeValueDoubleQuotedState(self):
        """Accumulate a double-quoted attribute value, expanding "&" entities,
        until the closing quote or EOF."""
        data = self.stream.char()
        if data == "\"":
            self.state = self.afterAttributeValueState
        elif data == u"&":
            # '"' is the additional allowed character for the entity.
            self.processEntityInAttribute(u'"')
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-double-quote"})
            self.emitCurrentToken()
        else:
            self.currentToken["data"][-1][1] += data +\
              self.stream.charsUntil(("\"", u"&"))
        return True
def attributeValueSingleQuotedState(self):
        """Accumulate a single-quoted attribute value, expanding "&" entities,
        until the closing quote or EOF."""
        data = self.stream.char()
        if data == "'":
            self.state = self.afterAttributeValueState
        elif data == u"&":
            self.processEntityInAttribute(u"'")
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-single-quote"})
            self.emitCurrentToken()
        else:
            self.currentToken["data"][-1][1] += data +\
              self.stream.charsUntil(("'", u"&"))
        return True
def attributeValueUnQuotedState(self):
        """Accumulate an unquoted attribute value until whitespace, ">",
        or EOF; quotes/"="/"<"/"`" inside are parse errors but kept."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == u"&":
            # ">" is the additional allowed character for the entity.
            self.processEntityInAttribute(">")
        elif data == u">":
            self.emitCurrentToken()
        elif data in (u'"', u"'", u"=", u"<", u"`"):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-in-unquoted-attribute-value"})
            self.currentToken["data"][-1][1] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-attribute-value-no-quotes"})
            self.emitCurrentToken()
        else:
            self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
              frozenset((u"&", u">", u'"', u"'", u"=", u"<", u"`")) | spaceCharacters)
        return True
def afterAttributeValueState(self):
        """After a quoted attribute value: expect whitespace, ">", or "/";
        anything else is a parse error and is reprocessed."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeAttributeNameState
        elif data == u">":
            self.emitCurrentToken()
        elif data == u"/":
            self.state = self.selfClosingStartTagState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-EOF-after-attribute-value"})
            self.emitCurrentToken()
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-attribute-value"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
def selfClosingStartTagState(self):
        """After "/" inside a tag: ">" makes the tag self-closing; anything
        else is a parse error and is reprocessed as an attribute."""
        data = self.stream.char()
        if data == ">":
            self.currentToken["selfClosing"] = True
            self.emitCurrentToken()
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                    "data":
                                    "unexpected-EOF-after-solidus-in-tag"})
            self.stream.unget(data)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-character-after-soldius-in-tag"})
            self.stream.unget(data)
            self.state = self.beforeAttributeNameState
        return True
def bogusCommentState(self):
        """Emit everything up to the next ">" (or EOF) as a Comment token."""
        # Make a new comment token and give it as value all the characters
        # until the first > or EOF (charsUntil checks for EOF automatically)
        # and emit it.
        self.tokenQueue.append(
          {"type": tokenTypes["Comment"], "data": self.stream.charsUntil(u">")})
        # Eat the character directly after the bogus comment which is either a
        # ">" or an EOF.
        self.stream.char()
        self.state = self.dataState
        return True
def bogusCommentContinuationState(self):
        """Finish a bogus comment whose token was already created by the
        caller (e.g. markupDeclarationOpenState) and emit it."""
        # Like bogusCommentState, but the caller must create the comment token
        # and this state just adds more characters to it
        self.currentToken["data"] += self.stream.charsUntil(u">")
        self.tokenQueue.append(self.currentToken)
        # Eat the character directly after the bogus comment which is either a
        # ">" or an EOF.
        self.stream.char()
        self.state = self.dataState
        return True
def markupDeclarationOpenState(self):
        """After "<!": recognize "--" (comment) or a case-insensitive
        "DOCTYPE"; anything else becomes a bogus comment seeded with the
        characters consumed so far."""
        charStack = [self.stream.char()]
        if charStack[-1] == u"-":
            charStack.append(self.stream.char())
            if charStack[-1] == u"-":
                self.currentToken = {"type": tokenTypes["Comment"], "data": u""}
                self.state = self.commentStartState
                return True
        elif charStack[-1] in (u'd', u'D'):
            matched = True
            for expected in ((u'o', u'O'), (u'c', u'C'), (u't', u'T'),
                             (u'y', u'Y'), (u'p', u'P'), (u'e', u'E')):
                charStack.append(self.stream.char())
                if charStack[-1] not in expected:
                    matched = False
                    break
            if matched:
                self.currentToken = {"type": tokenTypes["Doctype"],
                                     "name": u"",
                                     "publicId": None, "systemId": None,
                                     "correct": True}
                self.state = self.doctypeState
                return True
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                "expected-dashes-or-doctype"})
        # charStack[:-2] consists of 'safe' characters ('-', 'd', 'o', etc)
        # so they can be copied directly into the bogus comment data, and only
        # the last character might be '>' or EOF and needs to be ungetted
        self.stream.unget(charStack.pop())
        self.currentToken = {"type": tokenTypes["Comment"],
                             "data": u"".join(charStack)}
        self.state = self.bogusCommentContinuationState
        return True
def commentStartState(self):
        """Just after "<!--": handle "-", premature ">" (empty comment),
        EOF, or the start of the comment text."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentStartDashState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data + self.stream.charsUntil(u"-")
            self.state = self.commentState
        return True
def commentStartDashState(self):
        """After "<!---": a second "-" may end the comment; otherwise the
        single dash becomes part of the comment text."""
        data = self.stream.char()
        if data == "-":
            self.state = self.commentEndState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "incorrect-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += "-" + data + self.stream.charsUntil(u"-")
            self.state = self.commentState
        return True
def commentState(self):
        """Accumulate comment text in bulk until a "-" (possible end) or EOF."""
        data = self.stream.char()
        if data == u"-":
            self.state = self.commentEndDashState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data + self.stream.charsUntil(u"-")
        return True
def commentEndDashState(self):
        """After one "-" inside a comment: a second "-" may end it; otherwise
        the dash is comment text and scanning resumes."""
        data = self.stream.char()
        if data == u"-":
            self.state = self.commentEndState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += u"-" + data +\
              self.stream.charsUntil(u"-")
            # Consume the next character which is either a "-" or an EOF as
            # well so if there's a "-" directly after the "-" we go nicely to
            # the "comment end state" without emitting a ParseError() there.
            self.stream.char()
        return True
def commentEndState(self):
        """After "--" inside a comment: ">" ends it; handle extra dashes,
        spaces, "--!", EOF, and stray characters with parse errors.

        NOTE(review): the space / "!" sub-states follow an older draft of the
        HTML5 comment-end handling; confirm against the vendored spec version.
        """
        data = self.stream.char()
        if data == u">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == u"-":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-dash-after-double-dash-in-comment"})
            self.currentToken["data"] += data
        elif data in spaceCharacters:
            self.currentToken["data"] += "--" + data
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-space-after-double-dash-in-comment"})
            self.state = self.commentEndSpaceState
        elif data == "!":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-bang-after-double-dash-in-comment"})
            self.state = self.commentEndBangState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-double-dash"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            # XXX
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-comment"})
            self.currentToken["data"] += u"--" + data
            self.state = self.commentState
        return True
def commentEndBangState(self):
        """After "--!" inside a comment: ">" ends it; otherwise "--!" becomes
        literal comment text."""
        data = self.stream.char()
        if data == u">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == u"-":
            self.currentToken["data"] += "--!"
            self.state = self.commentEndDashState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-bang-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += u"--!" + data
            self.state = self.commentState
        return True
def commentEndSpaceState(self):
        """After "-- " inside a comment: ">" ends it, whitespace accumulates,
        a "-" may restart the end sequence."""
        data = self.stream.char()
        if data == u">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data == u"-":
            self.state = self.commentEndDashState
        elif data in spaceCharacters:
            self.currentToken["data"] += data
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-comment-end-space-state"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["data"] += data
            self.state = self.commentState
        return True
def doctypeState(self):
        """After "<!DOCTYPE": require whitespace before the doctype name;
        missing space is a parse error but recoverable."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypeNameState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "need-space-after-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypeNameState
        return True
def beforeDoctypeNameState(self):
        """Skip whitespace before the doctype name; ">"/EOF here make the
        doctype incorrect (quirks-triggering)."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == u">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-right-bracket"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "expected-doctype-name-but-got-eof"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] = data
            self.state = self.doctypeNameState
        return True
def doctypeNameState(self):
        """Accumulate the doctype name; the name is ASCII-lowercased when the
        state is left (whitespace, ">", or EOF)."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.state = self.afterDoctypeNameState
        elif data == u">":
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype-name"})
            self.currentToken["correct"] = False
            self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.currentToken["name"] += data
        return True
def afterDoctypeNameState(self):
        """After the doctype name: recognize case-insensitive "PUBLIC" or
        "SYSTEM" keywords; anything else makes the doctype bogus."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == u">":
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.currentToken["correct"] = False
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            if data in (u"p", u"P"):
                matched = True
                for expected in ((u"u", u"U"), (u"b", u"B"), (u"l", u"L"),
                                 (u"i", u"I"), (u"c", u"C")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypePublicKeywordState
                    return True
            elif data in (u"s", u"S"):
                matched = True
                for expected in ((u"y", u"Y"), (u"s", u"S"), (u"t", u"T"),
                                 (u"e", u"E"), (u"m", u"M")):
                    data = self.stream.char()
                    if data not in expected:
                        matched = False
                        break
                if matched:
                    self.state = self.afterDoctypeSystemKeywordState
                    return True
            # All the characters read before the current 'data' will be
            # [a-zA-Z], so they're garbage in the bogus doctype and can be
            # discarded; only the latest character might be '>' or EOF
            # and needs to be ungetted
            self.stream.unget(data)
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                "expected-space-or-right-bracket-in-doctype", "datavars":
                {"data": data}})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def afterDoctypePublicKeywordState(self):
        """After the "PUBLIC" keyword: expect whitespace, then the public
        identifier; a quote here is a parse error but is reprocessed."""
        data = self.stream.char()
        if data in spaceCharacters:
            self.state = self.beforeDoctypePublicIdentifierState
        elif data in ("'", '"'):
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.stream.unget(data)
            self.state = self.beforeDoctypePublicIdentifierState
        return True
def beforeDoctypePublicIdentifierState(self):
        """Skip whitespace, then start a quoted doctype public identifier;
        anything unquoted makes the doctype bogus."""
        data = self.stream.char()
        if data in spaceCharacters:
            pass
        elif data == "\"":
            self.currentToken["publicId"] = u""
            self.state = self.doctypePublicIdentifierDoubleQuotedState
        elif data == "'":
            self.currentToken["publicId"] = u""
            self.state = self.doctypePublicIdentifierSingleQuotedState
        elif data == ">":
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-end-of-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        elif data is EOF:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "eof-in-doctype"})
            self.currentToken["correct"] = False
            self.tokenQueue.append(self.currentToken)
            self.state = self.dataState
        else:
            self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
                                    "unexpected-char-in-doctype"})
            self.currentToken["correct"] = False
            self.state = self.bogusDoctypeState
        return True
def doctypePublicIdentifierDoubleQuotedState(self):
    """Collect the characters of a double-quoted DOCTYPE public identifier."""
    ch = self.stream.char()
    if ch == "\"":
        # Closing quote: the public identifier is complete.
        self.state = self.afterDoctypePublicIdentifierState
    elif ch == ">":
        # ">" inside the quotes ends the doctype early and incorrectly.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif ch is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Everything else is part of the identifier.
        self.currentToken["publicId"] += ch
    return True
def doctypePublicIdentifierSingleQuotedState(self):
    # Accumulate a single-quoted DOCTYPE public identifier; "'" ends it,
    # ">"/EOF abort the (now incorrect) doctype.
    data = self.stream.char()
    if data == "'":
        self.state = self.afterDoctypePublicIdentifierState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["publicId"] += data
    return True
def afterDoctypePublicIdentifierState(self):
    # After the public identifier: whitespace moves on towards an optional
    # system identifier; a quote appearing here (without the separating
    # space) is reported as an error but the system id is still parsed.
    data = self.stream.char()
    if data in spaceCharacters:
        self.state = self.betweenDoctypePublicAndSystemIdentifiersState
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == '"':
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.currentToken["systemId"] = u""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.currentToken["systemId"] = u""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
    """State between a DOCTYPE's public and system identifiers.

    Fix: the EOF check used ``data == EOF``; every sibling state in this
    tokenizer compares the EOF sentinel by identity (``data is EOF``), so
    this state now does the same for consistency.
    """
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data == '"':
        self.currentToken["systemId"] = u""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.currentToken["systemId"] = u""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Unquoted junk: mark the token incorrect and skip to ">".
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
def afterDoctypeSystemKeywordState(self):
    """Tokenizer state entered right after the SYSTEM keyword of a DOCTYPE."""
    ch = self.stream.char()
    if ch in spaceCharacters:
        self.state = self.beforeDoctypeSystemIdentifierState
    elif ch in ("'", '"'):
        # Missing the mandatory space before the quoted identifier:
        # report the error, then reprocess the quote in the next state.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-char-in-doctype"})
        self.stream.unget(ch)
        self.state = self.beforeDoctypeSystemIdentifierState
    elif ch is EOF:
        # Premature end of input: emit the doctype flagged as incorrect.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Anything else is reprocessed by the before-identifier state.
        self.stream.unget(ch)
        self.state = self.beforeDoctypeSystemIdentifierState
    return True
def beforeDoctypeSystemIdentifierState(self):
    # Skip whitespace, then expect a quoted DOCTYPE system identifier;
    # ">"/EOF abort the doctype as incorrect.
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == "\"":
        self.currentToken["systemId"] = u""
        self.state = self.doctypeSystemIdentifierDoubleQuotedState
    elif data == "'":
        self.currentToken["systemId"] = u""
        self.state = self.doctypeSystemIdentifierSingleQuotedState
    elif data == ">":
        # Doctype closed without the expected system identifier.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Unquoted junk: mark the token incorrect and skip to ">".
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.currentToken["correct"] = False
        self.state = self.bogusDoctypeState
    return True
def doctypeSystemIdentifierDoubleQuotedState(self):
    # Accumulate a double-quoted system identifier; '"' ends it, ">"/EOF
    # abort the (now incorrect) doctype.
    data = self.stream.char()
    if data == "\"":
        self.state = self.afterDoctypeSystemIdentifierState
    elif data == ">":
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        self.currentToken["systemId"] += data
    return True
def doctypeSystemIdentifierSingleQuotedState(self):
    """Collect the characters of a single-quoted DOCTYPE system identifier."""
    ch = self.stream.char()
    if ch == "'":
        # Closing quote: the system identifier is complete.
        self.state = self.afterDoctypeSystemIdentifierState
    elif ch == ">":
        # ">" inside the quotes ends the doctype early and incorrectly.
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "unexpected-end-of-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif ch is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"],
                                "data": "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Everything else is part of the identifier.
        self.currentToken["systemId"] += ch
    return True
def afterDoctypeSystemIdentifierState(self):
    # After the system identifier only whitespace and ">" are legal.
    data = self.stream.char()
    if data in spaceCharacters:
        pass
    elif data == ">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "eof-in-doctype"})
        self.currentToken["correct"] = False
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Note: unlike the earlier doctype states, the token is NOT
        # marked incorrect here; the junk is just consumed by
        # bogusDoctypeState.
        self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
          "unexpected-char-in-doctype"})
        self.state = self.bogusDoctypeState
    return True
def bogusDoctypeState(self):
    # Consume and discard everything up to ">" (or EOF), then emit the
    # current doctype token as-is.
    data = self.stream.char()
    if data == u">":
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    elif data is EOF:
        # XXX EMIT
        # EOF is pushed back so the data state sees it again.
        self.stream.unget(data)
        self.tokenQueue.append(self.currentToken)
        self.state = self.dataState
    else:
        # Any other character is silently discarded.
        pass
    return True
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/tokenizer.py
|
tokenizer.py
|
try:
    # On Python 2.4+ frozenset is a builtin; this is a no-op probe.
    frozenset
except NameError:
    #Import from the sets module for python 2.3
    # (deliberately shadows the builtins' names with the sets-module types)
    from sets import Set as set
    from sets import ImmutableSet as frozenset
class MethodDispatcher(dict):
    """Dict with two extra behaviours.

    Keys that are lists, tuples, sets or frozensets are expanded on
    construction, so every member of the original list-like key maps to
    the shared value:

        md = MethodDispatcher([(("foo", "bar"), "baz")])
        md["foo"] == "baz"

    Lookups of unknown keys return the ``default`` attribute (None until
    assigned) instead of raising KeyError.
    """
    def __init__(self, items=()):
        # Build a flat pair list and hand it to dict.__init__ in one call;
        # the upstream author measured this as roughly twice as fast as
        # assigning into self directly.
        entries = []
        for key, value in items:
            # NB: exact type check (not isinstance), as in the original.
            if type(key) in (list, tuple, frozenset, set):
                for member in key:
                    entries.append((member, value))
            else:
                entries.append((key, value))
        dict.__init__(self, entries)
        self.default = None

    def __getitem__(self, key):
        # Missing keys fall back to self.default rather than raising.
        return dict.get(self, key, self.default)
#Pure python implementation of deque taken from the ASPN Python Cookbook
#Original code by Raymond Hettinger
class deque(object):
    # Double-ended queue backed by a dict: self.data maps virtual integer
    # positions to elements and [self.left, self.right) is the occupied
    # half-open interval.  maxsize == -1 means unbounded; otherwise the
    # opposite end is evicted when the bound is exceeded.
    def __init__(self, iterable=(), maxsize=-1):
        # The hasattr guard keeps existing contents when __init__ is
        # re-invoked (e.g. via __setstate__/__deepcopy__) — presumably
        # intentional; TODO confirm.
        if not hasattr(self, 'data'):
            self.left = self.right = 0
            self.data = {}
        self.maxsize = maxsize
        self.extend(iterable)

    def append(self, x):
        # Grow on the right; evict from the left when over maxsize.
        self.data[self.right] = x
        self.right += 1
        if self.maxsize != -1 and len(self) > self.maxsize:
            self.popleft()

    def appendleft(self, x):
        # Grow on the left; evict from the right when over maxsize.
        self.left -= 1
        self.data[self.left] = x
        if self.maxsize != -1 and len(self) > self.maxsize:
            self.pop()

    def pop(self):
        # Remove and return the rightmost element.
        if self.left == self.right:
            raise IndexError('cannot pop from empty deque')
        self.right -= 1
        elem = self.data[self.right]
        del self.data[self.right]
        return elem

    def popleft(self):
        # Remove and return the leftmost element.
        if self.left == self.right:
            raise IndexError('cannot pop from empty deque')
        elem = self.data[self.left]
        del self.data[self.left]
        self.left += 1
        return elem

    def clear(self):
        self.data.clear()
        self.left = self.right = 0

    def extend(self, iterable):
        for elem in iterable:
            self.append(elem)

    def extendleft(self, iterable):
        # Note: like collections.deque, this reverses the iterable's order.
        for elem in iterable:
            self.appendleft(elem)

    def rotate(self, n=1):
        # Move n elements from the right end to the left end.
        # NOTE: py2-only xrange, consistent with this vendored module.
        if self:
            n %= len(self)
            for i in xrange(n):
                self.appendleft(self.pop())

    def __getitem__(self, i):
        # Negative indices count from the right, as for lists.
        if i < 0:
            i += len(self)
        try:
            return self.data[i + self.left]
        except KeyError:
            raise IndexError

    def __setitem__(self, i, value):
        if i < 0:
            i += len(self)
        try:
            self.data[i + self.left] = value
        except KeyError:
            raise IndexError

    def __delitem__(self, i):
        size = len(self)
        if not (-size <= i < size):
            raise IndexError
        data = self.data
        if i < 0:
            i += size
        # Shift everything after position i one slot left, then drop the
        # (now duplicated) rightmost element.
        for j in xrange(self.left+i, self.right-1):
            data[j] = data[j+1]
        self.pop()

    def __len__(self):
        return self.right - self.left

    def __cmp__(self, other):
        # py2 three-way comparison: order by type first, then contents.
        if type(self) != type(other):
            return cmp(type(self), type(other))
        return cmp(list(self), list(other))

    def __repr__(self, _track=[]):
        # _track is a deliberate mutable default: it records deques being
        # printed so a self-referential deque shows '...' instead of
        # recursing forever.
        if id(self) in _track:
            return '...'
        _track.append(id(self))
        r = 'deque(%r)' % (list(self),)
        _track.remove(id(self))
        return r

    def __getstate__(self):
        return (tuple(self),)

    def __setstate__(self, s):
        self.__init__(s[0])

    def __hash__(self):
        # Deques are mutable and therefore unhashable.
        raise TypeError

    def __copy__(self):
        return self.__class__(self)

    def __deepcopy__(self, memo={}):
        from copy import deepcopy
        result = self.__class__()
        # Register before recursing so cycles terminate.
        memo[id(self)] = result
        result.__init__(deepcopy(tuple(self), memo))
        return result
#Some utility functions to deal with weirdness around UCS2 vs UCS4
#python builds
def encodingType():
    """Return "UCS2" on narrow Python builds and "UCS4" on wide builds.

    Bug fix: the probe argument to len() was missing, so every call
    raised TypeError.  A character above the Basic Multilingual Plane is
    stored as a surrogate pair (length 2) on narrow/UCS2 builds and as a
    single code point (length 1) on wide/UCS4 builds, so its length
    distinguishes the two.
    """
    if len(u"\U0010FFFF") == 2:
        return "UCS2"
    else:
        return "UCS4"
def isSurrogatePair(data):
    """Return True iff *data* is exactly a UTF-16 high+low surrogate pair."""
    if len(data) != 2:
        return False
    high = ord(data[0])
    low = ord(data[1])
    # High surrogate range D800-DBFF followed by low surrogate DC00-DFFF.
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
def surrogatePairToCodepoint(data):
    """Combine a UTF-16 surrogate pair into its astral code point value."""
    high_bits = ord(data[0]) - 0xD800
    low_bits = ord(data[1]) - 0xDC00
    # Standard UTF-16 decoding: 0x10000 + high * 0x400 + low.
    return 0x10000 + high_bits * 0x400 + low_bits
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/utils.py
|
utils.py
|
import re
from xml.sax.saxutils import escape, unescape
from tokenizer import HTMLTokenizer
from constants import tokenTypes
class HTMLSanitizerMixin(object):
""" sanitization of XHTML+MathML+SVG and of inline style attributes."""
acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area',
'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button',
'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup',
'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn',
'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset',
'figure', 'footer', 'font', 'form', 'header', 'h1', 'h2', 'h3', 'h4',
'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', 'keygen', 'kbd',
'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', 'multicol',
'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', 'p', 'pre',
'progress', 'q', 's', 'samp', 'section', 'select', 'small', 'sound',
'source', 'spacer', 'span', 'strike', 'strong', 'sub', 'sup', 'table',
'tbody', 'td', 'textarea', 'time', 'tfoot', 'th', 'thead', 'tr', 'tt',
'u', 'ul', 'var', 'video']
mathml_elements = ['maction', 'math', 'merror', 'mfrac', 'mi',
'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', 'mphantom',
'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', 'msub',
'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder',
'munderover', 'none']
svg_elements = ['a', 'animate', 'animateColor', 'animateMotion',
'animateTransform', 'clipPath', 'circle', 'defs', 'desc', 'ellipse',
'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern',
'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph',
'mpath', 'path', 'polygon', 'polyline', 'radialGradient', 'rect',
'set', 'stop', 'svg', 'switch', 'text', 'title', 'tspan', 'use']
acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey',
'action', 'align', 'alt', 'autocomplete', 'autofocus', 'axis',
'background', 'balance', 'bgcolor', 'bgproperties', 'border',
'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding',
'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff',
'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color',
'cols', 'colspan', 'compact', 'contenteditable', 'controls', 'coords',
'data', 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default',
'delay', 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end',
'face', 'for', 'form', 'frame', 'galleryimg', 'gutter', 'headers',
'height', 'hidefocus', 'hidden', 'high', 'href', 'hreflang', 'hspace',
'icon', 'id', 'inputmode', 'ismap', 'keytype', 'label', 'leftspacing',
'lang', 'list', 'longdesc', 'loop', 'loopcount', 'loopend',
'loopstart', 'low', 'lowsrc', 'max', 'maxlength', 'media', 'method',
'min', 'multiple', 'name', 'nohref', 'noshade', 'nowrap', 'open',
'optimum', 'pattern', 'ping', 'point-size', 'prompt', 'pqg',
'radiogroup', 'readonly', 'rel', 'repeat-max', 'repeat-min',
'replace', 'required', 'rev', 'rightspacing', 'rows', 'rowspan',
'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', 'start',
'step', 'style', 'summary', 'suppress', 'tabindex', 'target',
'template', 'title', 'toppadding', 'type', 'unselectable', 'usemap',
'urn', 'valign', 'value', 'variable', 'volume', 'vspace', 'vrml',
'width', 'wrap', 'xml:lang']
mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign',
'columnalign', 'columnlines', 'columnspacing', 'columnspan', 'depth',
'display', 'displaystyle', 'equalcolumns', 'equalrows', 'fence',
'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', 'lspace',
'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', 'maxsize',
'minsize', 'other', 'rowalign', 'rowalign', 'rowalign', 'rowlines',
'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection',
'separator', 'stretchy', 'width', 'width', 'xlink:href', 'xlink:show',
'xlink:type', 'xmlns', 'xmlns:xlink']
svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic',
'arabic-form', 'ascent', 'attributeName', 'attributeType',
'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height',
'class', 'clip-path', 'color', 'color-rendering', 'content', 'cx',
'cy', 'd', 'dx', 'dy', 'descent', 'display', 'dur', 'end', 'fill',
'fill-opacity', 'fill-rule', 'font-family', 'font-size',
'font-stretch', 'font-style', 'font-variant', 'font-weight', 'from',
'fx', 'fy', 'g1', 'g2', 'glyph-name', 'gradientUnits', 'hanging',
'height', 'horiz-adv-x', 'horiz-origin-x', 'id', 'ideographic', 'k',
'keyPoints', 'keySplines', 'keyTimes', 'lang', 'marker-end',
'marker-mid', 'marker-start', 'markerHeight', 'markerUnits',
'markerWidth', 'mathematical', 'max', 'min', 'name', 'offset',
'opacity', 'orient', 'origin', 'overline-position',
'overline-thickness', 'panose-1', 'path', 'pathLength', 'points',
'preserveAspectRatio', 'r', 'refX', 'refY', 'repeatCount',
'repeatDur', 'requiredExtensions', 'requiredFeatures', 'restart',
'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', 'stop-color',
'stop-opacity', 'strikethrough-position', 'strikethrough-thickness',
'stroke', 'stroke-dasharray', 'stroke-dashoffset', 'stroke-linecap',
'stroke-linejoin', 'stroke-miterlimit', 'stroke-opacity',
'stroke-width', 'systemLanguage', 'target', 'text-anchor', 'to',
'transform', 'type', 'u1', 'u2', 'underline-position',
'underline-thickness', 'unicode', 'unicode-range', 'units-per-em',
'values', 'version', 'viewBox', 'visibility', 'width', 'widths', 'x',
'x-height', 'x1', 'x2', 'xlink:actuate', 'xlink:arcrole',
'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type',
'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y',
'y1', 'y2', 'zoomAndPan']
attr_val_is_uri = ['href', 'src', 'cite', 'action', 'longdesc',
'xlink:href', 'xml:base']
svg_attr_val_allows_ref = ['clip-path', 'color-profile', 'cursor', 'fill',
'filter', 'marker', 'marker-start', 'marker-mid', 'marker-end',
'mask', 'stroke']
svg_allow_local_href = ['altGlyph', 'animate', 'animateColor',
'animateMotion', 'animateTransform', 'cursor', 'feImage', 'filter',
'linearGradient', 'pattern', 'radialGradient', 'textpath', 'tref',
'set', 'use']
acceptable_css_properties = ['azimuth', 'background-color',
'border-bottom-color', 'border-collapse', 'border-color',
'border-left-color', 'border-right-color', 'border-top-color', 'clear',
'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font',
'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight',
'height', 'letter-spacing', 'line-height', 'overflow', 'pause',
'pause-after', 'pause-before', 'pitch', 'pitch-range', 'richness',
'speak', 'speak-header', 'speak-numeral', 'speak-punctuation',
'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent',
'unicode-bidi', 'vertical-align', 'voice-family', 'volume',
'white-space', 'width']
acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue',
'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed',
'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left',
'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive',
'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top',
'transparent', 'underline', 'white', 'yellow']
acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule',
'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin',
'stroke-opacity']
acceptable_protocols = [ 'ed2k', 'ftp', 'http', 'https', 'irc',
'mailto', 'news', 'gopher', 'nntp', 'telnet', 'webcal',
'xmpp', 'callto', 'feed', 'urn', 'aim', 'rsync', 'tag',
'ssh', 'sftp', 'rtsp', 'afs' ]
# subclasses may define their own versions of these constants
allowed_elements = acceptable_elements + mathml_elements + svg_elements
allowed_attributes = acceptable_attributes + mathml_attributes + svg_attributes
allowed_css_properties = acceptable_css_properties
allowed_css_keywords = acceptable_css_keywords
allowed_svg_properties = acceptable_svg_properties
allowed_protocols = acceptable_protocols
# Sanitize the +html+, escaping all elements not in ALLOWED_ELEMENTS, and
# stripping out all # attributes not in ALLOWED_ATTRIBUTES. Style
# attributes are parsed, and a restricted set, # specified by
# ALLOWED_CSS_PROPERTIES and ALLOWED_CSS_KEYWORDS, are allowed through.
# attributes in ATTR_VAL_IS_URI are scanned, and only URI schemes specified
# in ALLOWED_PROTOCOLS are allowed.
#
# sanitize_html('<script> do_nasty_stuff() </script>')
# => <script> do_nasty_stuff() </script>
# sanitize_html('<a href="javascript: sucker();">Click here for $100</a>')
# => <a>Click here for $100</a>
def sanitize_token(self, token):
    """Sanitize a single tokenizer token.

    Allowed elements keep only whitelisted attributes (with URI-valued
    attributes scheme-checked and style attributes run through
    sanitize_css); disallowed elements are re-serialised as escaped
    character data; comments are dropped (implicit None return).

    Consistency fix: uses the ``in`` operator instead of the deprecated
    ``dict.has_key`` (this method already used ``in`` elsewhere), and
    tests membership against ``tokenTypes`` directly instead of
    materialising ``tokenTypes.keys()``.
    """
    # accommodate filters which use token_type differently
    token_type = token["type"]
    if token_type in tokenTypes:
        token_type = tokenTypes[token_type]
    if token_type in (tokenTypes["StartTag"], tokenTypes["EndTag"],
                      tokenTypes["EmptyTag"]):
        if token["name"] in self.allowed_elements:
            if "data" in token:
                # Keep only whitelisted attributes.  The [::-1] reversal
                # makes the FIRST occurrence of a duplicated attribute win
                # (later dict insertions overwrite earlier ones).
                attrs = dict([(name, val) for name, val in
                              token["data"][::-1]
                              if name in self.allowed_attributes])
                for attr in self.attr_val_is_uri:
                    if attr not in attrs:
                        continue
                    # Unescape and strip control/space characters first so
                    # obfuscated schemes (e.g. "jav&#x09;ascript:") cannot
                    # slip past the protocol whitelist.
                    val_unescaped = re.sub("[`\000-\040\177-\240\s]+", '',
                                           unescape(attrs[attr])).lower()
                    #remove replacement characters from unescaped characters
                    val_unescaped = val_unescaped.replace(u"\ufffd", "")
                    if (re.match("^[a-z0-9][-+.a-z0-9]*:", val_unescaped) and
                        (val_unescaped.split(':')[0] not in
                         self.allowed_protocols)):
                        del attrs[attr]
                for attr in self.svg_attr_val_allows_ref:
                    if attr in attrs:
                        # Strip url(...) references to non-local resources.
                        attrs[attr] = re.sub(r'url\s*\(\s*[^#\s][^)]+?\)',
                                             ' ',
                                             unescape(attrs[attr]))
                if (token["name"] in self.svg_allow_local_href and
                    'xlink:href' in attrs and re.search('^\s*[^#\s].*',
                                                        attrs['xlink:href'])):
                    # Only fragment-local xlink:href values are allowed.
                    del attrs['xlink:href']
                if 'style' in attrs:
                    attrs['style'] = self.sanitize_css(attrs['style'])
                token["data"] = [[name, val] for name, val in attrs.items()]
            return token
        else:
            # Disallowed element: re-serialise the tag as escaped text.
            if token_type == tokenTypes["EndTag"]:
                token["data"] = "</%s>" % token["name"]
            elif token["data"]:
                attrs = ''.join([' %s="%s"' % (k, escape(v)) for k, v in token["data"]])
                token["data"] = "<%s%s>" % (token["name"], attrs)
            else:
                token["data"] = "<%s>" % token["name"]
            if token.get("selfClosing"):
                token["data"] = token["data"][:-1] + "/>"
            if token["type"] in tokenTypes:
                token["type"] = "Characters"
            else:
                token["type"] = tokenTypes["Characters"]
            del token["name"]
            return token
    elif token_type == tokenTypes["Comment"]:
        # Comments are dropped entirely (falls through to return None).
        pass
    else:
        return token
def sanitize_css(self, style):
    """Return *style* reduced to whitelisted CSS declarations."""
    # disallow urls
    style = re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ', style)
    # Gauntlet: reject the whole declaration block outright if it
    # contains anything outside a conservative character set or does not
    # look like "prop: value;" pairs.
    if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style):
        return ''
    if not re.match("^\s*([-\w]+\s*:[^:;]*(;\s*|$))*$", style):
        return ''
    kept = []
    for prop, value in re.findall("([-\w]+)\s*:\s*([^:;]*)", style):
        if not value:
            continue
        prop_lower = prop.lower()
        if prop_lower in self.allowed_css_properties:
            kept.append(prop + ': ' + value + ';')
        elif prop.split('-')[0].lower() in ['background', 'border', 'margin',
                                            'padding']:
            # Shorthand properties: every keyword must be whitelisted or
            # look like a colour / dimension literal.
            for keyword in value.split():
                if not keyword in self.acceptable_css_keywords and \
                   not re.match("^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$", keyword):
                    break
            else:
                kept.append(prop + ': ' + value + ';')
        elif prop_lower in self.allowed_svg_properties:
            kept.append(prop + ': ' + value + ';')
    return ' '.join(kept)
class HTMLSanitizer(HTMLTokenizer, HTMLSanitizerMixin):
    """Tokenizer whose token stream is filtered through HTMLSanitizerMixin."""
    def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
                 lowercaseElementName=False, lowercaseAttrName=False):
        #Change case matching defaults as we only output lowercase html anyway
        #This solution doesn't seem ideal...
        HTMLTokenizer.__init__(self, stream, encoding, parseMeta, useChardet,
                               lowercaseElementName, lowercaseAttrName)

    def __iter__(self):
        # Yield only the tokens that survive sanitization (sanitize_token
        # returns a falsy value for dropped tokens such as comments).
        for raw_token in HTMLTokenizer.__iter__(self):
            cleaned = self.sanitize_token(raw_token)
            if cleaned:
                yield cleaned
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/sanitizer.py
|
sanitizer.py
|
import re
baseChar = """[#x0041-#x005A] | [#x0061-#x007A] | [#x00C0-#x00D6] | [#x00D8-#x00F6] | [#x00F8-#x00FF] | [#x0100-#x0131] | [#x0134-#x013E] | [#x0141-#x0148] | [#x014A-#x017E] | [#x0180-#x01C3] | [#x01CD-#x01F0] | [#x01F4-#x01F5] | [#x01FA-#x0217] | [#x0250-#x02A8] | [#x02BB-#x02C1] | #x0386 | [#x0388-#x038A] | #x038C | [#x038E-#x03A1] | [#x03A3-#x03CE] | [#x03D0-#x03D6] | #x03DA | #x03DC | #x03DE | #x03E0 | [#x03E2-#x03F3] | [#x0401-#x040C] | [#x040E-#x044F] | [#x0451-#x045C] | [#x045E-#x0481] | [#x0490-#x04C4] | [#x04C7-#x04C8] | [#x04CB-#x04CC] | [#x04D0-#x04EB] | [#x04EE-#x04F5] | [#x04F8-#x04F9] | [#x0531-#x0556] | #x0559 | [#x0561-#x0586] | [#x05D0-#x05EA] | [#x05F0-#x05F2] | [#x0621-#x063A] | [#x0641-#x064A] | [#x0671-#x06B7] | [#x06BA-#x06BE] | [#x06C0-#x06CE] | [#x06D0-#x06D3] | #x06D5 | [#x06E5-#x06E6] | [#x0905-#x0939] | #x093D | [#x0958-#x0961] | [#x0985-#x098C] | [#x098F-#x0990] | [#x0993-#x09A8] | [#x09AA-#x09B0] | #x09B2 | [#x09B6-#x09B9] | [#x09DC-#x09DD] | [#x09DF-#x09E1] | [#x09F0-#x09F1] | [#x0A05-#x0A0A] | [#x0A0F-#x0A10] | [#x0A13-#x0A28] | [#x0A2A-#x0A30] | [#x0A32-#x0A33] | [#x0A35-#x0A36] | [#x0A38-#x0A39] | [#x0A59-#x0A5C] | #x0A5E | [#x0A72-#x0A74] | [#x0A85-#x0A8B] | #x0A8D | [#x0A8F-#x0A91] | [#x0A93-#x0AA8] | [#x0AAA-#x0AB0] | [#x0AB2-#x0AB3] | [#x0AB5-#x0AB9] | #x0ABD | #x0AE0 | [#x0B05-#x0B0C] | [#x0B0F-#x0B10] | [#x0B13-#x0B28] | [#x0B2A-#x0B30] | [#x0B32-#x0B33] | [#x0B36-#x0B39] | #x0B3D | [#x0B5C-#x0B5D] | [#x0B5F-#x0B61] | [#x0B85-#x0B8A] | [#x0B8E-#x0B90] | [#x0B92-#x0B95] | [#x0B99-#x0B9A] | #x0B9C | [#x0B9E-#x0B9F] | [#x0BA3-#x0BA4] | [#x0BA8-#x0BAA] | [#x0BAE-#x0BB5] | [#x0BB7-#x0BB9] | [#x0C05-#x0C0C] | [#x0C0E-#x0C10] | [#x0C12-#x0C28] | [#x0C2A-#x0C33] | [#x0C35-#x0C39] | [#x0C60-#x0C61] | [#x0C85-#x0C8C] | [#x0C8E-#x0C90] | [#x0C92-#x0CA8] | [#x0CAA-#x0CB3] | [#x0CB5-#x0CB9] | #x0CDE | [#x0CE0-#x0CE1] | [#x0D05-#x0D0C] | [#x0D0E-#x0D10] | [#x0D12-#x0D28] | [#x0D2A-#x0D39] | [#x0D60-#x0D61] | [#x0E01-#x0E2E] | #x0E30 | 
[#x0E32-#x0E33] | [#x0E40-#x0E45] | [#x0E81-#x0E82] | #x0E84 | [#x0E87-#x0E88] | #x0E8A | #x0E8D | [#x0E94-#x0E97] | [#x0E99-#x0E9F] | [#x0EA1-#x0EA3] | #x0EA5 | #x0EA7 | [#x0EAA-#x0EAB] | [#x0EAD-#x0EAE] | #x0EB0 | [#x0EB2-#x0EB3] | #x0EBD | [#x0EC0-#x0EC4] | [#x0F40-#x0F47] | [#x0F49-#x0F69] | [#x10A0-#x10C5] | [#x10D0-#x10F6] | #x1100 | [#x1102-#x1103] | [#x1105-#x1107] | #x1109 | [#x110B-#x110C] | [#x110E-#x1112] | #x113C | #x113E | #x1140 | #x114C | #x114E | #x1150 | [#x1154-#x1155] | #x1159 | [#x115F-#x1161] | #x1163 | #x1165 | #x1167 | #x1169 | [#x116D-#x116E] | [#x1172-#x1173] | #x1175 | #x119E | #x11A8 | #x11AB | [#x11AE-#x11AF] | [#x11B7-#x11B8] | #x11BA | [#x11BC-#x11C2] | #x11EB | #x11F0 | #x11F9 | [#x1E00-#x1E9B] | [#x1EA0-#x1EF9] | [#x1F00-#x1F15] | [#x1F18-#x1F1D] | [#x1F20-#x1F45] | [#x1F48-#x1F4D] | [#x1F50-#x1F57] | #x1F59 | #x1F5B | #x1F5D | [#x1F5F-#x1F7D] | [#x1F80-#x1FB4] | [#x1FB6-#x1FBC] | #x1FBE | [#x1FC2-#x1FC4] | [#x1FC6-#x1FCC] | [#x1FD0-#x1FD3] | [#x1FD6-#x1FDB] | [#x1FE0-#x1FEC] | [#x1FF2-#x1FF4] | [#x1FF6-#x1FFC] | #x2126 | [#x212A-#x212B] | #x212E | [#x2180-#x2182] | [#x3041-#x3094] | [#x30A1-#x30FA] | [#x3105-#x312C] | [#xAC00-#xD7A3]"""
ideographic = """[#x4E00-#x9FA5] | #x3007 | [#x3021-#x3029]"""
combiningCharacter = """[#x0300-#x0345] | [#x0360-#x0361] | [#x0483-#x0486] | [#x0591-#x05A1] | [#x05A3-#x05B9] | [#x05BB-#x05BD] | #x05BF | [#x05C1-#x05C2] | #x05C4 | [#x064B-#x0652] | #x0670 | [#x06D6-#x06DC] | [#x06DD-#x06DF] | [#x06E0-#x06E4] | [#x06E7-#x06E8] | [#x06EA-#x06ED] | [#x0901-#x0903] | #x093C | [#x093E-#x094C] | #x094D | [#x0951-#x0954] | [#x0962-#x0963] | [#x0981-#x0983] | #x09BC | #x09BE | #x09BF | [#x09C0-#x09C4] | [#x09C7-#x09C8] | [#x09CB-#x09CD] | #x09D7 | [#x09E2-#x09E3] | #x0A02 | #x0A3C | #x0A3E | #x0A3F | [#x0A40-#x0A42] | [#x0A47-#x0A48] | [#x0A4B-#x0A4D] | [#x0A70-#x0A71] | [#x0A81-#x0A83] | #x0ABC | [#x0ABE-#x0AC5] | [#x0AC7-#x0AC9] | [#x0ACB-#x0ACD] | [#x0B01-#x0B03] | #x0B3C | [#x0B3E-#x0B43] | [#x0B47-#x0B48] | [#x0B4B-#x0B4D] | [#x0B56-#x0B57] | [#x0B82-#x0B83] | [#x0BBE-#x0BC2] | [#x0BC6-#x0BC8] | [#x0BCA-#x0BCD] | #x0BD7 | [#x0C01-#x0C03] | [#x0C3E-#x0C44] | [#x0C46-#x0C48] | [#x0C4A-#x0C4D] | [#x0C55-#x0C56] | [#x0C82-#x0C83] | [#x0CBE-#x0CC4] | [#x0CC6-#x0CC8] | [#x0CCA-#x0CCD] | [#x0CD5-#x0CD6] | [#x0D02-#x0D03] | [#x0D3E-#x0D43] | [#x0D46-#x0D48] | [#x0D4A-#x0D4D] | #x0D57 | #x0E31 | [#x0E34-#x0E3A] | [#x0E47-#x0E4E] | #x0EB1 | [#x0EB4-#x0EB9] | [#x0EBB-#x0EBC] | [#x0EC8-#x0ECD] | [#x0F18-#x0F19] | #x0F35 | #x0F37 | #x0F39 | #x0F3E | #x0F3F | [#x0F71-#x0F84] | [#x0F86-#x0F8B] | [#x0F90-#x0F95] | #x0F97 | [#x0F99-#x0FAD] | [#x0FB1-#x0FB7] | #x0FB9 | [#x20D0-#x20DC] | #x20E1 | [#x302A-#x302F] | #x3099 | #x309A"""
digit = """[#x0030-#x0039] | [#x0660-#x0669] | [#x06F0-#x06F9] | [#x0966-#x096F] | [#x09E6-#x09EF] | [#x0A66-#x0A6F] | [#x0AE6-#x0AEF] | [#x0B66-#x0B6F] | [#x0BE7-#x0BEF] | [#x0C66-#x0C6F] | [#x0CE6-#x0CEF] | [#x0D66-#x0D6F] | [#x0E50-#x0E59] | [#x0ED0-#x0ED9] | [#x0F20-#x0F29]"""
extender = """#x00B7 | #x02D0 | #x02D1 | #x0387 | #x0640 | #x0E46 | #x0EC6 | #x3005 | [#x3031-#x3035] | [#x309D-#x309E] | [#x30FC-#x30FE]"""
# Compose the XML 1.0 character-class descriptions defined above.
letter = " | ".join([baseChar, ideographic])
# Characters allowed anywhere in an XML Name; the first position is
# restricted separately by nameFirst below.
name = " | ".join([letter, digit, ".", "-", "_", combiningCharacter,
                   extender])
nameFirst = " | ".join([letter, "_"])
# Productions "#xHHHH" and "[#xHHHH-#xHHHH]" as used in the strings above.
reChar = re.compile(r"#x([\d|A-F]{4,4})")
reCharRange = re.compile(r"\[#x([\d|A-F]{4,4})-#x([\d|A-F]{4,4})\]")
def charStringToList(chars):
    # Parse a " | "-separated character-class description (the baseChar /
    # combiningCharacter / ... strings above) into a normalised list of
    # [start, end] code-point ranges.
    charRanges = [item.strip() for item in chars.split(" | ")]
    rv = []
    for item in charRanges:
        foundMatch = False
        for regexp in (reChar, reCharRange):
            match = regexp.match(item)
            if match is not None:
                rv.append([hexToInt(item) for item in match.groups()])
                # A single #xHHHH match yields one group; duplicate it so
                # every entry is a [start, end] pair.
                if len(rv[-1]) == 1:
                    rv[-1] = rv[-1]*2
                foundMatch = True
                break
        if not foundMatch:
            # Bare literal characters such as "." or "-".
            assert len(item) == 1
            rv.append([ord(item)] * 2)
    rv = normaliseCharList(rv)
    return rv
def normaliseCharList(charList):
    """Sort *charList* and merge overlapping or abutting [start, end] ranges.

    Bug fix: the merge step assigned the absorbed range's end
    unconditionally, so a range nested inside the current one shrank it —
    e.g. [[1, 10], [2, 3]] produced [[1, 3]], silently dropping 4-10.
    The merged end is now the maximum of the two ends.
    """
    charList = sorted(charList)
    for item in charList:
        assert item[1] >= item[0]
    rv = []
    i = 0
    while i < len(charList):
        j = 1
        rv.append(charList[i])
        # Absorb every following range that overlaps or directly abuts
        # the one just appended, widening it in place.
        while i + j < len(charList) and charList[i+j][0] <= rv[-1][1] + 1:
            rv[-1][1] = max(rv[-1][1], charList[i+j][1])
            j += 1
        i += j
    return rv
#We don't really support characters above the BMP :(
max_unicode = int("FFFF", 16)

def missingRanges(charList):
    """Return the [start, end] ranges of the BMP NOT covered by *charList*.

    *charList* must be a normalised (sorted, non-overlapping) range list,
    as produced by normaliseCharList.

    Bug fix: the first-range check compared the [start, end] pair itself
    against 0 (always true), so a list whose coverage starts at code
    point 0 produced a bogus leading [0, -1] range; compare the start
    value instead.
    """
    rv = []
    if charList[0][0] != 0:
        rv.append([0, charList[0][0] - 1])
    # Gaps between consecutive covered ranges.
    for i, item in enumerate(charList[:-1]):
        rv.append([item[1] + 1, charList[i + 1][0] - 1])
    if charList[-1][1] != max_unicode:
        rv.append([charList[-1][1] + 1, max_unicode])
    return rv
def listToRegexpStr(charList):
    # Convert a list of [start, end] code-point ranges into a regexp
    # character-class string like "[a-zX-Z]".
    # NOTE: uses the py2-only unichr builtin, like the rest of this
    # vendored module.
    rv = []
    for item in charList:
        if item[0] == item[1]:
            # Degenerate range: a single character.
            rv.append(escapeRegexp(unichr(item[0])))
        else:
            rv.append(escapeRegexp(unichr(item[0])) + "-" +
              escapeRegexp(unichr(item[1])))
    return "[%s]"%"".join(rv)
def hexToInt(hex_str):
    """Parse *hex_str* as a base-16 integer."""
    return int(hex_str, 16)
def escapeRegexp(string):
    """Backslash-escape regexp metacharacters in *string*.

    Bug fix: removed a leftover ``print string`` debug statement that
    dumped the partially-escaped string to stdout whenever the input
    contained a metacharacter (it also used py2-only print-statement
    syntax).
    """
    specialCharacters = (".", "^", "$", "*", "+", "?", "{", "}",
                         "[", "]", "|", "(", ")", "-")
    for char in specialCharacters:
        string = string.replace(char, "\\" + char)
    return string
#output from the above
nonXmlNameBMPRegexp = re.compile(u'[\x00-,/:-@\\[-\\^`\\{-\xb6\xb8-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u02cf\u02d2-\u02ff\u0346-\u035f\u0362-\u0385\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482\u0487-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u0590\u05a2\u05ba\u05be\u05c0\u05c3\u05c5-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u063f\u0653-\u065f\u066a-\u066f\u06b8-\u06b9\u06bf\u06cf\u06d4\u06e9\u06ee-\u06ef\u06fa-\u0900\u0904\u093a-\u093b\u094e-\u0950\u0955-\u0957\u0964-\u0965\u0970-\u0980\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09bd\u09c5-\u09c6\u09c9-\u09ca\u09ce-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09f2-\u0a01\u0a03-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a58\u0a5d\u0a5f-\u0a65\u0a75-\u0a80\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0adf\u0ae1-\u0ae5\u0af0-\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3b\u0b44-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b62-\u0b65\u0b70-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bd6\u0bd8-\u0be6\u0bf0-\u0c00\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c3d\u0c45\u0c49\u0c4e-\u0c54\u0c57-\u0c5f\u0c62-\u0c65\u0c70-\u0c81\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbd\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce2-\u0ce5\u0cf0-\u0d01\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d3d\u0d44-\u0d45\u0d49\u0d4e-\u0d56\u0d58-\u0d5f\u0d62-\u0d65\u0d70-\u0e00\u0e2f\u0e3b-\u0e3f\u0e4f\u0e5a-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0f17\u0f1a-\u0f1f\u0f2a-\u0f34\u0f36\u
0f38\u0f3a-\u0f3d\u0f48\u0f6a-\u0f70\u0f85\u0f8c-\u0f8f\u0f96\u0f98\u0fae-\u0fb0\u0fb8\u0fba-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u20cf\u20dd-\u20e0\u20e2-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3004\u3006\u3008-\u3020\u3030\u3036-\u3040\u3095-\u3098\u309b-\u309c\u309f-\u30a0\u30fb\u30ff-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
nonXmlNameFirstBMPRegexp = re.compile(u'[\x00-@\\[-\\^`\\{-\xbf\xd7\xf7\u0132-\u0133\u013f-\u0140\u0149\u017f\u01c4-\u01cc\u01f1-\u01f3\u01f6-\u01f9\u0218-\u024f\u02a9-\u02ba\u02c2-\u0385\u0387\u038b\u038d\u03a2\u03cf\u03d7-\u03d9\u03db\u03dd\u03df\u03e1\u03f4-\u0400\u040d\u0450\u045d\u0482-\u048f\u04c5-\u04c6\u04c9-\u04ca\u04cd-\u04cf\u04ec-\u04ed\u04f6-\u04f7\u04fa-\u0530\u0557-\u0558\u055a-\u0560\u0587-\u05cf\u05eb-\u05ef\u05f3-\u0620\u063b-\u0640\u064b-\u0670\u06b8-\u06b9\u06bf\u06cf\u06d4\u06d6-\u06e4\u06e7-\u0904\u093a-\u093c\u093e-\u0957\u0962-\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09db\u09de\u09e2-\u09ef\u09f2-\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a58\u0a5d\u0a5f-\u0a71\u0a75-\u0a84\u0a8c\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abc\u0abe-\u0adf\u0ae1-\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34-\u0b35\u0b3a-\u0b3c\u0b3e-\u0b5b\u0b5e\u0b62-\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bb6\u0bba-\u0c04\u0c0d\u0c11\u0c29\u0c34\u0c3a-\u0c5f\u0c62-\u0c84\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cdd\u0cdf\u0ce2-\u0d04\u0d0d\u0d11\u0d29\u0d3a-\u0d5f\u0d62-\u0e00\u0e2f\u0e31\u0e34-\u0e3f\u0e46-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eaf\u0eb1\u0eb4-\u0ebc\u0ebe-\u0ebf\u0ec5-\u0f3f\u0f48\u0f6a-\u109f\u10c6-\u10cf\u10f7-\u10ff\u1101\u1104\u1108\u110a\u110d\u1113-\u113b\u113d\u113f\u1141-\u114b\u114d\u114f\u1151-\u1153\u1156-\u1158\u115a-\u115e\u1162\u1164\u1166\u1168\u116a-\u116c\u116f-\u1171\u1174\u1176-\u119d\u119f-\u11a7\u11a9-\u11aa\u11ac-\u11ad\u11b0-\u11b6\u11b9\u11bb\u11c3-\u11ea\u11ec-\u11ef\u11f1-\u11f8\u11fa-\u1dff\u1e9c-\u1e9f\u1efa-\u1eff\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fbd\u1fbf-\u1fc1\u1fc5\u1fcd-\u1fcf\u1fd4-\u1fd5\u1fdc-\u1fdf\u1fed-\u1ff1\u1ff5\u1ffd-\u2125\u2127-\u2129\u212c-\u212d\u212f-\u217f\u2183-\u3006\u3008-\u3020\u302a-\u
3040\u3095-\u30a0\u30fb-\u3104\u312d-\u4dff\u9fa6-\uabff\ud7a4-\uffff]')
class InfosetFilter(object):
    """Coerce names and character data that are legal in HTML but not in
    XML into XML-safe equivalents, optionally dropping or rewriting
    constructs (xmlns attributes, "--" in comments, form feeds) that the
    XML infoset forbids."""

    # Matches the escape sequences produced by escapeChar below:
    # "U" followed by exactly five uppercase hexadecimal digits.
    replacementRegexp = re.compile(r"U[\dA-F]{5,5}")

    def __init__(self, replaceChars = None,
                 dropXmlnsLocalName = False,
                 dropXmlnsAttrNs = False,
                 preventDoubleDashComments = False,
                 preventDashAtCommentEnd = False,
                 replaceFormFeedCharacters = True):
        # NOTE(review): replaceChars is accepted but never stored or used
        # anywhere in this class -- confirm whether any caller relies on it.
        self.dropXmlnsLocalName = dropXmlnsLocalName
        self.dropXmlnsAttrNs = dropXmlnsAttrNs
        self.preventDoubleDashComments = preventDoubleDashComments
        self.preventDashAtCommentEnd = preventDashAtCommentEnd
        self.replaceFormFeedCharacters = replaceFormFeedCharacters
        # Lazily filled cache: original character -> "Uxxxxx" escape.
        self.replaceCache = {}

    def coerceAttribute(self, name, namespace=None):
        """Return an XML-safe attribute name, or None when the attribute
        should be dropped entirely."""
        if self.dropXmlnsLocalName and name.startswith("xmlns:"):
            #Need a datalosswarning here
            return None
        elif (self.dropXmlnsAttrNs and
              namespace == "http://www.w3.org/2000/xmlns/"):
            return None
        else:
            return self.toXmlName(name)

    def coerceElement(self, name, namespace=None):
        """Return an XML-safe element name."""
        return self.toXmlName(name)

    def coerceComment(self, data):
        """Rewrite comment text so it cannot contain "--" (illegal in XML)."""
        if self.preventDoubleDashComments:
            # Loop: a single replace can expose a new "--" (e.g. "---").
            while "--" in data:
                data = data.replace("--", "- -")
        return data

    def coerceCharacters(self, data):
        """Rewrite character data; currently only replaces form feeds."""
        if self.replaceFormFeedCharacters:
            data = data.replace("\x0C", " ")
        #Other non-xml characters
        return data

    def toXmlName(self, name):
        """Escape every character of *name* that is not legal in an XML
        Name.  The first character and the remainder are checked against
        different character classes, mirroring the XML Name production."""
        nameFirst = name[0]
        nameRest = name[1:]
        m = nonXmlNameFirstBMPRegexp.match(nameFirst)
        if m:
            nameFirstOutput = self.getReplacementCharacter(nameFirst)
        else:
            nameFirstOutput = nameFirst
        nameRestOutput = nameRest
        # nonXmlNameBMPRegexp (non-initial characters) is defined elsewhere
        # in this module.
        replaceChars = set(nonXmlNameBMPRegexp.findall(nameRest))
        for char in replaceChars:
            replacement = self.getReplacementCharacter(char)
            nameRestOutput = nameRestOutput.replace(char, replacement)
        return nameFirstOutput + nameRestOutput

    def getReplacementCharacter(self, char):
        """Return the "Uxxxxx" escape for *char*, creating and caching it
        on first use."""
        if char in self.replaceCache:
            replacement = self.replaceCache[char]
        else:
            replacement = self.escapeChar(char)
        return replacement

    def fromXmlName(self, name):
        """Inverse of toXmlName: expand every "Uxxxxx" escape back to the
        original character.  NOTE(review): a literal "U" + 5 hex digits in
        an unescaped name would also be expanded -- confirm acceptable."""
        for item in set(self.replacementRegexp.findall(name)):
            name = name.replace(item, self.unescapeChar(item))
        return name

    def escapeChar(self, char):
        # Five hex digits cover the BMP range matched by the tables above.
        replacement = "U" + hex(ord(char))[2:].upper().rjust(5, "0")
        self.replaceCache[char] = replacement
        return replacement

    def unescapeChar(self, charcode):
        # unichr: this module targets Python 2.
        return unichr(int(charcode[1:], 16))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/ihatexml.py
|
ihatexml.py
|
# Compatibility shims for very old Python versions (2.3 / 2.4).
# The bare `except:` clauses of the original would also have swallowed
# SystemExit / KeyboardInterrupt; each probe now catches only the
# exception that the missing feature actually raises.

try:
    frozenset
except NameError:
    # Python 2.3: set/frozenset live in the sets module.
    from sets import Set as set
    from sets import ImmutableSet as frozenset

try:
    any
except NameError:
    # Python 2.4 and earlier lack the any() builtin.
    def any(iterable):
        for element in iterable:
            if element:
                return True
        return False

try:
    "abc".startswith(("a", "b"))
    def startswithany(str, prefixes):
        """Return True if *str* starts with any of *prefixes* (a tuple)."""
        return str.startswith(prefixes)
except TypeError:
    # Python 2.4 doesn't accept a tuple as argument to string startswith
    def startswithany(str, prefixes):
        """Return True if *str* starts with any of *prefixes* (a tuple)."""
        for prefix in prefixes:
            if str.startswith(prefix):
                return True
        return False
import sys
import inputstream
import tokenizer
import treebuilders
from treebuilders._base import Marker
from treebuilders import simpletree
import utils
from constants import spaceCharacters, asciiUpper2Lower
from constants import scopingElements, formattingElements, specialElements
from constants import headingElements, tableInsertModeElements
from constants import cdataElements, rcdataElements, voidElements
from constants import tokenTypes, ReparseException, namespaces
def parse(doc, treebuilder="simpletree", encoding=None,
          namespaceHTMLElements=True):
    """Parse *doc* as a complete HTML document and return the tree built
    by the named treebuilder."""
    builderClass = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builderClass,
                        namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="simpletree", encoding=None,
                  namespaceHTMLElements=True):
    """Parse *doc* as an HTML fragment rooted in *container* and return
    the resulting tree fragment."""
    builderClass = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builderClass,
                        namespaceHTMLElements=namespaceHTMLElements)
    return parser.parseFragment(doc, container=container, encoding=encoding)
class HTMLParser(object):
    """HTML parser. Generates a tree structure from a stream of (possibly
    malformed) HTML"""

    def __init__(self, tree=simpletree.TreeBuilder,
                 tokenizer=tokenizer.HTMLTokenizer, strict=False,
                 namespaceHTMLElements=True):
        """
        strict - raise an exception when a parse error is encountered

        tree - a treebuilder class controlling the type of tree that will be
        returned. Built in treebuilders can be accessed through
        html5lib.treebuilders.getTreeBuilder(treeType)

        tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced for e.g. a sanitizer which converts some tags to
        text
        """
        # Raise an exception on the first error encountered
        self.strict = strict

        self.tree = tree(namespaceHTMLElements)
        self.tokenizer_class = tokenizer
        self.errors = []

        # One phase object per insertion mode; self.phase always points at
        # the currently active one.
        self.phases = {
            "initial": InitialPhase(self, self.tree),
            "beforeHtml": BeforeHtmlPhase(self, self.tree),
            "beforeHead": BeforeHeadPhase(self, self.tree),
            "inHead": InHeadPhase(self, self.tree),
            # XXX "inHeadNoscript": InHeadNoScriptPhase(self, self.tree),
            "afterHead": AfterHeadPhase(self, self.tree),
            "inBody": InBodyPhase(self, self.tree),
            "text": TextPhase(self, self.tree),
            "inTable": InTablePhase(self, self.tree),
            "inTableText": InTableTextPhase(self, self.tree),
            "inCaption": InCaptionPhase(self, self.tree),
            "inColumnGroup": InColumnGroupPhase(self, self.tree),
            "inTableBody": InTableBodyPhase(self, self.tree),
            "inRow": InRowPhase(self, self.tree),
            "inCell": InCellPhase(self, self.tree),
            "inSelect": InSelectPhase(self, self.tree),
            "inSelectInTable": InSelectInTablePhase(self, self.tree),
            "inForeignContent": InForeignContentPhase(self, self.tree),
            "afterBody": AfterBodyPhase(self, self.tree),
            "inFrameset": InFramesetPhase(self, self.tree),
            "afterFrameset": AfterFramesetPhase(self, self.tree),
            "afterAfterBody": AfterAfterBodyPhase(self, self.tree),
            "afterAfterFrameset": AfterAfterFramesetPhase(self, self.tree),
            # XXX after after frameset
        }

    def _parse(self, stream, innerHTML=False, container="div",
               encoding=None, parseMeta=True, useChardet=True, **kwargs):
        """Shared driver behind parse() and parseFragment()."""
        self.innerHTMLMode = innerHTML
        self.container = container
        self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
                                              parseMeta=parseMeta,
                                              useChardet=useChardet, **kwargs)
        self.reset()

        while True:
            try:
                self.mainLoop()
                break
            except ReparseException:
                # The input stream requested a restart (e.g. a late <meta>
                # charset change); reset all state and parse again.
                self.reset()

    def reset(self):
        """Reset tree, phase and bookkeeping to the pre-parse state."""
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            # Put the tokenizer into the state it would be in immediately
            # after the container element's start tag.
            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False
            self.phase = self.phases["initial"]

        self.lastPhase = None
        self.secondaryPhase = None
        self.beforeRCDataPhase = None
        self.framesetOK = True

    def mainLoop(self):
        """Pump tokens from the tokenizer through the active phase until EOF."""
        # Cache the token-type codes in locals for the dispatch below.
        # (The original re-assigned all six names a second time right after
        # this unpack; that duplicate dead code has been removed.)
        (CharactersToken,
         SpaceCharactersToken,
         StartTagToken,
         EndTagToken,
         CommentToken,
         DoctypeToken) = (tokenTypes["Characters"],
                          tokenTypes["SpaceCharacters"],
                          tokenTypes["StartTag"],
                          tokenTypes["EndTag"],
                          tokenTypes["Comment"],
                          tokenTypes["Doctype"])

        for token in self.normalizedTokens():
            tokenType = token["type"]
            if tokenType == CharactersToken:
                self.phase.processCharacters(token)
            elif tokenType == SpaceCharactersToken:
                self.phase.processSpaceCharacters(token)
            elif tokenType == StartTagToken:
                # A start-tag handler acknowledges a trailing solidus by
                # setting this flag.  NOTE(review): some handlers set
                # token["selfClosingAcknowledged"] instead of this flag --
                # confirm which mechanism is intended.
                self.selfClosingAcknowledged = False
                self.phase.processStartTag(token)
                if (token["selfClosing"]
                    and not self.selfClosingAcknowledged):
                    self.parseError("non-void-element-with-trailing-solidus",
                                    {"name": token["name"]})
            elif tokenType == EndTagToken:
                self.phase.processEndTag(token)
            elif tokenType == CommentToken:
                self.phase.processComment(token)
            elif tokenType == DoctypeToken:
                self.phase.processDoctype(token)
            else:
                # ParseError tokens carry the error code in "data".
                self.parseError(token["data"], token.get("datavars", {}))

        # When the loop finishes it's EOF
        self.phase.processEOF()

    def normalizedTokens(self):
        """Yield tokenizer tokens after HTML5-specific normalization."""
        for token in self.tokenizer:
            yield self.normalizeToken(token)

    def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
        """Parse a HTML document into a well-formed tree

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, innerHTML=False, encoding=encoding,
                    parseMeta=parseMeta, useChardet=useChardet)
        return self.tree.getDocument()

    def parseFragment(self, stream, container="div", encoding=None,
                      parseMeta=False, useChardet=True):
        """Parse a HTML fragment into a well-formed tree fragment

        container - name of the element we're setting the innerHTML property
        if set to None, default to 'div'

        stream - a filelike object or string containing the HTML to be parsed

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        """
        self._parse(stream, True, container=container, encoding=encoding)
        return self.tree.getFragment()

    def parseError(self, errorcode="XXX-undefined-error", datavars=None):
        # XXX The idea is to make errorcode mandatory.
        # datavars defaults to None rather than {} so a shared mutable
        # default cannot be stored into self.errors and mutated later.
        if datavars is None:
            datavars = {}
        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
        if self.strict:
            raise ParseError

    def normalizeToken(self, token):
        """ HTML5 specific normalizations to the token stream """
        if token["type"] == tokenTypes["StartTag"]:
            # Reverse so that, on duplicates, the FIRST occurrence of an
            # attribute wins when the pairs are folded into a dict.
            token["data"] = dict(token["data"][::-1])
        return token

    def adjustMathMLAttributes(self, token):
        """Fix the case of MathML attributes that lose it in tokenizing."""
        replacements = {"definitionurl": "definitionURL"}
        for k, v in replacements.iteritems():
            if k in token["data"]:
                token["data"][v] = token["data"][k]
                del token["data"][k]

    def adjustSVGAttributes(self, token):
        """Restore the mixed-case spelling of SVG attributes."""
        replacements = {
            "attributename" : "attributeName",
            "attributetype" : "attributeType",
            "basefrequency" : "baseFrequency",
            "baseprofile" : "baseProfile",
            "calcmode" : "calcMode",
            "clippathunits" : "clipPathUnits",
            "contentscripttype" : "contentScriptType",
            "contentstyletype" : "contentStyleType",
            "diffuseconstant" : "diffuseConstant",
            "edgemode" : "edgeMode",
            "externalresourcesrequired" : "externalResourcesRequired",
            "filterres" : "filterRes",
            "filterunits" : "filterUnits",
            "glyphref" : "glyphRef",
            "gradienttransform" : "gradientTransform",
            "gradientunits" : "gradientUnits",
            "kernelmatrix" : "kernelMatrix",
            "kernelunitlength" : "kernelUnitLength",
            "keypoints" : "keyPoints",
            "keysplines" : "keySplines",
            "keytimes" : "keyTimes",
            "lengthadjust" : "lengthAdjust",
            "limitingconeangle" : "limitingConeAngle",
            "markerheight" : "markerHeight",
            "markerunits" : "markerUnits",
            "markerwidth" : "markerWidth",
            "maskcontentunits" : "maskContentUnits",
            "maskunits" : "maskUnits",
            "numoctaves" : "numOctaves",
            "pathlength" : "pathLength",
            "patterncontentunits" : "patternContentUnits",
            "patterntransform" : "patternTransform",
            "patternunits" : "patternUnits",
            "pointsatx" : "pointsAtX",
            "pointsaty" : "pointsAtY",
            "pointsatz" : "pointsAtZ",
            "preservealpha" : "preserveAlpha",
            "preserveaspectratio" : "preserveAspectRatio",
            "primitiveunits" : "primitiveUnits",
            "refx" : "refX",
            "refy" : "refY",
            "repeatcount" : "repeatCount",
            "repeatdur" : "repeatDur",
            "requiredextensions" : "requiredExtensions",
            "requiredfeatures" : "requiredFeatures",
            "specularconstant" : "specularConstant",
            "specularexponent" : "specularExponent",
            "spreadmethod" : "spreadMethod",
            "startoffset" : "startOffset",
            "stddeviation" : "stdDeviation",
            "stitchtiles" : "stitchTiles",
            "surfacescale" : "surfaceScale",
            "systemlanguage" : "systemLanguage",
            "tablevalues" : "tableValues",
            "targetx" : "targetX",
            "targety" : "targetY",
            "textlength" : "textLength",
            "viewbox" : "viewBox",
            "viewtarget" : "viewTarget",
            "xchannelselector" : "xChannelSelector",
            "ychannelselector" : "yChannelSelector",
            "zoomandpan" : "zoomAndPan"
        }
        for originalName in token["data"].keys():
            if originalName in replacements:
                svgName = replacements[originalName]
                token["data"][svgName] = token["data"][originalName]
                del token["data"][originalName]

    def adjustForeignAttributes(self, token):
        """Rewrite namespaced attributes on foreign (SVG/MathML) elements
        to (prefix, localName, namespace) keys."""
        replacements = {
            "xlink:actuate":("xlink", "actuate", namespaces["xlink"]),
            "xlink:arcrole":("xlink", "arcrole", namespaces["xlink"]),
            "xlink:href":("xlink", "href", namespaces["xlink"]),
            "xlink:role":("xlink", "role", namespaces["xlink"]),
            "xlink:show":("xlink", "show", namespaces["xlink"]),
            "xlink:title":("xlink", "title", namespaces["xlink"]),
            "xlink:type":("xlink", "type", namespaces["xlink"]),
            "xml:base":("xml", "base", namespaces["xml"]),
            "xml:lang":("xml", "lang", namespaces["xml"]),
            "xml:space":("xml", "space", namespaces["xml"]),
            "xmlns":(None, "xmlns", namespaces["xmlns"]),
            "xmlns:xlink":("xmlns", "xlink", namespaces["xmlns"])
        }
        # Snapshot the keys: the dict is mutated inside the loop, and
        # iterating .iterkeys() while deleting raises RuntimeError.
        for originalName in list(token["data"].keys()):
            if originalName in replacements:
                foreignName = replacements[originalName]
                token["data"][foreignName] = token["data"][originalName]
                del token["data"][originalName]

    def resetInsertionMode(self):
        # The name of this method is mostly historical. (It's also used in the
        # specification.)
        last = False
        newModes = {
            "select":"inSelect",
            "td":"inCell",
            "th":"inCell",
            "tr":"inRow",
            "tbody":"inTableBody",
            "thead":"inTableBody",
            "tfoot":"inTableBody",
            "caption":"inCaption",
            "colgroup":"inColumnGroup",
            "table":"inTable",
            "head":"inBody",
            "body":"inBody",
            "frameset":"inFrameset"
        }
        # Walk the open-element stack from the innermost element outwards
        # and pick the first insertion mode that matches.
        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            if node == self.tree.openElements[0]:
                last = True
                if nodeName not in ['td', 'th']:
                    # XXX
                    assert self.innerHTML
                    nodeName = self.innerHTML
            # Check for conditions that should only happen in the innerHTML
            # case
            if nodeName in ("select", "colgroup", "head", "frameset"):
                # XXX
                assert self.innerHTML

            if nodeName in newModes:
                self.phase = self.phases[newModes[nodeName]]
                break
            elif node.namespace in (namespaces["mathml"], namespaces["svg"]):
                self.phase = self.phases["inForeignContent"]
                self.secondaryPhase = self.phases["inBody"]
                break
            elif nodeName == "html":
                if self.tree.headPointer is None:
                    self.phase = self.phases["beforeHead"]
                else:
                    self.phase = self.phases["afterHead"]
                break
            elif last:
                self.phase = self.phases["inBody"]
                break

    def parseRCDataRawtext(self, token, contentType):
        """Generic RCDATA/RAWTEXT Parsing algorithm

        contentType - RCDATA or RAWTEXT
        """
        assert contentType in ("RAWTEXT", "RCDATA")

        element = self.tree.insertElement(token)

        if contentType == "RAWTEXT":
            self.tokenizer.state = self.tokenizer.rawtextState
        else:
            self.tokenizer.state = self.tokenizer.rcdataState

        # Remember where to return to once the text content has been consumed.
        self.originalPhase = self.phase
        self.phase = self.phases["text"]
class Phase(object):
    """Base class for helper object that implements each phase of processing
    """
    # Order should be (they can be omitted):
    # * EOF
    # * Comment
    # * Doctype
    # * SpaceCharacters
    # * Characters
    # * StartTag
    #   - startTag* methods
    # * EndTag
    #   - endTag* methods
    def __init__(self, parser, tree):
        self.parser = parser
        self.tree = tree

    def processEOF(self):
        # Every concrete phase must decide what end-of-file means for it.
        raise NotImplementedError

    def processComment(self, token):
        # For most phases the following is correct. Where it's not it will be
        # overridden.
        self.tree.insertComment(token, self.tree.openElements[-1])

    def processDoctype(self, token):
        # A doctype anywhere after the "initial" phase is a parse error.
        self.parser.parseError("unexpected-doctype")

    def processCharacters(self, token):
        self.tree.insertText(token["data"])

    def processSpaceCharacters(self, token):
        self.tree.insertText(token["data"])

    def processStartTag(self, token):
        # Dispatch on tag name; startTagHandler is a MethodDispatcher
        # built in each subclass's __init__.
        self.startTagHandler[token["name"]](token)

    def startTagHtml(self, token):
        if self.parser.firstStartTag == False and token["name"] == "html":
            self.parser.parseError("non-html-root")
        # XXX Need a check here to see if the first start tag token emitted is
        # this token... If it's not, invoke self.parser.parseError().
        # Merge the token's attributes into the existing root element
        # without overwriting any that are already present.
        for attr, value in token["data"].iteritems():
            if attr not in self.tree.openElements[0].attributes:
                self.tree.openElements[0].attributes[attr] = value
        self.parser.firstStartTag = False

    def processEndTag(self, token):
        # Dispatch on tag name; endTagHandler is built in each subclass.
        self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
    """The "initial" insertion mode: everything up to and including the
    doctype.  The doctype's public/system identifiers determine the
    document compatibility mode ("quirks" / "limited quirks" /
    "no quirks")."""

    def processSpaceCharacters(self, token):
        # Whitespace before the doctype is ignored.
        pass

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        correct = token["correct"]

        if (name != "html" or publicId != None or
            systemId != None and systemId != "about:legacy-compat"):
            self.parser.parseError("unknown-doctype")

        if publicId is None:
            publicId = ""

        self.tree.insertDoctype(token)

        if publicId != "":
            # Public identifiers are compared case-insensitively.
            publicId = publicId.translate(asciiUpper2Lower)

        # The prefix/exact-match tables below are the HTML5 specification's
        # quirks-mode detection lists.
        if (not correct or token["name"] != "html"
            or startswithany(publicId,
            ("+//silmaril//dtd html pro v0r11 19970101//",
             "-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
             "-//as//dtd html 3.0 aswedit + extensions//",
             "-//ietf//dtd html 2.0 level 1//",
             "-//ietf//dtd html 2.0 level 2//",
             "-//ietf//dtd html 2.0 strict level 1//",
             "-//ietf//dtd html 2.0 strict level 2//",
             "-//ietf//dtd html 2.0 strict//",
             "-//ietf//dtd html 2.0//",
             "-//ietf//dtd html 2.1e//",
             "-//ietf//dtd html 3.0//",
             "-//ietf//dtd html 3.2 final//",
             "-//ietf//dtd html 3.2//",
             "-//ietf//dtd html 3//",
             "-//ietf//dtd html level 0//",
             "-//ietf//dtd html level 1//",
             "-//ietf//dtd html level 2//",
             "-//ietf//dtd html level 3//",
             "-//ietf//dtd html strict level 0//",
             "-//ietf//dtd html strict level 1//",
             "-//ietf//dtd html strict level 2//",
             "-//ietf//dtd html strict level 3//",
             "-//ietf//dtd html strict//",
             "-//ietf//dtd html//",
             "-//metrius//dtd metrius presentational//",
             "-//microsoft//dtd internet explorer 2.0 html strict//",
             "-//microsoft//dtd internet explorer 2.0 html//",
             "-//microsoft//dtd internet explorer 2.0 tables//",
             "-//microsoft//dtd internet explorer 3.0 html strict//",
             "-//microsoft//dtd internet explorer 3.0 html//",
             "-//microsoft//dtd internet explorer 3.0 tables//",
             "-//netscape comm. corp.//dtd html//",
             "-//netscape comm. corp.//dtd strict html//",
             "-//o'reilly and associates//dtd html 2.0//",
             "-//o'reilly and associates//dtd html extended 1.0//",
             "-//o'reilly and associates//dtd html extended relaxed 1.0//",
             "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
             "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
             "-//spyglass//dtd html 2.0 extended//",
             "-//sq//dtd html 2.0 hotmetal + extensions//",
             "-//sun microsystems corp.//dtd hotjava html//",
             "-//sun microsystems corp.//dtd hotjava strict html//",
             "-//w3c//dtd html 3 1995-03-24//",
             "-//w3c//dtd html 3.2 draft//",
             "-//w3c//dtd html 3.2 final//",
             "-//w3c//dtd html 3.2//",
             "-//w3c//dtd html 3.2s draft//",
             "-//w3c//dtd html 4.0 frameset//",
             "-//w3c//dtd html 4.0 transitional//",
             "-//w3c//dtd html experimental 19960712//",
             "-//w3c//dtd html experimental 970421//",
             "-//w3c//dtd w3 html//",
             "-//w3o//dtd w3 html 3.0//",
             "-//webtechs//dtd mozilla html 2.0//",
             "-//webtechs//dtd mozilla html//"))
            or publicId in
            ("-//w3o//dtd w3 html strict 3.0//en//",
             "-/w3c/dtd html 4.0 transitional/en",
             "html")
            or startswithany(publicId,
            ("-//w3c//dtd html 4.01 frameset//",
             "-//w3c//dtd html 4.01 transitional//")) and
            systemId == None
            or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
            self.parser.compatMode = "quirks"
        elif (startswithany(publicId,
            ("-//w3c//dtd xhtml 1.0 frameset//",
             "-//w3c//dtd xhtml 1.0 transitional//"))
            or startswithany(publicId,
            ("-//w3c//dtd html 4.01 frameset//",
             "-//w3c//dtd html 4.01 transitional//")) and
            systemId != None):
            self.parser.compatMode = "limited quirks"

        self.parser.phase = self.parser.phases["beforeHtml"]

    def anythingElse(self):
        # No doctype seen: the document is in quirks mode.
        self.parser.compatMode = "quirks"
        self.parser.phase = self.parser.phases["beforeHtml"]

    def processCharacters(self, token):
        self.parser.parseError("expected-doctype-but-got-chars")
        self.anythingElse()
        self.parser.phase.processCharacters(token)

    def processStartTag(self, token):
        self.parser.parseError("expected-doctype-but-got-start-tag",
                               {"name": token["name"]})
        self.anythingElse()
        self.parser.phase.processStartTag(token)

    def processEndTag(self, token):
        self.parser.parseError("expected-doctype-but-got-end-tag",
                               {"name": token["name"]})
        self.anythingElse()
        self.parser.phase.processEndTag(token)

    def processEOF(self):
        self.parser.parseError("expected-doctype-but-got-eof")
        self.anythingElse()
        self.parser.phase.processEOF()
class BeforeHtmlPhase(Phase):
    """The "before html" insertion mode: only the root <html> element may
    be created here; any other construct forces one into existence and is
    then reprocessed in the new phase."""

    def insertHtmlElement(self):
        # Create the root <html> node and advance to "before head".
        # (Also called directly by HTMLParser.reset in innerHTML mode.)
        self.tree.insertRoot(impliedTagToken("html", "StartTag"))
        self.parser.phase = self.parser.phases["beforeHead"]

    def processEOF(self):
        self.insertHtmlElement()
        self.parser.phase.processEOF()

    def processComment(self, token):
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        # Whitespace before <html> is dropped.
        pass

    def processCharacters(self, token):
        self.insertHtmlElement()
        self.parser.phase.processCharacters(token)

    def processStartTag(self, token):
        if token["name"] == "html":
            self.parser.firstStartTag = True
        self.insertHtmlElement()
        self.parser.phase.processStartTag(token)

    def processEndTag(self, token):
        if token["name"] in ("head", "body", "html", "br"):
            self.insertHtmlElement()
            self.parser.phase.processEndTag(token)
        else:
            self.parser.parseError("unexpected-end-tag-before-html",
                                   {"name": token["name"]})
class BeforeHeadPhase(Phase):
    """The "before head" insertion mode: everything except an explicit
    <head> (or <html>) start tag forces an implied <head> into existence
    and is then reprocessed."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("head", "body", "html", "br"), self.endTagImplyHead)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        self.parser.phase.processEOF()

    def processSpaceCharacters(self, token):
        # Whitespace before <head> is ignored.
        pass

    def processCharacters(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        self.parser.phase.processCharacters(token)

    def startTagHtml(self, token):
        self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        self.tree.insertElement(token)
        self.tree.headPointer = self.tree.openElements[-1]
        self.parser.phase = self.parser.phases["inHead"]

    def startTagOther(self, token):
        # Imply <head>, then reprocess the token in the "in head" phase.
        self.startTagHead(impliedTagToken("head", "StartTag"))
        self.parser.phase.processStartTag(token)

    def endTagImplyHead(self, token):
        self.startTagHead(impliedTagToken("head", "StartTag"))
        self.parser.phase.processEndTag(token)

    def endTagOther(self, token):
        self.parser.parseError("end-tag-after-implied-root",
                               {"name": token["name"]})
class InHeadPhase(Phase):
    """The "in head" insertion mode."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("title", self.startTagTitle),
            (("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
            ("script", self.startTagScript),
            (("base", "link", "command"),
             self.startTagBaseLinkCommand),
            ("meta", self.startTagMeta),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther

        self.endTagHandler = utils.MethodDispatcher([
            ("head", self.endTagHead),
            (("br", "html", "body"), self.endTagHtmlBodyBr)
        ])
        self.endTagHandler.default = self.endTagOther

    # helpers
    def appendToHead(self, element):
        if self.tree.headPointer is not None:
            self.tree.headPointer.appendChild(element)
        else:
            # Only reachable when parsing a fragment rooted in <head>.
            assert self.parser.innerHTML
            # BUG FIX: was self.tree.openElementsw (typo -> AttributeError).
            self.tree.openElements[-1].appendChild(element)

    def _acknowledgeSelfClosing(self, token):
        # Void elements legitimately carry a trailing solidus.  mainLoop
        # checks the parser attribute, so set it (BUG FIX: only the token
        # key was set before, making every self-closing head element emit a
        # spurious "non-void-element-with-trailing-solidus" error); the
        # token flag is kept as well for compatibility.
        token["selfClosingAcknowledged"] = True
        self.parser.selfClosingAcknowledged = True

    # the real thing
    def processEOF(self):
        self.anythingElse()
        self.parser.phase.processEOF()

    def processCharacters(self, token):
        self.anythingElse()
        self.parser.phase.processCharacters(token)

    def startTagHtml(self, token):
        self.parser.phases["inBody"].processStartTag(token)

    def startTagHead(self, token):
        self.parser.parseError("two-heads-are-not-better-than-one")

    def startTagBaseLinkCommand(self, token):
        # Void elements: insert, then immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        self._acknowledgeSelfClosing(token)

    def startTagMeta(self, token):
        self.tree.insertElement(token)
        self.tree.openElements.pop()
        self._acknowledgeSelfClosing(token)

        attributes = token["data"]
        if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
            if "charset" in attributes:
                self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
            elif "content" in attributes:
                # Encoding it as UTF-8 here is a hack, as really we should pass
                # the abstract Unicode string, and just use the
                # ContentAttrParser on that, but using UTF-8 allows all chars
                # to be encoded and as a ASCII-superset works.
                data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
                parser = inputstream.ContentAttrParser(data)
                codec = parser.parse()
                self.parser.tokenizer.stream.changeEncoding(codec)

    def startTagTitle(self, token):
        self.parser.parseRCDataRawtext(token, "RCDATA")

    def startTagNoScriptNoFramesStyle(self, token):
        # Need to decide whether to implement the scripting-disabled case
        self.parser.parseRCDataRawtext(token, "RAWTEXT")

    def startTagScript(self, token):
        self.tree.insertElement(token)
        self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
        self.parser.originalPhase = self.parser.phase
        self.parser.phase = self.parser.phases["text"]

    def startTagOther(self, token):
        self.anythingElse()
        self.parser.phase.processStartTag(token)

    def endTagHead(self, token):
        # (Consistency fix: use self.tree like every sibling method;
        # Phase.__init__ stores the same object as self.parser.tree.)
        node = self.tree.openElements.pop()
        assert node.name == "head", "Expected head got %s" % node.name
        self.parser.phase = self.parser.phases["afterHead"]

    def endTagHtmlBodyBr(self, token):
        self.anythingElse()
        self.parser.phase.processEndTag(token)

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def anythingElse(self):
        # Act as if </head> had been seen; the caller then reprocesses the
        # current token in the "after head" phase.
        self.endTagHead(impliedTagToken("head"))
class AfterHeadPhase(Phase):
    """The "after head" insertion mode: between </head> and <body>."""

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("body", self.startTagBody),
            ("frameset", self.startTagFrameset),
            (("base", "link", "meta", "noframes", "script", "style", "title"),
             self.startTagFromHead),
            ("head", self.startTagHead)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
                                                      self.endTagHtmlBodyBr)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.anythingElse()
        self.parser.phase.processEOF()

    def processCharacters(self, token):
        self.anythingElse()
        self.parser.phase.processCharacters(token)

    def startTagBody(self, token):
        # An explicit <body> rules out a later <frameset> substitution.
        self.parser.framesetOK = False
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inBody"]

    def startTagFrameset(self, token):
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inFrameset"]

    def startTagFromHead(self, token):
        # Head-only element after </head>: reopen the head, process the
        # tag there, then take the head element back off the open stack.
        self.parser.parseError("unexpected-start-tag-out-of-my-head",
                               {"name": token["name"]})
        self.tree.openElements.append(self.tree.headPointer)
        self.parser.phases["inHead"].processStartTag(token)
        for node in self.tree.openElements[::-1]:
            if node.name == "head":
                self.tree.openElements.remove(node)
                break

    def startTagHead(self, token):
        self.parser.parseError("unexpected-start-tag", {"name":token["name"]})

    def startTagOther(self, token):
        self.anythingElse()
        self.parser.phase.processStartTag(token)

    def endTagHtmlBodyBr(self, token):
        self.anythingElse()
        self.parser.phase.processEndTag(token)

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag", {"name":token["name"]})

    def anythingElse(self):
        # Anything else implies <body>.
        self.tree.insertElement(impliedTagToken("body", "StartTag"))
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
#Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "command", "link", "meta", "noframes", "script", "style",
"title"), self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "datagrid",
"details", "dir", "div", "dl", "fieldset", "figure",
"footer", "header", "hgroup", "menu", "nav", "ol", "p",
"section", "ul"),
self.startTagCloseP),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext",self.startTagPlaintext),
(headingElements, self.startTagHeading),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"),self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "basefont", "bgsound", "br", "embed", "img", "input",
"keygen", "spacer", "wbr"), self.startTagVoidFormatting),
(("param", "source"), self.startTagParamSource),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body",self.endTagBody),
("html",self.endTagHtml),
(("address", "article", "aside", "blockquote", "center", "datagrid",
"details", "dir", "div", "dl", "fieldset", "figure",
"footer", "header", "hgroup", "listing", "menu", "nav", "ol", "pre",
"section", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p",self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "button", "marquee", "object"), self.endTagAppletButtonMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(
self.tree.openElements[-1])
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
#Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
for attr, value in token["data"].iteritems():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
    def startTagFrameset(self, token):
        """<frameset> in body: only honoured while framesetOK is still set;
        the existing body is then discarded and parsing continues in the
        inFrameset phase."""
        self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
        if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
            assert self.parser.innerHTML
        elif not self.parser.framesetOK:
            pass
        else:
            # Remove the body element and pop everything back to <html>.
            if self.tree.openElements[1].parent:
                self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
            while self.tree.openElements[-1].name != "html":
                self.tree.openElements.pop()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
    def startTagPreListing(self, token):
        """<pre>/<listing>: close an open <p>, then arrange for the next
        whitespace token to drop its leading newline."""
        if self.tree.elementInScope("p"):
            self.endTagP(impliedTagToken("p"))
        self.tree.insertElement(token)
        self.parser.framesetOK = False
        self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError(u"unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p"):
self.endTagP("p")
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
    def startTagListItem(self, token):
        """<li>/<dd>/<dt>: implicitly close any open item of the same
        family, close an open <p>, then insert the new item."""
        self.parser.framesetOK = False
        # Which open element names force an implied end tag for this one.
        stopNamesMap = {"li":["li"],
                        "dt":["dt", "dd"],
                        "dd":["dt", "dd"]}
        stopNames = stopNamesMap[token["name"]]
        for node in reversed(self.tree.openElements):
            if node.name in stopNames:
                self.parser.phase.processEndTag(
                    impliedTagToken(node.name, "EndTag"))
                break
            # Stop searching at a special/scoping element, except
            # address/div/p which do not terminate the search.
            if (node.nameTuple in (scopingElements | specialElements) and
                node.name not in ("address", "div", "p")):
                break
        if self.tree.elementInScope("p"):
            self.parser.phase.processEndTag(
                impliedTagToken("p", "EndTag"))
        self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
    def startTagHeading(self, token):
        """<h1>-<h6>: close an open <p>; a heading directly inside another
        heading is a parse error and the open one is popped first."""
        if self.tree.elementInScope("p"):
            self.endTagP(impliedTagToken("p"))
        if self.tree.openElements[-1].name in headingElements:
            self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
            self.tree.openElements.pop()
        self.tree.insertElement(token)
    def startTagA(self, token):
        """<a>: an <a> already on the active formatting list is a parse
        error; the old one is closed and removed before the new one is
        added (pre-step to the adoption agency algorithm)."""
        afeAElement = self.tree.elementInActiveFormattingElements("a")
        if afeAElement:
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "a", "endName": "a"})
            self.endTagFormatting(impliedTagToken("a"))
            # The adoption agency may or may not have removed it; make sure.
            if afeAElement in self.tree.openElements:
                self.tree.openElements.remove(afeAElement)
            if afeAElement in self.tree.activeFormattingElements:
                self.tree.activeFormattingElements.remove(afeAElement)
        self.tree.reconstructActiveFormattingElements()
        self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
    def startTagNobr(self, token):
        """<nobr>: a nested <nobr> implies an end tag first (parse error)."""
        self.tree.reconstructActiveFormattingElements()
        if self.tree.elementInScope("nobr"):
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "nobr", "endName": "nobr"})
            self.processEndTag(impliedTagToken("nobr"))
            # XXX Need tests that trigger the following
            self.tree.reconstructActiveFormattingElements()
        self.addFormattingElement(token)
    def startTagButton(self, token):
        """<button>: a nested <button> implies closing the open one and
        reprocessing this token; otherwise insert with a scope marker."""
        if self.tree.elementInScope("button"):
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "button", "endName": "button"})
            self.processEndTag(impliedTagToken("button"))
            self.parser.phase.processStartTag(token)
        else:
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.tree.activeFormattingElements.append(Marker)
            self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
    def startTagImage(self, token):
        """<image> is a historical misspelling of <img>; reprocess it as
        an <img> start tag with the same attributes."""
        # No really...
        self.parser.parseError("unexpected-start-tag-treated-as",
                               {"originalName": "image", "newName": "img"})
        self.processStartTag(impliedTagToken("img", "StartTag",
                                             attributes=token["data"],
                                             selfClosing=token["selfClosing"]))
    def startTagIsIndex(self, token):
        """<isindex> (deprecated): expand into an equivalent
        <form><hr><label>prompt<input name=isindex></label><hr></form>."""
        self.parser.parseError("deprecated-tag", {"name": "isindex"})
        if self.tree.formPointer:
            # Inside an open form, <isindex> is simply ignored.
            return
        form_attrs = {}
        if "action" in token["data"]:
            form_attrs["action"] = token["data"]["action"]
        self.processStartTag(impliedTagToken("form", "StartTag",
                                             attributes=form_attrs))
        self.processStartTag(impliedTagToken("hr", "StartTag"))
        self.processStartTag(impliedTagToken("label", "StartTag"))
        # XXX Localization ...
        if "prompt" in token["data"]:
            prompt = token["data"]["prompt"]
        else:
            prompt = "This is a searchable index. Insert your search keywords here: "
        self.processCharacters(
            {"type":tokenTypes["Characters"], "data":prompt})
        # All original attributes except action/prompt go on the <input>.
        attributes = token["data"].copy()
        if "action" in attributes:
            del attributes["action"]
        if "prompt" in attributes:
            del attributes["prompt"]
        attributes["name"] = "isindex"
        self.processStartTag(impliedTagToken("input", "StartTag",
                                             attributes = attributes,
                                             selfClosing =
                                             token["selfClosing"]))
        self.processEndTag(impliedTagToken("label"))
        self.processStartTag(impliedTagToken("hr", "StartTag"))
        self.processEndTag(impliedTagToken("form"))
    def startTagTextarea(self, token):
        """<textarea>: switch the tokenizer to RCDATA and drop a leading
        newline from the following character token."""
        self.tree.insertElement(token)
        self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
        self.processSpaceCharacters = self.processSpaceCharactersDropNewline
        self.parser.framesetOK = False
    def startTagIFrame(self, token):
        """<iframe>: raw-text content; also disallows a later <frameset>."""
        self.parser.framesetOK = False
        self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.elementInScope("option"):
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
    def startTagSelect(self, token):
        """<select>: enter inSelectInTable when opened from a table-related
        phase, otherwise inSelect."""
        self.tree.reconstructActiveFormattingElements()
        self.tree.insertElement(token)
        self.parser.framesetOK = False
        if self.parser.phase in (self.parser.phases["inTable"],
                                 self.parser.phases["inCaption"],
                                 self.parser.phases["inColumnGroup"],
                                 self.parser.phases["inTableBody"],
                                 self.parser.phases["inRow"],
                                 self.parser.phases["inCell"]):
            self.parser.phase = self.parser.phases["inSelectInTable"]
        else:
            self.parser.phase = self.parser.phases["inSelect"]
    def startTagRpRt(self, token):
        """<rp>/<rt>: generate implied end tags back to an open <ruby>
        before inserting."""
        if self.tree.elementInScope("ruby"):
            self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != "ruby":
                self.parser.parseError()
            while self.tree.openElements[-1].name != "ruby":
                self.tree.openElements.pop()
        self.tree.insertElement(token)
    def startTagMath(self, token):
        """<math>: adjust MathML/foreign attributes, tag the token with the
        MathML namespace, and enter the inForeignContent phase."""
        self.tree.reconstructActiveFormattingElements()
        self.parser.adjustMathMLAttributes(token)
        self.parser.adjustForeignAttributes(token)
        token["namespace"] = namespaces["mathml"]
        self.tree.insertElement(token)
        #Need to get the parse error right for the case where the token
        #has a namespace not equal to the xmlns attribute
        if self.parser.phase != self.parser.phases["inForeignContent"]:
            # Remember the phase to return to once foreign content ends.
            self.parser.secondaryPhase = self.parser.phase
        self.parser.phase = self.parser.phases["inForeignContent"]
        if token["selfClosing"]:
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
    def startTagSvg(self, token):
        """<svg>: adjust SVG/foreign attributes, tag the token with the SVG
        namespace, and enter the inForeignContent phase."""
        self.tree.reconstructActiveFormattingElements()
        self.parser.adjustSVGAttributes(token)
        self.parser.adjustForeignAttributes(token)
        token["namespace"] = namespaces["svg"]
        self.tree.insertElement(token)
        #Need to get the parse error right for the case where the token
        #has a namespace not equal to the xmlns attribute
        if self.parser.phase != self.parser.phases["inForeignContent"]:
            # Remember the phase to return to once foreign content ends.
            self.parser.secondaryPhase = self.parser.phase
        self.parser.phase = self.parser.phases["inForeignContent"]
        if token["selfClosing"]:
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
    def endTagP(self, token):
        """</p>: if no <p> is in scope, synthesise one (parse error) and
        retry; otherwise pop elements down to and including the <p>."""
        if not self.tree.elementInScope("p"):
            self.startTagCloseP(impliedTagToken("p", "StartTag"))
            self.parser.parseError("unexpected-end-tag", {"name": "p"})
            self.endTagP(impliedTagToken("p", "EndTag"))
        else:
            self.tree.generateImpliedEndTags("p")
            if self.tree.openElements[-1].name != "p":
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
            node = self.tree.openElements.pop()
            while node.name != "p":
                node = self.tree.openElements.pop()
    def endTagBody(self, token):
        """</body>: report any stray open elements, then switch to the
        afterBody phase (the body element itself is not popped)."""
        if not self.tree.elementInScope("body"):
            self.parser.parseError()
            return
        elif self.tree.openElements[-1].name != "body":
            for node in self.tree.openElements[2:]:
                if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                               "option", "p", "rp", "rt",
                                               "tbody", "td", "tfoot",
                                               "th", "thead", "tr", "body",
                                               "html")):
                    #Not sure this is the correct name for the parse error
                    self.parser.parseError(
                        "expected-one-end-tag-but-got-another",
                        {"expectedName": "body", "gotName": node.name})
                    break
        self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
#We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
self.parser.phase.processEndTag(token)
    def endTagBlock(self, token):
        """End tag for a block element: generate implied end tags and pop
        to the matching element when it is in scope."""
        #Put us back in the right whitespace handling mode
        if token["name"] == "pre":
            self.processSpaceCharacters = self.processSpaceCharactersNonPre
        inScope = self.tree.elementInScope(token["name"])
        if inScope:
            self.tree.generateImpliedEndTags()
        if self.tree.openElements[-1].name != token["name"]:
            self.parser.parseError("end-tag-too-early", {"name": token["name"]})
        if inScope:
            node = self.tree.openElements.pop()
            while node.name != token["name"]:
                node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node.name):
self.parser.parseError("unexpected-end-tag",
{"name":"form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
    def endTagListItem(self, token):
        """</li>/</dd>/</dt>: pop to the matching item when in scope;
        <li> uses the narrower "list item" scope variant."""
        if token["name"] == "li":
            variant = "list"
        else:
            variant = None
        if not self.tree.elementInScope(token["name"], variant=variant):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        else:
            self.tree.generateImpliedEndTags(exclude = token["name"])
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError(
                    "end-tag-too-early",
                    {"name": token["name"]})
            node = self.tree.openElements.pop()
            while node.name != token["name"]:
                node = self.tree.openElements.pop()
    def endTagHeading(self, token):
        """End tag for h1-h6: any open heading in scope is closed, not
        just one whose name matches the token."""
        for item in headingElements:
            if self.tree.elementInScope(item):
                self.tree.generateImpliedEndTags()
                break
        if self.tree.openElements[-1].name != token["name"]:
            self.parser.parseError("end-tag-too-early", {"name": token["name"]})
        for item in headingElements:
            if self.tree.elementInScope(item):
                item = self.tree.openElements.pop()
                while item.name not in headingElements:
                    item = self.tree.openElements.pop()
                break
    def endTagFormatting(self, token):
        """The much-feared adoption agency algorithm

        End-tag handling for formatting elements: find the matching active
        formatting element, locate the "furthest block" above it on the
        open-element stack, re-home the intervening nodes, and clone the
        formatting element around the furthest block's children. Loops
        until the end tag is fully handled.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#adoptionAgency
        # XXX Better parseError messages appreciated.
        # NOTE(review): `name` is never read below; handlers use
        # token["name"] directly.
        name = token["name"]
        while True:
            # Step 1 paragraph 1
            formattingElement = self.tree.elementInActiveFormattingElements(
                token["name"])
            if not formattingElement or (formattingElement in
                                         self.tree.openElements and
                                         not self.tree.elementInScope(
                                             formattingElement.name)):
                self.parser.parseError("adoption-agency-1.1", {"name": token["name"]})
                return
            # Step 1 paragraph 2
            elif formattingElement not in self.tree.openElements:
                self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                self.tree.activeFormattingElements.remove(formattingElement)
                return
            # Step 1 paragraph 3
            if formattingElement != self.tree.openElements[-1]:
                self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
            # Step 2
            # Start of the adoption agency algorithm proper
            afeIndex = self.tree.openElements.index(formattingElement)
            furthestBlock = None
            for element in self.tree.openElements[afeIndex:]:
                if (element.nameTuple in
                    specialElements | scopingElements):
                    furthestBlock = element
                    break
            # Step 3
            if furthestBlock is None:
                # No special element above: simply pop to the formatting
                # element and drop it from the active list.
                element = self.tree.openElements.pop()
                while element != formattingElement:
                    element = self.tree.openElements.pop()
                self.tree.activeFormattingElements.remove(element)
                return
            commonAncestor = self.tree.openElements[afeIndex-1]
            # Step 5
            #if furthestBlock.parent:
            #    furthestBlock.parent.removeChild(furthestBlock)
            # Step 5
            # The bookmark is supposed to help us identify where to reinsert
            # nodes in step 12. We have to ensure that we reinsert nodes after
            # the node before the active formatting element. Note the bookmark
            # can move in step 7.4
            bookmark = self.tree.activeFormattingElements.index(formattingElement)
            # Step 6
            lastNode = node = furthestBlock
            while True:
                # AT replace this with a function and recursion?
                # Node is element before node in open elements
                node = self.tree.openElements[
                    self.tree.openElements.index(node)-1]
                while node not in self.tree.activeFormattingElements:
                    tmpNode = node
                    node = self.tree.openElements[
                        self.tree.openElements.index(node)-1]
                    self.tree.openElements.remove(tmpNode)
                # Step 6.3
                if node == formattingElement:
                    break
                # Step 6.4
                if lastNode == furthestBlock:
                    bookmark = (self.tree.activeFormattingElements.index(node)
                                + 1)
                # Step 6.5
                #cite = node.parent
                #if node.hasContent():
                clone = node.cloneNode()
                # Replace node with clone
                self.tree.activeFormattingElements[
                    self.tree.activeFormattingElements.index(node)] = clone
                self.tree.openElements[
                    self.tree.openElements.index(node)] = clone
                node = clone
                # Step 6.6
                # Remove lastNode from its parents, if any
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                node.appendChild(lastNode)
                # Step 7.7
                lastNode = node
                # End of inner loop
            # Step 7
            # Foster parent lastNode if commonAncestor is a
            # table, tbody, tfoot, thead, or tr we need to foster parent the
            # lastNode
            if lastNode.parent:
                lastNode.parent.removeChild(lastNode)
            commonAncestor.appendChild(lastNode)
            # Step 8
            clone = formattingElement.cloneNode()
            # Step 9
            furthestBlock.reparentChildren(clone)
            # Step 10
            furthestBlock.appendChild(clone)
            # Step 11
            self.tree.activeFormattingElements.remove(formattingElement)
            self.tree.activeFormattingElements.insert(bookmark, clone)
            # Step 12
            self.tree.openElements.remove(formattingElement)
            self.tree.openElements.insert(
                self.tree.openElements.index(furthestBlock) + 1, clone)
    def endTagAppletButtonMarqueeObject(self, token):
        """Close <applet>/<button>/<marquee>/<object> and clear the active
        formatting list back to the last marker."""
        if self.tree.elementInScope(token["name"]):
            self.tree.generateImpliedEndTags()
        if self.tree.openElements[-1].name != token["name"]:
            self.parser.parseError("end-tag-too-early", {"name": token["name"]})
        if self.tree.elementInScope(token["name"]):
            element = self.tree.openElements.pop()
            while element.name != token["name"]:
                element = self.tree.openElements.pop()
            self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
    def endTagOther(self, token):
        """Any other end tag: search the stack for a matching element and
        pop to it; abort with a parse error at a special/scoping element."""
        for node in self.tree.openElements[::-1]:
            if node.name == token["name"]:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                while self.tree.openElements.pop() != node:
                    pass
                break
            else:
                if (node.nameTuple in
                    specialElements | scopingElements):
                    self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    break
class TextPhase(Phase):
    """Phase for RCDATA/RAWTEXT content (e.g. <script>, <style>,
    <textarea>): text is inserted verbatim until the matching end tag."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("script", self.endTagScript)])
        self.endTagHandler.default = self.endTagOther
    def processCharacters(self, token):
        self.tree.insertText(token["data"])
    def processEOF(self):
        """EOF before the closing tag: parse error, pop the element, and
        let the original phase handle EOF."""
        self.parser.parseError("expected-named-closing-tag-but-got-eof",
                               self.tree.openElements[-1].name)
        self.tree.openElements.pop()
        self.parser.phase = self.parser.originalPhase
        self.parser.phase.processEOF()
    def startTagOther(self, token):
        # Bug fix: the assertion message referenced an undefined variable
        # `name`, so a triggered assert raised NameError instead of the
        # intended message.
        assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode"%token["name"]
    def endTagScript(self, token):
        node = self.tree.openElements.pop()
        assert node.name == "script"
        self.parser.phase = self.parser.originalPhase
        #The rest of this method is all stuff that only happens if
        #document.write works
    def endTagOther(self, token):
        node = self.tree.openElements.pop()
        self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
    """The "in table" insertion mode: only table-structure tags are
    expected; other content is foster-parented via the inBody phase."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-table
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        # Dispatch tables mapping tag names to handler methods.
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("caption", self.startTagCaption),
            ("colgroup", self.startTagColgroup),
            ("col", self.startTagCol),
            (("tbody", "tfoot", "thead"), self.startTagRowGroup),
            (("td", "th", "tr"), self.startTagImplyTbody),
            ("table", self.startTagTable),
            (("style", "script"), self.startTagStyleScript),
            ("input", self.startTagInput),
            ("form", self.startTagForm)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("table", self.endTagTable),
            (("body", "caption", "col", "colgroup", "html", "tbody", "td",
              "tfoot", "th", "thead", "tr"), self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther
    # helper methods
    def clearStackToTableContext(self):
        """Pop open elements until the current node is <table> or <html>."""
        # "clear the stack back to a table context"
        while self.tree.openElements[-1].name not in ("table", "html"):
            #self.parser.parseError("unexpected-implied-end-tag-in-table",
            #  {"name":  self.tree.openElements[-1].name})
            self.tree.openElements.pop()
        # When the current node is <html> it's an innerHTML case
    def getCurrentTable(self):
        """Return the innermost open <table>, or the root element if none."""
        i = -1
        while -i <= len(self.tree.openElements) and self.tree.openElements[i].name != "table":
            i -= 1
        if -i > len(self.tree.openElements):
            return self.tree.openElements[0]
        else:
            return self.tree.openElements[i]
    # processing methods
    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-table")
        else:
            assert self.parser.innerHTML
        #Stop parsing
    def processSpaceCharacters(self, token):
        """Buffer whitespace via the inTableText pending-characters phase."""
        originalPhase = self.parser.phase
        self.parser.phase = self.parser.phases["inTableText"]
        self.parser.phase.originalPhase = originalPhase
        self.parser.phase.characterTokens.append(token)
    def processCharacters(self, token):
        """Non-whitespace text in a table is foster-parented via inBody."""
        #If we get here there must be at least one non-whitespace character
        # Do the table magic!
        self.tree.insertFromTable = True
        self.parser.phases["inBody"].processCharacters(token)
        self.tree.insertFromTable = False
    def startTagCaption(self, token):
        self.clearStackToTableContext()
        self.tree.activeFormattingElements.append(Marker)
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inCaption"]
    def startTagColgroup(self, token):
        self.clearStackToTableContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inColumnGroup"]
    def startTagCol(self, token):
        """A bare <col> implies an enclosing <colgroup> first."""
        self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
        self.parser.phase.processStartTag(token)
    def startTagRowGroup(self, token):
        self.clearStackToTableContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inTableBody"]
    def startTagImplyTbody(self, token):
        """A bare <td>/<th>/<tr> implies an enclosing <tbody> first."""
        self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
        self.parser.phase.processStartTag(token)
    def startTagTable(self, token):
        """<table> inside <table> implies </table>, then reprocess."""
        self.parser.parseError("unexpected-start-tag-implies-end-tag",
                               {"startName": "table", "endName": "table"})
        self.parser.phase.processEndTag(impliedTagToken("table"))
        if not self.parser.innerHTML:
            self.parser.phase.processStartTag(token)
    def startTagStyleScript(self, token):
        self.parser.phases["inHead"].processStartTag(token)
    def startTagInput(self, token):
        """Only hidden inputs may appear directly in a table; anything
        else is foster-parented like other misplaced content."""
        if ("type" in token["data"] and
            token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
            self.parser.parseError("unexpected-hidden-input-in-table")
            self.tree.insertElement(token)
            # XXX associate with form
            self.tree.openElements.pop()
        else:
            self.startTagOther(token)
    def startTagForm(self, token):
        """A <form> in a table is a parse error and inserted empty."""
        self.parser.parseError("unexpected-form-in-table")
        self.tree.insertElement(token)
        self.tree.openElements.pop()
    def startTagOther(self, token):
        """Misplaced content: taint the table and foster-parent via inBody."""
        self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
        if "tainted" not in self.getCurrentTable()._flags:
            self.getCurrentTable()._flags.append("tainted")
        # Do the table magic!
        self.tree.insertFromTable = True
        self.parser.phases["inBody"].processStartTag(token)
        self.tree.insertFromTable = False
    def endTagTable(self, token):
        """</table>: pop to and including the table, then reset the
        insertion mode from the remaining stack."""
        if self.tree.elementInScope("table", variant="table"):
            self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != "table":
                self.parser.parseError("end-tag-too-early-named",
                                       {"gotName": "table",
                                        "expectedName": self.tree.openElements[-1].name})
            while self.tree.openElements[-1].name != "table":
                self.tree.openElements.pop()
            self.tree.openElements.pop()
            self.parser.resetInsertionMode()
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()
    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
    def endTagOther(self, token):
        """Misplaced end tag: taint the table and delegate via inBody."""
        self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
        if "tainted" not in self.getCurrentTable()._flags:
            self.getCurrentTable()._flags.append("tainted")
        # Do the table magic!
        self.tree.insertFromTable = True
        self.parser.phases["inBody"].processEndTag(token)
        self.tree.insertFromTable = False
class InTableTextPhase(Phase):
    """Buffers character tokens seen in table context so that runs of pure
    whitespace can be inserted directly, while any run containing
    non-whitespace is redirected through the original phase's
    foster-parenting path."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.originalPhase = None
        self.characterTokens = []
    def flushCharacters(self):
        """Emit the pending character tokens as a single run."""
        data = "".join([item["data"] for item in self.characterTokens])
        if any([item not in spaceCharacters for item in data]):
            token = {"type":tokenTypes["Characters"], "data":data}
            self.originalPhase.processCharacters(token)
        elif data:
            self.tree.insertText(data)
        self.characterTokens = []
    def processComment(self, token):
        self.flushCharacters()
        # Bug fix: restore the *parser's* phase. The original assigned to
        # self.phase, which only set an unused attribute on this object and
        # left the parser stuck in the inTableText phase.
        self.parser.phase = self.originalPhase
        self.parser.phase.processComment(token)
    def processEOF(self):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processEOF()
    def processCharacters(self, token):
        self.characterTokens.append(token)
    def processSpaceCharacters(self, token):
        #pretty sure we should never reach here
        self.characterTokens.append(token)
        # assert False
    def processStartTag(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processStartTag(token)
    def processEndTag(self, token):
        self.flushCharacters()
        self.parser.phase = self.originalPhase
        self.parser.phase.processEndTag(token)
class InCaptionPhase(Phase):
    """The "in caption" insertion mode: mostly behaves like inBody, but
    table-structure tags implicitly close the caption."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.startTagTableElement)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("caption", self.endTagCaption),
            ("table", self.endTagTable),
            (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther
    def ignoreEndTagCaption(self):
        """True when no <caption> is in table scope (innerHTML case)."""
        return not self.tree.elementInScope("caption", variant="table")
    def processEOF(self):
        self.parser.phases["inBody"].processEOF()
    def processCharacters(self, token):
        self.parser.phases["inBody"].processCharacters(token)
    def startTagTableElement(self, token):
        """A table-structure tag implies </caption>, then reprocess."""
        self.parser.parseError()
        #XXX Have to duplicate logic here to find out if the tag is ignored
        ignoreEndTag = self.ignoreEndTagCaption()
        self.parser.phase.processEndTag(impliedTagToken("caption"))
        if not ignoreEndTag:
            self.parser.phase.processStartTag(token)
    def startTagOther(self, token):
        self.parser.phases["inBody"].processStartTag(token)
    def endTagCaption(self, token):
        """</caption>: pop to the caption and return to the inTable phase."""
        if not self.ignoreEndTagCaption():
            # AT this code is quite similar to endTagTable in "InTable"
            self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != "caption":
                self.parser.parseError("expected-one-end-tag-but-got-another",
                                       {"gotName": "caption",
                                        "expectedName": self.tree.openElements[-1].name})
            while self.tree.openElements[-1].name != "caption":
                self.tree.openElements.pop()
            self.tree.openElements.pop()
            self.tree.clearActiveFormattingElements()
            self.parser.phase = self.parser.phases["inTable"]
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()
    def endTagTable(self, token):
        """</table> inside a caption implies </caption>, then reprocess."""
        self.parser.parseError()
        ignoreEndTag = self.ignoreEndTagCaption()
        self.parser.phase.processEndTag(impliedTagToken("caption"))
        if not ignoreEndTag:
            self.parser.phase.processEndTag(token)
    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
    def endTagOther(self, token):
        self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
    """The "in column group" insertion mode: only <col> is expected;
    anything else implicitly closes the <colgroup>."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-column
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("col", self.startTagCol)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("colgroup", self.endTagColgroup),
            ("col", self.endTagCol)
        ])
        self.endTagHandler.default = self.endTagOther
    def ignoreEndTagColgroup(self):
        """True when there is no open colgroup (innerHTML case)."""
        return self.tree.openElements[-1].name == "html"
    def processEOF(self):
        if self.tree.openElements[-1].name == "html":
            assert self.parser.innerHTML
            return
        else:
            ignoreEndTag = self.ignoreEndTagColgroup()
            # Consistency fix: pass a proper token; the old code passed the
            # bare string "colgroup" (endTagColgroup ignores its argument,
            # so behaviour is unchanged).
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                self.parser.phase.processEOF()
    def processCharacters(self, token):
        """Characters implicitly close the colgroup and are reprocessed."""
        ignoreEndTag = self.ignoreEndTagColgroup()
        self.endTagColgroup(impliedTagToken("colgroup"))
        if not ignoreEndTag:
            self.parser.phase.processCharacters(token)
    def startTagCol(self, token):
        """<col> is a void element: insert and pop immediately."""
        self.tree.insertElement(token)
        self.tree.openElements.pop()
    def startTagOther(self, token):
        """Any other start tag closes the colgroup and is reprocessed."""
        ignoreEndTag = self.ignoreEndTagColgroup()
        self.endTagColgroup(impliedTagToken("colgroup"))
        if not ignoreEndTag:
            self.parser.phase.processStartTag(token)
    def endTagColgroup(self, token):
        if self.ignoreEndTagColgroup():
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()
        else:
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTable"]
    def endTagCol(self, token):
        self.parser.parseError("no-end-tag", {"name": "col"})
    def endTagOther(self, token):
        """Any other end tag closes the colgroup and is reprocessed."""
        ignoreEndTag = self.ignoreEndTagColgroup()
        self.endTagColgroup(impliedTagToken("colgroup"))
        if not ignoreEndTag:
            self.parser.phase.processEndTag(token)
class InTableBodyPhase(Phase):
    """The "in table body" insertion mode (<tbody>/<tfoot>/<thead>):
    expects rows; most other content is delegated to inTable."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-table0
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("tr", self.startTagTr),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
             self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            ("table", self.endTagTable),
            (("body", "caption", "col", "colgroup", "html", "td", "th",
              "tr"), self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther
    # helper methods
    def clearStackToTableBodyContext(self):
        """Pop open elements until a row group (or <html>) is current."""
        while self.tree.openElements[-1].name not in ("tbody", "tfoot",
                                                      "thead", "html"):
            #self.parser.parseError("unexpected-implied-end-tag-in-table",
            #  {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "html":
            assert self.parser.innerHTML
    # the rest
    def processEOF(self):
        self.parser.phases["inTable"].processEOF()
    def processSpaceCharacters(self, token):
        self.parser.phases["inTable"].processSpaceCharacters(token)
    def processCharacters(self, token):
        self.parser.phases["inTable"].processCharacters(token)
    def startTagTr(self, token):
        self.clearStackToTableBodyContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inRow"]
    def startTagTableCell(self, token):
        """A bare <td>/<th> implies an enclosing <tr> first."""
        self.parser.parseError("unexpected-cell-in-table-body",
                               {"name": token["name"]})
        self.startTagTr(impliedTagToken("tr", "StartTag"))
        self.parser.phase.processStartTag(token)
    def startTagTableOther(self, token):
        """Table-structure tags close the current row group first, then
        are reprocessed."""
        # XXX AT Any ideas on how to share this with endTagTable?
        if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
            self.tree.elementInScope("tfoot", variant="table")):
            self.clearStackToTableBodyContext()
            self.endTagTableRowGroup(
                impliedTagToken(self.tree.openElements[-1].name))
            self.parser.phase.processStartTag(token)
        else:
            # innerHTML case
            self.parser.parseError()
    def startTagOther(self, token):
        self.parser.phases["inTable"].processStartTag(token)
    def endTagTableRowGroup(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.clearStackToTableBodyContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTable"]
        else:
            self.parser.parseError("unexpected-end-tag-in-table-body",
                                   {"name": token["name"]})
    def endTagTable(self, token):
        """</table> closes the current row group first, then reprocesses."""
        if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
            self.tree.elementInScope("tfoot", variant="table")):
            self.clearStackToTableBodyContext()
            self.endTagTableRowGroup(
                impliedTagToken(self.tree.openElements[-1].name))
            self.parser.phase.processEndTag(token)
        else:
            # innerHTML case
            self.parser.parseError()
    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-body",
                               {"name": token["name"]})
    def endTagOther(self, token):
        self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
    """Insertion mode used while a <tr> element is open.

    Implements the "in row" rules of the HTML5 tree-construction
    algorithm; anything not row-specific is delegated to the
    "in table" phase.
    """
    # http://www.whatwg.org/specs/web-apps/current-work/#in-row
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        # Dispatch tables mapping tag names to handler methods.
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
              "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("tr", self.endTagTr),
            ("table", self.endTagTable),
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            (("body", "caption", "col", "colgroup", "html", "td", "th"),
             self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper methods (XXX unify this with other table helper methods)
    def clearStackToTableRowContext(self):
        # Pop elements until the current node is a <tr> (or <html> in the
        # innerHTML case), reporting each implied end tag.
        while self.tree.openElements[-1].name not in ("tr", "html"):
            self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()

    def ignoreEndTagTr(self):
        # True when there is no <tr> in table scope, i.e. </tr> must be ignored.
        return not self.tree.elementInScope("tr", variant="table")

    # the rest
    def processEOF(self):
        self.parser.phases["inTable"].processEOF()

    def processSpaceCharacters(self, token):
        self.parser.phases["inTable"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.phases["inTable"].processCharacters(token)

    def startTagTableCell(self, token):
        # Open a cell: clear back to the row, insert the cell, enter
        # "in cell", and push a scope marker for formatting elements.
        self.clearStackToTableRowContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inCell"]
        self.tree.activeFormattingElements.append(Marker)

    def startTagTableOther(self, token):
        # Other table-structure tags implicitly close the row.
        # NOTE: endTagTr ignores its argument, so passing the string "tr"
        # instead of a token works here.
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr("tr")
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            self.parser.phase.processStartTag(token)

    def startTagOther(self, token):
        self.parser.phases["inTable"].processStartTag(token)

    def endTagTr(self, token):
        if not self.ignoreEndTagTr():
            self.clearStackToTableRowContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTableBody"]
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagTable(self, token):
        # </table> implies </tr> first, then is reprocessed.
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr("tr")
        # Reprocess the current tag if the tr end tag was not ignored
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            self.parser.phase.processEndTag(token)

    def endTagTableRowGroup(self, token):
        # tbody/thead/tfoot end tags close the row, then are reprocessed.
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagTr("tr")
            self.parser.phase.processEndTag(token)
        else:
            # innerHTML case
            self.parser.parseError()

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-row",
                               {"name": token["name"]})

    def endTagOther(self, token):
        self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
    """Insertion mode used while a <td> or <th> element is open.

    Implements the "in cell" rules; most content is delegated to the
    "in body" phase.
    """
    # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("td", "th"), self.endTagTableCell),
            (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
            (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper
    def closeCell(self):
        # Emit an implied </td> or </th> for whichever cell is in scope.
        if self.tree.elementInScope("td", variant="table"):
            self.endTagTableCell(impliedTagToken("td"))
        elif self.tree.elementInScope("th", variant="table"):
            self.endTagTableCell(impliedTagToken("th"))

    # the rest
    def processEOF(self):
        self.parser.phases["inBody"].processEOF()

    def processCharacters(self, token):
        self.parser.phases["inBody"].processCharacters(token)

    def startTagTableOther(self, token):
        # Table-structure tags implicitly close the current cell, then
        # the token is reprocessed.
        if (self.tree.elementInScope("td", variant="table") or
            self.tree.elementInScope("th", variant="table")):
            self.closeCell()
            self.parser.phase.processStartTag(token)
        else:
            # innerHTML case
            self.parser.parseError()

    def startTagOther(self, token):
        self.parser.phases["inBody"].processStartTag(token)
        # Optimize this for subsequent invocations. Can't do this initially
        # because self.phases doesn't really exist at that point.
        self.startTagHandler.default =\
            self.parser.phases["inBody"].processStartTag

    def endTagTableCell(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.tree.generateImpliedEndTags(token["name"])
            if self.tree.openElements[-1].name != token["name"]:
                # Mis-nested content inside the cell: pop until the cell
                # element itself has been removed.
                self.parser.parseError("unexpected-cell-end-tag",
                                       {"name": token["name"]})
                while True:
                    node = self.tree.openElements.pop()
                    if node.name == token["name"]:
                        break
            else:
                self.tree.openElements.pop()
            # Clear formatting elements back to the marker pushed when the
            # cell was opened, then return to the "in row" mode.
            self.tree.clearActiveFormattingElements()
            self.parser.phase = self.parser.phases["inRow"]
        else:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagImply(self, token):
        # table/tbody/tfoot/thead/tr end tags imply closing the cell first.
        if self.tree.elementInScope(token["name"], variant="table"):
            self.closeCell()
            self.parser.phase.processEndTag(token)
        else:
            # sometimes innerHTML case
            self.parser.parseError()

    def endTagOther(self, token):
        self.parser.phases["inBody"].processEndTag(token)
        # Optimize this for subsequent invocations. Can't do this initially
        # because self.phases doesn't really exist at that point.
        self.endTagHandler.default = self.parser.phases["inBody"].processEndTag
class InSelectPhase(Phase):
    """Insertion mode used while a <select> element is open.

    Only option/optgroup structure is honoured; most other tags are
    parse errors or force the select closed.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("option", self.startTagOption),
            ("optgroup", self.startTagOptgroup),
            ("select", self.startTagSelect),
            (("input", "keygen", "textarea"), self.startTagInput)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("option", self.endTagOption),
            ("optgroup", self.endTagOptgroup),
            ("select", self.endTagSelect),
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td",
              "th"), self.endTagTableElements)
        ])
        self.endTagHandler.default = self.endTagOther

    # http://www.whatwg.org/specs/web-apps/current-work/#in-select
    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-select")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        self.tree.insertText(token["data"])

    def startTagOption(self, token):
        # We need to imply </option> if <option> is the current node.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagOptgroup(self, token):
        # <optgroup> implicitly closes any open <option> and <optgroup>.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagSelect(self, token):
        # A nested <select> acts like </select>.
        # NOTE: endTagSelect ignores its argument, so passing the string
        # "select" instead of a token works here.
        self.parser.parseError("unexpected-select-in-select")
        self.endTagSelect("select")

    def startTagInput(self, token):
        # input/keygen/textarea force the select closed, then the token
        # is reprocessed outside it.
        self.parser.parseError("unexpected-input-in-select")
        if self.tree.elementInScope("select", variant="table"):
            self.endTagSelect("select")
            self.parser.phase.processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-select",
                               {"name": token["name"]})

    def endTagOption(self, token):
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "option"})

    def endTagOptgroup(self, token):
        # </optgroup> implicitly closes <option>
        if (self.tree.openElements[-1].name == "option" and
            self.tree.openElements[-2].name == "optgroup"):
            self.tree.openElements.pop()
        # It also closes </optgroup>
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        # But nothing else
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "optgroup"})

    def endTagSelect(self, token):
        if self.tree.elementInScope("select", variant="table"):
            # Pop everything up to and including the <select> element.
            node = self.tree.openElements.pop()
            while node.name != "select":
                node = self.tree.openElements.pop()
            self.parser.resetInsertionMode()
        else:
            # innerHTML case
            self.parser.parseError()

    def endTagTableElements(self, token):
        # Table-related end tags inside a select close the select and are
        # then reprocessed (only if the element is actually in table scope).
        self.parser.parseError("unexpected-end-tag-in-select",
                               {"name": token["name"]})
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagSelect("select")
            self.parser.phase.processEndTag(token)

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-select",
                               {"name": token["name"]})
class InSelectInTablePhase(Phase):
    """Insertion mode for a <select> nested inside a table.

    Behaves like "in select", except table-structure tags force the
    select closed so the table can continue.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.startTagTable)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.endTagTable)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # Table tags implicitly close the select, then are reprocessed.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        self.parser.phase.processStartTag(token)

    def startTagOther(self, token):
        self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        # Same as startTagTable, but only if the element is in table scope.
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            self.parser.phase.processEndTag(token)

    def endTagOther(self, token):
        self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    """Insertion mode used inside SVG/MathML (foreign) content.

    HTML tokens that cannot appear in foreign content ("breakout"
    elements) pop back to HTML; everything else is inserted in the
    current foreign namespace with spec-mandated name/attribute fixups.
    """
    # HTML start tags that break out of foreign content back into HTML.
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "font", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def nonHTMLElementInScope(self):
        # NOTE(review): elementInScope is called with an *element* here,
        # not a tag name — presumably relying on elementInScope's
        # ``node.name == target`` comparison; verify against callers.
        for element in self.tree.openElements[::-1]:
            if element.namespace == self.tree.defaultNamespace:
                return self.tree.elementInScope(element)
        assert False
        # NOTE(review): everything below the assert is unreachable dead
        # code and appears to be a leftover alternative implementation.
        for item in self.tree.openElements[::-1]:
            if item.namespace == self.tree.defaultNamespace:
                return True
            elif item.nameTuple in scopingElements:
                return False
        return False

    def adjustSVGTagNames(self, token):
        # SVG is case-sensitive; restore canonical mixed-case tag names
        # that the tokenizer lower-cased.
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}
        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        # Character data in foreign content disallows a later <frameset>.
        self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processEOF(self):
        pass

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        # HTML integration points: hand the token back to the secondary
        # (HTML) phase when the current node accepts HTML content.
        if (currentNode.namespace == self.tree.defaultNamespace or
            (currentNode.namespace == namespaces["mathml"] and
             token["name"] not in frozenset(["mglyph", "malignmark"]) and
             currentNode.name in frozenset(["mi", "mo", "mn",
                                            "ms", "mtext"])) or
            (currentNode.namespace == namespaces["mathml"] and
             currentNode.name == "annotation-xml" and
             token["name"] == "svg") or
            (currentNode.namespace == namespaces["svg"] and
             currentNode.name in frozenset(["foreignObject",
                                            "desc", "title"])
             )):
            assert self.parser.secondaryPhase != self
            self.parser.secondaryPhase.processStartTag(token)
            if self.parser.phase == self and self.nonHTMLElementInScope():
                self.parser.phase = self.parser.secondaryPhase
        elif token["name"] in self.breakoutElements:
            # Breakout element: pop all foreign elements and reprocess
            # the token as HTML.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   token["name"])
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace):
                self.tree.openElements.pop()
            self.parser.phase = self.parser.secondaryPhase
            self.parser.phase.processStartTag(token)
        else:
            # Insert in the current foreign namespace, after fixing up
            # tag/attribute casing per the spec.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        self.adjustSVGTagNames(token)
        self.parser.secondaryPhase.processEndTag(token)
        # Leave foreign-content mode once no foreign element remains in scope.
        if self.parser.phase == self and self.nonHTMLElementInScope():
            self.parser.phase = self.parser.secondaryPhase
class AfterBodyPhase(Phase):
    """Insertion mode after </body>: only comments and whitespace are
    expected; anything else reopens the "in body" rules."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processComment(self, token):
        # This is needed because data is to be appended to the <html> element
        # here and not to whatever is currently open.
        self.tree.insertComment(token, self.tree.openElements[0])

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-body")
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processCharacters(token)

    def startTagHtml(self, token):
        self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processStartTag(token)

    def endTagHtml(self, name):
        # NOTE: the parameter is the token, despite its misleading name;
        # it is never used.
        if self.parser.innerHTML:
            self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
        else:
            self.parser.phase = self.parser.phases["afterAfterBody"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processEndTag(token)
class InFramesetPhase(Phase):
    """Insertion mode used inside a <frameset> element."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("frameset", self.endTagFrameset),
            ("noframes", self.endTagNoframes)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
            self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagNoframes(self, token):
        self.parser.phases["inBody"].processEndTag(token)

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    """Insertion mode after </frameset>."""
    # http://www.whatwg.org/specs/web-apps/current-work/#after3
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    """Final insertion mode after </html> (non-frameset documents):
    only comments and whitespace are expected."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here attach to the Document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processCharacters(token)

    def startTagHtml(self, token):
        self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processStartTag(token)

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processEndTag(token)
class AfterAfterFramesetPhase(Phase):
    """Final insertion mode after </html> for frameset documents."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here attach to the Document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processCharacters(token)

    def startTagHtml(self, token):
        self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processStartTag(token)

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        self.parser.phase.processEndTag(token)
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic token for a tag the spec implies rather than one
    present in the input stream."""
    if attributes is None:
        attributes = {}
    token = {"type": tokenTypes[type],
             "name": name,
             "data": attributes,
             "selfClosing": selfClosing}
    return token
class ParseError(Exception):
    """Raised for an unrecoverable error in the parsed document."""
    pass
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/html5parser.py
|
html5parser.py
|
from amara.thirdparty.html5lib.constants import scopingElements, tableInsertModeElements, namespaces
try:
frozenset
except NameError:
# Import from the sets module for python 2.3
from sets import Set as set
from sets import ImmutableSet as frozenset
# The scope markers are inserted when entering buttons, object elements,
# marquees, table cells, and table captions, and are used to prevent formatting
# from "leaking" into tables, buttons, object elements, and marquees.
Marker = None
class Node(object):
    """Abstract base class for tree nodes.

    Concrete treebuilders subclass this and implement the abstract
    operations (appendChild, insertText, insertBefore, removeChild,
    cloneNode, hasContent).

    Attributes:
        name - the tag name associated with the node
        parent - the parent node, or None for the document node
        value - the node value (text nodes and comments)
        attributes - dict of attribute name/value pairs
        childNodes - list of child nodes (all elements, possibly more)
        _flags - list of miscellaneous flags set on the node
    """

    def __init__(self, name):
        self.name = name
        self.parent = None
        self.value = None
        self.attributes = {}
        self.childNodes = []
        self._flags = []

    def __unicode__(self):
        # Render an opening-tag-like representation, including attributes.
        rendered = [u'%s="%s"' % (attrName, attrValue)
                    for attrName, attrValue in self.attributes.iteritems()]
        if not rendered:
            return "<%s>" % self.name
        return "<%s %s>" % (self.name, " ".join(rendered))

    def __repr__(self):
        return "<%s>" % (self.name)

    def appendChild(self, node):
        """Append node as the last child of this node."""
        raise NotImplementedError

    def insertText(self, data, insertBefore=None):
        """Insert data as text in this node, before insertBefore if given,
        otherwise at the end of the node's text."""
        raise NotImplementedError

    def insertBefore(self, node, refNode):
        """Insert node as a child of this node, before refNode.
        Raises ValueError if refNode is not a child of this node."""
        raise NotImplementedError

    def removeChild(self, node):
        """Remove node from this node's children."""
        raise NotImplementedError

    def reparentChildren(self, newParent):
        """Move every child of this node onto newParent.

        Needed so that trees which don't store text as nodes still move
        text correctly.
        """
        # XXX - should this method be made more general?
        for node in self.childNodes:
            newParent.appendChild(node)
        self.childNodes = []

    def cloneNode(self):
        """Return a shallow copy: same name and attributes, no parent or
        children."""
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text, false otherwise."""
        raise NotImplementedError
class TreeBuilder(object):
    """Base treebuilder implementation
    documentClass - the class to use for the bottommost node of a document
    elementClass - the class to use for HTML Elements
    commentClass - the class to use for comments
    doctypeClass - the class to use for doctypes
    """
    # Document class
    documentClass = None
    # The class to use for creating a node
    elementClass = None
    # The class to use for creating comments
    commentClass = None
    # The class to use for creating doctypes
    doctypeClass = None
    # Fragment class
    fragmentClass = None

    def __init__(self, namespaceHTMLElements):
        # When namespacing is on, HTML elements live in the XHTML namespace.
        if namespaceHTMLElements:
            self.defaultNamespace = "http://www.w3.org/1999/xhtml"
        else:
            self.defaultNamespace = None
        self.reset()

    def reset(self):
        # Clear all parser state so the builder can be reused.
        self.openElements = []
        self.activeFormattingElements = []
        # XXX - rename these to headElement, formElement
        self.headPointer = None
        self.formPointer = None
        self.insertFromTable = False
        self.document = self.documentClass()

    def elementInScope(self, target, variant=None):
        """Return whether an element named ``target`` is in the given
        scope variant (None, "list" or "table") per the HTML5 spec."""
        # Exit early when possible.
        # NOTE(review): this map is rebuilt on every call; it could be
        # hoisted, but is kept as-is to preserve exact behavior.
        listElementsMap = {
            None: scopingElements,
            "list": scopingElements | set([(namespaces["html"], "ol"),
                                           (namespaces["html"], "ul")]),
            "table": set([(namespaces["html"], "html"),
                          (namespaces["html"], "table")])
        }
        listElements = listElementsMap[variant]
        for node in reversed(self.openElements):
            if node.name == target:
                return True
            elif node.nameTuple in listElements:
                return False
        assert False  # We should never reach this point

    def reconstructActiveFormattingElements(self):
        """Re-open formatting elements (b, i, ...) that are still active
        but no longer on the stack of open elements."""
        # Within this algorithm the order of steps described in the
        # specification is not quite the same as the order of steps in the
        # code. It should still do the same though.

        # Step 1: stop the algorithm when there's nothing to do.
        if not self.activeFormattingElements:
            return

        # Step 2 and step 3: we start with the last element. So i is -1.
        i = len(self.activeFormattingElements) - 1
        entry = self.activeFormattingElements[i]
        if entry == Marker or entry in self.openElements:
            return

        # Step 6: walk back to the first entry that needs reconstructing.
        while entry != Marker and entry not in self.openElements:
            if i == 0:
                # This will be reset to 0 below
                i = -1
                break
            i -= 1
            # Step 5: let entry be one earlier in the list.
            entry = self.activeFormattingElements[i]

        while True:
            # Step 7
            i += 1
            # Step 8
            entry = self.activeFormattingElements[i]
            clone = entry.cloneNode()  # Mainly to get a new copy of the attributes
            # Step 9: insert a fresh element for the clone.
            element = self.insertElement({"type": "StartTag",
                                          "name": clone.name,
                                          "namespace": clone.namespace,
                                          "data": clone.attributes})
            # Step 10: replace the stale entry with the new element.
            self.activeFormattingElements[i] = element
            # Step 11: stop once the last entry has been reconstructed.
            if element == self.activeFormattingElements[-1]:
                break

    def clearActiveFormattingElements(self):
        # Pop entries up to and including the most recent scope Marker.
        entry = self.activeFormattingElements.pop()
        while self.activeFormattingElements and entry != Marker:
            entry = self.activeFormattingElements.pop()

    def elementInActiveFormattingElements(self, name):
        """Check if an element exists between the end of the active
        formatting elements and the last marker. If it does, return it, else
        return false"""
        for item in self.activeFormattingElements[::-1]:
            # Check for Marker first because if it's a Marker it doesn't have a
            # name attribute.
            if item == Marker:
                break
            elif item.name == name:
                return item
        return False

    def insertRoot(self, token):
        # Create the root <html> element and attach it to the document.
        element = self.createElement(token)
        self.openElements.append(element)
        self.document.appendChild(element)

    def insertDoctype(self, token):
        name = token["name"]
        publicId = token["publicId"]
        systemId = token["systemId"]
        doctype = self.doctypeClass(name, publicId, systemId)
        self.document.appendChild(doctype)

    def insertComment(self, token, parent=None):
        # Default parent is the current node.
        if parent is None:
            parent = self.openElements[-1]
        parent.appendChild(self.commentClass(token["data"]))

    def createElement(self, token):
        """Create an element but don't insert it anywhere"""
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        return element

    def _getInsertFromTable(self):
        return self._insertFromTable

    def _setInsertFromTable(self, value):
        """Switch the function used to insert an element from the
        normal one to the misnested table one and back again"""
        self._insertFromTable = value
        if value:
            self.insertElement = self.insertElementTable
        else:
            self.insertElement = self.insertElementNormal

    # Assigning to insertFromTable swaps the insertElement implementation.
    insertFromTable = property(_getInsertFromTable, _setInsertFromTable)

    def insertElementNormal(self, token):
        # Create the element and append it to the current node.
        name = token["name"]
        namespace = token.get("namespace", self.defaultNamespace)
        element = self.elementClass(name, namespace)
        element.attributes = token["data"]
        self.openElements[-1].appendChild(element)
        self.openElements.append(element)
        return element

    def insertElementTable(self, token):
        """Create an element and insert it into the tree"""
        element = self.createElement(token)
        if self.openElements[-1].name not in tableInsertModeElements:
            return self.insertElementNormal(token)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging (foster parenting)
            parent, insertBefore = self.getTableMisnestedNodePosition()
            if insertBefore is None:
                parent.appendChild(element)
            else:
                parent.insertBefore(element, insertBefore)
            self.openElements.append(element)
        return element

    def insertText(self, data, parent=None):
        """Insert text data."""
        if parent is None:
            parent = self.openElements[-1]
        if (not self.insertFromTable or (self.insertFromTable and
                                         self.openElements[-1].name
                                         not in tableInsertModeElements)):
            parent.insertText(data)
        else:
            # We should be in the InTable mode. This means we want to do
            # special magic element rearranging
            parent, insertBefore = self.getTableMisnestedNodePosition()
            parent.insertText(data, insertBefore)

    def getTableMisnestedNodePosition(self):
        """Get the foster parent element, and sibling to insert before
        (or None) when inserting a misnested table node"""
        # The foster parent element is the one which comes before the most
        # recently opened table element
        # XXX - this is really inelegant
        lastTable = None
        fosterParent = None
        insertBefore = None
        for elm in self.openElements[::-1]:
            if elm.name == "table":
                lastTable = elm
                break
        if lastTable:
            # XXX - we should really check that this parent is actually a
            # node here
            if lastTable.parent:
                fosterParent = lastTable.parent
                insertBefore = lastTable
            else:
                # Detached table: foster into the element just below it
                # on the stack.
                fosterParent = self.openElements[
                    self.openElements.index(lastTable) - 1]
        else:
            fosterParent = self.openElements[0]
        return fosterParent, insertBefore

    def generateImpliedEndTags(self, exclude=None):
        # Recursively pop elements whose end tags are implied, except
        # for ``exclude``.
        name = self.openElements[-1].name
        # XXX td, th and tr are not actually needed
        if (name in frozenset(("dd", "dt", "li", "p", "td", "th", "tr"))
            and name != exclude):
            self.openElements.pop()
            # XXX This is not entirely what the specification says. We should
            # investigate it more closely.
            self.generateImpliedEndTags(exclude)

    def getDocument(self):
        "Return the final tree"
        return self.document

    def getFragment(self):
        "Return the final fragment"
        # assert self.innerHTML
        fragment = self.fragmentClass()
        self.openElements[0].reparentChildren(fragment)
        return fragment

    def testSerializer(self, node):
        """Serialize the subtree of node in the format required by unit tests
        node - the node from which to start serializing"""
        raise NotImplementedError
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/treebuilders/_base.py
|
_base.py
|
import _base
from amara.thirdparty.html5lib.constants import voidElements, namespaces, prefixes
from xml.sax.saxutils import escape
# Really crappy basic implementation of a DOM-core like thing
class Node(_base.Node):
    """Really crappy basic implementation of a DOM-core like thing.

    Concrete simple-tree node; adjacent text node values are merged on
    insertion so text is never fragmented.
    """
    # Numeric node-type tag; subclasses override with their own value.
    type = -1

    def __init__(self, name):
        self.name = name
        self.parent = None
        self.value = None
        self.childNodes = []
        self._flags = []

    def __iter__(self):
        # Depth-first, pre-order traversal of all descendants.
        for node in self.childNodes:
            yield node
            for item in node:
                yield item

    def __unicode__(self):
        return self.name

    def toxml(self):
        raise NotImplementedError

    def printTree(self, indent=0):
        # Indented debug rendering of this subtree.
        tree = '\n|%s%s' % (' ' * indent, unicode(self))
        for child in self.childNodes:
            tree += child.printTree(indent + 2)
        return tree

    def appendChild(self, node):
        # Merge a trailing TextNode into the previous TextNode rather
        # than keeping two adjacent text nodes.
        if (isinstance(node, TextNode) and self.childNodes and
            isinstance(self.childNodes[-1], TextNode)):
            self.childNodes[-1].value += node.value
        else:
            self.childNodes.append(node)
        node.parent = self

    def insertText(self, data, insertBefore=None):
        if insertBefore is None:
            self.appendChild(TextNode(data))
        else:
            self.insertBefore(TextNode(data), insertBefore)

    def insertBefore(self, node, refNode):
        # Raises ValueError (via list.index) if refNode is not a child.
        index = self.childNodes.index(refNode)
        # Merge with the preceding TextNode when both are text.
        if (isinstance(node, TextNode) and index > 0 and
            isinstance(self.childNodes[index - 1], TextNode)):
            self.childNodes[index - 1].value += node.value
        else:
            self.childNodes.insert(index, node)
        node.parent = self

    def removeChild(self, node):
        try:
            self.childNodes.remove(node)
        except:
            # XXX re-raised unchanged; the try/except only marks the spot.
            raise
        node.parent = None

    def cloneNode(self):
        raise NotImplementedError

    def hasContent(self):
        """Return true if the node has children or text"""
        return bool(self.childNodes)

    def getNameTuple(self):
        # Fix: compare to None with ``is`` (identity), not ``==``.
        # A None namespace means the plain-HTML namespace.
        if self.namespace is None:
            return namespaces["html"], self.name
        else:
            return self.namespace, self.name

    nameTuple = property(getNameTuple)
class Document(Node):
    """Root document node; serializes as the concatenation of its children."""
    type = 1

    def __init__(self):
        Node.__init__(self, None)

    def __unicode__(self):
        return "#document"

    def appendChild(self, child):
        Node.appendChild(self, child)

    def toxml(self, encoding="utf-8"):
        # BUGFIX: the default encoding was the typo "utf=8", which is not a
        # valid codec name, so toxml() with no argument raised LookupError.
        # "utf-8" matches the default used by hilite() below.
        result = ""
        for child in self.childNodes:
            result += child.toxml()
        return result.encode(encoding)

    def hilite(self, encoding="utf-8"):
        # Syntax-highlighted (HTML) rendering of the document, wrapped in <pre>.
        result = "<pre>"
        for child in self.childNodes:
            result += child.hilite()
        return result.encode(encoding) + "</pre>"

    def printTree(self):
        # Unit-test serialization: "#document" followed by the child subtrees.
        tree = unicode(self)
        for child in self.childNodes:
            tree += child.printTree(2)
        return tree

    def cloneNode(self):
        # A shallow clone: children are not copied.
        return Document()
class DocumentFragment(Document):
    """A document fragment; behaves like Document but serializes its
    test-format name as #document-fragment."""
    type = 2

    def __unicode__(self):
        return "#document-fragment"

    def cloneNode(self):
        # Shallow clone: children are not copied.
        return DocumentFragment()
class DocumentType(Node):
    """A doctype node carrying the doctype name plus public/system ids."""
    type = 3

    def __init__(self, name, publicId, systemId):
        Node.__init__(self, name)
        self.publicId = publicId
        self.systemId = systemId

    def __unicode__(self):
        # Include the public/system ids (as quoted strings, empty when
        # missing) only when at least one of them is set.
        if self.publicId or self.systemId:
            publicId = self.publicId or ""
            systemId = self.systemId or ""
            return """<!DOCTYPE %s "%s" "%s">"""%(
                self.name, publicId, systemId)
        else:
            return u"<!DOCTYPE %s>" % self.name

    # XML serialization is identical to the unicode rendering above.
    toxml = __unicode__

    def hilite(self):
        return '<code class="markup doctype"><!DOCTYPE %s></code>' % self.name

    def cloneNode(self):
        return DocumentType(self.name, self.publicId, self.systemId)
class TextNode(Node):
    """A text node; `value` holds the character data."""
    type = 4

    def __init__(self, value):
        Node.__init__(self, None)
        self.value = value

    def __unicode__(self):
        # Unit-test format renders text wrapped in double quotes.
        return u"\"%s\"" % self.value

    def toxml(self):
        # XML-escape the character data on serialization.
        return escape(self.value)

    # Highlighted output is the same escaped text.
    hilite = toxml

    def cloneNode(self):
        return TextNode(self.value)
class Element(Node):
    """An element node: name, optional namespace URI, attributes, children."""
    type = 5

    def __init__(self, name, namespace=None):
        Node.__init__(self, name)
        # namespace is a URI string or None (None is treated as the HTML
        # namespace by Node.getNameTuple).
        self.namespace = namespace
        self.attributes = {}

    def __unicode__(self):
        if self.namespace is None:
            return u"<%s>" % self.name
        else:
            return u"<%s %s>"%(prefixes[self.namespace], self.name)

    def toxml(self):
        result = '<' + self.name
        if self.attributes:
            for name,value in self.attributes.iteritems():
                # BUGFIX: double quotes inside an attribute value must be
                # escaped as &quot; -- the original mapped '"' to itself,
                # a no-op that produced malformed attribute serialization.
                result += u' %s="%s"' % (name, escape(value, {'"': '&quot;'}))
        if self.childNodes:
            result += '>'
            for child in self.childNodes:
                result += child.toxml()
            result += u'</%s>' % self.name
        else:
            # No children: use a self-closing tag.
            result += u'/>'
        return result

    def hilite(self):
        # BUGFIX: the literal '<' of the displayed tag must be HTML-escaped
        # (&lt;) so the highlighted markup is not itself parsed as a tag,
        # matching the upstream html5lib simpletree implementation.
        result = '&lt;<code class="markup element-name">%s</code>' % self.name
        if self.attributes:
            for name, value in self.attributes.iteritems():
                # BUGFIX: same &quot; escaping as in toxml above.
                result += ' <code class="markup attribute-name">%s</code>=<code class="markup attribute-value">"%s"</code>' % (name, escape(value, {'"': '&quot;'}))
        if self.childNodes:
            result += ">"
            for child in self.childNodes:
                result += child.hilite()
        elif self.name in voidElements:
            # Void elements have no closing tag.
            return result + ">"
        return result + '&lt;/<code class="markup element-name">%s</code>>' % self.name

    def printTree(self, indent):
        # Unit-test serialization: the element line, then one line per
        # attribute, then the child subtrees, all indented two extra spaces.
        tree = '\n|%s%s' % (' '*indent, unicode(self))
        indent += 2
        if self.attributes:
            for name, value in self.attributes.iteritems():
                # Namespaced attribute keys are (namespace, name) tuples.
                if isinstance(name, tuple):
                    name = "%s %s"%(name[0], name[1])
                tree += '\n|%s%s="%s"' % (' ' * indent, name, value)
        for child in self.childNodes:
            tree += child.printTree(indent)
        return tree

    def cloneNode(self):
        # Shallow clone: attributes are copied, children are not.
        newNode = Element(self.name)
        if hasattr(self, 'namespace'):
            newNode.namespace = self.namespace
        for attr, value in self.attributes.iteritems():
            newNode.attributes[attr] = value
        return newNode
class CommentNode(Node):
    """A comment node; `data` holds the comment text."""
    type = 6

    def __init__(self, data):
        Node.__init__(self, None)
        self.data = data

    def __unicode__(self):
        return "<!-- %s -->" % self.data

    def toxml(self):
        # NOTE(review): `data` is not escaped here, so data containing "--"
        # would yield ill-formed XML -- confirm this is acceptable upstream.
        return "<!--%s-->" % self.data

    def hilite(self):
        return '<code class="markup comment"><!--%s--></code>' % escape(self.data)

    def cloneNode(self):
        return CommentNode(self.data)
class TreeBuilder(_base.TreeBuilder):
    """Treebuilder wiring the simpletree node classes into the base builder."""
    documentClass = Document
    doctypeClass = DocumentType
    elementClass = Element
    commentClass = CommentNode
    fragmentClass = DocumentFragment

    def testSerializer(self, node):
        # simpletree nodes render the unit-test format themselves.
        return node.printTree()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/treebuilders/simpletree.py
|
simpletree.py
|
# Cache of already-constructed TreeBuilder classes, keyed by tree type name.
treeBuilderCache = {}

def getTreeBuilder(treeType, implementation=None, **kwargs):
    """Get a TreeBuilder class for various types of tree with built-in support

    treeType - the name of the tree type required (case-insensitive). Supported
               values are "simpletree", "dom", "etree" and "beautifulsoup"

               "simpletree" - a built-in DOM-ish tree type with support for some
                              more pythonic idioms.
               "dom" - A generic builder for DOM implementations, defaulting to
                       a xml.dom.minidom based implementation for the sake of
                       backwards compatibility (as releases up until 0.10 had a
                       builder called "dom" that was a minidom implementation).
               "etree" - A generic builder for tree implementations exposing an
                         elementtree-like interface (known to work with
                         ElementTree, cElementTree and lxml.etree).
               "beautifulsoup" - Beautiful soup (if installed)

    implementation - (Currently applies to the "etree" and "dom" tree types). A
                     module implementing the tree type e.g.
                     xml.etree.ElementTree or lxml.etree.

    Raises ValueError for an unrecognised tree type name."""
    treeType = treeType.lower()
    if treeType not in treeBuilderCache:
        if treeType == "dom":
            import dom
            # XXX: Keep backwards compatibility by using minidom if no
            # implementation is given
            if implementation is None:
                from xml.dom import minidom
                implementation = minidom
            # XXX: NEVER cache here, caching is done in the dom submodule
            return dom.getDomModule(implementation, **kwargs).TreeBuilder
        elif treeType == "simpletree":
            import simpletree
            treeBuilderCache[treeType] = simpletree.TreeBuilder
        elif treeType == "beautifulsoup":
            import soup
            treeBuilderCache[treeType] = soup.TreeBuilder
        elif treeType == "lxml":
            import etree_lxml
            treeBuilderCache[treeType] = etree_lxml.TreeBuilder
        elif treeType == "etree":
            # Come up with a sane default, preferring the C-accelerated
            # ElementTree variants where available
            if implementation is None:
                try:
                    import xml.etree.cElementTree as ET
                except ImportError:
                    try:
                        import xml.etree.ElementTree as ET
                    except ImportError:
                        try:
                            import cElementTree as ET
                        except ImportError:
                            import elementtree.ElementTree as ET
                implementation = ET
            import etree
            # NEVER cache here, caching is done in the etree submodule
            return etree.getETreeModule(implementation, **kwargs).TreeBuilder
        else:
            raise ValueError("""Unrecognised treebuilder "%s" """%treeType)
    return treeBuilderCache.get(treeType)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/thirdparty/html5lib/treebuilders/__init__.py
|
__init__.py
|
Expat, Release 2.0.0
This is Expat, a C library for parsing XML, written by James Clark.
Expat is a stream-oriented XML parser. This means that you register
handlers with the parser before starting the parse. These handlers
are called when the parser discovers the associated structures in the
document being parsed. A start tag is an example of the kind of
structures for which you may register handlers.
Windows users should use the expat_win32bin package, which includes
both precompiled libraries and executables, and source code for
developers.
Expat is free software. You may copy, distribute, and modify it under
the terms of the License contained in the file COPYING distributed
with this package. This license is the same as the MIT/X Consortium
license.
Versions of Expat that have an odd minor version (the middle number in
the release above), are development releases and should be considered
as beta software. Releases with even minor version numbers are
intended to be production grade software.
If you are building Expat from a check-out from the CVS repository,
you need to run a script that generates the configure script using the
GNU autoconf and libtool tools. To do this, you need to have
autoconf 2.52 or newer and libtool 1.4 or newer. Run the script like
this:
./buildconf.sh
Once this has been done, follow the same instructions as for building
from a source distribution.
To build Expat from a source distribution, you first run the
configuration shell script in the top level distribution directory:
./configure
There are many options which you may provide to configure (which you
can discover by running configure with the --help option). But the
one of most interest is the one that sets the installation directory.
By default, the configure script will set things up to install
libexpat into /usr/local/lib, expat.h into /usr/local/include, and
xmlwf into /usr/local/bin. If, for example, you'd prefer to install
into /home/me/mystuff/lib, /home/me/mystuff/include, and
/home/me/mystuff/bin, you can tell configure about that with:
./configure --prefix=/home/me/mystuff
Another interesting option is to enable 64-bit integer support for
line and column numbers and the over-all byte index:
./configure CPPFLAGS=-DXML_LARGE_SIZE
After running the configure script, the "make" command will build
things and "make install" will install things into their proper
location. Have a look at the "Makefile" to learn about additional
"make" options. Note that you need to have write permission into
the directories into which things will be installed.
If you are interested in building Expat to provide document
information in UTF-16 rather than the default UTF-8, follow these
instructions:
1. For UTF-16 output as unsigned short (and version/error
strings as char), run:
./configure CPPFLAGS=-DXML_UNICODE
For UTF-16 output as wchar_t (incl. version/error strings),
run:
./configure CFLAGS="-g -O2 -fshort-wchar" \
CPPFLAGS=-DXML_UNICODE_WCHAR_T
2. Edit the Makefile, changing:
LIBRARY = libexpat.la
to:
LIBRARY = libexpatw.la
(Note the additional "w" in the library name.)
3. Run "make buildlib" (which builds the library only).
4. Run "make installlib" (which installs the library only).
Note for Solaris users: The "ar" command is usually located in
"/usr/ccs/bin", which is not in the default PATH. You will need to
add this to your path for the "make" command, and probably also switch
to GNU make (the "make" found in /usr/ccs/bin does not seem to work
properly -- apparently it does not understand .PHONY directives). If
you're using ksh or bash, use this command to build:
PATH=/usr/ccs/bin:$PATH make
When using Expat with a project using autoconf for configuration, you
can use the probing macro in conftools/expat.m4 to determine how to
include Expat. See the comments at the top of that file for more
information.
A reference manual is available in the file doc/reference.html in this
distribution.
The homepage for this project is http://www.libexpat.org/. There
are links there to connect you to the bug reports page. If you need
to report a bug when you don't have access to a browser, you may also
send a bug report by email to [email protected].
Discussion related to the direction of future expat development takes
place on [email protected]. Archives of this list and
other Expat-related lists may be found at:
http://mail.libexpat.org/mailman/listinfo/
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/src/expat/README
|
README
|
cDomlette - technical details
=============================
Overview
--------
cDomlette is a DOM-like interface for Python implemented in C.
Features
--------
* DOM-based node creation and manipulation API
* Expat parser for creating cDomlette documents from XML text
- XInclude processing (http://www.w3.org/TR/xinclude)
* Support for xml:base (http://www.w3.org/TR/xmlbase)
* string pooling (using intern semantics) for efficiency
The state machine
-----------------
The Expat parser uses an explicit state machine in its operation. The
core of the state machine is in state_machine.c and state_machine.h.
Character encoding notes
------------------------
Domlette registers an unknown-encoding handler with Expat in order to
process documents that are in encodings other than those that Expat
supports natively (UTF-16, UTF-16LE, UTF-16BE, UTF-8, ISO-8859-1, and
US-ASCII). It uses Python's codecs.lookup(). Unfortunately, not all
encodings supported by Python are supported by Expat. In particular,
the encodings must meet the following criteria:
- The encoding must not be stateful (i.e., each unique byte sequence
must represent the same character every time).
- Every ASCII character that can appear in a well-formed XML document,
other than the characters $@\^`{}~ must be represented by a single
byte, and that byte must be the same byte that represents that
character in ASCII.
- No character may require more than 4 bytes to encode.
- All characters encoded must have Unicode scalar values <= 0xFFFF,
(i.e., characters that would be encoded by surrogates in
UTF-16 are not allowed). Note that this restriction doesn't
apply to the built-in support for UTF-8 and UTF-16.
- No Unicode character may be encoded by more than one distinct
sequence of bytes.
NOTE: this is subject to change after Expat 2.0 is released.
Other parsers that we considered using have constraints of their own.
libxml doesn't support stateful encodings unless iconv is used (which
is an issue for Windows). libxml and Xerces-C both expect the codec
to differentiate between incomplete sequence and invalid sequence
(Python doesn't).
In order to support multi-byte encodings, the most complete solution
would be to use an additional codecs package which has an API that
more closely matches what XML parsers are looking for. To that end,
there are two good options (probably more but these are well
supported): IBM's ICU and GNU's iconv/libiconv. As we would be
providing all the encodings which would be available to the XML
parser, ICU is more appealing, as its license is an "X License",
whereas libiconv is LGPL. ICU considers our target platforms to be
"reference" platforms, which is something that cannot be said of
libiconv. No decisions along these lines have been made yet.
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/src/domlette/README
|
README
|
from amara.lib.xmlstring import splitqname
from amara.xpath import datatypes
from amara.xupdate import XUpdateError, xupdate_primitive
__all__ = [
'element_instruction', 'attribute_instruction', 'text_instruction',
'processing_instruction_instruction', 'comment_instruction',
'value_of_instruction', 'literal_element'
]
class xupdate_instruction(xupdate_primitive):
    """Base class for all XUpdate template instructions."""
    pass
class element_instruction(xupdate_instruction):
    """`xupdate:element` instruction: creates an element in the output."""
    __slots__ = ('namespaces', 'name', 'namespace')

    def __init__(self, namespaces, name, namespace=None):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `name` attribute (expression yielding the element QName)
        self.name = name
        # optional `namespace` attribute
        self.namespace = namespace
        return

    def __repr__(self):
        return '<element name=%s, namespace=%s, children=%s>' % (
            self.name, self.namespace, xupdate_instruction.__repr__(self))

    def instantiate(self, context):
        """Emit the element (and its instantiated children) to the context."""
        context.namespaces = self.namespaces
        name = self.name.evaluate_as_string(context)
        if self.namespace:
            namespace = self.namespace.evaluate_as_string(context)
        else:
            # No explicit namespace: resolve the QName prefix against the
            # in-scope namespace declarations.
            prefix, local = splitqname(name)
            try:
                namespace = self.namespaces[prefix]
            except KeyError:
                if not prefix:
                    prefix = '#default'
                raise XUpdateError(XUpdateError.UNDEFINED_PREFIX,
                                   prefix=prefix)
        context.start_element(name, namespace)
        for child in self:
            child.instantiate(context)
        context.end_element(name, namespace)
        return
class attribute_instruction(xupdate_instruction):
    """`xupdate:attribute` instruction: creates an attribute in the output."""
    __slots__ = ('namespaces', 'name', 'namespace')

    def __init__(self, namespaces, name, namespace=None):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `name` attribute
        self.name = name
        # optional `namespace` attribute
        self.namespace = namespace
        return

    def __repr__(self):
        return '<attribute name=%s, namespace=%s, children=%s>' % (
            self.name, self.namespace, xupdate_instruction.__repr__(self))

    def instantiate(self, context):
        """Emit the attribute to the context, its value being the string
        result of instantiating the children."""
        context.namespaces = self.namespaces
        name = self.name.evaluate_as_string(context)
        if self.namespace:
            namespace = self.namespace.evaluate_as_string(context)
        else:
            # Unlike elements, an unprefixed attribute is in no namespace.
            prefix, local = splitqname(name)
            if prefix:
                try:
                    namespace = self.namespaces[prefix]
                except KeyError:
                    raise XUpdateError(XUpdateError.UNDEFINED_PREFIX,
                                       prefix=prefix)
            else:
                namespace = None
        # Collect the attribute value from the children as a string.
        context.push_string_writer(errors=False)
        for child in self:
            child.instantiate(context)
        writer = context.pop_writer()
        context.attribute(name, writer.get_result(), namespace)
        return
class text_instruction(xupdate_instruction):
    """`xupdate:text` instruction: emits its literal text content."""

    def __repr__(self):
        return '<text children=%s>' % xupdate_instruction.__repr__(self)

    def instantiate(self, context):
        # The single child is the literal text to emit.
        context.text(self[0])
class processing_instruction_instruction(xupdate_instruction):
    """`xupdate:processing-instruction` instruction: emits a PI node."""
    __slots__ = ('namespaces', 'name',)

    def __init__(self, namespaces, name):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `name` attribute (expression yielding the PI target)
        self.name = name
        return

    def __repr__(self):
        return '<processing-instruction name=%s, children=%s>' % (
            self.name, xupdate_instruction.__repr__(self))

    def instantiate(self, context):
        context.namespaces = self.namespaces
        name = self.name.evaluate_as_string(context)
        # Collect the PI data from the children as a string.
        context.push_string_writer(errors=False)
        for child in self:
            child.instantiate(context)
        writer = context.pop_writer()
        context.processing_instruction(name, writer.get_result())
        return
class comment_instruction(xupdate_instruction):
    """`xupdate:comment` instruction: emits a comment node."""

    def __repr__(self):
        return '<comment children=%s>' % xupdate_instruction.__repr__(self)

    def instantiate(self, context):
        # Collect the comment data from the children as a string.
        context.push_string_writer(errors=False)
        for child in self:
            child.instantiate(context)
        writer = context.pop_writer()
        context.comment(writer.get_result())
        return
class value_of_instruction(xupdate_instruction):
    """`xupdate:value-of` instruction: copies the value of an expression."""
    __slots__ = ('namespaces', 'select',)

    def __init__(self, namespaces, select):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select
        return

    def __repr__(self):
        return '<value-of select=%s>' % (self.select,)

    def instantiate(self, context):
        context.namespaces = self.namespaces
        result = self.select.evaluate(context)
        if isinstance(result, datatypes.nodeset):
            # Node-sets are copied node by node into the output.
            for node in result:
                context.copy_node(node)
        else:
            # Anything else is converted to a string and emitted as text.
            context.text(datatypes.string(result))
class literal_element(xupdate_instruction):
    """A literal result element copied from the XUpdate document."""
    __slots__ = ('name', 'namespace', 'attributes')

    def __init__(self, name, namespace, attributes):
        self.name = name
        self.namespace = namespace
        # sequence of (namespace, name, value) triples
        self.attributes = attributes

    def __repr__(self):
        return '<literal name=%s, namespace=%s, attributes=%s, children=%s>' % (
            self.name, self.namespace, self.attributes,
            xupdate_instruction.__repr__(self))

    def instantiate(self, context):
        # Emit the element with its stored attributes, then its children.
        context.start_element(self.name, self.namespace)
        for namespace, name, value in self.attributes:
            context.attribute(name, value, namespace)
        for child in self:
            child.instantiate(context)
        context.end_element(self.name, self.namespace)
        return
class literal_text(unicode):
    """Literal character data; a unicode subclass that instantiates itself
    by emitting its own value as text."""

    def instantiate(self, context):
        context.text(self)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xupdate/instructions.py
|
instructions.py
|
from amara import tree
from amara.lib.xmlstring import splitqname
from amara.xpath import context
from amara.xupdate import XUpdateError, xupdate_primitive
__all__ = [
'insert_before_command', 'insert_after_command', 'append_command',
'update_command', 'rename_command', 'remove_command',
'variable_command', 'if_command'
]
# XQuery Update to XUpdate mapping
# upd:insertBefore xupdate:insert-before
# upd:insertAfter xupdate:insert-after
# upd:insertInto xupdate:append
# upd:insertIntoAsFirst xupdate:append (@child=1)
# upd:insertIntoAsLast xupdate:append
# upd:insertAttributes xupdate:append (content=xupdate:attribute)
# upd:delete xupdate:remove
# upd:replaceNode <pending>
# upd:replaceValue xupdate:update (@select=[attr,text,comment,pi])
# upd:replaceElementContent xupdate:update (@select=[element])
# upd:rename xupdate:rename
class modifications_list(xupdate_primitive):
    """Top-level sequence of XUpdate commands (xupdate:modifications)."""

    def apply_updates(self, document):
        """Run every contained command against `document`; the document is
        modified in place and returned."""
        ctx = context(document)
        for command in self:
            command.instantiate(ctx)
        return document

    def __repr__(self):
        return '<modifications: %s>' % list.__repr__(self)
class xupdate_command(xupdate_primitive):
    """Base class for all top-level XUpdate commands."""
    pass
class append_command(xupdate_command):
    """`xupdate:append` command: appends instantiated content to each
    selected target, optionally at a position given by `child`."""
    pipeline = 1
    __slots__ = ('namespaces', 'select', 'child')

    def __init__(self, namespaces, select, child=None):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select
        # optional `child` attribute (expression for the insertion position)
        self.child = child
        return

    def __repr__(self):
        return '<append: %s>' % xupdate_command.__repr__(self)

    def instantiate(self, context):
        context.namespaces = self.namespaces
        targets = self.select.evaluate_as_nodeset(context)
        if not targets:
            raise XUpdateError(XUpdateError.INVALID_SELECT)
        for target in targets:
            # Build the new content into a scratch tree.
            context.push_tree_writer(target.xml_base)
            for primitive in self:
                primitive.instantiate(context)
            writer = context.pop_writer()
            tr = writer.get_result()
            if self.child:
                # Evaluate `child` with the target as the context node to
                # determine the insertion point; the XPath focus is saved and
                # restored around the evaluation.
                focus = context.node, context.position, context.size
                try:
                    context.node = target
                    context.position = 1
                    size = context.size = len(target.xml_children)
                    position = int(self.child.evaluate_as_number(context))
                finally:
                    context.node, context.position, context.size = focus
                # NOTE(review): `position` appears to be used as a 0-based
                # child index here -- confirm against the XUpdate spec.
                if position < size:
                    refnode = target.xml_children[position]
                else:
                    # Past the end: fall back to a plain append.
                    refnode = None
            else:
                refnode = None
            # Move the generated children across one at a time.
            while tr.xml_first_child:
                if refnode is not None:
                    offset = target.xml_index(refnode)
                    target.xml_insert(offset, tr.xml_first_child)
                else:
                    target.xml_append(tr.xml_first_child)
        return
class rename_command(xupdate_command):
    """`xupdate:rename` command: renames each selected attribute, processing
    instruction or element to the QName produced by the command content."""
    pipeline = 1
    __slots__ = ('namespaces', 'select',)

    def __init__(self, namespaces, select):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select

    def __repr__(self):
        return '<rename: %s>' % xupdate_command.__repr__(self)

    def instantiate(self, context):
        # The new name is the string result of instantiating the content.
        context.push_string_writer(errors=False)
        for primitive in self:
            primitive.instantiate(context)
        writer = context.pop_writer()
        name = writer.get_result()
        prefix, local = splitqname(name)
        if prefix:
            namespace = self.namespaces[prefix]
        else:
            namespace = None
        context.namespaces = self.namespaces
        targets = self.select.evaluate_as_nodeset(context)
        if not targets:
            raise XUpdateError(XUpdateError.INVALID_SELECT)
        for target in targets:
            parent = target.xml_parent
            if target.xml_type == tree.attribute.xml_type:
                # NOTE(review): the old attribute is not removed here --
                # confirm whether the rename should also delete `target`.
                parent.xml_attributes[namespace, name] = target.xml_value
            elif target.xml_type == tree.processing_instruction.xml_type:
                pi = tree.processing_instruction(name, target.xml_data)
                # BUGFIX: the original read `parent.xml_replace-child(pi,
                # target)`, which parses as a subtraction involving the
                # undefined name `child` and raised NameError at runtime.
                # Use the same replace call as the element branch below.
                parent.xml_replace(target, pi)
            elif target.xml_type == tree.element.xml_type:
                #FIXME: Use regular constructor. No more DOM factory
                element = tree.element(namespace, name)
                # Copy any existing attributes to the newly created element
                if target.xml_attributes:
                    for (ns, qname), value in target.xml_attributes.iteritems():
                        element.xml_attributes[ns, qname] = value
                # Now copy any children as well
                while target.xml_first_child:
                    element.xml_append(target.xml_first_child)
                parent.xml_replace(target, element)
        return
class insert_before_command(xupdate_command):
    """`xupdate:insert-before` command: inserts instantiated content
    immediately before each selected node."""
    pipeline = 2
    __slots__ = ('namespaces', 'select',)

    def __init__(self, namespaces, select):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select
        return

    def __repr__(self):
        return '<insert-before: %s>' % xupdate_command.__repr__(self)

    def instantiate(self, context):
        context.namespaces = self.namespaces
        for target in self.select.evaluate_as_nodeset(context):
            # Build the new content into a scratch tree.
            context.push_tree_writer(target.xml_base)
            for primitive in self:
                primitive.instantiate(context)
            writer = context.pop_writer()
            tr = writer.get_result()
            parent = target.xml_parent
            # Move the generated nodes in front of the target one at a time;
            # the target's index is recomputed as each node is inserted.
            while tr.xml_first_child:
                offset = parent.xml_index(target)
                parent.xml_insert(offset, tr.xml_first_child)
        return
class insert_after_command(xupdate_command):
    """`xupdate:insert-after` command: inserts instantiated content
    immediately after each selected node."""
    pipeline = 2
    __slots__ = ('namespaces', 'select',)

    def __init__(self, namespaces, select):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select
        return

    def __repr__(self):
        return '<insert-after: %s>' % xupdate_command.__repr__(self)

    def instantiate(self, context):
        context.namespaces = self.namespaces
        for target in self.select.evaluate_as_nodeset(context):
            # Build the new content into a scratch tree.
            context.push_tree_writer(target.xml_base)
            for primitive in self:
                primitive.instantiate(context)
            writer = context.pop_writer()
            # Renamed from `tree` to `tr`: the original local shadowed the
            # module-level `amara.tree` import; `tr` also matches the naming
            # used by the sibling commands in this module.
            tr = writer.get_result()
            parent = target.xml_parent
            # Inserting before the following sibling == inserting after the
            # target; no sibling means the target is last, so append.
            target = target.xml_following_sibling
            while tr.xml_first_child:
                if target is not None:
                    offset = parent.xml_index(target)
                    parent.xml_insert(offset, tr.xml_first_child)
                else:
                    parent.xml_append(tr.xml_first_child)
        return
class update_command(xupdate_command):
    """`xupdate:update` command: replaces the content of each selected node."""
    pipeline = 3
    __slots__ = ('namespaces', 'select',)

    def __init__(self, namespaces, select):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select
        return

    def __repr__(self):
        return '<update: %s>' % xupdate_command.__repr__(self)

    def instantiate(self, context):
        context.namespaces = self.namespaces
        targets = self.select.evaluate_as_nodeset(context)
        if not targets:
            raise XUpdateError(XUpdateError.INVALID_SELECT)
        for target in targets:
            if target.xml_type == tree.element.xml_type:
                # Element target: replace its children with the content
                # instantiated into a scratch tree.
                context.push_tree_writer(target.xml_base)
                for primitive in self:
                    primitive.instantiate(context)
                writer = context.pop_writer()
                tr = writer.get_result()
                while target.xml_first_child:
                    target.xml_remove(target.xml_first_child)
                while tr.xml_first_child:
                    target.xml_append(tr.xml_first_child)
            elif target.xml_type in (tree.attribute.xml_type,
                                     tree.text.xml_type,
                                     tree.comment.xml_type,
                                     tree.processing_instruction.xml_type):
                # Character-based target: replace its string value.
                context.push_string_writer(errors=False)
                for primitive in self:
                    primitive.instantiate(context)
                writer = context.pop_writer()
                value = writer.get_result()
                if not value and target.xml_type == tree.text.xml_type:
                    # An empty replacement removes the text node entirely.
                    target.xml_parent.xml_remove(target)
                else:
                    target.xml_value = value
        return
class remove_command(xupdate_command):
    """`xupdate:remove` command: deletes each node selected by `select`."""
    pipeline = 4
    __slots__ = ('namespaces', 'select',)

    def __init__(self, namespaces, select):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # required `select` attribute
        self.select = select
        return

    def __repr__(self):
        return '<remove select=%s>' % (self.select,)

    def instantiate(self, context):
        context.namespaces = self.namespaces
        for target in self.select.evaluate_as_nodeset(context):
            parent = target.xml_parent
            # Nodes without a parent (e.g. the document root) are skipped.
            if parent:
                if target.xml_type == tree.attribute.xml_type:
                    # Attributes live in the parent's attribute map.
                    del parent.xml_attributes[target]
                else:
                    parent.xml_remove(target)
        return
class variable_command(xupdate_command):
    """`xupdate:variable` command: binds a value to a variable name."""
    __slots__ = ('namespaces', 'name', 'select')

    def __init__(self, namespaces, name, select=None):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # require `name` attribute
        self.name = name
        # optional `select` attribute
        self.select = select
        return

    def __repr__(self):
        return '<variable: %s>' % xupdate_command.__repr__(self)

    def instantiate(self, context):
        if self.select:
            # Value comes from evaluating the XPath expression.
            context.namespaces = self.namespaces
            value = self.select.evaluate(context)
        else:
            # No select: value is the string result of the content.
            context.push_string_writer(errors=False)
            for primitive in self:
                primitive.instantiate(context)
            writer = context.pop_writer()
            value = writer.get_result()
        context.variables[self.name] = value
        return
class if_command(xupdate_command):
    """`xupdate:if` command: holds a test expression plus nested commands."""
    __slots__ = ('namespaces', 'test',)

    def __init__(self, namespaces, test):
        # save in-scope namespaces for XPath
        self.namespaces = namespaces
        # require `test` attribute
        self.test = test
        return

    def __repr__(self):
        return '<if: %s>' % xupdate_command.__repr__(self)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xupdate/commands.py
|
commands.py
|
import operator
from amara import XML_NAMESPACE
from amara._expat import ContentModel, Handler, Reader
from amara.lib.xmlstring import splitqname, isqname
from amara.xpath import XPathError
from amara.xpath.parser import parse as parse_expression
from amara.xupdate import XUpdateError, XUPDATE_NAMESPACE
from amara.xupdate import commands, instructions
from amara.xupdate.expressions import *
# -- validation models -------------------------------------------------
def qname_type(namespace, name):
    """Build a name-test ContentModel for the given namespace and QName.

    The QName's prefix is discarded: matching is on (namespace, local-name),
    while the full QName is kept as the human-readable label.
    """
    prefix, local = splitqname(name)
    return ContentModel(ContentModel.TYPE_NAME, (namespace, local), label=name)
# Sentinel event marking the end of an element's content.
_end_event = ContentModel.FINAL_EVENT

# Document model: a single xupdate:modifications document element.
_document_content = qname_type(XUPDATE_NAMESPACE, 'xupdate:modifications')
_document_model = _document_content.compile()

# Model for elements that must be empty.
_empty_event = '/empty/'
_empty_content = ContentModel(ContentModel.TYPE_NAME, _empty_event,
                              ContentModel.QUANT_REP, label='/empty/')
_empty_model = _empty_content.compile()

# Model for text-only content.
_text_event = '#PCDATA'
_text_content = ContentModel(ContentModel.TYPE_NAME, _text_event,
                             ContentModel.QUANT_REP, label='#PCDATA')
_text_model = _text_content.compile()

# Wildcard for literal (non-XUpdate) result elements.
_literal_event = (None, None)
_literal_content = ContentModel(ContentModel.TYPE_NAME, _literal_event,
                                ContentModel.QUANT_REP,
                                label='/literal-elements/')

# "Char template": text plus the character-producing instructions.
_char_template_content = (
    _text_content,
    #qname_type(XUPDATE_NAMESPACE, 'xupdate:if'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:text'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:value-of'),
    )
_char_template_content = ContentModel(ContentModel.TYPE_ALT,
                                      _char_template_content,
                                      ContentModel.QUANT_REP,
                                      label='/char-template/')
_char_template_model = _char_template_content.compile()

# Full template: the char template plus node-producing instructions and
# literal result elements.
_template_content = (
    _char_template_content,
    qname_type(XUPDATE_NAMESPACE, 'xupdate:element'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:attribute'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:processing-instruction'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:comment'),
    _literal_content,
    )
_template_content = ContentModel(ContentModel.TYPE_ALT, _template_content,
                                 ContentModel.QUANT_REP, label='/template/')
_template_model = _template_content.compile()

# Allowed children of xupdate:modifications.
_toplevel_content = (
    qname_type(XUPDATE_NAMESPACE, 'xupdate:insert-before'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:insert-after'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:append'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:update'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:remove'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:rename'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:variable'),
    qname_type(XUPDATE_NAMESPACE, 'xupdate:if'),
    _literal_content)
_toplevel_content = ContentModel(ContentModel.TYPE_ALT, _toplevel_content,
                                 ContentModel.QUANT_REP,
                                 label='/top-level-elements/')
_toplevel_model = _toplevel_content.compile()
# -- element types -----------------------------------------------------
_elements = {}
def autodispatch(element, model):
"""Decorator for adding content-model information"""
def decorator(func):
_elements[element] = (func, model)
return func
return decorator
@autodispatch('modifications', _toplevel_model)
def modifications_element(tagname, namespaces, attributes):
    """Build the top-level modifications list from an xupdate:modifications
    element, validating its required `version` attribute."""
    if (None, 'version') not in attributes:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='version')
    version = attributes[None, 'version']
    # only XUpdate 1.0 documents are supported
    if version != '1.0':
        raise XUpdateError(XUpdateError.UNSUPPORTED_VERSION,
                           version=version)
    return commands.modifications_list()
@autodispatch('insert-before', _template_model)
def insert_before_element(tagname, namespaces, attributes):
    """Build an insert-before command from a parsed xupdate:insert-before
    element; `select` is required and compiled to an XPath expression."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    return commands.insert_before_command(namespaces, select)
@autodispatch('insert-after', _template_model)
def insert_after_element(tagname, namespaces, attributes):
    """Build an insert-after command from a parsed xupdate:insert-after
    element; `select` is required and compiled to an XPath expression."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    return commands.insert_after_command(namespaces, select)
@autodispatch('append', _template_model)
def append_element(tagname, namespaces, attributes):
    """Build an append command from a parsed xupdate:append element;
    `select` is required, `child` (insertion position) is optional, and
    both are compiled to XPath expressions."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    # optional `child` attribute
    if (None, 'child') in attributes:
        child = attributes[None, 'child']
        try:
            child = parse_expression(child)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=child, text=str(error))
    else:
        child = None
    return commands.append_command(namespaces, select, child)
@autodispatch('update', _char_template_model)
def update_element(tagname, namespaces, attributes):
    """Factory for `xupdate:update`; requires a `select` XPath."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    return commands.update_command(namespaces, select)
@autodispatch('remove', _empty_model)
def remove_element(tagname, namespaces, attributes):
    """Factory for `xupdate:remove`; requires a `select` XPath."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    return commands.remove_command(namespaces, select)
@autodispatch('rename', _char_template_model)
def rename_element(tagname, namespaces, attributes):
    """Factory for `xupdate:rename`; requires a `select` XPath."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    return commands.rename_command(namespaces, select)
@autodispatch('variable', _template_model)
def variable_element(tagname, namespaces, attributes):
    """Factory for `xupdate:variable`; requires a QName-valued `name`
    attribute and accepts an optional `select` XPath expression."""
    # required `name` attribute
    try:
        name = attributes[None, 'name']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='name')
    else:
        if not isqname(name):
            raise XUpdateError(XUpdateError.INVALID_QNAME_ATTR,
                               attribute='name', value=name)
        prefix, name = splitqname(name)
        if prefix:
            # resolve the prefix against the in-scope namespaces
            try:
                namespace = namespaces[prefix]
            except KeyError:
                raise XUpdateError(XUpdateError.UNDEFINED_PREFIX,
                                   prefix=prefix)
        else:
            namespace = None
        # the variable name is stored as an expanded-name pair
        name = (namespace, name)
    # optional `select` attribute
    if (None, 'select') in attributes:
        select = attributes[None, 'select']
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    else:
        select = None
    return commands.variable_command(namespaces, name, select)
@autodispatch('if', _toplevel_model)
def if_element(tagname, namespaces, attributes):
    """Factory for `xupdate:if`; requires a `test` XPath expression."""
    # required `test` attribute
    try:
        test = attributes[None, 'test']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='test')
    else:
        try:
            test = parse_expression(test)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=test, text=str(error))
    return commands.if_command(namespaces, test)
@autodispatch('element', _template_model)
def element_element(tagname, namespaces, attributes):
    """Factory for `xupdate:element`; requires a `name` AVT and accepts
    an optional `namespace` AVT."""
    # required `name` attribute
    try:
        name = attributes[None, 'name']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='name')
    else:
        name = qname_avt('name', name)
    # optional `namespace` attribute
    if (None, 'namespace') in attributes:
        namespace = namespace_avt(attributes[None, 'namespace'])
    else:
        namespace = None
    return instructions.element_instruction(namespaces, name, namespace)
@autodispatch('attribute', _char_template_model)
def attribute_element(tagname, namespaces, attributes):
    """Factory for `xupdate:attribute`; requires a `name` AVT and accepts
    an optional `namespace` AVT."""
    # required `name` attribute
    try:
        name = attributes[None, 'name']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='name')
    else:
        name = qname_avt('name', name)
    # optional `namespace` attribute
    if (None, 'namespace') in attributes:
        namespace = namespace_avt(attributes[None, 'namespace'])
    else:
        namespace = None
    return instructions.attribute_instruction(namespaces, name, namespace)
@autodispatch('text', _text_model)
def text_element(tagname, namespaces, attributes):
    """Factory for `xupdate:text`; takes no attributes."""
    return instructions.text_instruction()
@autodispatch('processing-instruction', _char_template_model)
def processing_instruction_element(tagname, namespaces, attributes):
    """Factory for `xupdate:processing-instruction`; requires an NCName
    `name` AVT for the PI target."""
    # required `name` attribute
    try:
        name = attributes[None, 'name']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='name')
    else:
        name = ncname_avt('name', name)
    return instructions.processing_instruction_instruction(namespaces, name)
@autodispatch('comment', _char_template_model)
def comment_element(tagname, namespaces, attributes):
    """Factory for `xupdate:comment`; takes no attributes."""
    return instructions.comment_instruction()
@autodispatch('value-of', _empty_model)
def value_of_element(tagname, namespaces, attributes):
    """Factory for `xupdate:value-of`; requires a `select` XPath."""
    # required `select` attribute
    try:
        select = attributes[None, 'select']
    except KeyError:
        raise XUpdateError(XUpdateError.MISSING_REQUIRED_ATTRIBUTE,
                           element=tagname, attribute='select')
    else:
        try:
            select = parse_expression(select)
        except XPathError, error:
            raise XUpdateError(XUpdateError.SYNTAX_ERROR,
                               expression=select, text=str(error))
    return instructions.value_of_instruction(namespaces, select)
class handler_state(object):
    """Per-element parse state kept on the xupdate_handler stack:
    the item under construction, the element's tag name, its in-scope
    namespaces, and its current content-model validation state."""
    __slots__ = ('item', 'name', 'namespaces', 'validation')
    def __init__(self, item, name, namespaces, validation):
        self.item, self.name = item, name
        self.namespaces, self.validation = namespaces, validation
class xupdate_handler(Handler):
    """SAX-style handler that builds the tree of XUpdate commands,
    validating element nesting against per-element content models."""
    def __init__(self):
        # accumulated top-level modification lists
        self._modifications = []
        # local-name -> (factory, content-model) registry
        self._dispatch = _elements
        # stack of handler_state, seeded with a synthetic document state
        self._state_stack = [
            handler_state(self._modifications, '#document',
                          {'xml': XML_NAMESPACE, None: None}, _document_model)
            ]
        self._push_state = self._state_stack.append
    modifications = property(operator.attrgetter('_modifications'))
    def start_element(self, expandedName, tagName, namespaces, attributes):
        parent_state = self._state_stack[-1]
        # update in-scope namespaces (copy-on-demand)
        if namespaces:
            inscope_namespaces = parent_state.namespaces.copy()
            inscope_namespaces.update(namespaces)
        else:
            inscope_namespaces = parent_state.namespaces
        # get the class defining this element
        namespace, local = expandedName
        if namespace == XUPDATE_NAMESPACE:
            try:
                factory, validation = self._dispatch[local]
            except KeyError:
                raise XUpdateError(XUpdateError.ILLEGAL_ELEMENT,
                                   element=tagName)
            else:
                item = factory(tagName, inscope_namespaces, attributes)
                validation_event = expandedName
        else:
            # non-XUpdate element: treated as a literal result element
            qname = attributes.getQNameByName
            attrs = [ (name[0], qname(name), attributes[name])
                      for name in attributes ]
            item = instructions.literal_element(tagName, namespace, attrs)
            validation = _template_model
            validation_event = _literal_event
        # verify that this element can be declared here
        try:
            next = parent_state.validation[validation_event]
        except KeyError:
            raise XUpdateError(XUpdateError.ILLEGAL_ELEMENT_CHILD,
                               element=parent_state.name, child=tagName)
        else:
            # save the new state for the next event check
            parent_state.validation = next
        new_state = handler_state(item, tagName, inscope_namespaces, validation)
        self._push_state(new_state)
        return
    def end_element(self, expandedName, tagName):
        current_state = self._state_stack[-1]
        del self._state_stack[-1]
        parent_state = self._state_stack[-1]
        item = current_state.item
        # verify that the element has all required content
        try:
            current_state.validation[_end_event]
        except KeyError:
            raise XUpdateError(XUpdateError.INCOMPLETE_ELEMENT,
                               element=tagName)
        ## let the XUpdate primitive perform any add'l setup, if needed
        #if item.has_setup:
        #    item.setup()
        # update parent state
        parent_state.item.append(item)
        return
    def characters(self, data):
        current_state = self._state_stack[-1]
        # verify that the current node can have text content
        is_whitespace = (data.strip() == '')
        if not is_whitespace:
            try:
                next = current_state.validation[_text_event]
            except KeyError:
                raise XUpdateError(XUpdateError.INVALID_TEXT,
                                   element=current_state.name)
            else:
                current_state.validation = next
        # whitespace-only text is preserved but not validated
        current_state.item.append(instructions.literal_text(data))
def parse(source):
    """Parse the XUpdate document in `source` and return the resulting
    top-level modifications list."""
    xupdate = xupdate_handler()
    Reader(xupdate).parse(source)
    return xupdate.modifications[0]
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xupdate/reader.py
|
reader.py
|
from amara import Error
__all__ = ['XUPDATE_NAMESPACE', 'XUpdateError']
XUPDATE_NAMESPACE = u'http://www.xmldb.org/xupdate'
class XUpdateError(Error):
    """
    Exception class for errors specific to XUpdate processing
    """
    SYNTAX_ERROR = 1
    ILLEGAL_ELEMENT = 2
    ILLEGAL_ELEMENT_CHILD = 3
    MISSING_REQUIRED_ATTRIBUTE = 4
    INVALID_TEXT = 5

    UNSUPPORTED_VERSION = 10
    INVALID_SELECT = 11
    UNDEFINED_PREFIX = 12
    # Raised by the reader when a QName-valued attribute (e.g. the `name`
    # of xupdate:variable) fails QName syntax. The reader already
    # referenced this code; it was previously undefined here, turning the
    # intended error into an AttributeError.
    INVALID_QNAME_ATTR = 13

    @classmethod
    def _load_messages(cls):
        from gettext import gettext as _
        return {
            XUpdateError.SYNTAX_ERROR: _(
                'Syntax error in expression %(expression)r: %(text)s'),
            XUpdateError.ILLEGAL_ELEMENT: _(
                "Illegal element '%(element)s' in XUpdate namespace"),
            XUpdateError.ILLEGAL_ELEMENT_CHILD: _(
                "Illegal child '%(child)s' within element '%(element)s'"),
            XUpdateError.MISSING_REQUIRED_ATTRIBUTE: _(
                "Element '%(element)s' missing required attribute "
                "'%(attribute)s'"),
            XUpdateError.INVALID_TEXT: _(
                "Character data not allowed in the content of element "
                "'%(element)s'"),
            # fixed: a stray doubled quote before %(version)s
            XUpdateError.UNSUPPORTED_VERSION: _(
                "XUpdate version '%(version)s' unsupported by this "
                "implementation"),
            XUpdateError.INVALID_SELECT: _(
                'select expression "%(expr)s" must evaluate to a non-empty '
                'node-set'),
            XUpdateError.UNDEFINED_PREFIX: _(
                'Undefined namespace prefix %(prefix)r'),
            XUpdateError.INVALID_QNAME_ATTR: _(
                'Invalid QName value %(value)r for attribute %(attribute)r'),
            }
class xupdate_primitive(list):
    """Base class for XUpdate primitives (commands and instructions).

    Note, no need to call `list.__init__` if there are no initial
    items to be added. `list.__new__` takes care of the setup for
    new empty lists.
    """
    # `pipeline` indicates in which processing stage a command should be
    # executed. The default value of `0` indicates a non-command and thus
    # will be ignored.
    pipeline = 0
    # whether `setup()` performs any work for this primitive
    has_setup = False

    def setup(self):
        """Hook for additional post-parse setup; default is a no-op."""
        pass

    def instantiate(self, context):
        """Execute this primitive; concrete subclasses must override."""
        message = "subclass '%s' must override" % self.__class__.__name__
        raise NotImplementedError(message)
# -- High-level API ----------------------------------------------------
from amara import tree
from amara.xupdate import reader
def apply_xupdate(source, xupdate):
    """Parse the XUpdate document `xupdate`, parse the XML `source`,
    and return the source tree with the updates applied."""
    updates = reader.parse(xupdate)
    document = tree.parse(source)
    return updates.apply_updates(document)
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xupdate/__init__.py
|
__init__.py
|
from amara import tree
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE
class _Visitor:
    """
    Provides functions to recursively walk a DOM or Domlette object and
    generate SAX-like event calls for each node encountered. See the
    printer classes (XMLPrinter, HTMLPrinter, etc.) for the event
    handlers.
    """
    def __init__(self, printer,
                 ns_hints=None,
                 added_attributes=None):
        """
        Initializes an instance of the class.

        ns_hints, if given, is a dictionary of namespace mappings that
        help determine if namespace declarations need to be emitted when
        visiting the first Element node.
        """
        self.printer = printer
        # Namespaces: stack of in-scope prefix->uri mappings
        self._namespaces = [{'xml' : XML_NAMESPACE}]
        self._ns_hints = ns_hints
        self._added_attributes = added_attributes or {}

    # node xml_type -> unbound visit method; populated below as each
    # visit_* method is defined
    _dispatch = {}

    def visit(self, node):
        """
        Starts walking the tree at the given node.
        """
        try:
            node_type = node.xml_type
        except AttributeError:
            raise ValueError('Not a valid Amara node %r' % node)
        try:
            visit = self._dispatch[node_type]
        except KeyError:
            # Unknown node type. The previous code tried to build a
            # "pretty" name from the old DOM `Node` constants, but `Node`
            # was never ported to Amara 2, so it raised NameError here;
            # report the xml_type value directly instead.
            raise ValueError('Unknown node type %r' % node_type)
        else:
            visit(self, node)

    def visit_not_implemented(self, node):
        """
        Called when an known but unsupported type of node is
        encountered, always raising a NotImplementedError exception. The
        unsupported node types are those that require DTD subset
        support: entity nodes, entity reference nodes, and notation
        nodes.
        """
        raise NotImplementedError('Printing of %r' % node)

    def visit_document(self, node):
        """
        Called when an Entity node is encountered (e.g. may or may not be a full XML document entity).
        Work on DTDecl details, if any, and then to the children.
        """
        self.printer.start_document()
        if node.xml_system_id:
            # emit the doctype against the document element's QName
            for child in node.xml_children:
                if child.xml_type == tree.element.xml_type:
                    self.printer.doctype(child.xml_qname, node.xml_public_id, node.xml_system_id)
                    break
        for child in node.xml_children:
            self.visit(child)
        self.printer.end_document()
        return
    _dispatch[tree.entity.xml_type] = visit_document

    def visit_element(self, node):
        """
        Called when an Element node is encountered. Generates for the
        printer a startElement event, events for the node's children
        (including attributes), and an endElement event.
        """
        current_nss = self._namespaces[-1].copy()
        # Gather the namespaces and attributes for writing
        namespaces = node.xml_namespaces.copy()
        del namespaces[u'xml']
        if self._ns_hints:
            for prefix, namespaceUri in self._ns_hints.items():
                # See if this namespace needs to be emitted
                if current_nss.get(prefix, 0) != namespaceUri:
                    namespaces[prefix or u''] = namespaceUri
            # hints apply only to the first element visited
            self._ns_hints = None
        if self._added_attributes:
            attributes = self._added_attributes
            self._added_attributes = None
        else:
            attributes = {}
        for attr in node.xml_attributes.nodes():
            # xmlns="uri" or xmlns:foo="uri"
            if attr.xml_namespace == XMLNS_NAMESPACE:
                if not attr.xml_prefix:
                    # xmlns="uri"
                    prefix = None
                else:
                    # xmlns:foo="uri"
                    prefix = attr.xml_local
                if current_nss.get(prefix, 0) != attr.xml_value:
                    namespaces[prefix] = attr.xml_value
            else:
                attributes[attr.xml_qname] = attr.xml_value
        # The element's namespaceURI/prefix mapping takes precedence
        if node.xml_namespace or namespaces.get(None, 0):
            if namespaces.get(node.xml_prefix or None, 0) != node.xml_namespace:
                namespaces[node.xml_prefix or None] = node.xml_namespace or u""
        # Drop declarations that merely repeat what is already in scope
        kill_prefixes = []
        for prefix in namespaces:
            if prefix in current_nss and current_nss[prefix] == namespaces[prefix]:
                kill_prefixes.append(prefix)
        for prefix in kill_prefixes:
            del namespaces[prefix]
        self.printer.start_element(node.xml_namespace, node.xml_qname, namespaces.iteritems(),
                                   attributes.iteritems())
        # Update in scope namespaces with those we emitted
        current_nss.update(namespaces)
        self._namespaces.append(current_nss)
        # Write out this node's children
        for child in node.xml_children:
            self.visit(child)
        self.printer.end_element(node.xml_namespace, node.xml_qname)
        del self._namespaces[-1]
    _dispatch[tree.element.xml_type] = visit_element

    def visit_text(self, node):
        """
        Called when a Text node is encountered. Generates a text event
        for the printer.
        """
        self.printer.text(node.xml_value)
    _dispatch[tree.text.xml_type] = visit_text

    def visit_comment(self, node):
        """
        Called when a Comment node is encountered. Generates a comment
        event for the printer.
        """
        self.printer.comment(node.xml_value)
        return
    _dispatch[tree.comment.xml_type] = visit_comment

    def visit_processing_instruction(self, node):
        """
        Called when a ProcessingInstruction node is encountered.
        Generates a processingInstruction event for the printer.
        """
        self.printer.processing_instruction(node.xml_target, node.xml_data)
        return
    _dispatch[tree.processing_instruction.xml_type] = visit_processing_instruction
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/node.py
|
node.py
|
import re
from amara.namespaces import XHTML_NAMESPACE
from amara.writers import _xmlstream, htmlentities
from amara.writers._xmlprinters import xmlprinter
__all__ = ('xhtmlprinter', 'xhtmlprettyprinter')
class xhtmlprinter(xmlprinter):
    """
    An `xhtmlprinter` instance provides functions for serializing an XML
    or XML-like document to a stream, based on SAX-like event calls.

    The methods in this subclass of `xmlprinter` attempt to emit a
    document conformant to the XHTML 1.0 syntax, with no extra
    whitespace added for visual formatting. The degree of correctness
    of the output depends on the data supplied in the event calls; no
    checks are done for conditions that would result in syntax errors,
    such as two attributes with the same name, "--" in a comment, etc.
    """
    def _translate_attributes(self, element, attributes):
        """Yield (name, value) pairs with non-ASCII bytes in URI-valued
        attributes %-escaped per HTML 4.0 appendix B.2.1."""
        uri_attrs = self._uri_attributes
        for name, value in attributes:
            if name in uri_attrs and element in uri_attrs[name]:
                # From HTML 4.0 Section B.2.1
                # We recommend that user agents adopt the following convention
                # for handling non-ASCII characters:
                # 1. Represent each character in UTF-8 (see [RFC2279]) as one
                #    or more bytes.
                # 2. Escape these bytes with the URI escaping mechanism
                #    (i.e., by converting each byte to %HH, where HH is the
                #    hexadecimal notation of the byte value).
                # (Although this recommendation is for HTML user agents
                # that encounter HTML with improperly escaped URI refs,
                # we implement it in order to comply with XSLT's html
                # output method, and because there's no compelling reason
                # not to do it for non-XSLT serializations as well)
                value = unicode(re.sub('[\x80-\xff]',
                                lambda match: '%%%02X' % ord(match.group()),
                                value.encode('UTF-8')))
            yield (name, value)
        return

    def start_element(self, namespace, name, namespaces, attributes):
        """
        Handles a start-tag event.
        """
        xhtml = (namespace == XHTML_NAMESPACE)
        if xhtml:
            attributes = self._translate_attributes(name, attributes)
        xmlprinter.start_element(self, namespace, name, namespaces,
                                 attributes)
        if xhtml and name not in self._empty_elements:
            # Check for XHTML tags which should not be in minimized form
            # ('<tag/>')
            self.write_ascii('>')
            self._element_name = None
        return

    def end_element(self, namespace, name):
        """
        Handles an end-tag event.

        Differs from the overridden method in that an end tag is not
        generated for certain elements.
        """
        if (self._element_name and
            name in self._empty_elements and
            namespace == XHTML_NAMESPACE):
            # EMPTY element content, use minimized form (with space before /)
            self.write_ascii(' />')
        else:
            xmlprinter.end_element(self, namespace, name)
        return

    # Elements for which end tags must not be emitted
    _empty_elements = frozenset([
        'area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
        'img', 'input', 'isindex', 'link', 'meta', 'param',
        ])

    # URI attributes that can have non-ASCII characters escaped
    _uri_attributes = {
        'action'    : ['form'],
        'archive'   : ['object'],
        'background': ['body'],
        'cite'      : ['blockquote', 'del', 'ins', 'q'],
        'classid'   : ['object'],
        'codebase'  : ['applet', 'object'],
        'data'      : ['object'],
        'datasrc'   : ['button', 'div', 'input', 'object', 'select', 'span',
                       'table', 'textarea'],
        'for'       : ['script'],
        'href'      : ['a', 'area', 'base', 'link'],
        'longdesc'  : ['frame', 'iframe', 'img'],
        'name'      : ['a'],
        'profile'   : ['head'],
        'src'       : ['frame', 'iframe', 'img', 'input', 'script'],
        'usemap'    : ['img', 'input', 'object'],
        }

    # Character-to-entity replacement maps. The replacement values are the
    # literal entity/character references written to the output stream.
    # (These were previously entity-decoded by a conversion error, mapping
    # each character to itself and thereby disabling escaping entirely.)
    _text_entities = {'<': '&lt;',
                      '>': '&gt;',
                      '&': '&amp;',
                      '\r': '&#13;',
                      }
    _text_entities.update(htmlentities.ENTITIES_XHTML_10)
    _text_entities = _xmlstream.entitymap(_text_entities)

    _attr_entities_quot = {'&' : '&amp;',
                           '\t' : '&#9;',
                           '\n' : '&#10;',
                           '\r' : '&#13;',
                           '"' : '&quot;',
                           }
    _attr_entities_quot.update(htmlentities.ENTITIES_XHTML_10)
    _attr_entities_quot = _xmlstream.entitymap(_attr_entities_quot)

    _attr_entities_apos = {'&' : '&amp;',
                           '\t' : '&#9;',
                           '\n' : '&#10;',
                           '\r' : '&#13;',
                           "'" : '&#39;',  # no &apos; in HTML
                           }
    _attr_entities_apos.update(htmlentities.ENTITIES_XHTML_10)
    _attr_entities_apos = _xmlstream.entitymap(_attr_entities_apos)
class xhtmlprettyprinter(xhtmlprinter):
    """
    An `xhtmlprettyprinter` instance provides functions for serializing an
    XML or XML-like document to a stream, based on SAX-like event calls.

    The methods in this subclass of `xhtmlprinter` attempt to emit a
    document conformant to the XHTML 1.0 syntax, with extra whitespace
    added for visual formatting. The indent attribute is the string used
    for each level of indenting. It defaults to 2 spaces.
    """
    # The amount of indent for each level of nesting
    indent = '  '

    def __init__(self, stream, encoding):
        xhtmlprinter.__init__(self, stream, encoding)
        self._level = 0
        # indenting control variables
        self._is_inline = [1]  # prevent newline before first element
        self._in_no_indent = [0]
        self._indent_forbidden = 0
        self._indent_end_tag = False
        return

    def start_element(self, namespace, name, namespaces, attributes):
        # close any deferred start-tag of the parent element
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None

        # Create the lookup key for the various lookup tables
        if namespace == XHTML_NAMESPACE:
            key = name
        else:
            key = None

        # Get the inline flag for this element
        inline = key in self._inline_elements

        if (not inline and not self._is_inline[-1]
            and not self._indent_forbidden):
            self.write_ascii('\n' + (self.indent * self._level))

        xhtmlprinter.start_element(self, namespace, name, namespaces,
                                   attributes)

        # Setup indenting rules for this element
        self._is_inline.append(inline)
        self._in_no_indent.append(key in self._no_indent_elements)
        self._indent_forbidden += self._in_no_indent[-1]
        self._level += 1
        self._indent_end_tag = False
        return

    def end_element(self, namespace, name):
        # Undo changes to indenting rules for this element
        self._level -= 1
        is_inline = self._is_inline[-1]
        del self._is_inline[-1]

        if self._element_name:
            # An empty non-null namespace element (use XML short form)
            self.write_ascii('/>')
            self._element_name = None
        else:
            if (not is_inline and not self._indent_forbidden and
                self._indent_end_tag):
                self.write_ascii('\n' + (self.indent * self._level))
            xhtmlprinter.end_element(self, namespace, name)

        no_indent = self._in_no_indent[-1]
        del self._in_no_indent[-1]
        self._indent_forbidden -= no_indent
        self._indent_end_tag = not is_inline
        return

    def processing_instruction(self, target, data):
        # close any deferred start-tag before emitting the PI
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None

        # OK to indent end-tag
        self._indent_end_tag = True

        # try to indent
        if not self._is_inline[-1] and not self._indent_forbidden:
            self.write_ascii('\n' + (self.indent * self._level))
        xhtmlprinter.processing_instruction(self, target, data)
        return

    def comment(self, data):
        # close any deferred start-tag before emitting the comment
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None

        # OK to indent end-tag
        self._indent_end_tag = True

        # try to indent
        if not self._is_inline[-1] and not self._indent_forbidden:
            self.write_ascii('\n' + (self.indent * self._level))
        xhtmlprinter.comment(self, data)
        return

    # Elements that should never be emitted on a new line.
    _inline_elements = frozenset([
        'tt', 'i', 'b', 'u', 's', 'strike', 'big', 'small', 'em', 'strong',
        'dfn', 'code', 'samp', 'kbd', 'var', 'cite', 'abbr', 'acronym', 'a',
        'img', 'applet', 'object', 'font', 'basefont', 'script', 'map', 'q',
        'sub', 'sup', 'span', 'bdo', 'iframe', 'input', 'select', 'textarea',
        'label', 'button',
        ])

    # Elements that should never be emitted with additional
    # whitespace in their content; i.e., once you're inside
    # one, you don't do any more indenting.
    _no_indent_elements = frozenset([
        'script', 'style', 'pre', 'textarea', 'xmp',
        ])
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/_xhtmlprinters.py
|
_xhtmlprinters.py
|
import re
from amara.lib.xmlstring import isspace
from amara.writers import _xmlstream, htmlentities
from amara.writers._xmlprinters import xmlprinter
__all__ = ('htmlprinter', 'htmlprettyprinter')
class htmlprinter(xmlprinter):
    """
    An `htmlprinter` instance provides functions for serializing an XML
    or XML-like document to a stream, based on SAX-like event calls.

    The methods in this subclass of `xmlprinter` attempt to emit a
    document conformant to the HTML 4.01 syntax, with no extra
    whitespace added for visual formatting. The degree of correctness
    of the output depends on the data supplied in the event calls; no
    checks are done for conditions that would result in syntax errors,
    such as two attributes with the same name, "--" in a comment, etc.
    """
    # Depth counter; non-zero while inside SCRIPT/STYLE content, where
    # text must not be entity-escaped. (Name keeps its historical
    # misspelling for compatibility with subclasses.)
    _disable_ouput_escaping = 0
    # When true, namespace information is dropped from the output.
    _raze_namespaces = False

    def __init__(self, stream, encoding):
        xmlprinter.__init__(self, stream, encoding)

    def start_document(self, version='4.0', standalone=None):
        """
        Handles a start-document event.

        Differs from the overridden method in that no XML declaration
        is written.
        """
        # Set the entity maps to the particular version of HTML being output.
        # If the version isn't one we know how to handle, fallback to 4.0.
        try:
            entities = self._versioned_entities[version]
        except KeyError:
            entities = self._versioned_entities['4.0']
        (self._text_entities,
         self._attr_entities_quot,
         self._attr_entities_apos) = entities
        return

    def doctype(self, name, publicid, systemid):
        """
        Handles a doctype event.

        Extends the overridden method by adding support for the case
        when there is a publicid and no systemid, which is allowed in
        HTML but not in XML.
        """
        if publicid and not systemid:
            self.write_ascii('<!DOCTYPE ')
            self.write_encode(name, 'document type name')
            self.write_ascii(' PUBLIC "')
            self.write_encode(publicid, 'document type public-id')
            self.write_ascii('">\n')
        else:
            xmlprinter.doctype(self, name, publicid, systemid)
        return

    def _translate_attributes(self, element, attributes):
        """Yield (name, value) pairs with boolean attributes minimized
        (value of None) and non-ASCII bytes in URI values %-escaped."""
        bool_attrs, uri_attrs = self._boolean_attributes, self._uri_attributes
        for name, value in attributes:
            attr = name.lower()
            if attr in bool_attrs and element in bool_attrs[attr]:
                if attr == value.lower():
                    # A boolean attribute, just write out the name
                    value = None
            elif attr in uri_attrs and element in uri_attrs[attr]:
                # From HTML 4.0 Section B.2.1
                # We recommend that user agents adopt the following convention
                # for handling non-ASCII characters:
                # 1. Represent each character in UTF-8 (see [RFC2279]) as one
                #    or more bytes.
                # 2. Escape these bytes with the URI escaping mechanism
                #    (i.e., by converting each byte to %HH, where HH is the
                #    hexadecimal notation of the byte value).
                # (Although this recommendation is for HTML user agents
                # that encounter HTML with improperly escaped URI refs,
                # we implement it in order to comply with XSLT's html
                # output method, and because there's no compelling reason
                # not to do it for non-XSLT serializations as well)
                value = unicode(re.sub('[\x80-\xff]',
                                lambda match: '%%%02X' % ord(match.group()),
                                value.encode('UTF-8')))
            yield (name, value)
        return

    def start_element(self, namespace, name, namespaces, attributes):
        """
        Handles a start-tag event.

        Extends the overridden method by disabling output escaping for
        the content of certain elements (SCRIPT and STYLE).
        """
        if namespace is not None:
            # namespaced element: emit as XML unless razing namespaces
            if self._raze_namespaces:
                namespace, namespaces = None, {}
            xmlprinter.start_element(self, namespace, name, namespaces,
                                     attributes)
            return
        element = name.lower()
        if element in self._no_escape_elements:
            self._disable_ouput_escaping += 1
        # Translate attribute values as required
        if namespace is None:
            attributes = self._translate_attributes(element, attributes)
        xmlprinter.start_element(self, namespace, name, namespaces,
                                 attributes)
        # HTML tags are never in minimized form ('<tag/>')
        self.write_ascii('>')
        self._element_name = None
        return

    def end_element(self, namespace, name):
        """
        Handles an end-tag event.

        Differs from the overridden method in that an end tag is not
        generated for certain elements.
        """
        if namespace is not None:
            # namespaced element: emit as XML unless razing namespaces
            if self._raze_namespaces:
                namespace, namespaces = None, {}
            xmlprinter.end_element(self, namespace, name)
            return
        element = name.lower()
        if element not in self._forbidden_end_elements:
            self.write_ascii('</')
            self.write_encode(name, 'element name')
            self.write_ascii('>')
        # Restore normal escaping if closing a no-escape element.
        if element in self._no_escape_elements:
            self._disable_ouput_escaping -= 1
        return

    def processing_instruction(self, target, data):
        """
        Handles a processingInstruction event.

        Differs from the overridden method by writing the tag with
        no "?" at the end.
        """
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        self.write_ascii('<?')
        self.write_encode(target, 'processing-instruction target')
        if data:
            self.write_ascii(' ')
            self.write_encode(data, 'processing-instruction data')
        self.write_ascii('>')
        return

    # Elements for which end tags must not be emitted
    _forbidden_end_elements = frozenset([
        'area', 'base', 'basefont', 'br', 'col', 'frame', 'hr',
        'img', 'input', 'isindex', 'link', 'meta', 'param',
        ])

    # Elements in which character data is not escaped
    #
    # FIXME: According to HTML 4.01 section B.3.2, "</" and unencodable
    # characters within the content of a SCRIPT or STYLE slement must
    # be escaped according to the conventions of the script or style
    # language in use.
    _no_escape_elements = frozenset(['script', 'style'])

    # Boolean attributes that can be minimized
    _boolean_attributes = {
        'checked'  : ['input'],
        'compact'  : ['dl', 'ol', 'ul', 'dir', 'menu', 'li'],
        'declare'  : ['object'],
        'defer'    : ['script'],
        'disabled' : ['input', 'select', 'optgroup', 'option', 'textarea',
                      'button'],
        'ismap'    : ['img', 'input'],
        'multiple' : ['select'],
        'nohref'   : ['area'],
        'noresize' : ['frame'],
        'noshade'  : ['hr'],
        'nowrap'   : ['th', 'td'],
        'readonly' : ['input', 'textarea'],
        'selected' : ['option'],
        }

    # URI attributes that can have non-ASCII characters escaped
    _uri_attributes = {
        'action'   : ['form'],
        'background' : ['body'],
        'cite'     : ['blockquote', 'del', 'ins', 'q'],
        'classid'  : ['object'],
        'codebase' : ['applet', 'object'],
        'data'     : ['object'],
        'href'     : ['a', 'area', 'base', 'link'],
        'longdesc' : ['frame', 'iframe', 'img'],
        'profile'  : ['head'],
        'src'      : ['frame', 'iframe', 'img', 'input', 'script'],
        'usemap'   : ['img', 'input', 'object'],
        }

    # version -> [text map, quot-attr map, apos-attr map]; selected by
    # start_document() above
    _versioned_entities = {
        '3.2' : [],
        '4.0' : [],
        }

    # Character-to-entity replacement maps. The replacement values are the
    # literal entity/character references written to the output stream.
    # (These were previously entity-decoded by a conversion error, mapping
    # each character to itself and thereby disabling escaping entirely.)
    _text_entities = {'<': '&lt;',
                      '>': '&gt;',
                      '&': '&amp;',
                      '\r': '&#13;',
                      }
    _text_entities.update(htmlentities.ENTITIES_HTML_32)
    _versioned_entities['3.2'].append(_xmlstream.entitymap(_text_entities))
    _text_entities.update(htmlentities.ENTITIES_HTML_40)
    # The default entities are those for HTML 4.01
    _text_entities = _xmlstream.entitymap(_text_entities)
    _versioned_entities['4.0'].append(_text_entities)

    # For HTML attribute values:
    #   1. do not escape '<' (see XSLT 1.0 section 16.2)
    #   2. only escape '&' if not followed by '{'
    def _attr_amp_escape(string, offset):
        if string.startswith('&{', offset):
            return '&'
        else:
            return '&amp;'

    _attr_entities_quot = {'&' : _attr_amp_escape,
                           '\t' : '&#9;',
                           '\n' : '&#10;',
                           '\r' : '&#13;',
                           '"' : '&quot;',
                           }
    _attr_entities_quot.update(htmlentities.ENTITIES_HTML_32)
    _versioned_entities['3.2'].append(_xmlstream.entitymap(_attr_entities_quot))
    _attr_entities_quot.update(htmlentities.ENTITIES_HTML_40)
    # The default entities are those for HTML 4.01
    _attr_entities_quot = _xmlstream.entitymap(_attr_entities_quot)
    _versioned_entities['4.0'].append(_attr_entities_quot)

    _attr_entities_apos = {'&' : _attr_amp_escape,
                           '\t' : '&#9;',
                           '\n' : '&#10;',
                           '\r' : '&#13;',
                           "'" : '&#39;',  # no &apos; in HTML
                           }
    _attr_entities_apos.update(htmlentities.ENTITIES_HTML_32)
    _versioned_entities['3.2'].append(_xmlstream.entitymap(_attr_entities_apos))
    _attr_entities_apos.update(htmlentities.ENTITIES_HTML_40)
    # The default entities are those for HTML 4.01
    _attr_entities_apos = _xmlstream.entitymap(_attr_entities_apos)
    _versioned_entities['4.0'].append(_attr_entities_apos)

    del _attr_amp_escape
class htmlprettyprinter(htmlprinter):
    """
    An `htmlprettyprinter` instance provides functions for serializing an
    XML or XML-like document to a stream, based on SAX-like event calls.
    The methods in this subclass of `htmlprinter` attempt to emit a
    document conformant to the HTML 4.01 syntax, with extra whitespace
    added for visual formatting. The indent attribute is the string used
    for each level of indenting. It defaults to 2 spaces.
    """
    # The amount of indent for each level of nesting
    # NOTE(review): the docstring says "2 spaces" but this literal contains
    # one -- possibly mangled in transit; confirm against upstream.
    indent = ' '
    def __init__(self, stream, encoding):
        htmlprinter.__init__(self, stream, encoding)
        self._level = 0  # current element nesting depth
        # indenting control variables
        self._is_inline = [1] # prevent newline before first element
        self._in_no_indent = [0]  # parallel stack: 1 inside pre/script/etc.
        self._indent_forbidden = 0  # >0 while inside any no-indent element
        self._indent_end_tag = False  # may the next end-tag go on its own line?
        return
    def start_element(self, namespace, name, namespaces, attributes):
        # Close a pending start-tag before emitting this element
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        # Create the lookup key for the various lookup tables
        if namespace is None:
            key = name.lower()
        else:
            # Namespaced (non-HTML) elements never match the HTML tables
            key = '#bogus'
        # Get the inline flag for this element
        inline = key in self._inline_elements
        # Only block-level elements outside no-indent regions get a newline
        if (not inline and not self._is_inline[-1]
            and not self._indent_forbidden):
            self.write_ascii('\n' + (self.indent * self._level))
        htmlprinter.start_element(self, namespace, name, namespaces,
                                  attributes)
        # Setup indenting rules for this element
        self._is_inline.append(inline)
        self._in_no_indent.append(key in self._no_indent_elements)
        self._indent_forbidden += self._in_no_indent[-1]
        self._level += 1
        self._indent_end_tag = False
        return
    def end_element(self, namespace, name):
        # Undo changes to indenting rules for this element
        self._level -= 1
        is_inline = self._is_inline[-1]
        del self._is_inline[-1]
        if self._element_name:
            # An empty non-null namespace element (use XML short form)
            self.write_ascii('/>')
            self._element_name = None
        else:
            if (not is_inline and not self._indent_forbidden and
                self._indent_end_tag):
                self.write_ascii('\n' + (self.indent * self._level))
            htmlprinter.end_element(self, namespace, name)
        no_indent = self._in_no_indent[-1]
        del self._in_no_indent[-1]
        self._indent_forbidden -= no_indent
        self._indent_end_tag = not is_inline
        return
    def text(self, data, disable_escaping=False):
        # OK to indent end-tag if just whitespace is written
        # (isspace is presumably amara.lib.xmlstring.isspace -- confirm)
        self._indent_end_tag = isspace(data)
        htmlprinter.text(self, data, disable_escaping)
    def processing_instruction(self, target, data):
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        # OK to indent end-tag
        self._indent_end_tag = True
        # try to indent
        if not self._is_inline[-1] and not self._indent_forbidden:
            self.write_ascii('\n' + (self.indent * self._level))
        htmlprinter.processing_instruction(self, target, data)
        return
    def comment(self, data):
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        # OK to indent end-tag
        self._indent_end_tag = True
        # try to indent
        if not self._is_inline[-1] and not self._indent_forbidden:
            self.write_ascii('\n' + (self.indent * self._level))
        htmlprinter.comment(self, data)
        return
    # Elements that should never be emitted on a new line.
    _inline_elements = frozenset([
        'tt', 'i', 'b', 'u', 's', 'strike', 'big', 'small', 'em', 'strong',
        'dfn', 'code', 'samp', 'kbd', 'var', 'cite', 'abbr', 'acronym', 'a',
        'img', 'applet', 'object', 'font', 'basefont', 'br', 'script', 'map',
        'q', 'sub', 'sup', 'span', 'bdo', 'iframe', 'input', 'select',
        'textarea', 'label', 'button',
        ])
    # Elements that should never be emitted with additional
    # whitespace in their content; i.e., once you're inside
    # one, you don't do any more indenting.
    _no_indent_elements = frozenset([
        'script', 'style', 'pre', 'textarea', 'xmp',
        ])
#
class html_ns_stripper(htmlprinter):
    """
    Specialization of `htmlprinter` that forcibly suppresses any specified namespaces
    """
    # Flag presumably consumed by the htmlprinter/_xmlstream machinery to
    # discard namespace declarations -- TODO confirm against the base class.
    _raze_namespaces = True
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/_htmlprinters.py
|
_htmlprinters.py
|
class outputparameters(object):
    """
    Bundle of serialization parameters for an XML/HTML writer, closely
    mirroring the attributes of XSLT's xsl:output instruction.

    The (Python 2) metaclass collects the attribute-name lists below and
    installs them as the class's ``__slots__``.
    """
    # NOTE(review): 'boolean_attribues' is misspelled, but the name is used
    # consistently below and may be referenced externally, so it is kept
    # for compatibility.
    boolean_attribues = [
        'omit_xml_declaration', 'standalone', 'indent', 'byte_order_mark',
        'canonical_form',
        ]
    string_attributes = [
        'method', 'version', 'encoding', 'doctype_system', 'doctype_public',
        'media_type',
        ]
    sequence_attributes = [
        'cdata_section_elements',
        ]
    class __metaclass__(type):
        # Builds __slots__ from the three attribute-name lists above.
        def __new__(cls, name, bases, members):
            attrs = []
            if 'boolean_attribues' in members:
                attrs += members['boolean_attribues']
            if 'string_attributes' in members:
                attrs += members['string_attributes']
            if 'sequence_attributes' in members:
                attrs += members['sequence_attributes']
            members['__slots__'] = tuple(attrs)
            return type.__new__(cls, name, bases, members)
    def __init__(self, method=None, version=None, encoding=None,
                 omit_xml_declaration=None, standalone=None,
                 doctype_public=None, doctype_system=None,
                 cdata_section_elements=(), indent=None, media_type=None,
                 byte_order_mark=None, canonical_form=None):
        self.method = method
        self.version = version
        self.encoding = encoding
        self.omit_xml_declaration = omit_xml_declaration
        self.standalone = standalone
        self.doctype_public = doctype_public
        self.doctype_system = doctype_system
        # Normalize any user-supplied sequence to an immutable tuple
        if cdata_section_elements:
            cdata_section_elements = tuple(cdata_section_elements)
        self.cdata_section_elements = cdata_section_elements
        self.indent = indent
        self.media_type = media_type
        self.byte_order_mark = byte_order_mark
        self.canonical_form = canonical_form
    def clone(self):
        """Return a new instance carrying the same non-None parameters."""
        attrs = {}
        for name in self.__slots__:
            value = getattr(self, name)
            if value is not None:
                attrs[name] = value
        # FIX: removed a stray `setattr(clone, name, value)` here; `clone`
        # was an undefined name, so the method raised NameError whenever
        # any parameter was set.
        return self.__class__(**attrs)
    def update(self, other):
        """Overlay non-None values from `other` onto self.

        Sequence attributes are concatenated (other's values first);
        raises TypeError if `other` is not an outputparameters.
        """
        if not isinstance(other, outputparameters):
            raise TypeError(other)
        for name in self.__slots__:
            value = getattr(other, name)
            if value is not None:
                if name in self.sequence_attributes:
                    value += getattr(self, name)
                setattr(self, name, value)
        return
    def setdefault(self, attr, default):
        """Like dict.setdefault: set `attr` to `default` only if unset."""
        value = getattr(self, attr)
        if value is not None:
            return value
        setattr(self, attr, default)
        return default
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/outputparameters.py
|
outputparameters.py
|
import sys
from itertools import *
import amara
from amara.lib.util import coroutine
from amara import tree
from amara.writers import WriterError
from amara import XML_NAMESPACE
from amara.lib.xmlstring import *
import _xmlprinters, _htmlprinters
UNSPECIFIED_NAMESPACE = u":"
__all__ = ['structwriter', 'structencoder', 'E', 'NS', 'ROOT', 'RAW', 'E_CURSOR']
class StructWriterError(WriterError):
    # Error codes for structwriter-specific failures; the WriterError base
    # (from amara.writers) presumably maps these to messages -- confirm.
    ATTRIBUTE_ADDED_TOO_LATE = 1
    ATTRIBUTE_ADDED_TO_NON_ELEMENT = 2
def get_printer(stream, encoding='utf-8', ns_hints=None, is_html=False,
                indent=False, canonical=False, added_attributes=None,
                removed_ns_decls=None):
    """
    Initializes an instance of the class, selecting the appropriate
    printer to use, depending on the is_html and indent flags.
    ns_hints, if given, is a dictionary of namespace mappings that
    help determine if namespace declarations need to be emitted when
    visiting the first Element node.

    Note: `canonical` is only honored when both `indent` and `is_html`
    are false; `ns_hints`, `added_attributes` and `removed_ns_decls` are
    accepted but unused in this function.
    """
    if indent and is_html:
        printer = _htmlprinters.htmlprettyprinter(stream, encoding)
    elif indent:
        printer = _xmlprinters.xmlprettyprinter(stream, encoding)
    elif is_html:
        printer = _htmlprinters.htmlprinter(stream, encoding)
    elif canonical:
        # NOTE(review): _xmlprinters defines `canonicalxmlprinter`
        # (lowercase); confirm `CanonicalXmlPrinter` is a valid alias,
        # otherwise this branch raises AttributeError.
        printer = _xmlprinters.CanonicalXmlPrinter(stream, encoding)
    else:
        printer = _xmlprinters.xmlprinter(stream, encoding)
    return printer
class structwriter(object):
    """
    XML/HTML writer (serializer) that works on Python objects that proxy
    XML structure (similar to, but simpler and more and directly geared to
    serialization than amara.tree and bindery nodes). Writes the serialized
    result to an output stream (sys.stdout by default).

    Usage examples:

    from amara.writers.struct import structwriter, E, NS, ROOT, RAW
    output = structwriter(indent=u"yes")
    output.feed(
    ROOT(
      E(u'a', (E(u'b', unicode(i)) for i in range(10)))
    ))

    Using namespaces:

    from amara.writers.struct import structwriter, E, NS, ROOT, RAW
    from amara.namespaces import ATOM_NAMESPACE
    output = structwriter(stream=open("spam.xml"), indent=u"yes")
    output.feed(
    ROOT(
      E((ATOM_NAMESPACE, u'entry'),
        E((ATOM_NAMESPACE, u'id'), u'urn:bogus:x'),
        E((ATOM_NAMESPACE, u'title'), u'Hello world'),
        E((ATOM_NAMESPACE, u'link'), {u'href': u'http://example.org'}),
      )
    ))

    Using coroutine mode, and a cursor:

    from amara.writers.struct import structwriter, E, NS, ROOT, RAW, E_CURSOR
    output = structwriter()
    f = output.cofeed(ROOT(E(u'a', E_CURSOR(u'b', {u'attr1': u'val1'}))))
    f.send(E(u'c', u'1'))
    f.send(E(u'c', u'2'))
    f.send(E(u'c', u'3'))
    f.close()

    Output:

    <?xml version="1.0" encoding="utf-8"?>
    <a><b attr1="val1"><c>1</c><c>2</c><c>3</c></b></a>

    See documentation for more extensive examples.
    """
    def __init__(self, stream=sys.stdout, **kwargs):
        #self.printer = writer or xmlwriter()
        # kwargs are forwarded to get_printer (encoding, indent, is_html, ...)
        self.printer = get_printer(stream, **kwargs)
    def feed(self, obj, prefixes=None):
        """
        Feed a structure to the writer. The structure is interpreted as XML and
        serialized.

        obj - XML node proxy structure (or iterator thereof), such as
        amara.writers.struct.ROOT (proxy for a root (entity) node) or
        amara.writers.struct.E (proxy for an element).

        prefixes - mapping of in-scope namespace prefixes to URIs

        See documentation for other proxy node classes
        """
        prefixes = prefixes or {}
        if isinstance(obj, ROOT):
            self.printer.start_document()
            for subobj in obj.content:
                self.feed(subobj)
            self.printer.end_document()
            return
        if isinstance(obj, NS):
            # Bare NS at this level is a no-op; NS items are consumed while
            # processing their parent E below.
            return
        if isinstance(obj, RAW):
            # Parse the raw markup and replay it through the printer
            doc = amara.parse(obj.content)
            from amara.writers._treevisitor import visitor
            v = visitor(printer=self.printer)
            for child in doc.xml_children:
                v.visit(child)
            return
        if isinstance(obj, E):
            #First attempt used tee. Seems we ran into the warning at
            #http://www.python.org/doc/2.4.3/whatsnew/node13.html
            #"Note that tee() has to keep copies of the values returned by the iterator;
            #in the worst case, it may need to keep all of them.
            #This should therefore be used carefully if the leading iterator can run
            #far ahead of the trailing iterator in a long stream of inputs.
            #If the separation is large, then you might as well use list() instead.
            #When the iterators track closely with one another, tee()" is ideal. Possible
            #applications include bookmarking, windowing, or lookahead iterators.
            #(Contributed by Raymond Hettinger.)"
            #obj.namespaces = {}
            new_prefixes = []
            lead = None
            content = iter(obj.content)
            # Leading NS items declare extra prefixes for this element; the
            # first non-NS item ("lead") is held back and re-fed below.
            for subobj in content:
                if isinstance(subobj, NS):
                    new_prefixes.append((subobj.prefix, subobj.namespace))
                else:
                    lead = subobj
                    break
            prefix, local = splitqname(obj.qname)
            prefix = prefix or u''
            if obj.ns == UNSPECIFIED_NAMESPACE:
                # Resolve the element's namespace from in-scope prefixes
                obj.ns = prefixes.get(prefix, u'')
            elif prefix not in prefixes or prefixes[prefix] != obj.ns:
                # Not in scope yet: declare it on this element
                new_prefixes.append((prefix, obj.ns or u''))
            attrs = [ a for a in obj.attributes.itervalues() ] if obj.attributes else ()
            if new_prefixes:
                # Copy-on-write so sibling subtrees are not affected
                prefixes = prefixes.copy()
                prefixes.update(dict(new_prefixes))
            self.printer.start_element(obj.ns, obj.qname, new_prefixes, attrs)
            if lead:
                self.feed(lead, prefixes)
            for subobj in content:
                self.feed(subobj, prefixes)
            self.printer.end_element(obj.ns, obj.qname)
            return
        if isinstance(obj, basestring):
            self.printer.text(U(obj))
            return
        if isinstance(obj, tree.element):
            #Be smart about bindery nodes
            self.printer.text(unicode(obj))
            return
        try:
            obj = iter(obj)
        except TypeError, e:
            if callable(obj):
                # Not iterable but callable: call it and serialize the result
                self.feed(obj(), prefixes)
            else:
                #Just try to make it text, i.e. punt
                self.feed(unicode(obj), prefixes)
        else:
            for subobj in obj:
                self.feed(subobj, prefixes)
        return
    @coroutine
    def cofeed(self, obj, prefixes=None):
        """
        Feed a structure to the writer, including a cursor. The structure is
        interpreted as XML and serialized. The initially fed structure becomes
        the outer envelope of the serialized XML, and then the operation is
        suspended (this method engenders a coroutine). The user can then send additional
        substructures to the coroutine, which get serialized at the point of the cursor,
        until the user closes the coroutine, at which point the serialization is
        completed.

        obj - XML node proxy structure (or iterator thereof), such as
        amara.writers.struct.ROOT (proxy for a root (entity) node),
        amara.writers.struct.E (proxy for an element), or
        amara.writers.struct.E_CURSOR (proxy for a cursor element, whose children
        are then later provided by sending proxy nodes to the coroutine).

        See documentation for other proxy node classes
        """
        #This method is largely a dupe of feed, but rather than calling self.feed to
        #recursively deal with compound structures, it sets up a child coroutine
        #and forwards values sent by the parent. There is a lot of inelegant
        #duplication because we often can't tidy things up with functions without
        #Breaking the character of cofeed as a coroutine
        #this is one are where Python could very much use cpp-style macros
        #FIXME. There is some inelegant duplication that might well be refatcored
        #away, even without the benefit of cpp-style macros
        prefixes = prefixes or {}
        if isinstance(obj, ROOT):
            self.printer.start_document()
            for subobj in obj.content:
                try:
                    # Delegate to a child coroutine and forward sent values
                    buf = self.cofeed(subobj, prefixes=None)
                    try:
                        while True:
                            val = (yield)
                            buf.send(val)
                    except GeneratorExit:
                        # Parent closed: close the child, then finish the doc
                        buf.close()
                except StopIteration:
                    pass
                #self.feed(subobj)
            self.printer.end_document()
            return
        if isinstance(obj, NS):
            return
        if isinstance(obj, RAW):
            doc = amara.parse(obj.content)
            from amara.writers._treevisitor import visitor
            v = visitor(printer=self.printer)
            for child in doc.xml_children:
                v.visit(child)
            return
        if isinstance(obj, E_CURSOR):
            # The cursor: emit the start-tag, then forward everything sent
            # to this coroutine into obj.do's child coroutine until closed.
            new_prefixes = []
            prefix, local = splitqname(obj.qname)
            prefix = prefix or u''
            if obj.ns == UNSPECIFIED_NAMESPACE:
                obj.ns = prefixes.get(prefix, u'')
            elif prefix not in prefixes or prefixes[prefix] != obj.ns:
                new_prefixes.append((prefix, obj.ns or u''))
            attrs = [ a for a in obj.attributes.itervalues() ] if obj.attributes else ()
            if new_prefixes:
                prefixes = prefixes.copy()
                prefixes.update(dict(new_prefixes))
            self.printer.start_element(obj.ns, obj.qname, new_prefixes, attrs)
            try:
                buf = obj.do(self)
                while True:
                    val = (yield)
                    buf.send(val)
            except GeneratorExit:
                buf.close()
            self.printer.end_element(obj.ns, obj.qname)
            return
        if isinstance(obj, E):
            #First attempt used tee. Seems we ran into the warning at
            #http://www.python.org/doc/2.4.3/whatsnew/node13.html
            #"Note that tee() has to keep copies of the values returned by the iterator;
            #in the worst case, it may need to keep all of them.
            #This should therefore be used carefully if the leading iterator can run
            #far ahead of the trailing iterator in a long stream of inputs.
            #If the separation is large, then you might as well use list() instead.
            #When the iterators track closely with one another, tee()" is ideal. Possible
            #applications include bookmarking, windowing, or lookahead iterators.
            #(Contributed by Raymond Hettinger.)"
            #obj.namespaces = {}
            new_prefixes = []
            lead = None
            content = iter(obj.content)
            for subobj in content:
                if isinstance(subobj, NS):
                    new_prefixes.append((subobj.prefix, subobj.namespace))
                else:
                    lead = subobj
                    break
            prefix, local = splitqname(obj.qname)
            prefix = prefix or u''
            if obj.ns == UNSPECIFIED_NAMESPACE:
                obj.ns = prefixes.get(prefix, u'')
            elif prefix not in prefixes or prefixes[prefix] != obj.ns:
                new_prefixes.append((prefix, obj.ns or u''))
            attrs = [ a for a in obj.attributes.itervalues() ] if obj.attributes else ()
            if new_prefixes:
                prefixes = prefixes.copy()
                prefixes.update(dict(new_prefixes))
            self.printer.start_element(obj.ns, obj.qname, new_prefixes, attrs)
            if lead:
                if isinstance(lead, E_CURSOR) or isinstance(lead, E):
                    # Compound child: delegate to a child coroutine, forwarding
                    try:
                        buf = self.cofeed(lead, prefixes=None)
                        try:
                            while True:
                                val = (yield)
                                buf.send(val)
                        except GeneratorExit:
                            buf.close()
                    except StopIteration:
                        pass
                else:
                    self.feed(lead, prefixes)
            for subobj in content:
                if isinstance(subobj, E_CURSOR) or isinstance(subobj, E):
                    try:
                        buf = self.cofeed(subobj, prefixes=None)
                        try:
                            while True:
                                val = (yield)
                                buf.send(val)
                        except GeneratorExit:
                            buf.close()
                    except StopIteration:
                        pass
                else:
                    self.feed(subobj, prefixes)
            self.printer.end_element(obj.ns, obj.qname)
            return
        if isinstance(obj, basestring):
            self.printer.text(U(obj))
            return
        if isinstance(obj, tree.element):
            #Be smart about bindery nodes
            self.printer.text(unicode(obj))
            return
        try:
            obj = iter(obj)
        except TypeError, e:
            if callable(obj):
                self.feed(obj(), prefixes)
            else:
                #Just try to make it text, i.e. punt
                self.feed(unicode(obj), prefixes)
        else:
            for subobj in obj:
                self.feed(subobj, prefixes)
        return
class structencoder(structwriter):
    """
    Buffering variant of `structwriter`: instead of writing the serialized
    XML/HTML to an external stream, it accumulates the output in memory for
    retrieval as a string via the read() method.

    Usage examples:

    from amara.writers.struct import structencoder, E, NS, ROOT, RAW
    output = structencoder(indent=u"yes")
    output.feed(
    ROOT(
      E(u'a', (E(u'b', unicode(i)) for i in range(10)))
    ))
    print output.read()

    Using coroutine mode, and a cursor:

    from amara.writers.struct import structencoder, E, NS, ROOT, RAW, E_CURSOR
    output = structencoder()
    f = output.cofeed(ROOT(E(u'a', E_CURSOR(u'b', {u'attr1': u'val1'}))))
    f.send(E(u'c', u'1'))
    f.send(E(u'c', u'2'))
    f.send(E(u'c', u'3'))
    f.close()
    print output.read()

    Outputs:

    <?xml version="1.0" encoding="utf-8"?>
    <a><b attr1="val1"><c>1</c><c>2</c><c>3</c></b></a>

    See documentation for more extensive examples.
    """
    def __init__(self, **kwargs):
        # The encoder acts as its own output stream: the printer's write()
        # calls land in self.chunks until read() drains them.
        self.chunks = []
        structwriter.__init__(self, stream=self, **kwargs)
    def write(self, chunk):
        # File-like hook invoked by the underlying printer.
        self.chunks.append(chunk)
    def read(self):
        # Drain the buffer and return everything serialized so far.
        buffered, self.chunks = self.chunks, []
        return ''.join(buffered)
class E(object):
    """
    Proxy for an XML element in struct serialization.

    name  - a unicode qname, or a (namespace, qname) tuple
    items - optional leading dict of attributes, followed by any content
            (text, child proxies, iterables or callables thereof)
    """
    def __init__(self, name, *items):
        # Split the name into (namespace, qname); a bare name gets the
        # sentinel namespace, to be resolved later from in-scope prefixes.
        if isinstance(name, tuple):
            self.ns, self.qname = imap(U, name)
        else:
            self.ns = UNSPECIFIED_NAMESPACE
            self.qname = U(name)
        # A leading mapping among the items supplies the attributes.
        if items and isinstance(items[0], dict):
            raw_attrs = items[0]
            self.content = items[1:]
        else:
            raw_attrs = None
            self.content = items
        #XXX: Move to dictionary set expr in 2.6 or 3.0
        self.attributes = None
        if raw_attrs:
            self.attributes = {}
            for aname, avalue in raw_attrs.iteritems():
                if isinstance(aname, tuple):
                    ans, aqname = imap(U, aname)
                else:
                    ans, aqname = None, U(aname)
                #Unicode value coercion to help make it a bit smarter
                self.attributes[ans, aqname] = aqname, U(avalue)
class E_CURSOR(E):
    """
    Proxy for a cursor element: an element whose children arrive later, by
    being sent to a cofeed coroutine.  Accepts only a name and an optional
    attribute mapping.
    """
    def __init__(self, name, *items):
        if len(items) > 1:
            #FIXME: L10N
            raise ValueError('E_COROUTINE objects can only be initialized with a name and optional attribute mapping')
        E.__init__(self, name, *items)
    @coroutine
    def do(self, sink):
        """Coroutine: serialize each object sent to us via the writer."""
        try:
            while True:
                sink.feed((yield))
        except GeneratorExit:
            # Caller closed the coroutine; stop forwarding.
            pass
class NS(object):
    """Namespace-declaration proxy: binds `prefix` to `namespace`."""
    def __init__(self, prefix, namespace):
        self.prefix, self.namespace = prefix, namespace
class RAW(object):
    """
    Proxy for raw, pre-serialized markup, e.g. RAW('<test />'); the writer
    parses it and re-emits it at this position.  Multiple string arguments
    are concatenated into a single markup string.
    """
    def __init__(self, *chunks):
        #Eventually use incremental parse and feed()
        self.content = ''.join(chunks)
class ROOT(object):
    """Proxy for a document (root) node; wraps the top-level content items."""
    def __init__(self, *content):
        # Stored as the tuple of positional arguments, iterated by feed()
        self.content = content
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/struct.py
|
struct.py
|
import itertools
from amara import tree
from amara.writers import _xmlstream
from amara.namespaces import NULL_NAMESPACE, XML_NAMESPACE, XMLNS_NAMESPACE
__all__ = ('xmlprinter', 'xmlprettyprinter', "BASIC", "EXCLUSIVE")
# Canonicalization (c14n) modes, per http://www.w3.org/TR/xml-c14n and
# http://www.w3.org/TR/xml-exc-c14n/
BASIC = "BASIC"
EXCLUSIVE = "EXCLUSIVE"
class xmlprinter(object):
    """
    An `xmlprinter` instance provides functions for serializing an XML or
    XML-like document to a stream, based on SAX-like event calls
    initiated by a _Visitor instance.

    The methods in this base class attempt to emit a well-formed parsed
    general entity conformant to XML 1.0 syntax, with no extra
    whitespace added for visual formatting. Subclasses may emit
    documents conformant to other syntax specifications or with
    additional whitespace for indenting.

    The degree of well-formedness of the output depends on the data
    supplied in the event calls; no checks are done for conditions that
    would result in syntax errors, such as two attributes with the same
    name, "--" in a comment, etc. However, attribute() will do nothing
    if the previous event was not startElement(), thus preventing
    spurious attribute serializations.

    `canonical_form` must be None, BASIC or EXCLUSIVE
    It controls c14n of the serialization, according to
    http://www.w3.org/TR/xml-c14n ("basic") or
    http://www.w3.org/TR/xml-exc-c14n/ ("exclusive")
    """
    _canonical_form = None
    def __init__(self, stream, encoding):
        """
        `stream` must be a file-like object open for writing binary
        data. `encoding` specifies the encoding which is to be used for
        writing to the stream.
        """
        self.stream = xs = _xmlstream.xmlstream(stream, encoding)
        self.encoding = encoding
        # Bind the stream's writers directly for brevity and speed
        self.write_ascii = xs.write_ascii
        self.write_encode = xs.write_encode
        self.write_escape = xs.write_escape
        # Name of a start-tag that is open ('<name ...' written, '>' not yet)
        self._element_name = None
        self.omit_declaration = False
    def start_document(self, version='1.0', standalone=None):
        """
        Handles a startDocument event.

        Writes XML declaration or text declaration to the stream.
        """
        if not self.omit_declaration:
            self.write_ascii('<?xml version="%s" encoding="%s"' % (version,
                                                                   self.encoding))
            if standalone is not None:
                self.write_ascii(' standalone="%s"' % standalone)
            self.write_ascii('?>\n')
        return
    def end_document(self):
        """
        Handles an endDocument event.

        Writes any necessary final output to the stream.
        """
        if self._element_name:
            if self._canonical_form:
                # c14n forbids the minimized empty-element form
                self.write_ascii('</')
                self.write_encode(self._element_name, 'end-tag name')
                self.write_ascii('>')
            else:
                # No element content, use minimized form
                self.write_ascii('/>')
            self._element_name = None
        return
    def doctype(self, name, publicid, systemid):
        """
        Handles a doctype event.

        Writes a document type declaration to the stream.
        """
        if self._canonical_form:
            # c14n omits the document type declaration entirely
            return
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        if publicid and systemid:
            self.write_ascii('<!DOCTYPE ')
            self.write_encode(name, 'document type name')
            self.write_ascii(' PUBLIC "')
            self.write_encode(publicid, 'document type public-id')
            self.write_ascii('" "')
            self.write_encode(systemid, 'document type system-id')
            self.write_ascii('">\n')
        elif systemid:
            self.write_ascii('<!DOCTYPE ')
            self.write_encode(name, 'document type name')
            self.write_ascii(' SYSTEM "')
            self.write_encode(systemid, 'document type system-id')
            self.write_ascii('">\n')
        return
    def start_element(self, namespace, name, nsdecls, attributes):
        """
        Handles a start-tag event.

        Writes part of an element's start-tag or empty-element tag to
        the stream, and closes the start tag of the previous element,
        if one remained open. Writes the xmlns attributes for the given
        sequence of prefix/namespace-uri pairs, and invokes attribute() as
        neeeded to write the given sequence of attribute qname/value pairs.

        Note, the `namespace` argument is ignored in this class.
        """
        write_ascii, write_escape, write_encode = (
            self.write_ascii, self.write_escape, self.write_encode)
        if self._element_name:
            # Close current start tag
            write_ascii('>')
        self._element_name = name
        write_ascii('<')
        write_encode(name, 'start-tag name')
        if self._canonical_form:
            # Create the namespace "attributes"
            namespace_attrs = [ (prefix and u'xmlns:' + prefix or u'xmlns', uri)
                                for prefix, uri in nsdecls
                                ]
            namespace_attrs.sort()
            # Write the namespaces decls, in alphabetical order of prefixes, with
            # the default coming first (easy since None comes before any actual
            # Unicode value)
            sorted_attributes = sorted(attributes)
            #FIXME: attributes must be sorted using nsuri / local combo (where nsuri is "" for null namespace)
            for name, value in namespace_attrs:
                #FIXME: check there are no redundant NSDecls
                write_ascii(' ')
                write_encode(name, 'attribute name')
                # Replace characters illegal in attribute values and wrap
                # the value with quotes (") in accordance with Canonical XML.
                write_ascii('="')
                write_escape(value, self._attr_entities_quot)
                write_ascii('"')
            for name, value in sorted_attributes:
                write_ascii(' ')
                write_encode(name, 'attribute name')
                # Replace characters illegal in attribute values and wrap
                # the value with quotes (") in accordance with Canonical XML.
                write_ascii('="')
                write_escape(value, self._attr_entities_quot)
                write_ascii('"')
        else:
            # Create the namespace "attributes"
            nsdecls = [ (prefix and u'xmlns:' + prefix or u'xmlns', uri)
                        for prefix, uri in nsdecls
                        ]
            # Merge the namespace and attribute sequences for output
            attributes = itertools.chain(nsdecls, attributes)
            for name, value in attributes:
                # Writes an attribute to the stream as a space followed by
                # the name, '=', and quote-delimited value. It is the caller's
                # responsibility to ensure that this is called in the correct
                # context, if well-formed output is desired.
                # Preference is given to quotes (") around attribute values,
                # in accordance with the DomWriter interface in DOM Level 3
                # Load and Save (25 July 2002 WD), although a value that
                # contains quotes but no apostrophes will be delimited by
                # apostrophes (') instead.
                write_ascii(" ")
                write_encode(name, 'attribute name')
                # Special case for HTML boolean attributes (just a name)
                if value is not None:
                    # Replace characters illegal in attribute values and wrap
                    # the value with appropriate quoting in accordance with
                    # DOM Level 3 Load and Save:
                    # 1. Attributes not containing quotes are serialized in
                    #    quotes.
                    # 2. Attributes containing quotes but no apostrophes are
                    #    serialized in apostrophes.
                    # 3. Attributes containing both forms of quotes are
                    #    serialized in quotes, with quotes within the value
                    #    represented by the predefined entity `&quot;`.
                    if '"' in value and "'" not in value:
                        # Use apostrophes (#2)
                        entitymap = self._attr_entities_apos
                        quote = "'"
                    else:
                        # Use quotes (#1 and #3)
                        entitymap = self._attr_entities_quot
                        quote = '"'
                    write_ascii("=")
                    write_ascii(quote)
                    write_escape(value, entitymap)
                    write_ascii(quote)
        return
    def end_element(self, namespace, name):
        """
        Handles an end-tag event.

        Writes the closing tag for an element to the stream, or, if the
        element had no content, finishes writing the empty element tag.

        Note, the `namespace` argument is ignored in this class.
        """
        if self._element_name:
            self._element_name = None
            if self._canonical_form:
                self.write_ascii('>')
            else:
                # No element content, use minimized form
                self.write_ascii('/>')
                return
        self.write_ascii('</')
        self.write_encode(name, 'end-tag name')
        self.write_ascii('>')
        return
    def text(self, text, disable_escaping=False):
        """
        Handles a text event.

        Writes character data to the stream. If the `disable_escaping` flag
        is not set, then unencodable characters are replaced with
        numeric character references; "&" and "<" are escaped as "&amp;"
        and "&lt;"; and ">" is escaped as "&gt;" if it is preceded by
        "]]". If the `disable_escaping` flag is set, then the characters
        are written to the stream with no escaping of any kind, which
        will result in an exception if there are unencodable characters.
        """
        if self._canonical_form:
            # FIX: c14n line-ending normalization; the result of replace()
            # was previously discarded (strings are immutable).
            text = text.replace(u'\r\n', u'\r')
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        if disable_escaping:
            # Try to write the raw encoded string (may throw exception)
            self.write_encode(text, 'text')
        else:
            # FIXME: only escape ">" if after "]]"
            # (may not be worth the trouble)
            self.write_escape(text, self._text_entities)
        return
    def cdata_section(self, data):
        """
        Handles a cdataSection event.

        Writes character data to the stream as a CDATA section.
        """
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        if self._canonical_form:
            #No CDATA sections in c14n
            # FIX: previously `text.replace(...)` -- `text` was an undefined
            # name (NameError) and the replace result was discarded.
            data = data.replace(u'\r\n', u'\r')
            self.write_escape(data, self._text_entities)
        else:
            # Split on ']]>' so the terminator never appears inside a section
            sections = data.split(u']]>')
            self.write_ascii('<![CDATA[')
            self.write_encode(sections[0], 'CDATA section')
            for section in sections[1:]:
                self.write_ascii(']]]]><![CDATA[>')
                self.write_encode(section, 'CDATA section')
            self.write_ascii(']]>')
        return
    def processing_instruction(self, target, data):
        """
        Handles a processingInstruction event.

        Writes a processing instruction to the stream.
        """
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        self.write_ascii('<?')
        self.write_encode(target, 'processing instruction target')
        if data:
            self.write_ascii(' ')
            self.write_encode(data, 'processing instruction data')
        self.write_ascii('?>')
        return
    def comment(self, data):
        """
        Handles a comment event.

        Writes a comment to the stream.
        """
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        self.write_ascii("<!--")
        self.write_encode(data, 'comment')
        self.write_ascii("-->")
        return
    # Entities as defined by Canonical XML 1.0 (http://www.w3.org/TR/xml-c14n)
    # For XML 1.1, add u'\u0085': '&#133;' and u'\u2028': '&#8232;' to all
    # FIX: these three tables had been corrupted (entity references decoded
    # back to the raw characters, turning them into identity mappings, which
    # would have disabled all markup escaping); restored.
    _text_entities = _xmlstream.entitymap({'<' : '&lt;',
                                           '>' : '&gt;',
                                           '&' : '&amp;',
                                           '\r' : '&#13;',
                                           })
    _attr_entities_quot = _xmlstream.entitymap({'<' : '&lt;',
                                                '&' : '&amp;',
                                                '\t' : '&#9;',
                                                '\n' : '&#10;',
                                                '\r' : '&#13;',
                                                '"' : '&quot;',
                                                })
    _attr_entities_apos = _xmlstream.entitymap({'<' : '&lt;',
                                                '&' : '&amp;',
                                                '\t' : '&#9;',
                                                '\n' : '&#10;',
                                                '\r' : '&#13;',
                                                "'" : '&#39;',
                                                })
class canonicalxmlprinter(xmlprinter):
    """
    `canonicalxmlprinter` emits only c14n XML:

    http://www.ibm.com/developerworks/xml/library/x-c14n/
    http://www.w3.org/TR/xml-c14n

    Does not yet:

    * Normalize all attribute values
    * Specify all default attributes

    Note: this class is fully compatible with exclusive c14n:
    http://www.w3.org/TR/xml-exc-c14n/
    whether or not the operation is exclusive depends on preprocessing
    operations appplied by the caller.
    """
    # Enable canonical-form output.
    _canonical_form = True
    #FIXME: A bit inelegant to require the encoding, then throw it away. Perhaps
    #we should at least issue a warning if they send a non-UTF8 encoding
    def __init__(self, stream, encoding):
        """
        `stream` must be a file-like object open for writing binary
        data.
        """
        # Canonical XML is always UTF-8, whatever encoding was requested
        xmlprinter.__init__(self, stream, 'utf-8')
        self.omit_declaration = True
    def prepare(self, node, kwargs):
        """
        `inclusive_prefixes` is a list (or None) of namespaces representing the
        "InclusiveNamespacesPrefixList" list in exclusive c14n.
        It may only be a non-empty list if `canonical_form` == EXCLUSIVE
        """
        exclusive = kwargs.get("exclusive", False)
        nshints = kwargs.get("nshints", {})
        inclusive_prefixes = kwargs.get("inclusive_prefixes", [])
        added_attributes = {}  #All the contents should be XML NS attrs
        if not exclusive:
            #Roll in ancestral xml:* attributes
            parent_xml_attrs = node.xml_select(u'ancestor::*/@xml:*')
            for attr in parent_xml_attrs:
                aname = (attr.xml_namespace, attr.xml_qname)
                if (aname not in added_attributes
                    and aname not in node.xml_attributes):
                    added_attributes[attr.xml_qname] = attr.xml_value
        nsnodes = node.xml_select('namespace::*')
        inclusive_prefixes = inclusive_prefixes or []
        if u'#default' in inclusive_prefixes:
            # '#default' stands for the default (empty-prefix) namespace
            inclusive_prefixes.remove(u'#default')
            inclusive_prefixes.append(u'')
        decls_to_remove = []
        if exclusive:
            used_prefixes = [ n.xml_prefix for n in node.xml_select('self::*|@*') ]
            declared_prefixes = []
            for ans, anodename in node.xml_attributes:
                if ans == XMLNS_NAMESPACE:
                    attr = node.xml_attributes[ans, anodename]
                    prefix = attr.xml_local
                    declared_prefixes.append(prefix)
                    # Exclusive c14n drops declarations not visibly utilized
                    if attr.xml_local not in used_prefixes:
                        decls_to_remove.append(prefix)
        #Roll in ancestral NS nodes
        for ns in nsnodes:
            prefix = ns.xml_name
            if (ns.xml_value != XML_NAMESPACE
                and ns.xml_name not in node.xml_namespaces
                # FIX: was `inclusivePrefixes`, an undefined name that raised
                # NameError whenever this branch was evaluated in exclusive mode
                and (not exclusive or ns.xml_name in inclusive_prefixes)):
                nshints[prefix] = ns.xml_value
            elif (exclusive
                  and prefix in used_prefixes
                  and prefix not in declared_prefixes):
                nshints[prefix] = ns.xml_value
        kwargs["nshints"] = nshints
        if "inclusive_prefixes" in kwargs: del kwargs["inclusive_prefixes"]
        if "exclusive" in kwargs: del kwargs["exclusive"]
        # NOTE(review): this immediately deletes the "nshints" entry set a few
        # lines above, so callers never receive it via kwargs; looks
        # contradictory but is preserved pending confirmation of intent.
        if "nshints" in kwargs: del kwargs["nshints"]
        #FIXME: nshints not yet actually used. Required for c14n of nodes below the top-level
        self._nshints = nshints or {}
        return kwargs
class xmlprettyprinter(xmlprinter):
    """
    An xmlprettyprinter instance provides functions for serializing an
    XML or XML-like document to a stream, based on SAX-like event calls.
    It produces the same markup as xmlprinter, except that extra
    whitespace is inserted between structural events for readability.
    The indent attribute is the string emitted once per nesting level;
    it defaults to 2 spaces.
    """
    # The amount of indent for each level of nesting
    # NOTE(review): the docstring says "2 spaces" but this literal contains
    # one -- possibly mangled in transit; confirm against upstream.
    indent = ' '
    def __init__(self, stream, encoding):
        xmlprinter.__init__(self, stream, encoding)
        self._level = 0
        # Suppressed before the first element, re-enabled afterwards.
        self._can_indent = False
    def start_element(self, namespace, name, namespaces, attributes):
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        if self._can_indent:
            self.write_ascii('\n' + self.indent * self._level)
        xmlprinter.start_element(self, namespace, name, namespaces,
                                 attributes)
        self._level += 1
        self._can_indent = True
    def end_element(self, namespace, name):
        self._level -= 1
        # Do not break the short tag form (<tag/>)
        if not self._element_name and self._can_indent:
            self.write_ascii('\n' + self.indent * self._level)
        xmlprinter.end_element(self, namespace, name)
        # Indenting is allowed again after an end-tag
        self._can_indent = True
    def text(self, data, disable_escaping=False):
        xmlprinter.text(self, data, disable_escaping)
        # Mixed content: suppress indentation inside this element
        self._can_indent = False
    def cdata_section(self, data):
        xmlprinter.cdata_section(self, data)
        # Mixed content: suppress indentation inside this element
        self._can_indent = False
    def processing_instruction(self, target, data):
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        if self._can_indent:
            self.write_ascii('\n' + self.indent * self._level)
        xmlprinter.processing_instruction(self, target, data)
        # Indenting is allowed again after a processing instruction
        self._can_indent = True
    def comment(self, data):
        if self._element_name:
            self.write_ascii('>')
            self._element_name = None
        if self._can_indent:
            self.write_ascii('\n' + self.indent * self._level)
        xmlprinter.comment(self, data)
        # Indenting is allowed again after a comment
        self._can_indent = True
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/_xmlprinters.py
|
_xmlprinters.py
|
__all__ = ['ENTITIES_HTML_32', 'ENTITIES_HTML_40', 'ENTITIES_XHTML_10']

# Each table maps a Unicode character to the complete HTML character
# entity reference (including the '&' and ';') that the HTML/XHTML
# printers emit in its place.
# NOTE(review): the entity values had been corrupted (the '&name;'
# strings were decoded to the literal characters); restored per the
# HTML 4.01 specification, section 24.

# HTML 3.2 defined character entities
ENTITIES_HTML_32 = {
    # Sect 24.2 -- ISO 8859-1
    u'\u00A0' : '&nbsp;',
    u'\u00A1' : '&iexcl;',
    u'\u00A2' : '&cent;',
    u'\u00A3' : '&pound;',
    u'\u00A4' : '&curren;',
    u'\u00A5' : '&yen;',
    u'\u00A6' : '&brvbar;',
    u'\u00A7' : '&sect;',
    u'\u00A8' : '&uml;',
    u'\u00A9' : '&copy;',
    u'\u00AA' : '&ordf;',
    u'\u00AB' : '&laquo;',
    u'\u00AC' : '&not;',
    u'\u00AD' : '&shy;',
    u'\u00AE' : '&reg;',
    u'\u00AF' : '&macr;',
    u'\u00B0' : '&deg;',
    u'\u00B1' : '&plusmn;',
    u'\u00B2' : '&sup2;',
    u'\u00B3' : '&sup3;',
    u'\u00B4' : '&acute;',
    u'\u00B5' : '&micro;',
    u'\u00B6' : '&para;',
    u'\u00B7' : '&middot;',
    u'\u00B8' : '&cedil;',
    u'\u00B9' : '&sup1;',
    u'\u00BA' : '&ordm;',
    u'\u00BB' : '&raquo;',
    u'\u00BC' : '&frac14;',
    u'\u00BD' : '&frac12;',
    u'\u00BE' : '&frac34;',
    u'\u00BF' : '&iquest;',
    u'\u00C0' : '&Agrave;',
    u'\u00C1' : '&Aacute;',
    u'\u00C2' : '&Acirc;',
    u'\u00C3' : '&Atilde;',
    u'\u00C4' : '&Auml;',
    u'\u00C5' : '&Aring;',
    u'\u00C6' : '&AElig;',
    u'\u00C7' : '&Ccedil;',
    u'\u00C8' : '&Egrave;',
    u'\u00C9' : '&Eacute;',
    u'\u00CA' : '&Ecirc;',
    u'\u00CB' : '&Euml;',
    u'\u00CC' : '&Igrave;',
    u'\u00CD' : '&Iacute;',
    u'\u00CE' : '&Icirc;',
    u'\u00CF' : '&Iuml;',
    u'\u00D0' : '&ETH;',
    u'\u00D1' : '&Ntilde;',
    u'\u00D2' : '&Ograve;',
    u'\u00D3' : '&Oacute;',
    u'\u00D4' : '&Ocirc;',
    u'\u00D5' : '&Otilde;',
    u'\u00D6' : '&Ouml;',
    u'\u00D7' : '&times;',
    u'\u00D8' : '&Oslash;',
    u'\u00D9' : '&Ugrave;',
    u'\u00DA' : '&Uacute;',
    u'\u00DB' : '&Ucirc;',
    u'\u00DC' : '&Uuml;',
    u'\u00DD' : '&Yacute;',
    u'\u00DE' : '&THORN;',
    u'\u00DF' : '&szlig;',
    u'\u00E0' : '&agrave;',
    u'\u00E1' : '&aacute;',
    u'\u00E2' : '&acirc;',
    u'\u00E3' : '&atilde;',
    u'\u00E4' : '&auml;',
    u'\u00E5' : '&aring;',
    u'\u00E6' : '&aelig;',
    u'\u00E7' : '&ccedil;',
    u'\u00E8' : '&egrave;',
    u'\u00E9' : '&eacute;',
    u'\u00EA' : '&ecirc;',
    u'\u00EB' : '&euml;',
    u'\u00EC' : '&igrave;',
    u'\u00ED' : '&iacute;',
    u'\u00EE' : '&icirc;',
    u'\u00EF' : '&iuml;',
    u'\u00F0' : '&eth;',
    u'\u00F1' : '&ntilde;',
    u'\u00F2' : '&ograve;',
    u'\u00F3' : '&oacute;',
    u'\u00F4' : '&ocirc;',
    u'\u00F5' : '&otilde;',
    u'\u00F6' : '&ouml;',
    u'\u00F7' : '&divide;',
    u'\u00F8' : '&oslash;',
    u'\u00F9' : '&ugrave;',
    u'\u00FA' : '&uacute;',
    u'\u00FB' : '&ucirc;',
    u'\u00FC' : '&uuml;',
    u'\u00FD' : '&yacute;',
    u'\u00FE' : '&thorn;',
    u'\u00FF' : '&yuml;',
    }

# HTML 4.01 defined character entities
ENTITIES_HTML_40 = {
    # Sect 24.3 -- Symbols, Mathematical Symbols, and Greek Letters
    # Latin Extended-B
    u'\u0192' : '&fnof;',
    # Greek
    u'\u0391' : '&Alpha;',
    u'\u0392' : '&Beta;',
    u'\u0393' : '&Gamma;',
    u'\u0394' : '&Delta;',
    u'\u0395' : '&Epsilon;',
    u'\u0396' : '&Zeta;',
    u'\u0397' : '&Eta;',
    u'\u0398' : '&Theta;',
    u'\u0399' : '&Iota;',
    u'\u039A' : '&Kappa;',
    u'\u039B' : '&Lambda;',
    u'\u039C' : '&Mu;',
    u'\u039D' : '&Nu;',
    u'\u039E' : '&Xi;',
    u'\u039F' : '&Omicron;',
    u'\u03A0' : '&Pi;',
    u'\u03A1' : '&Rho;',
    u'\u03A3' : '&Sigma;',
    u'\u03A4' : '&Tau;',
    u'\u03A5' : '&Upsilon;',
    u'\u03A6' : '&Phi;',
    u'\u03A7' : '&Chi;',
    u'\u03A8' : '&Psi;',
    u'\u03A9' : '&Omega;',
    u'\u03B1' : '&alpha;',
    u'\u03B2' : '&beta;',
    u'\u03B3' : '&gamma;',
    u'\u03B4' : '&delta;',
    u'\u03B5' : '&epsilon;',
    u'\u03B6' : '&zeta;',
    u'\u03B7' : '&eta;',
    u'\u03B8' : '&theta;',
    u'\u03B9' : '&iota;',
    u'\u03BA' : '&kappa;',
    u'\u03BB' : '&lambda;',
    u'\u03BC' : '&mu;',
    u'\u03BD' : '&nu;',
    u'\u03BE' : '&xi;',
    u'\u03BF' : '&omicron;',
    u'\u03C0' : '&pi;',
    u'\u03C1' : '&rho;',
    u'\u03C2' : '&sigmaf;',
    u'\u03C3' : '&sigma;',
    u'\u03C4' : '&tau;',
    u'\u03C5' : '&upsilon;',
    u'\u03C6' : '&phi;',
    u'\u03C7' : '&chi;',
    u'\u03C8' : '&psi;',
    u'\u03C9' : '&omega;',
    u'\u03D1' : '&thetasym;',
    u'\u03D2' : '&upsih;',
    u'\u03D6' : '&piv;',
    # General Punctuation
    u'\u2022' : '&bull;',     # bullet
    u'\u2026' : '&hellip;',   # horizontal ellipsis
    u'\u2032' : '&prime;',    # prime (minutes/feet)
    u'\u2033' : '&Prime;',    # double prime (seconds/inches)
    u'\u203E' : '&oline;',    # overline (spacing overscore)
    # Bug fix: the &frasl; key was u'\u203A', which is SINGLE
    # RIGHT-POINTING ANGLE QUOTATION MARK and collided with (and was
    # silently overwritten by) the &rsaquo; entry below.  The fraction
    # slash is U+2044.
    u'\u2044' : '&frasl;',    # fraction slash
    # Letterlike Symbols
    u'\u2118' : '&weierp;',   # script capital P (power set/Weierstrass p)
    u'\u2111' : '&image;',    # blackletter capital I (imaginary part)
    u'\u211C' : '&real;',     # blackletter capital R (real part)
    u'\u2122' : '&trade;',    # trademark
    u'\u2135' : '&alefsym;',  # alef symbol (first transfinite cardinal)
    # Arrows
    u'\u2190' : '&larr;',     # leftwards arrow
    u'\u2191' : '&uarr;',     # upwards arrow
    u'\u2192' : '&rarr;',     # rightwards arrow
    u'\u2193' : '&darr;',     # downwards arrow
    u'\u2194' : '&harr;',     # left right arrow
    u'\u21B5' : '&crarr;',    # downwards arrow with corner leftwards
    u'\u21D0' : '&lArr;',     # leftwards double arrow
    u'\u21D1' : '&uArr;',     # upwards double arrow
    u'\u21D2' : '&rArr;',     # rightwards double arrow
    u'\u21D3' : '&dArr;',     # downwards double arrow
    u'\u21D4' : '&hArr;',     # left right double arrow
    # Mathematical Operators
    u'\u2200' : '&forall;',   # for all
    u'\u2202' : '&part;',     # partial differential
    u'\u2203' : '&exist;',    # there exists
    u'\u2205' : '&empty;',    # empty set, null set, diameter
    u'\u2207' : '&nabla;',    # nabla, backward difference
    u'\u2208' : '&isin;',     # element of
    u'\u2209' : '&notin;',    # not an element of
    u'\u220B' : '&ni;',       # contains as member
    u'\u220F' : '&prod;',     # n-ary product, product sign
    u'\u2211' : '&sum;',      # n-ary sumation
    u'\u2212' : '&minus;',    # minus sign
    u'\u2217' : '&lowast;',   # asterisk operator
    u'\u221A' : '&radic;',    # square root, radical sign
    u'\u221D' : '&prop;',     # proportional to
    u'\u221E' : '&infin;',    # infinity
    u'\u2220' : '&ang;',      # angle
    u'\u2227' : '&and;',      # logical and, wedge
    u'\u2228' : '&or;',       # logical or, vee
    u'\u2229' : '&cap;',      # intersection, cap
    u'\u222A' : '&cup;',      # union, cup
    u'\u222B' : '&int;',      # integral
    u'\u2234' : '&there4;',   # therefore
    u'\u223C' : '&sim;',      # tilde operator, varies with, similar to
    u'\u2245' : '&cong;',     # approximately equal to
    u'\u2248' : '&asymp;',    # almost equal to, asymptotic to
    u'\u2260' : '&ne;',       # not equal to
    u'\u2261' : '&equiv;',    # identical to
    u'\u2264' : '&le;',       # less-than or equal to
    u'\u2265' : '&ge;',       # greater-than or equal to
    u'\u2282' : '&sub;',      # subset of
    u'\u2283' : '&sup;',      # superset of
    u'\u2284' : '&nsub;',     # not subset of
    u'\u2286' : '&sube;',     # subset of or equal to
    u'\u2287' : '&supe;',     # superset of or equal to
    u'\u2295' : '&oplus;',    # circled plus, direct sum
    u'\u2297' : '&otimes;',   # circled times, vector product
    u'\u22A5' : '&perp;',     # up tack, orthogonal to, perpendicular
    u'\u22C5' : '&sdot;',     # dot operator
    u'\u2308' : '&lceil;',    # left ceiling, apl upstile
    u'\u2309' : '&rceil;',    # right ceiling
    u'\u230A' : '&lfloor;',   # left floor, apl downstile
    u'\u230B' : '&rfloor;',   # right floor
    u'\u2329' : '&lang;',     # left-pointing angle bracket, bra
    u'\u232A' : '&rang;',     # right-pointing angle bracket, ket
    u'\u25CA' : '&loz;',      # lozenge
    # Miscellaneous Symbols
    u'\u2660' : '&spades;',
    u'\u2663' : '&clubs;',
    u'\u2665' : '&hearts;',
    u'\u2666' : '&diams;',
    # Sect 24.4 -- Markup Significant and Internationalization
    # Latin Extended-A
    u'\u0152' : '&OElig;',    # capital ligature OE
    u'\u0153' : '&oelig;',    # small ligature oe
    u'\u0160' : '&Scaron;',   # capital S with caron
    u'\u0161' : '&scaron;',   # small s with caron
    u'\u0178' : '&Yuml;',     # capital Y with diaeresis
    # Spacing Modifier Letters
    u'\u02C6' : '&circ;',     # circumflex accent
    # Bug fix: was the misspelled (and thus invalid) entity '&tidle;'
    u'\u02DC' : '&tilde;',    # small tilde
    # General Punctuation
    u'\u2002' : '&ensp;',     # en space
    u'\u2003' : '&emsp;',     # em space
    u'\u2009' : '&thinsp;',   # thin space
    u'\u200C' : '&zwnj;',     # zero-width non-joiner
    u'\u200D' : '&zwj;',      # zero-width joiner
    u'\u200E' : '&lrm;',      # left-to-right mark
    u'\u200F' : '&rlm;',      # right-to-left mark
    u'\u2013' : '&ndash;',    # en dash
    u'\u2014' : '&mdash;',    # em dash
    u'\u2018' : '&lsquo;',    # left single quotation mark
    u'\u2019' : '&rsquo;',    # right single quotation mark
    u'\u201A' : '&sbquo;',    # single low-9 quotation mark
    u'\u201C' : '&ldquo;',    # left double quotation mark
    u'\u201D' : '&rdquo;',    # right double quotation mark
    u'\u201E' : '&bdquo;',    # double low-9 quotation mark
    u'\u2020' : '&dagger;',   # dagger
    u'\u2021' : '&Dagger;',   # double dagger
    u'\u2030' : '&permil;',   # per mille sign
    u'\u2039' : '&lsaquo;',   # single left-pointing angle quotation mark
    u'\u203A' : '&rsaquo;',   # single right-pointing angle quotation mark
    u'\u20AC' : '&euro;',     # euro sign
    }

# HTML 4.01 includes all of the HTML 3.2 entities as well
ENTITIES_HTML_40.update(ENTITIES_HTML_32)

# XHTML 1.0 uses the same character entity set as HTML 4.01
ENTITIES_XHTML_10 = ENTITIES_HTML_40.copy()
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/htmlentities.py
|
htmlentities.py
|
"""
The writers architecture needs a major overhaul. It's presently a confusing jumble of
layers with all sorts of inefficiency.
Principles of redesign:
* eliminate writer/printer distinction. Everything is a writer
* What are now printers become the main code base for the lowest level writers,
which are really no more than a set of ultra-efficient routines for e.g. character encoding
* Lowest-level writer (a "writerbase" class?) manages no state, and requires a smart caller
For example it would do nothing about namespace consistency, and would rely on
intelligence by the caller
* Higher level writers do try to modularize as much as possible, but with less misuse of OO
More callbacks & decorators & such
"""
import sys
from amara import Error
from amara.lib.xmlstring import *
__all__ = ['WriterError', 'writer', 'streamwriter',
'HTML_W', 'XML_W', 'XHTML_W',
'lookup', 'register', 'xml_print'
]
# Constants for the most common writers
HTML_W = 'html'
XML_W = 'xml'
XHTML_W = 'xhtml'
_lookup_table = {}
class WriterError(Error):
    """
    Error codes raised by writer implementations.
    """
    ATTRIBUTE_ADDED_TOO_LATE = 1
    ATTRIBUTE_ADDED_TO_NON_ELEMENT = 2

    @classmethod
    def _load_messages(cls):
        # Localized human-readable text for each error code
        from gettext import gettext as _
        messages = {
            cls.ATTRIBUTE_ADDED_TOO_LATE: _(
                'Children were added to the element'),
            cls.ATTRIBUTE_ADDED_TO_NON_ELEMENT: _(
                'Attempted to add attribute to non-element'),
            }
        return messages
def _init_lookup_table():
    """
    Populate the writer-name -> printer-class registry on first use.
    """
    from amara.writers import _xmlprinters, _htmlprinters, _xhtmlprinters
    _lookup_table[XML_W] = _xmlprinters.xmlprinter
    _lookup_table[XML_W + '-indent'] = _xmlprinters.xmlprettyprinter
    _lookup_table[XML_W + '-canonical'] = _xmlprinters.canonicalxmlprinter
    _lookup_table[HTML_W] = _htmlprinters.htmlprinter
    _lookup_table[HTML_W + '-indent'] = _htmlprinters.htmlprettyprinter
    _lookup_table[XHTML_W] = _xhtmlprinters.xhtmlprinter
    _lookup_table[XHTML_W + '-indent'] = _xhtmlprinters.xhtmlprettyprinter
    # NOTE(review): both ns-strip variants map to the same non-indenting
    # printer -- presumably no pretty-printing stripper exists; confirm
    _lookup_table[HTML_W + '-nsstrip'] = _htmlprinters.html_ns_stripper
    _lookup_table[HTML_W + '-nsstrip-indent'] = _htmlprinters.html_ns_stripper
def lookup(printer_name):
    """(str): Printer class

    Return a printer class for writing DOM elements.
    Currently the following values for 'printer_name' are supported:
      xml html xhtml
    Raises ValueError for an unregistered name.
    """
    if not _lookup_table:
        _init_lookup_table()
    try:
        return _lookup_table[printer_name]
    except KeyError:
        raise ValueError("Unknown printer class %r" % printer_name)
def register(printer_name, printer_class):
    """(str, class)

    Record a printer class so that future calls to lookup() will
    return it.  Raises ValueError if the name is already taken.
    """
    if not _lookup_table:
        _init_lookup_table()
    if printer_name not in _lookup_table:
        _lookup_table[printer_name] = printer_class
    else:
        raise ValueError("Already a printer registered for name %r"
                         % printer_name)
def _xml_write(N, writer=XML_W, stream=None, encoding='UTF-8', **kwargs):
    """(node, file, str, class): None

    INTERNAL function.
    Serializes an XML tree, writing it to the specified 'stream' object
    (standard output when None).
    """
    from amara.writers import node
    if isinstance(writer, str):
        # Writer selected by registered name
        writer_class = lookup(writer)
    else:
        # Assume the provided writer is already a writer class
        writer_class = writer
    if stream is None:
        import sys
        stream = sys.stdout
    printer = writer_class(stream, encoding)
    if hasattr(printer, "prepare"):
        # Writer-specific massaging of the arguments, for example
        # applying exclusive c14n rules
        kwargs = printer.prepare(N, kwargs)
    node._Visitor(printer).visit(N)
def _xml_encode(N, writer=XML_W, encoding='UTF-8', **kwargs):
    """(node, Writer): str

    INTERNAL function.  Serializes the tree to an in-memory buffer and
    returns the resulting byte string.
    """
    import cStringIO
    buffer = cStringIO.StringIO()
    _xml_write(N, writer, buffer, encoding, **kwargs)
    return buffer.getvalue()
# Backward-compatibility alias
#FIXME: Remove this function when amara goes beta
def xml_print(root, stream=None, encoding='UTF-8', **kwargs):
    """
    Deprecated serialization entry point.  Serializes `root` as XML to
    `stream` (stdout when None) using `encoding`.  Use the xml_write()
    or xml_encode() method instead.
    """
    import warnings
    warnings.warn("xml_print() function is deprecated; use xml_write() or xml_encode() method instead")
    # Bug fix: keyword arguments were previously accepted but silently
    # dropped; forward them to the writer machinery.
    _xml_write(root, XML_W, stream, encoding, **kwargs)
class writer(object):
    """
    Abstract base class defining the event interface all writers
    implement.  Every event method here is a no-op; concrete writers
    override the ones they care about.
    """
    # Note, any changes to __slots__ require a change in treewriter.c as well
    __slots__ = ('output_parameters',)
    def __init__(self, output_parameters):
        # output_parameters - serialization settings for this writer
        self.output_parameters = output_parameters
    def get_result(self):
        """
        Return the accumulated result, if any.  The base class has none.
        """
        return None
    def start_document(self):
        """
        Called once at the beginning of output writing.
        """
        return
    def end_document(self):
        """
        Called once at the end of output writing.
        """
        return
    def start_element(self, name, namespace=None, namespaces=None,
                      attributes=None):
        """
        Called when an element node is generated in the result tree.
        Subsequent method calls generate the element's attributes and content.
        name - the local name.
        namespace - the namespace URI.
        namespaces - new namespace bindings (dictionary of prefixes to URIs)
                     established by this element.
        attributes - mapping of qualified-name to attribute-value
        """
        return
    def end_element(self, name, namespace=None):
        """
        Called at the end of element node generation.
        name - the local name.
        namespace - the namespace URI.
        """
        return
    def namespace(self, prefix, namespace):
        """
        Called when a namespace node is explicitly generated in the result tree
        (as by the xsl:namespace instruction).
        prefix - the prefix.
        namespace - the namespace URI.
        """
        return
    def attribute(self, name, value, namespace=None):
        """
        Called when an attribute node is generated in the result tree.
        name - the local name.
        value - the attribute value.
        namespace - the namespace URI.
        """
        return
    def text(self, data, disable_escaping=False):
        """
        Called when a text node is generated in the result tree.
        data - content of the text node
        disable_escaping - if true, no escaping of characters is performed
        """
        return
    def processing_instruction(self, target, data):
        """
        Called when an processing instruction node is generated in the result tree.
        target - the instruction target.
        data - the instruction.
        """
        return
    def comment(self, body):
        """
        Called when a comment node is generated in the result tree.
        body - comment text.
        """
        return
class streamwriter(writer):
    """
    Base class for writers that serialize to a byte stream.
    """
    def __init__(self, output_parameters, stream):
        """
        output_parameters - instance of
            `amara.writers.outputparameters.outputparameters`
        stream - a stream that takes a byte stream (not a unicode object)
        """
        # NOTE(review): assigns directly instead of calling
        # writer.__init__; equivalent, since the base __init__ only sets
        # output_parameters.
        self.output_parameters = output_parameters
        self.stream = stream
class _userwriter(object):
    """
    Mixin layering user-friendly convenience methods (attribute-name
    normalization, one-shot simple elements) on top of a writer's raw
    start_element/end_element/text event interface.
    """
    def start_element(self, name, namespace=None, namespaces=None,
                      attributes=None):
        """
        Create a start tag with optional attributes.  Must eventually
        be matched with an endElement call

        Note: all "strings" in these parameters must be unicode objects

        name - qualified name of the element (must be unicode)
        namespace - optional namespace URI
        attributes - optional dictionary mapping name to unicode value
                    the name can either be a unicode QName or a tuple
                    of (QName, namespace URI)
        namespaces - optional dictionary (defaults to an empty one) that
                     creates additional namespace declarations that the
                     user wants to place on the specific element. Each key
                     is a ns prefix, and each value a ns name (URI).
                     You do not need to use extraNss if you will be using
                     a similar namespace parameter.  In fact, most people
                     will never need this parameter.
        """
        name = U(name)
        # Normalize attribute keys to ((qname, namespace-uri), value)
        normalized_attrs = {}
        if attributes is not None:
            normalized_attrs = dict((
                (((U(aname[0]), U(aname[1])), U(value))
                  if isinstance(aname, tuple) else ((U(aname), None), U(value)))
                for (aname, value) in attributes.iteritems()
            ))
        #Be careful, viz. http://fuhm.net/super-harmful/ but should be safe here
        super(_userwriter, self).start_element(name, namespace, namespaces, normalized_attrs)
        return

    def simple_element(self, name, namespace=None, namespaces=None,
                       attributes=None, content=u""):
        """
        Create a simple tag with optional attributes and content.  The
        complete element, start tag, optional text content, end tag, will
        all be generated by this one call.  Must *not* be matched with
        an endElement call.

        Note: all "strings" in these parameters must be unicode objects

        tagName - qualified name of the element
        namespace - optional namespace URI
        attributes - optional dictionary mapping name to unicode value
                    the name can either be a unicode QName or a tuple
                    of (QName, namespace URI)
        content - optional unicode object with the text body of the
                  simple element
        namespaces - optional dictionary (defaults to an empty one) that
                     creates additional namespace declarations that the
                     user wants to place on the specific element. Each key
                     is a ns prefix, and each value a ns name (URI).
                     You do not need to use namespaces if you will be using
                     a similar namespace parameter.  In fact, most people
                     will never need this parameter.
        """
        if name.startswith('xml:'):
            #We can use such a raw test because of the very special case
            #nature of the XML prefix
            namespace = XML_NAMESPACE
        self.start_element(name, namespace, namespaces, attributes)
        if content:
            self.text(U(content))
        self.end_element(name, namespace)
        return

    def xml_fragment(self, fragment):
        """
        Incorporate a well-formed general entity into the output.
        fragment - string (must not be a Unicode object) to be incorporated
                   verbatim into the output, after testing for well-formedness
        """
        # Bug fix: was "raise NotImplementedErr", an undefined name that
        # produced a NameError instead of the intended exception
        raise NotImplementedError
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/__init__.py
|
__init__.py
|
import sys
from xml.dom import Node
from amara import tree
from amara.namespaces import XML_NAMESPACE, XMLNS_NAMESPACE
#import amara.writers.xmlwriter
#import XmlPrinter, XmlPrettyPrinter, HtmlPrinter, HtmlPrettyPrinter
import _xmlprinters, _htmlprinters
class visitor:
    """
    Provides functions to recursively walk a DOM or Domlette object and
    generate SAX-like event calls for each node encountered. See the
    printer classes (XMLPrinter, HTMLPrinter, etc.) for the event
    handlers.
    """
    def __init__(self, stream=sys.stdout, encoding='utf-8', printer=None, ns_hints=None, is_html=False,
                 indent=False, canonical=False, added_attributes=None,
                 removed_ns_decls=None):
        """
        Initializes an instance of the class, selecting the appropriate
        printer to use, depending on the is_html, indent and canonical
        flags.
        ns_hints, if given, is a dictionary of namespace mappings that
        help determine if namespace declarations need to be emitted when
        visiting the first Element node.
        """
        if printer:
            self.printer = printer
        elif indent and is_html:
            self.printer = _htmlprinters.htmlprettyprinter(stream, encoding)
        elif indent:
            self.printer = _xmlprinters.xmlprettyprinter(stream, encoding)
        elif is_html:
            self.printer = _htmlprinters.htmlprinter(stream, encoding)
        elif canonical:
            # Bug fix: was _xmlprinters.CanonicalXmlPrinter (the old
            # 4Suite name), which does not exist; the class is
            # canonicalxmlprinter, as registered in the writers
            # lookup table under "xml-canonical"
            self.printer = _xmlprinters.canonicalxmlprinter(stream, encoding)
        else:
            self.printer = _xmlprinters.xmlprinter(stream, encoding)
        # Stack of in-scope namespace mappings (prefix -> URI); the
        # 'xml' prefix is always implicitly in scope
        self._namespaces = [{'xml' : XML_NAMESPACE}]
        self._ns_hints = ns_hints
        self._added_attributes = added_attributes or {}
        self._removed_ns_decls = removed_ns_decls or []
        return

    # Maps node xml_type -> unbound visit method; filled in below as the
    # methods are defined
    _dispatch = {}

    def visit(self, node):
        """
        Starts walking the tree at the given node.
        """
        try:
            node_type = node.xml_type
        except AttributeError:
            raise ValueError('Not a valid Amara node %r' % node)
        try:
            visit = self._dispatch[node_type]
        except KeyError:
            # unknown node type, try and get a "pretty" name for the error
            #FIXME: Not ported for Amara 2
            node_types = {}
            for name in dir(Node):
                if name.endswith('_NODE'):
                    node_types[getattr(Node, name)] = name
            node_type = node_types.get(node.node_type, node.node_type)
            raise ValueError('Unknown node type %r' % node_type)
        else:
            visit(self, node)
        return

    def visit_not_implemented(self, node):
        """
        Called when an known but unsupported type of node is
        encountered, always raising a NotImplementedError exception. The
        unsupported node types are those that require DTD subset
        support: entity nodes, entity reference nodes, and notation
        nodes.
        """
        raise NotImplementedError('Printing of %r' % node)

    def visit_document(self, node):
        """
        Called when an Entity node is encountered (e.g. may or may not
        be a full XML document entity).
        Emits a doctype for the document element if a system ID is
        present, then visits the children.
        """
        self.printer.start_document()
        if node.xml_system_id:
            # Emit the doctype against the first element child
            for child in node.xml_children:
                if child.xml_type == tree.element.xml_type:
                    self.printer.doctype(child.xml_qname, node.xml_public_id, node.xml_system_id)
                    break
        for child in node.xml_children:
            self.visit(child)
        self.printer.end_document()
        return
    _dispatch[tree.entity.xml_type] = visit_document

    def visit_element(self, node):
        """
        Called when an Element node is encountered. Generates for the
        printer a startElement event, events for the node's children
        (including attributes), and an endElement event.
        """
        current_nss = self._namespaces[-1].copy()
        # Gather the namespaces and attributes for writing
        namespaces = node.xml_namespaces.copy()
        del namespaces[u'xml']
        if self._ns_hints:
            for prefix, namespaceUri in self._ns_hints.items():
                # See if this namespace needs to be emitted
                if current_nss.get(prefix, 0) != namespaceUri:
                    namespaces[prefix or u''] = namespaceUri
            self._ns_hints = None
        if self._added_attributes:
            # Caller-supplied attributes apply only to the first element
            attributes = self._added_attributes
            self._added_attributes = None
        else:
            attributes = {}
        for attr in node.xml_attributes.nodes():
            # xmlns="uri" or xmlns:foo="uri"
            if attr.xml_namespace == XMLNS_NAMESPACE:
                if not attr.xml_prefix:
                    # xmlns="uri"
                    prefix = None
                else:
                    # xmlns:foo="uri"
                    prefix = attr.xml_local
                if current_nss.get(prefix, 0) != attr.xml_value:
                    namespaces[prefix] = attr.xml_value
            else:
                attributes[attr.xml_qname] = attr.xml_value
        # The element's namespaceURI/prefix mapping takes precedence
        if node.xml_namespace or namespaces.get(None, 0):
            if namespaces.get(node.xml_prefix or None, 0) != node.xml_namespace:
                namespaces[node.xml_prefix or None] = node.xml_namespace or u""
        # Drop declarations already in scope with the same value
        kill_prefixes = []
        for prefix in namespaces:
            if prefix in current_nss and current_nss[prefix] == namespaces[prefix]:
                kill_prefixes.append(prefix)
        for prefix in kill_prefixes:
            del namespaces[prefix]
        # Honor caller-requested declaration removals (c14n support);
        # they apply only to the first element visited
        for prefix in self._removed_ns_decls:
            del namespaces[prefix]
        self.printer.start_element(node.xml_namespace, node.xml_qname, namespaces.iteritems(),
                                   attributes.iteritems())
        if self._removed_ns_decls:
            self._removed_ns_decls = []
        # Update in scope namespaces with those we emitted
        current_nss.update(namespaces)
        self._namespaces.append(current_nss)
        # Write out this node's children
        for child in node.xml_children:
            self.visit(child)
        self.printer.end_element(node.xml_namespace, node.xml_qname)
        del self._namespaces[-1]
        return
    _dispatch[tree.element.xml_type] = visit_element

    def visit_text(self, node):
        """
        Called when a Text node is encountered. Generates a text event
        for the printer.
        """
        self.printer.text(node.xml_value)
        return
    _dispatch[tree.text.xml_type] = visit_text

    def visit_comment(self, node):
        """
        Called when a Comment node is encountered. Generates a comment
        event for the printer.
        """
        self.printer.comment(node.xml_value)
        return
    _dispatch[tree.comment.xml_type] = visit_comment

    def visit_processing_instruction(self, node):
        """
        Called when a ProcessingInstruction node is encountered.
        Generates a processingInstruction event for the printer.
        """
        self.printer.processing_instruction(node.xml_target, node.xml_data)
        return
    _dispatch[tree.processing_instruction.xml_type] = visit_processing_instruction
def xml_print(root, stream=sys.stdout, encoding='UTF-8', **kwargs):
    """
    Given a Node instance assumed to be the root of a DOM or Domlette
    tree, this function serializes the document to the given stream or
    stdout, using the given encoding (UTF-8 is the default). The asHtml
    flag can be used to force HTML-style serialization of an XML DOM.
    Otherwise, the DOM type (HTML or XML) is automatically determined.
    This function does nothing if root is not a Node.

    It is preferable that users import this from Ft.Xml.Domlette
    rather than directly from Ft.Xml.Lib.
    """
    # NOTE(review): ns_hints is computed but never passed to visitor()
    # below -- looks like an oversight carried over from the 4Suite
    # version; confirm before relying on namespace hints here.
    ns_hints = {}
    # When an xml_declaration is requested but the root is not a full
    # document entity, drop the flag and emit explicit start/end
    # document events around the visit.
    if 'xml_declaration' in kwargs and not isinstance(root, tree.entity):
        del kwargs['xml_declaration']
        v = visitor(stream, encoding, **kwargs)
        v.printer.start_document()
        v.visit(root)
        v.printer.end_document()
    else:
        # NOTE(review): if kwargs still contains 'xml_declaration' here
        # (root IS an entity), visitor.__init__ does not accept it and
        # will raise TypeError -- verify intended behavior.
        v = visitor(stream, encoding, **kwargs)
        v.visit(root)
    return
def PrettyPrint(root, stream=sys.stdout, encoding='UTF-8', asHtml=None):
    """
    Given a Node instance assumed to be the root of a DOM or Domlette
    tree, this function serializes the document to the given stream or
    stdout, using the given encoding (UTF-8 is the default). Extra
    whitespace is added to the output for visual formatting. The asHtml
    flag can be used to force HTML-style serialization of an XML DOM.
    Otherwise, the DOM type (HTML or XML) is automatically determined.
    This function does nothing if root is not a Node.

    Please import this from Ft.Xml.Domlette
    rather than directly from Ft.Xml.Lib.
    """
    # NOTE(review): legacy 4Suite-era code -- imports from Ft.Xml and
    # references PrintVisitor, which is not defined in this module.
    # Presumably dead/unported; confirm before calling.
    from Ft.Xml.Domlette import SeekNss
    if not hasattr(root, "nodeType"):
        return
    ns_hints = SeekNss(root)
    # When asHtml is not specified, choose output method from interface
    # of document node (getElementsByName is an HTML DOM only method)
    if asHtml is None:
        asHtml = hasattr(root.ownerDocument or root, 'getElementsByName')
    visitor = PrintVisitor(stream, encoding, ns_hints, asHtml, 1)
    visitor.visit(root)
    stream.write('\n')
    return
def CanonicalPrint(root, stream=sys.stdout, exclusive=False,
                   inclusivePrefixes=None):
    """
    Given a Node instance assumed to be the root of an XML DOM or Domlette
    tree, this function serializes the document to the given stream or
    stdout, using c14n serialization, according to
    http://www.w3.org/TR/xml-c14n (the default) or
    http://www.w3.org/TR/xml-exc-c14n/
    This function does nothing if root is not a Node.

    exclusive - if true, apply exclusive c14n according to
        http://www.w3.org/TR/xml-exc-c14n/
    inclusivePrefixes - if exclusive is True, use this as a list of namespaces
        representing the "InclusiveNamespacesPrefixList" list in exclusive c14n

    Please import this from Ft.Xml.Domlette
    rather than directly from Ft.Xml.Lib.
    """
    # NOTE(review): legacy 4Suite-era code -- imports from Ft.Xml and
    # references PrintVisitor, which is not defined in this module.
    # Presumably dead/unported; confirm before calling.
    from Ft.Xml.Domlette import SeekNss
    if not hasattr(root, "nodeType"):
        return
    added_attributes = {}  #All the contents should be XML NS attrs
    nshints = {}
    if not exclusive:
        #Roll in ancestral xml:* attributes
        parent_xml_attrs = root.xpath(u'ancestor::*/@xml:*')
        for attr in parent_xml_attrs:
            aname = (attr.namespaceURI, attr.nodeName)
            if (aname not in added_attributes
                and aname not in root.attributes):
                added_attributes[attr.nodeName] = attr.value
    nsnodes = root.xpath('namespace::*')
    inclusivePrefixes = inclusivePrefixes or []
    if u'#default' in inclusivePrefixes:
        # '#default' stands for the default (empty-string) prefix
        inclusivePrefixes.remove(u'#default')
        inclusivePrefixes.append(u'')
    decls_to_remove = []
    if exclusive:
        # Exclusive c14n: only emit declarations actually used by the
        # element or its attributes
        used_prefixes = [ node.prefix for node in root.xpath('self::*|@*') ]
        declared_prefixes = []
        for ans, anodename in root.attributes:
            if ans == XMLNS_NAMESPACE:
                attr = root.attributes[ans, anodename]
                prefix = attr.localName
                declared_prefixes.append(prefix)
                #print attr.prefix, attr.localName, attr.nodeName
                if attr.localName not in used_prefixes:
                    decls_to_remove.append(prefix)
    #for prefix in used_prefixes:
    #    if prefix not in declared_prefixes:
    #        nshints[ns.nodeName] = ns.value
    #Roll in ancestral NS nodes
    for ns in nsnodes:
        prefix = ns.nodeName
        if (ns.value != XML_NAMESPACE
            and (XMLNS_NAMESPACE, ns.nodeName) not in root.attributes
            and (not exclusive or ns.localName in inclusivePrefixes)):
            #added_attributes[(XMLNS_NAMESPACE, ns.nodeName)] = ns.value
            nshints[prefix] = ns.value
        elif (exclusive
              and prefix in used_prefixes
              and prefix not in declared_prefixes):
            # Used but undeclared prefix: hint it for the serializer
            nshints[prefix] = ns.value
    visitor = PrintVisitor(stream, 'UTF-8', nshints, False,
                           0, True, added_attributes, decls_to_remove)
    visitor.visit(root)
    return
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/writers/_treevisitor.py
|
_treevisitor.py
|
import new
import weakref
import operator
from amara.namespaces import EXTENSION_NAMESPACE
from amara.lib.xmlstring import isspace
from amara.writers import streamwriter, textwriter, htmlwriter, xmlwriter
from amara.xslt import XsltError
_TEXT_METHOD = (None, 'text')
_HTML_METHOD = (None, 'html')
_XML_METHOD = (None, 'xml')
_XHTML_METHOD = (EXTENSION_NAMESPACE, 'xhtml') #Coming later
_C14N_METHOD = (EXTENSION_NAMESPACE, 'c14n') #Coming later
class proxymethod(object):
    """
    Descriptor used by proxywriter.  Attribute access returns a callable
    proxy object (tracked via weakrefs); once the real writer class is
    chosen, update() mutates each live proxy's __class__ in place so
    subsequent calls go straight to the bound target method without the
    caller having to re-fetch the attribute.
    """
    __slots__ = ('_name', '_func', '_refs')
    def __init__(self, func):
        # _name: attribute name to re-resolve on the finalized object
        # _func: the original (pre-finalization) implementation
        # _refs: weakrefs to every proxy handed out by __get__
        self._name = func.__name__
        self._func = func
        self._refs = []
    def update(self, obj, cls,
               _instancemethod=new.instancemethod,
               _members=operator.attrgetter('im_func', 'im_self', 'im_class',
                                            '__call__')):
        # Re-point every live proxy for `obj` at the attribute as found
        # on the finalized object/class.
        func = getattr(obj, self._name)
        try:
            func = func.im_func
        except AttributeError:
            # Target is a plain callable, not a bound method
            for ref in self._refs:
                proxy = ref()
                if proxy and proxy.im_self is obj:
                    class proxyfunction(object):
                        __call__ = func.__call__
                    proxy.__class__ = proxyfunction
        else:
            # Target is a method: rebind it to obj and expose the usual
            # bound-method attributes on the proxy
            for ref in self._refs:
                proxy = ref()
                if proxy and proxy.im_self is obj:
                    method = _instancemethod(func, obj, cls)
                    class proxymethod(object):
                        im_func, im_self, im_class, __call__ = _members(method)
                    proxy.__class__ = proxymethod
    def __get__(self, obj, cls,
                _instancemethod=new.instancemethod,
                _members=operator.attrgetter('im_func', 'im_self', 'im_class',
                                             '__call__')):
        # Hand out a fresh proxy mimicking a bound method; remember it
        # (weakly) so update() can retarget it later.
        method = _instancemethod(self._func, obj, cls)
        class proxymethod(object):
            im_func, im_self, im_class, __call__ = _members(method)
        proxy = proxymethod()
        self._refs.append(weakref.ref(proxy, self._refs.remove))
        return proxy
class proxywriter(streamwriter):
    """
    Writer that defers choosing the concrete output writer (text, html
    or xml) until the output method can be inferred from the first
    events.  Events are buffered in _stack; _finalize() then swaps this
    instance's class to the real writer class and replays the buffer.
    """
    # Output-method -> concrete writer class
    _methods = {
        _TEXT_METHOD : textwriter.textwriter,
        _HTML_METHOD : htmlwriter.htmlwriter,
        _XML_METHOD : xmlwriter.xmlwriter,
        }
    class __metaclass__(type):
        # Python 2 metaclass hook: collect the proxymethod descriptors
        # declared in the class body so _finalize can retarget them
        def __init__(cls, name, bases, namespace):
            cls.__proxymethods__ = tuple(
                obj for obj in namespace.itervalues()
                if isinstance(obj, proxymethod))
    @classmethod
    def _lookup(cls, output_parameters):
        # Resolve output_parameters.method to a concrete writer class,
        # raising XsltError for unknown methods
        method = output_parameters.method
        try:
            cls = cls._methods[method]
        except KeyError:
            if method[0] is None:
                # display only localName if in the null namespace
                method = method[1]
            raise XsltError(XsltError.UNKNOWN_OUTPUT_METHOD, str(method))
        if (cls is xmlwriter.xmlwriter and
            output_parameters.cdata_section_elements):
            # cdata-section-elements requires the specialized XML writer
            cls = xmlwriter.cdatasectionwriter
        return cls
    def __new__(cls, output_parameters, stream):
        # Attempt to switch to the "true" writer as soon as possible
        if output_parameters.method:
            return cls._lookup(output_parameters)(output_parameters, stream)
        return object.__new__(cls)
    def __init__(self, output_parameters, stream):
        streamwriter.__init__(self, output_parameters, stream)
        # Buffered (method-name, args, kwargs) events awaiting replay
        self._stack = []
        return
    def _finalize(self, method):
        # Lock in the output method, become the real writer class, and
        # replay the buffered events against it
        self.output_parameters.setdefault('method', method)
        writer_class = self._lookup(self.output_parameters)
        # Save our instance variables for use after reinitializing
        stack = self._stack
        del self._stack
        self.__class__ = writer_class
        for proxy in proxywriter.__proxymethods__:
            proxy.update(self, writer_class)
        # Do the saved callbacks
        get_command = self.__getattribute__
        for cmd, args, kw in stack:
            get_command(cmd)(*args, **kw)
        return
    @proxymethod
    def start_document(self, *args, **kwds):
        self._stack.append(('start_document', args, kwds))
        return
    @proxymethod
    def end_document(self, *args, **kw):
        # We haven't chosen an output method yet, use default.
        self._stack.append(('end_document', args, kw))
        self._finalize(_XML_METHOD)
        return
    @proxymethod
    def start_element(self, name, namespace=None, *args, **kw):
        self._stack.append(('start_element', (name, namespace) + args, kw))
        # A document element named "html" in no namespace selects the
        # HTML output method; anything else selects XML
        if namespace is None and name.lower() == 'html':
            self._finalize(_HTML_METHOD)
        else:
            self._finalize(_XML_METHOD)
        return
    @proxymethod
    def end_element(self, *args, **kw):
        self._stack.append(('end_element', args, kw))
        return
    @proxymethod
    def namespace(self, *args, **kw):
        self._stack.append(('namespace', args, kw))
        return
    @proxymethod
    def attribute(self, *args, **kw):
        self._stack.append(('attribute', args, kw))
        return
    @proxymethod
    def text(self, *args, **kw):
        self._stack.append(('text', args, kw))
        # Non-whitespace characters, cannot be HTML/XHTML
        if not isspace(args[0]):
            self._finalize(_XML_METHOD)
        return
    @proxymethod
    def processing_instruction(self, *args, **kw):
        self._stack.append(('processing_instruction', args, kw))
        return
    @proxymethod
    def comment(self, *args, **kw):
        self._stack.append(('comment', args, kw))
        return
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/proxywriter.py
|
proxywriter.py
|
from amara._xmlstring import splitqname
from amara.lib.irihelpers import uridict
from amara.xpath import XPathError, context
from amara.xslt import functions, exslt, extensions
# Attempt to use the internal `dictproxy` type to make the global variables
# read-only.
try:
    import ctypes
    # CPython-only: wrap a dict in a read-only proxy via the C API.
    dictproxy = ctypes.pythonapi.PyDictProxy_New
    dictproxy.restype = ctypes.py_object
    dictproxy.argtypes = (ctypes.py_object,)
except (ImportError, AttributeError):
    # `ctypes` missing, or this interpreter has no `ctypes.pythonapi`
    # (e.g. PyPy/Jython raise AttributeError, which the original code
    # failed to catch).  Fall back to the dict itself: no read-only
    # protection, but identical lookup behavior.
    def dictproxy(mapping):
        return mapping
__all__ = ['xsltcontext']
class xsltcontext(context):
    """
    XPath evaluation context extended with XSLT-specific state: the owning
    processor and transform, the current node/instruction/mode, loaded
    documents and key tables.
    """

    # Core XPath functions plus EXSLT and extension functions.
    functions = context.functions.copy()
    functions.update(exslt.extension_functions)
    functions.update(extensions.extension_functions)

    # Per-invocation state; presumably set by the engine during processing.
    instruction = None
    template = None
    recursive_parameters = None

    def __init__(self, node, position=1, size=1,
                 variables=None, namespaces=None,
                 current_node=None, transform=None, processor=None,
                 mode=None, extmodules=(), extfunctions=None,
                 output_parameters=None):
        context.__init__(self, node, position, size, variables, namespaces,
                         extmodules, extfunctions, output_parameters)
        # Read-only view of the top-level variable bindings (see dictproxy).
        self.global_variables = dictproxy(self.variables)
        self.current_node = current_node
        self.transform = transform
        self.processor = processor
        self.mode = mode
        self.documents = uridict()
        self.keys = {}
        return

    def get(self):
        return self._current_instruction
    def set(self, value):
        # Track the instruction and adopt its in-scope namespaces.
        self._current_instruction = value
        self.namespaces = value.namespaces
    current_instruction = property(get, set)
    # Drop the bare accessors from the class namespace; only the property
    # remains visible.
    del get, set

    def add_document(self, document, document_uri=None):
        # RTF documents do not have a documentUri
        if document_uri:
            self.documents[document_uri] = document
        return

    def update_keys(self):
        # Touch every key table for every known document; the lookup itself
        # appears to trigger (lazy) key computation -- TODO confirm against
        # the key-table implementation.
        for key_table in self.keys.itervalues():
            for document in self.documents.itervalues():
                keys = key_table[document]
        return

    def message(self, message):
        # Delegate xsl:message output to the owning processor.
        self.processor.message(message)

    def expand_qname(self, name):
        # Resolve a QName string to an (namespace-uri, local-name) pair using
        # the in-scope namespaces; raises XPathError for an unknown prefix.
        if not name: return None
        prefix, name = splitqname(name)
        if prefix:
            try:
                namespace = self.namespaces[prefix]
            except KeyError:
                raise XPathError(XPathError.UNDEFINED_PREFIX, prefix=prefix)
        else:
            namespace = None
        return (namespace, name)

    def __repr__(self):
        ptr = id(self)
        if ptr < 0:
            # Normalize negative ids (CPython 2 on 32-bit) for '%x' display.
            ptr += 0x100000000L
        return ('<%s at 0x%x: node %r, position %d, size %d, mode %r>' %
                (self.__class__.__name__, ptr, self.node, self.position,
                 self.size, self.mode))
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/xsltcontext.py
|
xsltcontext.py
|
import os, sys, operator, cStringIO, warnings
from gettext import gettext as _
DEFAULT_ENCODING = 'UTF-8'
#from amara import DEFAULT_ENCODING
from amara import ReaderError, tree
from amara.lib import iri, inputsource
from amara.xpath import XPathError
from amara.xslt import XsltError
from amara.xslt import xsltcontext
from amara.xslt.reader import stylesheet_reader
from amara.xslt.result import stringresult
# For builtin extension elements/functions
#from amara.xslt import exslt
#from amara.xslt.extensions import builtins
# Media types that signal that an xml-stylesheet PI points to an XSLT
# document, when the PI contains a type pseudo-attribute.
#
# Note: RFC 3023 suggests application/xslt+xml, and says the +xml
# suffix is not required (but is a SHOULD). If you want to use the
# 'text/xsl' convention, do Processor.XSLT_IMT.append('text/xsl')
# after import, but before instantiating Processor.Processor.
#
# Media types accepted as XSLT documents (see the RFC 3023 note above on
# adding 'text/xsl').
XSLT_IMT = ['application/xslt+xml', 'application/xslt',
            'text/xml', 'application/xml']
# for xsl:message output
MESSAGE_TEMPLATE = _('STYLESHEET MESSAGE:\n%s\nEND STYLESHEET MESSAGE\n')
class processor(object):
"""
An XSLT processing engine (4XSLT).
Typical usage:
from Ft.Lib.Uri import OsPathToUri
from Ft.Xml import InputSource
from Ft.Xml.Xslt import Processor
# this is just one of several ways to create InputSources
styuri = OsPathToUri('/absolute/path/to/stylesheet.xslt')
srcuri = OsPathToUri('/absolute/path/to/doc.xml')
STY = InputSource.DefaultFactory.fromUri(styuri)
SRC = InputSource.DefaultFactory.fromUri(srcuri)
proc = Processor.Processor()
proc.appendStylesheet(STY)
result = proc.run(SRC)
See the run() and runNode() methods for additional runtime options.
The ignore_pis flag, if true, will cause xml-stylesheet
processing instructions in the source document to be ignored.
Important instance attributes:
.extension_parameters: a dictionary that allows one to attach
additional metadata to a processor instance. We use this
to make invocation-specific data like HTTP query args and
logfile handles available to XSLT extension functions & elements
when invoking the processor via the repository's HTTP server.
.media_descriptors: the preferred/target media, for the purpose of
picking from multiple xml-stylesheet processing instructions.
Defaults to None. If set to a string, xml-stylesheet PIs
without that string in their 'media' pseudo-attribute will be
ignored.
.message_template: format string for `xsl:message` output.
.transform: the complete transformation tree.
"""
# defaults for ExtendedProcessingElements.ExtendedProcessor
_4xslt_debug = False
_4xslt_profile = False
_4xslt_trace = False
_suppress_messages = False
# has the "built-in template invoked with params" warning been issued?
_builtInWarningGiven = False
def __init__(self, ignore_pis=False, content_types=None,
media_descriptors=None, extension_parameters=None,
message_stream=None, message_template=None):
self.ignore_pis = ignore_pis
if content_types is None:
content_types = set(XSLT_IMT)
self.content_types = content_types
# Although nt in the DTD, the prose for HTML LINK element states that
# the default value for the media attribute is "screen".
if media_descriptors is None:
media_descriptors = set(['screen'])
self.media_descriptors = media_descriptors
if extension_parameters is None:
extension_parameters = {}
self.extension_parameters = extension_parameters
if message_stream is None:
message_stream = sys.stderr
self.message_stream = message_stream
if message_template is None:
message_template = MESSAGE_TEMPLATE
self.message_template = message_template
self.transform = None
self._extfunctions = {} #Cache ext functions to give to the context
self._extelements = {}
#self._extelements.update(exslt.ExtElements)
#self._extelements.update(builtins.ExtElements)
self._reader = stylesheet_reader()
return
def getStripElements(self):
if self.transform:
return self.transform.space_rules
else:
return ()
def registerExtensionModules(self, modules):
"""
Registers a list of Python modules that have public ExtFunctions
and/or ExtElements dictionaries.
In a Python module that contains extension implementations,
define a dictionary named ExtFunctions that, for each extension
function or element, maps a (namespace-URI, xpath-function-name)
tuple to a direct reference to the Python function that
implements the extension. To make the function available to the
Processor, call this method, passing in ['your.module.name'].
See Ft.Xml.Xslt.Exslt.*, Ft.Xml.Xslt.BuiltInExtFunctions and
BuiltInExtElements for working examples of extension modules.
"""
for module in modules:
if module:
module = __import__(module, {}, {}, ['ExtFunctions'])
if hasattr(module, 'ExtFunctions'):
self._extfunctions.update(mod.ExtFunctions)
if hasattr(module, 'ExtElements'):
elements = module.ExtElements
self._extelements.update(elements)
self._reader.addExtensionElementMapping(elements)
return
def registerExtensionFunction(self, namespace, localName, function):
"""
Register a single extension function.
For example, implement your own extension function as a Python
function that takes an Ft.Xml.XPath.Context.Context instance as
its first argument. Then, to make the function available to the
Processor, call this method, passing in the namespace URI and
local name of the function, and a direct reference to the Python
function that implements the extension.
See also registerExtensionModules().
"""
self._extfunctions[namespace, localName] = function
return
def registerExtensionElement(self, namespace, localName, klass):
"""
Register a single extension element.
For example, implement your own extension element as a subclass
of Ft.Xml.Xslt.xsltelement. To make the element available to the
Processor, call this method, passing in the namespace URI and
local name of the element, and a direct reference to the class
that implements the extension.
See also registerExtensionModules().
"""
self._extelements[namespace, localName] = klass
mapping = { (namespace, localName) : klass }
self._reader.addExtensionElementMapping(mapping)
return
def append_transform(self, source, uri=None):
"""
Add an XSL transformation document to the processor.
uri - optional override document URI.
This method establishes the transformation that the processor will use
to transform a source tree into a result tree. If a transform has
already been appended, then this method is equivalent to having, in an
outer "shell" document, an `xsl:import` for the most recently added
transform followed by an `xsl:import` for the document accessible via
the given `transform`.
"""
if isinstance(source, tree.node):
document = source.xml_root
if not uri:
try:
uri = document.xml_base
except AttributeError:
raise ValueError('base-uri required for %s' % document)
self._documents[uri] = document
self.transform = self._reader.parse(document)
else:
if not isinstance(source, inputsource):
source = inputsource(source, uri)
self.transform = self._reader.parse(source)
return
def run(self, source, parameters=None, result=None):
"""
Transform a source document as given via an InputSource.
Assumes that either the Processor instance has already had
stylesheets appended (via appendStylesheet(), for example), or
the source document contains xml-stylesheet processing
instructions that are not being ignored.
The `parameters` argument is an optional dictionary of
stylesheet parameters, the keys of which may be given as
strings if they have no namespace, or as (uri, localname)
tuples otherwise.
The optional writer argument is a SAX-like event handler that
is an Ft.Xml.Xslt.NullWriter subclass. The default writer is
either an Ft.Xml.Xslt.XmlWriter, HtmlWriter or PlainTextWriter,
depending on the stylesheet(s).
The optional `output` argument is a Python file-like object
to be used as the destination for the writer's output.
"""
try:
document = tree.parse(source)
except ReaderError, e:
raise XsltError(XsltError.SOURCE_PARSE_ERROR,
uri=(source.uri or '<Python string>'), text=e)
if self.__checkStylesheetPis(document, source):
#Do it again with updates WS strip lists
#NOTE: There is a case where this will produce the wrong results. If, there were
#previous stylesheets that defined removing white space, then the
#processing instruction referenced a stylesheet that overrode some of these
#whitespace processing rules, the original trimmed space will be lost
#Regardless, we need to remove any new whitespace defined in the PI
self._stripElements(document)
return self._run(document, parameters, result)
def runNode(self, node, sourceUri=None, parameters=None, result=None,
preserveSrc=0, docInputSource=None):
"""
Transform a source document as given via a Domlette document
node.
Use Ft.Xml.Domlette.ConvertDocument() to create a Domlette
from some other type of DOM.
Assumes that either the Processor instance has already had
stylesheets appended (via appendStylesheet(), for example), or
the source document contains xml-stylesheet processing
instructions that are not being ignored.
sourceUri - The absolute URI of the document
entity that the node represents, and should be explicitly
provided, even if it is available from the node itself.
`parameters` - optional dictionary of
stylesheet parameters, the keys of which may be given as
strings if they have no namespace, or as (uri, localname)
tuples otherwise.
writer - optional SAX-like event handler that
is an Ft.Xml.Xslt.NullWriter subclass. The default writer is
either an Ft.Xml.Xslt.XmlWriter, HtmlWriter or PlainTextWriter,
depending on the stylesheet(s).
output - optional Python file-like object
to be used as the destination for the writer's output.
preserveSrc - (flag) If set signals that the source DOM should not be
mutated, as would normally happen when honoring XSLT whitespace
stripping requirements. Setting preserveSrc results in the
creation of a copy of the source DOM.
isrc - optional input source used strictly for further resolution
relative the given DOM
"""
if not isinstance(node, tree.entity):
raise ValueError(MessageSource.g_errorMessages[
XsltError.CANNOT_TRANSFORM_FRAGMENT])
# A base URI must be absolute, but DOM L3 Load & Save allows
# implementation-dependent behavior if the URI is actually
# relative, empty or missing. We'll generate a URN for the
# InputSource's benefit if the base URI is empty/missing.
# Relative URIs can pass through; the resolvers will handle
# them appropriately (we hope).
if not sourceUri:
sourceUri = node.xml_base or Uri.BASIC_RESOLVER.generate()
if preserveSrc:
# preserve the node's baseURI so our DOM is a true copy
entity = tree.entity(node.xml_base)
for child in node:
entity.xml_append(entity.importNode(child, 1))
node = entity
self._stripElements(node)
if not docInputSource:
#Create a dummy iSrc
docInputSource = inputsource.input_source(
None, sourceUri, processIncludes=1,
stripElements=self.getStripElements(),
factory=self.inputSourceFactory)
if self.__checkStylesheetPis(node, docInputSource):
#Do it again with updated WS strip lists
#NOTE: There is a case where this will produce the wrong results. If, there were
#previous stylesheets that defined removing white space, then the
#processing instruction referenced a stylesheet that overrode some of these
#whitespace processing rules, the original trimmed space will be lost
#Regardless, we need to remove any new whitespace defined in the PI
self._stripElements(node)
return self._run(node, parameters, result)
def __cmp_stys(self, a, b):
"""
Internal function to assist in sorting xml-stylesheet
processing instructions. See __checkStylesheetPis().
"""
# sort by priority (natural order)
return cmp(a[0], b[0])
##
## For future reference, to support more advanced
## preferences, such as having an ordered list of
## preferred target media values rather than just one,
## and using the Internet media type list in a similar
## fashion, we can sort on multiple pseudo-attrs like
## this:
##
## sort by priority (natural order)
#if cmp(a[0], b[0]):
# return cmp(a[0], b[0])
## then media (natural order)
#elif cmp(a[1], b[1]):
# return cmp(a[1], b[1])
## then type (XSLT_IMT order)
#else:
# for imt in XSLT_IMT:
# if a[2] == imt:
# return b[2] != imt
# else:
# return -(b[2] == imt)
def __checkStylesheetPis(self, node, inputSource):
"""
Looks for xml-stylesheet processing instructions that are
children of the given node's root node, and calls
appendStylesheet() for each one, unless it does not have an
RFC 3023 compliant 'type' pseudo-attribute or does not have
a 'media' pseudo-attribute that matches the preferred media
type that was set as Processor.mediaPref. Uses the given
InputSource to resolve the 'href' pseudo-attribute. If the
instruction has an alternate="yes" pseudo-attribute, it is
treated as a candidate for the first stylesheet only.
"""
# relevant links:
# http://www.w3.org/TR/xml-stylesheet/
# http://lists.fourthought.com/pipermail/4suite/2001-January/001283.html
# http://lists.fourthought.com/pipermail/4suite/2003-February/005088.html
# http://lists.fourthought.com/pipermail/4suite/2003-February/005108.html
#
# The xml-stylsheet spec defers to HTML 4.0's LINK element
# for semantics. It is not clear in HTML how the user-agent
# should interpret multiple LINK elements with rel="stylesheet"
# and without alternate="yes". In XSLT processing, we, like
# Saxon, choose to treat such subsequent non-alternates as
# imports (i.e. each non-alternate stylesheet is imported by
# the previous one).
#
# Given that alternates can appear before or after the
# non-alternate, there's no way to know whether they apply
# to the preceding or following non-alternate. So we choose
# to just treat alternates as only applying to the selection
# of the first stylesheet.
#
# Also, the absence of processing guidelines means we can't
# know whether to treat the absence of a 'media' pseudo-attr
# as implying that this is a default stylesheet (e.g. when the
# preferred media is "foo" and there is no "foo", you use
# this stylesheet), or whether to treat it as only being the
# appropriate stylesheet when no media preference is given to
# the processor.
#
# Furthermore, if more than one candidate for the first
# stylesheet is a match on the 'media' preference (or lack
# thereof), it's not clear what to do. Do we give preference
# to the one with a 'type' that is considered more favorable
# due to its position in the XSLT_IMT list? Do we just use the
# first one? The last one? For now, if there's one that does
# not have alternate="yes", we use that one; otherwise we use
# the first one. Thus, given
# <?xml-stylesheet type="application/xslt+xml" href="sty0"?>
# <?xml-stylesheet type="application/xslt+xml" href="sty1"
# alternate="yes"?>
# sty0 is used, even if the PIs are swapped; whereas if the
# only choices are
# <?xml-stylesheet type="application/xslt+xml" href="sty1"
# alternate="yes"?>
# <?xml-stylesheet type="application/xslt+xml" href="sty2"
# alternate="yes"?>
# then sty1 is used because it comes first.
root = node.xml_root
c = 1 # count of alternates, +1
found_nonalt = 0
stys = []
for node in root:
# only look at prolog, not anything that comes after it
if isinstance(node, tree.element): break
# build dict of pseudo-attrs for the xml-stylesheet PIs
if not (isinstance(node, tree.processing_instruction)
and node.xml_target == 'xml-stylesheet'):
continue
pseudo_attrs = {}
for attdecl in node.xml_data.split():
try:
name, value = attdecl.split('=', 1)
except ValueError:
pass
else:
pseudo_attrs[name] = value[1:-1]
# PI must have both href, type pseudo-attributes;
# type pseudo-attr must match valid XSLT types;
# media pseudo-attr must match preferred media
# (which can be None)
if 'href' in pseudo_attrs and 'type' in pseudo_attrs:
href = pseudo_attrs['href']
imt = pseudo_attrs['type']
media = pseudo_attrs.get('media') # defaults to None
if media in self.media_descriptors and imt in XSLT_IMT:
if ('alternate' in pseudo_attrs
and pseudo_attrs['alternate'] == 'yes'):
stys.append((1, media, imt, href))
elif found_nonalt:
c += 1
stys.append((c, media, imt, href))
else:
stys.append((0, media, imt, href))
found_nonalt = 1
stys.sort(self.__cmp_stys)
# Assume stylesheets for irrelevant media and disallowed IMTs
# are filtered out. Assume stylesheets are in ascending order
# by level. Now just use first stylesheet at each level, but
# treat levels 0 and 1 the same. Meaning of the levels:
# level 0 is first without alternate="yes"
# level 1 is all with alternate="yes"
# levels 2 and up are the others without alternate="yes"
hrefs = []
last_level = -1
#print "stys=",repr(stys)
for sty in stys:
level = sty[0]
if level == 1 and last_level == 0:
# we want to ignore level 1s if we had a level 0
last_level = 1
if level == last_level:
# proceed to next level (effectively, we only use
# the first stylesheet at each level)
continue
last_level = level
hrefs.append(sty[3])
if hrefs:
for href in hrefs:
# Resolve the PI with the InputSource for the document
# containing the PI, so relative hrefs work correctly
new_source = inputSource.resolve(href,
hint='xml-stylesheet PI')
self.appendStylesheet(new_source)
# Return true if any xml-stylesheet PIs were processed
# (i.e., the stylesheets they reference are going to be used)
return not not hrefs
def _run(self, node, parameters=None, result=None):
"""
Runs the stylesheet processor against the given XML DOM node with the
stylesheets that have been registered. It does not mutate the source.
If writer is given, it is used in place of the default output method
decisions for choosing the proper writer.
"""
#QUESTION: What about ws stripping?
#ANSWER: Whitespace stripping happens only in the run*() interfaces.
# This method is use-at-your-own-risk. The XSLT conformance of the
# source is maintained by the caller. This exists as a performance
# hook.
parameters = parameters or {}
self.attributeSets = {}
self.keys = {}
#See f:chain-to extension element
self.chainTo = None
self.chainParams = None
if not self.transform:
raise XsltError(XsltError.NO_STYLESHEET)
# Use an internal result to gather the output only if the caller
# didn't supply other means of retrieving it.
if result is None:
result = stringresult()
result.parameters = self.transform.output_parameters
assert result.writer
# Initialize any stylesheet parameters
initial_variables = parameters.copy()
for name in parameters:
if name not in self.transform.parameters:
del initial_variables[name]
# Prepare the stylesheet for processing
context = xsltcontext.xsltcontext(node,
variables=initial_variables,
transform=self.transform,
processor=self,
extfunctions=self._extfunctions,
output_parameters=result.parameters)
context.add_document(node, node.xml_base)
context.push_writer(result.writer)
self.transform.root.prime(context)
# Process the document
try:
self.transform.apply_templates(context, [node])
except XPathError, e:
raise
instruction = context.instruction
strerror = str(e)
e.message = MessageSource.EXPRESSION_POSITION_INFO % (
instruction.baseUri, instruction.lineNumber,
instruction.columnNumber, instruction.nodeName, strerror)
raise
except XsltError:
raise
except (KeyboardInterrupt, SystemExit):
raise
except:
raise
import traceback
sio = cStringIO.StringIO()
sio.write("Lower-level traceback:\n")
traceback.print_exc(None, sio)
instruction = context.currentInstruction
strerror = sio.getvalue()
raise RuntimeError(MessageSource.EXPRESSION_POSITION_INFO % (
instruction.baseUri, instruction.lineNumber,
instruction.columnNumber, instruction.nodeName, strerror))
writer = context.pop_writer()
assert writer is result.writer
# Perform cleanup
self.transform.root.teardown()
if isinstance(result, stringresult):
return result.clone()
return result
def message_control(self, suppress):
"""
Controls whether the processor emits warnings and xsl:message
messages. Call with suppress=1 to suppress such output.
"""
self._suppress_messages = not not suppress
return
def message(self, message):
"""
Intended to be used by XSLT instruction implementations only.
Used by xsl:message to emit a message to sys.stderr, unless such
messages are suppressed (see messageControl()). Uses the
msgPrefix & msgSuffix instance attributes.
"""
message = self.message_template % (message,)
if not self._suppress_messages:
self.message_stream.write(message)
self.message_stream.flush()
return
def warning(self, message):
"""
Emits a warning via Python's warnings framework, unless warnings
are suppressed (see messageControl()).
Used, for example, to announce that built-in templates are being
invoked with params.
"""
if not self._suppress_messages:
# Using level=2 to show the stack where the warning occured.
warnings.warn(message, stacklevel=2)
return
def addHandler(self, outputParams, stream):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with an output writer wrapper that first
determines which XSLT output method is going to be used (i.e.,
by looking at the output parameters or waiting to see if an
'html' element is the first new node generated), then replaces
itself with the appropriate writer instance.
outputParams is an Ft.Xml.Xslt.OutputParameters instance.
stream will be passed on to the constructor of the real writer.
"""
handler = OutputHandler.OutputHandler(outputParams, stream)
self.writers.append(handler)
handler.startDocument()
return
def removeHandler(self):
"""
Intended to be used by XSLT instruction implementations only.
Deletes the most recently added output writer.
"""
self.writers[-1].endDocument()
del self.writers[-1]
return
def pushResultTree(self, baseUri, implementation=None):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with a new output writer that produces
a separate document. The new document will have the given
baseUri as its URI. This is used to generate result tree
fragments.
Allows specifying an alternative DOM implementation for the
creation of the new document.
"""
writer = RtfWriter.RtfWriter(self.outputParams, baseUri)
self.writers.append(writer)
return writer
def pushResultString(self):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with an output writer that buffers the text
from text events and keeps track of whether non-text events
occurred. This is used by the implementations of XSLT
instructions such as xsl:attribute.
"""
writer = StringWriter.StringWriter(self.outputParams)
self.writers.append(writer)
return
def pushResult(self, handler=None):
"""
Intended to be used by XSLT instruction implementations only.
Sets up the processor to start processing subsequently
generated content with a new output writer (the given handler
of SAX-like output events).
"""
if handler is None:
warnings.warn("Use pushResultTree(uri) to create RTFs",
DeprecationWarning, stacklevel=2)
handler = RtfWriter.RtfWriter(self.outputParams,
self.stylesheet.baseUri)
self.writers.append(handler)
handler.startDocument()
return
def popResult(self):
"""
Intended to be used by XSLT instruction implementations only.
Ends temporary output writing that was started with
pushResultString(), pushResultTree(), or pushResult(), and
returns the result.
"""
handler = self.writers[-1]
del self.writers[-1]
handler.endDocument()
return handler.getResult()
def writer(self):
"""
Intended to be used by XSLT instruction implementations only.
Returns the current output writer.
"""
return self.writers[-1]
writer = property(writer)
def _strip_elements(self, node):
stripElements = self.getStripElements()
if stripElements:
StripElements.StripElements(node, stripElements)
return
def reset(self):
"""
Returns the processor to a state where it can be used to do a
new transformation with a new stylesheet. Deletes the current
stylesheet tree, and may do other cleanup.
"""
self.stylesheet = None
self.getStylesheetReader().reset()
return
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/processor.py
|
processor.py
|
import sys, getopt
from amara import Error
__all__ = ['XsltError', 'transform']
class XsltError(Error):
INTERNAL_ERROR = 1
# xsl:stylesheet
NO_STYLESHEET = 20
#STYLESHEET_MISSING_VERSION = 21
LITERAL_RESULT_MISSING_VERSION = 22
STYLESHEET_PARSE_ERROR = 23
SOURCE_PARSE_ERROR = 24
#XSL_STYLESHEET_NOT_DOCELEM = 25
#TOP_LEVEL_ELEM_WITH_NULL_NS = 26
XSLT_ILLEGAL_ELEMENT = 27
#STYLESHEET_ILLEGAL_ROOT = 28
CIRCULAR_VARIABLE = 29
DUPLICATE_TOP_LEVEL_VAR = 30
DUPLICATE_NAMESPACE_ALIAS = 31
# misc element validation
MISSING_REQUIRED_ELEMENT = 50
ILLEGAL_ELEMENT_CHILD = 51
ILLEGAL_TEXT_CHILD = 52
UNDEFINED_PREFIX = 12
# misc attribute validation
MISSING_REQUIRED_ATTRIBUTE = 70
ILLEGAL_NULL_NAMESPACE_ATTR = 71
ILLEGAL_XSL_NAMESPACE_ATTR = 72
INVALID_ATTR_CHOICE = 73
INVALID_CHAR_ATTR = 74
INVALID_NUMBER_ATTR = 75
INVALID_NS_URIREF_ATTR = 76
INVALID_ID_ATTR = 77
INVALID_QNAME_ATTR = 78
INVALID_NCNAME_ATTR = 79
INVALID_PREFIX_ATTR = 80
INVALID_NMTOKEN_ATTR = 81
QNAME_BUT_NOT_NCNAME = 82
AVT_SYNTAX = 83
PATTERN_SYNTAX = 84
INVALID_AVT = 85
INVALID_PATTERN = 86
INVALID_EXPRESSION = 87
# xsl:apply-imports
APPLYIMPORTS_WITH_NULL_CURRENT_TEMPLATE = 100
# xsl:import and xsl:include
#ILLEGAL_IMPORT = 110
#IMPORT_NOT_FOUND = 111
INCLUDE_NOT_FOUND = 112
CIRCULAR_INCLUDE = 113
# xsl:choose, xsl:when and xsl:otherwise
ILLEGAL_CHOOSE_CHILD = 120
#CHOOSE_REQUIRES_WHEN = 121
#CHOOSE_WHEN_AFTER_OTHERWISE = 122
#CHOOSE_MULTIPLE_OTHERWISE = 123
#WHEN_MISSING_TEST = 124
# xsl:call-template
#ILLEGAL_CALLTEMPLATE_CHILD = 130
NAMED_TEMPLATE_NOT_FOUND = 131
# xsl:template
#ILLEGAL_TEMPLATE_PRIORITY = 140
MULTIPLE_MATCH_TEMPLATES = 141
DUPLICATE_NAMED_TEMPLATE = 142
# xsl:attribute
ATTRIBUTE_ADDED_TOO_LATE = 150
#ATTRIBUTE_MISSING_NAME = 151
ATTRIBUTE_ADDED_TO_NON_ELEMENT = 152
NONTEXT_IN_ATTRIBUTE = 153
BAD_ATTRIBUTE_NAME = 154
# xsl:element
UNDEFINED_ATTRIBUTE_SET = 160
# xsl:for-each
INVALID_FOREACH_SELECT = 170
# xsl:value-of
#VALUEOF_MISSING_SELECT = 180
# xsl:copy-of
#COPYOF_MISSING_SELECT = 190
# xsl:apply-template
#ILLEGAL_APPLYTEMPLATE_CHILD = 210
#ILLEGAL_APPLYTEMPLATE_MODE = 211
INVALID_APPLY_TEMPLATES_SELECT = 'XTTE0520'
# xsl:attribute-set
#ILLEGAL_ATTRIBUTESET_CHILD = 220
#ATTRIBUTESET_REQUIRES_NAME = 221
CIRCULAR_ATTRIBUTE_SET = 222
# xsl:param and xsl:variable
#ILLEGAL_PARAM = 230
#ILLEGAL_PARAM_PARENT = 231
ILLEGAL_SHADOWING = 232
VAR_WITH_CONTENT_AND_SELECT = 233
# xsl:message
#ILLEGAL_MESSAGE_PARENT = 240
STYLESHEET_REQUESTED_TERMINATION = 241
# xsl:processing-instruction
ILLEGAL_XML_PI = 250
NONTEXT_IN_PI = 251
# xsl:output
UNKNOWN_OUTPUT_METHOD = 260
# xsl:decimal-format
DUPLICATE_DECIMAL_FORMAT = 270
UNDEFINED_DECIMAL_FORMAT = 271
# xsl:sort
#ILLEGAL_SORT_DATA_TYPE_VALUE = 280
#ILLEGAL_SORT_CASE_ORDER_VALUE = 281
#ILLEGAL_SORT_ORDER_VALUE = 282
# xsl:number
#ILLEGAL_NUMBER_GROUPING_SIZE_VALUE = 290
#ILLEGAL_NUMBER_LEVEL_VALUE = 291
#ILLEGAL_NUMBER_LETTER_VALUE = 292
ILLEGAL_NUMBER_FORMAT_VALUE = 293
UNSUPPORTED_NUMBER_LANG_VALUE = 294
UNSUPPORTED_NUMBER_LETTER_FOR_LANG = 295
# xsl:namespace-alias
#INVALID_NAMESPACE_ALIAS = 300
# xsl:comment
NONTEXT_IN_COMMENT = 310
# xsl:fallback and forwards-compatible processing
FWD_COMPAT_WITHOUT_FALLBACK = 320
UNKNOWN_EXTENSION_ELEMENT = 321
# built-in functions and XSLT-specific extension functions
DOC_FUNC_EMPTY_NODESET = 1000
UNKNOWN_NODE_BASE_URI = 1001
#KEY_WITH_RTF_CONTEXT = 1002
#WRONG_NUMBER_OF_ARGUMENTS = 2000
WRONG_ARGUMENT_TYPE = 2001
INVALID_QNAME_ARGUMENT = 2002
# EXSLT messages use 3000-3999
UNSUPPORTED_DOCUMENT_URI_SCHEME = 3000
ABORTED_EXSLDOCUMENT_OVERWRITE = 3010
NO_EXSLTDOCUMENT_BASE_URI = 3020
ILLEGAL_DURATION_FORMAT = 3100
RESULT_NOT_IN_FUNCTION = 3200
ILLEGAL_RESULT_SIBLINGS = 3201
# built-in output methods
RESTRICTED_OUTPUT_VIOLATION = 7000
#FEATURE_NOT_SUPPORTED = 9999
    @classmethod
    def _load_messages(cls):
        """Return the mapping of error codes to user-facing message templates.

        Templates are passed through gettext for translation and use either
        positional ('%s') or named ('%(name)s') interpolation; callers must
        supply matching arguments when formatting.  Commented-out entries
        correspond to retired error codes, kept for historical reference.
        """
        from gettext import gettext as _
        return {
            XsltError.INTERNAL_ERROR: _(
                'There is an internal bug in 4Suite. Please make a post to '
                'the 4Suite mailing list to report this error message to the '
                'developers. Include platform details and info about how to '
                'reproduce the error. Info about the mailing list is at '
                'http://lists.fourthought.com/mailman/listinfo/4suite. '),
            # xsl:stylesheet
            XsltError.NO_STYLESHEET: _(
                'No stylesheets to process.'),
            #XsltError.STYLESHEET_MISSING_VERSION: _(
            #    'Stylesheet %(uri)s, document root element must have a'
            #    ' version attribute. (see XSLT 1.0 sec. 2.2)'),
            XsltError.LITERAL_RESULT_MISSING_VERSION: _(
                "Document root element must have a 'xsl:version' attribute "
                "(see XSLT 1.0 sec. 2.3)"),
            XsltError.STYLESHEET_PARSE_ERROR: _(
                'Error parsing stylesheet %(uri)s: %(text)s'),
            XsltError.SOURCE_PARSE_ERROR: _(
                'Error parsing source document %(uri)s: %(text)s'),
            #XsltError.XSL_STYLESHEET_NOT_DOCELEM: _(
            #    'An xsl:stylesheet or xsl:transform element must be '
            #    'the document element.'),
            #XsltError.TOP_LEVEL_ELEM_WITH_NULL_NS: _(''),
            XsltError.XSLT_ILLEGAL_ELEMENT: _(
                "Illegal element '%(element)s' in XSLT Namespace "
                "(see XSLT 1.0 sec. 2.1)."),
            #XsltError.STYLESHEET_ILLEGAL_ROOT: _(
            #    'Illegal Document Root Element "%s" (see XSLT 1.0 sec. 2.2).'),
            XsltError.CIRCULAR_VARIABLE: _(
                'Circular variable reference error (see XSLT 1.0 sec. 11.4) for variable or parameter: %(name)s'),
            XsltError.DUPLICATE_TOP_LEVEL_VAR: _(
                'Top level variable %(variable)s has duplicate definitions '
                'with the same import precedence. (see XSLT 1.0 sec. 11)'),
            XsltError.DUPLICATE_NAMESPACE_ALIAS: _(
                'The namespace for "%s" has duplicate namespace aliases defined with the same import precedence. (see XSLT 1.0 sec. 2.6.2)'),
            # misc element validation
            XsltError.MISSING_REQUIRED_ELEMENT: _(
                "Element '%(element)s' missing required element '%(child)s'"),
            XsltError.ILLEGAL_ELEMENT_CHILD: _(
                "Element '%(element)s' not allowed here"),
            XsltError.ILLEGAL_TEXT_CHILD: _(
                "Illegal literal text %(data)r within element '%(element)s'"),
            XsltError.UNDEFINED_PREFIX: _(
                "Undefined namespace prefix '%(prefix)s' for element '%(elem)s'"),
            # misc attribute validation
            XsltError.MISSING_REQUIRED_ATTRIBUTE: _(
                "Element '%(element)s' missing required attribute "
                "'%(attribute)s'"),
            XsltError.ILLEGAL_NULL_NAMESPACE_ATTR: _(
                "Illegal null-namespace attribute '%(attribute)s' on "
                "element '%(element)s'"),
            XsltError.ILLEGAL_XSL_NAMESPACE_ATTR: _(
                "Illegal xsl-namespace attribute '%(attribute)s' on "
                "element '%(element)s'"),
            XsltError.INVALID_ATTR_CHOICE: _(
                "Illegal attribute value '%s', must be one of '%s'"),
            XsltError.INVALID_CHAR_ATTR: _(
                "Invalid char attribute value '%s'"),
            XsltError.INVALID_NUMBER_ATTR: _(
                "Invalid number attribute value '%s'"),
            XsltError.INVALID_NS_URIREF_ATTR: _(
                "'%s' is not a valid namespace name (see Namespaces in XML erratum NE05)"),
            XsltError.INVALID_ID_ATTR: _(
                "Invalid ID attribute value '%s'"),
            XsltError.INVALID_QNAME_ATTR: _(
                "Invalid QName attribute value '%(value)s'"),
            XsltError.INVALID_NCNAME_ATTR: _(
                "Invalid NCName attribute value '%s'"),
            XsltError.INVALID_PREFIX_ATTR: _(
                "Invalid prefix attribute value '%s'"),
            XsltError.INVALID_NMTOKEN_ATTR: _(
                "Invalid NMTOKEN attribute value '%s'"),
            XsltError.QNAME_BUT_NOT_NCNAME: _(
                "QName allowed but not NCName, '%s' found"),
            XsltError.AVT_SYNTAX: _(
                'Unbalanced curly braces ({}) in attribute value template. (see XSLT 1.0 sec. 7.6.2)'),
            XsltError.INVALID_AVT: _(
                "Malformed attribute value template file=%(baseuri)s, line=%(line)s, column=%(col)s ('%(value)s' '%(msg)s')"),
            XsltError.INVALID_PATTERN: _(
                'XPattern expression syntax error at line %(line)d, '
                'column %(column)d: %(text)s'),
            XsltError.INVALID_EXPRESSION: _(
                "Malformed XPath expression '%(text)s'"),
            # xsl:apply-imports
            XsltError.APPLYIMPORTS_WITH_NULL_CURRENT_TEMPLATE: _(
                'xsl:apply-imports used where there is no current template. '
                ' (see XSLT Spec)'),
            # xsl:import and xsl:include
            #XsltError.ILLEGAL_IMPORT: _(
            #    'xsl:import is not allowed here (xsl:import must be at top '
            #    'level and precede all other XSLT top-level instructions). '
            #    '(see XSLT 1.0 sec. 2.6.2)'),
            #XsltError.IMPORT_NOT_FOUND: _(''),
            XsltError.INCLUDE_NOT_FOUND: _(
                "Unable to retrieve the stylesheet '%(uri)s', "
                "using base URI '%(base)s'"),
            XsltError.CIRCULAR_INCLUDE: _(
                "Stylesheet '%(uri)s' may not be included or imported more "
                "than once (see XSLT 1.0 sec. 2.6)"),
            # xsl:choose, xsl:when and xsl:otherwise
            XsltError.ILLEGAL_CHOOSE_CHILD: _('FIXME'),
            #XsltError.CHOOSE_REQUIRES_WHEN: _(
            #    'xsl:choose must have at least one xsl:when child '
            #    '(see XSLT 1.0 sec. 9.2)'),
            #XsltError.CHOOSE_WHEN_AFTER_OTHERWISE: _(
            #    "'xsl:choose' cannot have 'xsl:when' child after "
            #    "'xsl:otherwise' child (see XSLT 1.0 sec. 9.2)"),
            #XsltError.CHOOSE_MULTIPLE_OTHERWISE: _(
            #    "'xsl:choose only allowed one 'xsl:otherwise' child "
            #    "(see XSLT 1.0 sec. 9.2)'"),
            #XsltError.WHEN_MISSING_TEST: _(
            #    "'xsl:when' requires 'test' attribute "
            #    "(see XSLT 1.0 sec. 9.2)'"),
            # xsl:call-template
            #XsltError.ILLEGAL_CALLTEMPLATE_CHILD: _(
            #    "'xsl:call-template' child must be 'xsl:with-param' "
            #    "(see XSLT 1.0 sec. 6)'"),
            XsltError.NAMED_TEMPLATE_NOT_FOUND: _(
                "Named template '%s' invoked but not defined."),
            # xsl:template
            #XsltError.ILLEGAL_TEMPLATE_PRIORITY: _(
            #    'Invalid priority value for template '
            #    '(see XSLT 1.0 sec. 5.5)'),
            XsltError.MULTIPLE_MATCH_TEMPLATES: _(
                "Multiple templates matching node '%r' "
                "(see XSLT 1.0 sec. 5.5).\n"
                "Conflicting template locations:\n%s"),
            XsltError.DUPLICATE_NAMED_TEMPLATE: _(
                "Named template '%(template)s' already defined with same "
                "import precedence"),
            # xsl:attribute
            XsltError.ATTRIBUTE_ADDED_TOO_LATE: _(
                "Children were added to element before 'xsl:attribute' "
                "instantiation (see XSLT 1.0 sec. 7.1.3)"),
            #XsltError.ATTRIBUTE_MISSING_NAME: _(
            #    "'xsl:attribute' missing required 'name' attribute "
            #    "(see XSLT 1.0 sec. 7.1.3)"),
            XsltError.ATTRIBUTE_ADDED_TO_NON_ELEMENT: _(
                "'xsl:attribute' attempted to add attribute to non-element "
                "(see XSLT 1.0 sec. 7.1.3)"),
            XsltError.NONTEXT_IN_ATTRIBUTE: _(
                "Nodes other than text nodes created during 'xsl:attribute' "
                "instantiation (see XSLT 1.0 sec. 7.1.3)"),
            XsltError.BAD_ATTRIBUTE_NAME: _(
                "An attribute cannot be created with name '%s' "
                "(see XSLT 1.0 sec. 7.1.3)"),
            # xsl:element
            XsltError.UNDEFINED_ATTRIBUTE_SET: _(
                "Undefined attribute set '%(name)s'"),
            # xsl:for-each
            XsltError.INVALID_FOREACH_SELECT: _(
                "'select' expression must evaluate to a node-set."),
            # xsl:value-of
            #XsltError.VALUEOF_MISSING_SELECT: _('Empty "value-of" requires "select" attribute (see XSLT 1.0 sec. 7.6.1)'),
            # xsl:copy-of
            #XsltError.COPYOF_MISSING_SELECT: _('Empty "copy-of" requires "select" attribute (see XSLT 1.0 sec. 11.3)'),
            # xsl:apply-templates
            #XsltError.ILLEGAL_APPLYTEMPLATE_CHILD: _('apply-templates child must be with-param or sort. (see XSLT Spec 5.4)'),
            #XsltError.ILLEGAL_APPLYTEMPLATE_MODE: _('apply-templates mode must be a QName. (see XSLT Spec 5.4)'),
            XsltError.INVALID_APPLY_TEMPLATES_SELECT: _(
                "'select' expression must evaluate to a node-set."),
            # xsl:attribute-set
            #XsltError.ILLEGAL_ATTRIBUTESET_CHILD: _('xsl:attribute-set child must be "attribute" (see XSLT 1.0 sec. 7.1.4)'),
            #XsltError.ATTRIBUTESET_REQUIRES_NAME: _('xsl:attribute-set requires "name" attribute (see XSLT 1.0 sec. 7.1.4)'),
            XsltError.CIRCULAR_ATTRIBUTE_SET: _("Circular attribute-set error for '%s'. (see XSLT 1.0 sec. 7.1.4)"),
            # xsl:param and xsl:variable
            #XsltError.ILLEGAL_PARAM: _('xsl:param elements must be the first children of xsl:template (see XSLT 1.0 sec. 11).'),
            #XsltError.ILLEGAL_PARAM_PARENT: _('Uri: %s line %s col: %s\n xsl:param can only appear at top level or as the child of an xsl:template (see XSLT 1.0 sec. 11).'),
            XsltError.ILLEGAL_SHADOWING: _('Illegal shadowing of %(variable)s. An xsl:param or xsl:variable may not shadow another variable not at top level (see XSLT 1.0 sec. 11).'),
            XsltError.VAR_WITH_CONTENT_AND_SELECT: _('Illegal binding of of %(name)s. An xsl:param or xsl:variable may not have both a select attribute and non-empty content. (see XSLT 1.0 sec. 11.2).'),
            # xsl:message
            #XsltError.ILLEGAL_MESSAGE_PARENT: _('xsl:message cannot be a top-level element. (see XSLT 1.0 sec. 2.2)'),
            XsltError.STYLESHEET_REQUESTED_TERMINATION: _('A message instruction in the Stylesheet requested termination of processing:\n%(msg)s'),
            # xsl:processing-instruction
            XsltError.ILLEGAL_XML_PI: _('A processing instruction cannot be used to output an XML or text declaration. (see XSLT 1.0 sec. 7.3)'),
            XsltError.NONTEXT_IN_PI: _('Nodes other than text nodes created during xsl:processing-instruction instantiation. (see XSLT 1.0 sec. 7.4)'),
            # xsl:output
            XsltError.UNKNOWN_OUTPUT_METHOD: _('Unknown output method (%s)'),
            # xsl:decimal-format
            XsltError.DUPLICATE_DECIMAL_FORMAT: _('Duplicate declaration of decimal-format %s. (XSLT Spec: 12.3)'),
            XsltError.UNDEFINED_DECIMAL_FORMAT: _('Undefined decimal-format (%s)'),
            # xsl:sort
            #XsltError.ILLEGAL_SORT_CASE_ORDER_VALUE: _('The case-order attribute of sort must be either "upper-first" or "lower-first" (see XSLT 1.0 sec. 10)'),
            #XsltError.ILLEGAL_SORT_DATA_TYPE_VALUE: _('The data-type attribute of sort must be either "text" or "number" (see XSLT 1.0 sec. 10).'),
            #XsltError.ILLEGAL_SORT_ORDER_VALUE: _('The order attribute of sort must be either "ascending" or "descending". (see XSLT 1.0 sec. 10)'),
            # xsl:number
            #XsltError.ILLEGAL_NUMBER_GROUPING_SIZE_VALUE: _('The "grouping-size" attribute of number must be an integer. (see XSLT 1.0 sec. 7.7.1)'),
            #XsltError.ILLEGAL_NUMBER_LEVEL_VALUE: _('The "level" attribute of number must be "single", "multiple" or "any". (see XSLT 1.0 sec. 7.7)'),
            #XsltError.ILLEGAL_NUMBER_LETTER_VALUE: _('The "letter-value" attribute of number must be "alphabetic" or "traditional". (see XSLT 1.0 sec. 7.7.1)'),
            XsltError.ILLEGAL_NUMBER_FORMAT_VALUE: _('Value "%s" for "format" attribute of xsl:number is invalid. (see XSLT 1.0 sec. 7.7)'),
            XsltError.UNSUPPORTED_NUMBER_LANG_VALUE: _('Language "%s" for alphabetic numbering in xsl:number is unsupported.'),
            XsltError.UNSUPPORTED_NUMBER_LETTER_FOR_LANG: _('Value "%s" for "letter-value" attribute of xsl:number is not supported with the language "%s".'),
            # xsl:namespace-alias
            #XsltError.INVALID_NAMESPACE_ALIAS: _('Invalid arguments to the namespace-alias instruction. (see XSLT 1.0 sec. 7.1.1)'),
            # xsl:comment
            XsltError.NONTEXT_IN_COMMENT: _('Nodes other than text nodes created during xsl:comment instantiation. (see XSLT 1.0 sec. 7.4)'),
            # xsl:fallback and forwards-compatible processing
            XsltError.FWD_COMPAT_WITHOUT_FALLBACK: _(
                "No 'xsl:fallback' instruction found for element '%(element)s' "
                "processed in forward-compatible mode."),
            XsltError.UNKNOWN_EXTENSION_ELEMENT: _(
                "'No implementation for extension element '%(element)s'"),
            # built-in functions and XSLT-specific extension functions
            XsltError.DOC_FUNC_EMPTY_NODESET: _('Second argument to document(), if given, must be a non-empty node-set. (see XSLT 1.0 sec. 12.1 erratum E14)'),
            XsltError.UNKNOWN_NODE_BASE_URI: _('Could not determine base URI of node: %s'),
            #XsltError.KEY_WITH_RTF_CONTEXT: _('key() must not be invoked when the context node comes from the result tree (probably due to an earlier invokation of node-set()).'),
            #XsltError.WRONG_NUMBER_OF_ARGUMENTS: _('A built-in or extension function was called with the wrong number of arguments.'),
            XsltError.WRONG_ARGUMENT_TYPE: _('A built-in or extension function was called with the wrong type of argument(s).'),
            XsltError.INVALID_QNAME_ARGUMENT: _('A built-in or extension function requiring a QName argument was called with this non-QName value: "%(value)s".'),
            # EXSLT messages use 3000-3999
            XsltError.UNSUPPORTED_DOCUMENT_URI_SCHEME: _(
                "Amara's implementation of `exsl:document` only supports an "
                "href value having the 'file' URI scheme, which may be "
                "implicit. Scheme '%(scheme)s' was found."),
            XsltError.ABORTED_EXSLDOCUMENT_OVERWRITE: _(
                "An attempt was made to write to '%(filename)s', which "
                "already exists. The attempt to save the contents of this "
                "file to '%(backup)s' also failed, and so the instruction has "
                "been aborted. If you are sure it is OK to overwrite this "
                "file, please indicate this by adding the `f:overwrite-okay` "
                "attribute to the `exsl:document` instruction."),
            XsltError.NO_EXSLTDOCUMENT_BASE_URI: _(
                "An `exsl:document` element referred to a relative reference "
                "'%(uriref)s', but there is no explicit output document to "
                "provide a base URI in order to resolve this relative "
                "reference."),
            XsltError.ILLEGAL_DURATION_FORMAT: _(
                "Duration string '%(duration)s' not in `xs:duration` format."),
            XsltError.RESULT_NOT_IN_FUNCTION: _(
                "An EXSLT `func:result` element must occur within a "
                "`func:function` element."),
            XsltError.ILLEGAL_RESULT_SIBLINGS: _(
                "An EXSLT `func:result` element must not have following "
                "sibling elements besides `xsl:fallback.`"),
            # built-in output methods
            XsltError.RESTRICTED_OUTPUT_VIOLATION: _('The requested output of element "%s" is forbidden according to output restrictions'),
            #XsltError.FEATURE_NOT_SUPPORTED: _('4XSLT does not yet support this feature.'),
            }
class XsltStaticError(XsltError, TypeError):
    """XSLT error detected at stylesheet compile time.

    Records the location (base URI, line, column) and the tag name of the
    stylesheet element that triggered the error; the element itself is
    not retained.
    """

    def __init__(self, code, xslt_element, **kwords):
        XsltError.__init__(self, code, **kwords)
        # Copy just the location details off `xslt_element`.
        self.uri = xslt_element.baseUri
        self.lineno = xslt_element.lineNumber
        self.offset = xslt_element.columnNumber
        self.tagname = xslt_element.nodeName

    def __str__(self):
        from gettext import gettext as _
        template = _("Stylesheet '%s', line %s, column %s, in '%s': %s")
        location = (self.uri, self.lineno, self.offset, self.tagname)
        return template % (location + (self.message,))
class XsltRuntimeError(XsltError, RuntimeError):
    """XSLT error raised while a transform is executing, tagged with the
    location of the instruction that caused it."""

    @staticmethod
    def _tag_location(error, xslt_element):
        # Attach only the location details taken from `xslt_element`.
        error.uri = xslt_element.baseUri
        error.lineno = xslt_element.lineNumber
        error.offset = xslt_element.columnNumber
        error.tagname = xslt_element.nodeName

    @classmethod
    def update_error(cls, error, xslt_element):
        """Retarget an existing XsltError as a runtime error, adding
        location information from `xslt_element`."""
        error.__class__ = cls
        cls._tag_location(error, xslt_element)

    def __init__(self, code, xslt_element, **kwords):
        XsltError.__init__(self, code, **kwords)
        self._tag_location(self, xslt_element)

    def __str__(self):
        from gettext import gettext as _
        template = _("Stylesheet '%s', line %s, column %s, in '%s': %s")
        return template % (self.uri, self.lineno, self.offset,
                           self.tagname, self.message)
def transform(source, transforms, params=None, output=None):
    """Apply one or more XSLT transforms to a source document.

    Returns a result object.

    source - XML source document as a string (not a Unicode object),
        file-like object (stream), file path, URI, or
        amara.lib.inputsource instance.  If a string or stream, it must
        be self-contained XML (i.e. not requiring access to any other
        resource such as external entities or includes).
    transforms - XSLT document (or list thereof) as a string, stream,
        URL, file path, or amara.lib.inputsource instance.
    params - optional dictionary of stylesheet parameters; keys may be
        unicode objects (no namespace) or (uri, localname) tuples.
    output - optional file-like object to which output is written
        (incrementally, as processed).
    """
    # Imports are deferred to call time: slightly less efficient, but it
    # avoids circular-import problems at module load.
    from amara.lib import inputsource
    from amara.xpath.util import parameterize
    from amara.xslt.result import streamresult, stringresult
    from amara.xslt.processor import processor
    parameters = parameterize(params) if params else {}
    proc = processor()
    # Accept a single stylesheet or a sequence of them.
    if isinstance(transforms, (list, tuple)):
        sheets = transforms
    else:
        sheets = [transforms]
    for sheet in sheets:
        proc.append_transform(inputsource(sheet))
    result = stringresult() if output is None else streamresult(output)
    return proc.run(inputsource(source), parameters, result)
def launch(*args, **kwargs):
    """Command-line driver: apply stylesheets to a source document.

    args[0] is the source document; the remaining positional arguments
    are stylesheets.  kwargs may carry 'define', a list of 'name=value'
    strings supplying stylesheet parameters.  Output goes to stdout.
    """
    source = args[0]
    transforms = args[1:]
    out = sys.stdout
    # Split on the first '=' only, so parameter values may themselves
    # contain '=' (previously such defines raised ValueError).
    params = dict(k.split(u'=', 1) for k in kwargs.get('define', []))
    transform(source, transforms, params=params, output=out)
    return
# Usage text printed for -h/--help (delivered via the Usage exception).
help_message = '''
Amara 2.x. Command line support for XSLT processing.
'''
class Usage(Exception):
    """Signals a command-line usage error; `msg` holds the explanation."""

    def __init__(self, message):
        self.msg = message
def main(argv=None):
    """Command-line entry point.

    Parses options (-h/--help, -D/--define name=value, -v) and passes the
    remaining positional arguments (source document, then stylesheets) to
    launch().  Returns 2 on a usage error.
    """
    if argv is None:
        argv = sys.argv
    try:
        try:
            # NOTE(review): `getopt` is not imported in this excerpt --
            # presumably imported near the top of the file; confirm.
            opts, args = getopt.getopt(argv[1:], "hD:v", ["help", "define="])
        except getopt.error, msg:
            raise Usage(msg)
        # option processing
        kwargs = {}
        for option, value in opts:
            if option == "-v":
                # NOTE(review): `verbose` is assigned but never read.
                verbose = True
            if option in ("-h", "--help"):
                raise Usage(help_message)
            if option in ("-D", "--define"):
                # Collect name=value stylesheet parameter definitions.
                kwargs.setdefault('define', []).append(value)
    except Usage, err:
        print >> sys.stderr, sys.argv[0].split("/")[-1] + ": " + str(err.msg)
        print >> sys.stderr, "\t for help use --help"
        return 2
    launch(*args, **kwargs)
# Script entry point.
if __name__ == "__main__":
    sys.exit(main())
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/__init__.py
|
__init__.py
|
from amara import tree
from amara.namespaces import EXTENSION_NAMESPACE
from amara.xslt.tree import xslt_element, content_model, attribute_types
# Public interface of this extension module.
__all__ = ('extension_namespaces', 'extension_functions', 'extension_elements')
# Namespace reserved for 4Suite/Amara internal output elements (zz: prefix).
RESERVED_NAMESPACE = u'http://xmlns.4suite.org/reserved'
class dump_keys_element(xslt_element):
    # NOTE: the string below is not a real docstring (the trailing
    # `% RESERVED_NAMESPACE` makes it an expression), but it documents the
    # element's behavior.
    """
    f:dump-keys reports the XSLT keys that have been defined, and the
    nodes they identify, for the document that owns the context node.
    Keys will only be reported if key() has been evaluated prior to
    the instantiation of this element. The key() evaluation must have
    been performed with a context node that is from the same document
    as the context node for this element.

    This extension element is useful for debugging.

    By default, the key data is exposed as nodes with this structure:

    <zz:KeyDump xmlns:zz="%s">
      <zz:Key name="keyName">
        <zz:MatchSet value="keyUseValue">
          (representation of nodes matched by the key)
        </zz:MatchSet>
        ...
      </zz:Key>
      ...
    </zz:KeyDump>

    zz:Key elements will be in random order.
    zz:MatchSet elements will be in document order.

    The node representation will be a copy of each of the nodes,
    except for attributes. Attribute nodes matched by the key will
    manifest as comment nodes with the content "Attribute: name=value".

    If raw="yes", the keys will be emitted as a stylesheet message
    (as if via xsl:message) and the format will be their Python repr()
    representation.

    If force-update="yes" all keys will be computed on all documents
    that have been loaded into the processor.

    4Suite evaluates keys lazily, which means that you could have
    situations where f:dump-keys returns unexpected empty results
    because the key of interest has not yet been invoked.
    """ % RESERVED_NAMESPACE

    # The element takes no content.
    content_model = content_model.empty
    attribute_types = {
        'raw': attribute_types.yesno_avt(
            default='no',
            description="Present keys in a compact non-XML format"),
        'force-update': attribute_types.yesno_avt(
            default='no',
            description="Force evaluation of all keys on all loaded documents"),
        }

    def instantiate(self, context):
        # Optionally force evaluation of all keys first.
        if self._force_update.evaluate(context):
            context.update_keys()
        doc = context.node.xml_root
        #param = (EXTENSION_NAMESPACE, 'indices')
        #if param in processor.extension_parameters:
        #    for k, v in processor.extension_parameters[param].items():
        #        # Dummy to xsl:key storage format
        #        xkeys[(None, k)] = v
        if self._raw.evaluate(context):
            # Compact form: emit one stylesheet message with repr()s.
            lines = []
            for key_name in sorted(context.keys):
                lines.append('Key: %r' % key_name)
                string_values = context.keys[key_name][doc]
                for string_value in sorted(string_values, key=unicode):
                    nodes = string_values[string_value]
                    lines.append(' %r=%r' % (string_value, nodes))
            context.message('\n'.join(lines))
        else:
            # Structured form: emit the zz:KeyDump element tree.
            context.start_element(u'zz:KeyDump', RESERVED_NAMESPACE)
            for key_name in sorted(context.keys):
                context.start_element(u'zz:Key', RESERVED_NAMESPACE)
                context.attribute(u'name', key_name[1])
                string_values = context.keys[key_name][doc]
                for string_value in sorted(string_values, key=unicode):
                    context.start_element(u'zz:MatchSet', RESERVED_NAMESPACE)
                    context.attribute(u'value', string_value)
                    for node in string_values[string_value]:
                        if isinstance(node, tree.attribute):
                            # Attributes cannot be copied directly into the
                            # result; represent them as comments instead.
                            data = u"Attribute: %s=%s" % (node.xml_qname,
                                                          node.xml_value)
                            context.comment(data)
                        else:
                            context.copy_node(node)
                    context.end_element(u'zz:MatchSet', RESERVED_NAMESPACE)
                context.end_element(u'zz:Key', RESERVED_NAMESPACE)
            context.end_element(u'zz:KeyDump', RESERVED_NAMESPACE)
        return
## XSLT Extension Module Interface ####################################
# Maps extension namespace URIs to their preferred prefixes.
extension_namespaces = {
    EXTENSION_NAMESPACE: 'f',
    }
# This module exports no extension functions.
extension_functions = {
    }
# Maps (namespace-uri, local-name) to the implementing element class.
extension_elements = {
    (EXTENSION_NAMESPACE, 'dump-keys'): dump_keys_element,
    }
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/extensions/elements.py
|
elements.py
|
from amara import tree
from amara.namespaces import EXTENSION_NAMESPACE
from amara.xslt.tree import xslt_element, content_model, attribute_types
# Public interface of this extension module.
__all__ = ('extension_namespaces', 'extension_functions', 'extension_elements')
# Namespace reserved for 4Suite/Amara internal output elements (zz: prefix).
RESERVED_NAMESPACE = u'http://xmlns.4suite.org/reserved'
class dump_keys_element(xslt_element):
    # NOTE: the string below is not a real docstring (the trailing
    # `% RESERVED_NAMESPACE` makes it an expression), but it documents the
    # element's behavior.
    """
    f:dump-keys reports the XSLT keys that have been defined, and the
    nodes they identify, for the document that owns the context node.
    Keys will only be reported if key() has been evaluated prior to
    the instantiation of this element. The key() evaluation must have
    been performed with a context node that is from the same document
    as the context node for this element.

    This extension element is useful for debugging.

    By default, the key data is exposed as nodes with this structure:

    <zz:KeyDump xmlns:zz="%s">
      <zz:Key name="keyName">
        <zz:MatchSet value="keyUseValue">
          (representation of nodes matched by the key)
        </zz:MatchSet>
        ...
      </zz:Key>
      ...
    </zz:KeyDump>

    zz:Key elements will be in random order.
    zz:MatchSet elements will be in document order.

    The node representation will be a copy of each of the nodes,
    except for attributes. Attribute nodes matched by the key will
    manifest as comment nodes with the content "Attribute: name=value".

    If raw="yes", the keys will be emitted as a stylesheet message
    (as if via xsl:message) and the format will be their Python repr()
    representation.

    If force-update="yes" all keys will be computed on all documents
    that have been loaded into the processor.

    4Suite evaluates keys lazily, which means that you could have
    situations where f:dump-keys returns unexpected empty results
    because the key of interest has not yet been invoked.
    """ % RESERVED_NAMESPACE

    attribute_types = {
        'raw': attribute_types.yesno_avt(
            default='no',
            description="Present keys in a compact non-XML format"),
        'force-update': attribute_types.yesno_avt(
            default='no',
            description="Force evaluation of all keys on all loaded documents"),
        }

    def instantiate(self, context):
        # Older 4Suite-style implementation: key data lives on the processor.
        processor = context.processor
        if self._force_update.evaluate(context):
            context.transform.updateAllKeys(context, processor)
        doc = context.node.xml_root
        try:
            xkeys = processor.keys[doc]
        except KeyError:
            xkeys = {}
        # Merge in externally supplied index data, if any.
        param = (EXTENSION_NAMESPACE, 'indices')
        if param in processor.extensionParams:
            for k, v in processor.extensionParams[param].items():
                #Dummy to xsl:key storage format
                xkeys[(None, k)] = v
        if self._raw.evaluate(context):
            # Compact form: one message with the repr() of the key table.
            processor.xslMessage(repr(xkeys))
        else:
            # Structured form: emit the zz:KeyDump element tree.
            # NOTE(review): EMPTY_NAMESPACE is not imported in this module's
            # visible header (only EXTENSION_NAMESPACE is) -- confirm it is
            # imported elsewhere, e.g. from amara.namespaces.
            context.start_element(u'zz:KeyDump', RESERVED_NAMESPACE)
            for k, v in xkeys.iteritems():
                context.start_element(u'zz:Key', RESERVED_NAMESPACE)
                context.attribute(u'name', k[1], EMPTY_NAMESPACE)
                for kk, vv in v.iteritems():
                    context.start_element(u'zz:MatchSet', RESERVED_NAMESPACE)
                    context.attribute(u'value', kk, EMPTY_NAMESPACE)
                    for node in vv:
                        if isinstance(node, tree.attribute):
                            # Attributes cannot be copied directly into the
                            # result; represent them as comments instead.
                            data = u"Attribute: %s=%s" % (node.xml_qname,
                                                          node.xml_value)
                            context.comment(data)
                        else:
                            context.copy_node(node)
                    context.end_element(u'zz:MatchSet', RESERVED_NAMESPACE)
                context.end_element(u'zz:Key', RESERVED_NAMESPACE)
            context.end_element(u'zz:KeyDump', RESERVED_NAMESPACE)
        return
## XSLT Extension Module Interface ####################################
# Maps extension namespace URIs to their preferred prefixes.
extension_namespaces = {
    EXTENSION_NAMESPACE: 'f',
    }
# This module exports no extension functions.
extension_functions = {
    }
# Maps (namespace-uri, local-name) to the implementing element class.
extension_elements = {
    (EXTENSION_NAMESPACE, 'dump-keys'): dump_keys_element,
    }
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/extensions/functions.py
|
functions.py
|
from amara import tree
from amara.xpath.locationpaths import nodetests
from amara.xslt import XsltError
# Principal node types for pattern axes: the child axis selects elements,
# the attribute axis selects attribute nodes.
child_axis = tree.element
attribute_axis = tree.attribute
class patterns(tuple):
    """A list of alternative XSLT patterns ('a | b'): a node matches the
    whole when it matches any member pattern."""

    def match(self, context, node):
        for pattern in self:
            if pattern.match(context, node):
                return True
        return False

    def pprint(self, indent=''):
        # Parenthesized print works identically in Python 2 and 3.
        print(indent + repr(self))
        # BUG FIX: iterate over self (this tuple of patterns), not over the
        # class object `patterns`, which raised TypeError at runtime.
        for pattern in self:
            pattern.pprint(indent + ' ')
        return

    def __str__(self):
        return ' | '.join(map(str, self))

    def __repr__(self):
        return '<Patterns at %x: %s>' % (id(self), str(self))
class pattern(nodetests.node_test):
    """A single compiled XSLT pattern: a chain of (axis, node-test,
    ancestor?) steps matched right-to-left against a node and its
    ancestors."""

    def __init__(self, steps):
        # The steps are already in reverse order
        self.steps = steps
        self.priority = 0.5
        axis_type, node_test, ancestor = steps[0]
        # For the attribute axis the principal node type is the axis itself;
        # otherwise it comes from the last step's node test.
        if axis_type == attribute_axis:
            node_type = axis_type
        else:
            node_type = node_test.node_type
        self.node_type = node_type
        # Multi-step patterns expose themselves as the node test, with no
        # single axis type.
        if len(steps) > 1:
            node_test, axis_type = self, None
        self.name_key = node_test.name_key
        self.node_test = node_test
        self.axis_type = axis_type
        return

    def match(self, context, node, principal_type=None):
        """Return 1 if `node` matches this pattern, else 0."""
        # The last step must match the node itself.
        (axis_type, node_test, ancestor) = self.steps[0]
        if not node_test.match(context, node, axis_type):
            return 0
        # The remaining steps must match along the ancestor chain.
        for (axis_type, node_test, ancestor) in self.steps[1:]:
            # Move up the tree
            node = node.xml_parent
            if ancestor:
                # '//' separator: any ancestor may satisfy the step.
                while node:
                    if node_test.match(context, node, axis_type):
                        break
                    node = node.xml_parent
                else:
                    # We made it to the document without a match
                    return 0
            elif node is None: return 0
            elif not node_test.match(context, node, axis_type):
                return 0
        return 1

    def pprint(self, indent=''):
        print indent + repr(self)

    def __str__(self):
        # Rebuild the textual pattern from the reversed steps.
        result = ''
        for (axis, test, ancestor) in self.steps:
            if axis == attribute_axis:
                step = '@' + str(test)
            else:
                step = str(test)
            result = step + (ancestor and '//' or '/') + result
        # remove trailing slash
        return result[:-1]
class predicated_test(nodetests.node_test):
    """A node test qualified by predicates; a node matches when it is among
    the siblings that pass both the base test and every predicate."""

    priority = 0.5

    def __init__(self, node_test, predicates):
        self._node_test = node_test
        self._predicates = predicates
        # Mirror the wrapped test's dispatch attributes.
        self.name_key = node_test.name_key
        self.node_type = node_test.node_type

    def match(self, context, node, principal_type):
        parent = node.xml_parent
        if principal_type == tree.attribute:
            candidates = parent.xml_attributes.nodes()
        elif not parent:
            # A parentless node is the document itself; it cannot match.
            return False
        else:
            # Amara nodes are iterable (over their children).
            candidates = parent
        # Keep only siblings passing the base node test (lazily).
        passing = (candidate for candidate in candidates
                   if self._node_test.match(context, candidate, principal_type))
        # Child and attribute axes are forward axes.
        surviving = self._predicates.filter(passing, context, reverse=False)
        return node in surviving

    def __str__(self):
        return str(self._node_test) + str(self._predicates)
class document_test(nodetests.node_test):
    # Pattern step matching the document (root) node, i.e. the pattern '/'.
    priority = 0.5
    node_type = tree.entity

    def match(self, context, node, principal_type):
        return isinstance(node, self.node_type)

    def __str__(self):
        return '/'
class id_key_test(nodetests.node_test):
    # Pattern step of the form id(...) or key(...): a node matches when it
    # is among the nodes returned by evaluating the wrapped function call.
    priority = 0.5
    node_type = tree.node

    def __init__(self, function):
        # `function` is the compiled id()/key() function-call expression.
        self._function = function

    def match(self, context, node, principal_type):
        return node in self._function.evaluate(context)

    def __str__(self):
        return str(self._function)
import _parser as _xpatternparser
class parser(_xpatternparser.parser):
    """XPattern parser wrapper that converts low-level parse errors into
    XsltError(INVALID_PATTERN)."""

    # Keep direct access to the raw (non-wrapping) parse entry point.
    _parse = _xpatternparser.parser.parse

    def parse(self, expr):
        """Parses the string `expr` into an AST"""
        try:
            return self._parse(expr)
        except _xpatternparser.error, error:
            # Re-raise with stylesheet-friendly location info.
            raise XsltError(XsltError.INVALID_PATTERN, line=error.lineno,
                            column=error.offset, text=error.msg)
# Module-level singleton parser and its convenience entry points.
_parser = parser()
_parse = _parser._parse
parse = _parser.parse
# Interactive debugging console for the pattern parser.
if __name__ == '__main__':
    import sys
    sys.exit(_xpatternparser.console().cmdloop())
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/xpatterns/__init__.py
|
__init__.py
|
from cStringIO import StringIO
import amara
from amara.lib import iri
from amara.lib.xmlstring import isqname
from amara.namespaces import XSL_NAMESPACE, EXTENSION_NAMESPACE
from amara.xpath import datatypes
from amara.xpath.functions import builtin_function
import _functions
# Public interface: the XSLT 1.0 built-in function classes defined here.
__all__ = ('document_function', 'key_function', 'format_number_function',
           'current_function', 'unparsed_entity_uri_function',
           'generate_id_function', 'system_property_function',
           'element_available_function', 'function_available_function',
           )
class document_function(builtin_function):
    """Function: <nodeset> document(object, nodeset?)"""
    name = 'document'
    arguments = (datatypes.xpathobject, datatypes.nodeset)
    defaults = (None,)
    return_type = datatypes.nodeset

    def evaluate_as_nodeset(self, context):
        """Load the document(s) named by the first argument and return
        their root nodes as a node-set.  The optional second argument
        supplies the base URI; loaded documents are cached on the context.
        """
        arg0, arg1 = self._args
        if arg1 is None:
            base_uri = context.instruction.baseUri
        else:
            # Base URI comes from the first node of the second argument.
            for node in arg1.evaluate_as_nodeset(context):
                base_uri = node.xml_base
                break
            else:
                # Second argument evaluated to an empty node-set.
                raise XsltRuntimeError(XsltError.DOC_FUNC_EMPTY_NODESET,
                                       context.instruction)
        arg0 = arg0.evaluate(context)
        if isinstance(arg0, datatypes.nodeset):
            # A node-set argument: one URI per node string-value.
            uris = set()
            for node in arg0:
                uri = datatypes.string(node)
                if arg1 is None:
                    # Each node supplies its own base URI.
                    base_uri = node.xml_base
                assert base_uri or iri.is_absolute(uri)
                uris.add(iri.absolutize(uri, base_uri))
        else:
            uri = datatypes.string(arg0)
            assert base_uri or iri.is_absolute(uri)
            uris = [iri.absolutize(uri, base_uri)]
        documents = context.documents
        sources = context.transform.root.sources
        result = []
        for uri in uris:
            if uri in documents:
                # Already parsed: reuse the cached document.
                doc = documents[uri]
            else:
                if uri in sources:
                    # In-memory source registered with the transform.
                    doc = amara.parse(StringIO(sources[uri]), uri)
                else:
                    doc = amara.parse(uri)
                documents[uri] = doc
            result.append(doc)
        return datatypes.nodeset(result)
    evaluate = evaluate_as_nodeset
class key_function(builtin_function):
    """Function: <nodeset> key(string, object)"""
    name = 'key'
    arguments = (datatypes.string, datatypes.xpathobject)
    return_type = datatypes.nodeset

    def evaluate_as_nodeset(self, context):
        """Look up nodes by key name and use value(s); returns a node-set
        (empty if the key name is undefined or nothing matches)."""
        arg0, arg1 = self._args
        # Get the key table
        key_name = arg0.evaluate_as_string(context)
        if not isqname(key_name):
            raise XsltRuntimeError(XsltError.INVALID_QNAME_ARGUMENT,
                                   context.instruction, value=key_name)
        key_name = context.expand_qname(key_name)
        try:
            key_documents = context.keys[key_name]
        except KeyError:
            # Unknown key name
            return datatypes.nodeset()
        else:
            # Keys are per-document; use the context node's document.
            key_values = key_documents[context.node.xml_root]
        # Get the lookup value
        value = arg1.evaluate(context)
        if isinstance(value, datatypes.nodeset):
            # Node-set lookup value: union of matches for each string-value.
            result = []
            for value in value:
                value = datatypes.string(value)
                if value in key_values:
                    result.extend(key_values[value])
        else:
            value = datatypes.string(value)
            if value in key_values:
                result = key_values[value]
            else:
                result = ()
        return datatypes.nodeset(result)
    evaluate = evaluate_as_nodeset
class format_number_function(builtin_function):
    """Function: <string> format-number(number, string, string?)"""
    name = 'format-number'
    arguments = (datatypes.number, datatypes.string, datatypes.string)
    defaults = (None,)
    return_type = datatypes.string

    def evaluate_as_string(self, context):
        """Format a number per the given picture string, optionally using a
        named xsl:decimal-format.  Raises UNDEFINED_DECIMAL_FORMAT for an
        unknown format name."""
        arg0, arg1, arg2 = self._args
        number = arg0.evaluate_as_number(context)
        pattern = arg1.evaluate_as_string(context)
        if arg2:
            # Third argument names an xsl:decimal-format to use.
            qname = arg2.evaluate_as_string(context)
            name = context.expand_qname(qname)
            try:
                format = context.transform.decimal_formats[name]
            except KeyError:
                raise XsltRuntimeError(XsltError.UNDEFINED_DECIMAL_FORMAT,
                                       self, name=qname)
        else:
            format = None
        result = _functions.decimal_format(number, pattern, format)
        return datatypes.string(result)
class current_function(builtin_function):
    """Function: <nodeset> current()"""
    name = 'current'
    arguments = ()
    return_type = datatypes.nodeset

    def evaluate_as_nodeset(self, context):
        # XSLT's current(): a singleton node-set holding the current node.
        singleton = (context.current_node,)
        return datatypes.nodeset(singleton)
    evaluate = evaluate_as_nodeset
class unparsed_entity_uri_function(builtin_function):
    """Function: <string> unparsed-entity-uri(string)"""
    name = 'unparsed-entity-uri'
    arguments = (datatypes.string,)
    return_type = datatypes.string

    def evaluate_as_string(self, context):
        # Look up the named unparsed entity in the context node's document;
        # an unknown name yields the empty string per XSLT 1.0.
        (name_expr,) = self._args
        entity_name = name_expr.evaluate_as_string(context)
        entities = context.node.xml_root.xml_unparsed_entities
        try:
            location = entities[entity_name]
        except KeyError:
            return datatypes.EMPTY_STRING
        return datatypes.string(location)
class generate_id_function(builtin_function):
    """Function: <string> generate-id(nodeset?)

    Returns a unique identifier for the first node of the argument
    node-set, or for the context node when the argument is omitted.
    An empty node-set yields the empty string.
    """
    name = 'generate-id'
    arguments = (datatypes.nodeset,)
    defaults = (None,)
    return_type = datatypes.string

    def evaluate_as_string(self, context):
        (nodeset_expr,) = self._args
        if nodeset_expr is None:
            # No argument supplied: identify the context node itself.
            return datatypes.string(context.node.xml_nodeid)
        nodes = nodeset_expr.evaluate_as_nodeset(context)
        if nodes:
            # Only the first node of the node-set is identified.
            return datatypes.string(nodes[0].xml_nodeid)
        return datatypes.EMPTY_STRING
    evaluate = evaluate_as_string
class system_property_function(builtin_function):
    """Function: <object> system-property(string)

    Returns the value of the named system property (XSLT 1.0,
    section 12.4).  The properties required by the spec --
    ``xsl:version``, ``xsl:vendor`` and ``xsl:vendor-url`` -- are
    supported, plus a few implementation-specific ones in the
    extension namespace.  Unknown property names yield the empty
    string, as the spec requires.
    """
    name = 'system-property'
    arguments = (datatypes.string,)
    return_type = datatypes.xpathobject

    def evaluate(self, context):
        arg0, = self._args
        qname = arg0.evaluate_as_string(context)
        namespace, local = context.expand_qname(qname)
        if namespace == XSL_NAMESPACE:
            if local == 'version':
                return datatypes.number(1)
            elif local == 'vendor':
                # BUGFIX: was misspelled 'vender', so conformant
                # system-property('xsl:vendor') calls always returned ''.
                return datatypes.string('Amara')
            elif local == 'vendor-url':
                # BUGFIX: was misspelled 'vender-url' (see above).
                return datatypes.string('http://hg.4suite.org/amara')
        elif namespace == EXTENSION_NAMESPACE:
            if local == 'version':
                return datatypes.string(__version__)
            elif local == 'platform':
                return datatypes.string(sys.platform)
            elif local == 'tempdir':
                # Unimplemented stub (bare raise preserved from original).
                raise
        elif namespace == 'http://xmlns.4suite.org/xslt/env-system-property':
            # Environment-variable property lookup: unimplemented stub.
            raise
        # Unknown property names return the empty string per XSLT 1.0 12.4.
        return datatypes.EMPTY_STRING
class element_available_function(builtin_function):
    """Function: <boolean> element-available(string)

    Tests whether the named element is available as an instruction:
    either one of the built-in XSLT elements or a registered extension
    element on the current transform.
    """
    name = 'element-available'
    arguments = (datatypes.string,)
    return_type = datatypes.boolean

    def evaluate_as_boolean(self, context):
        (name_expr,) = self._args
        element_qname = name_expr.evaluate_as_string(context)
        expanded = namespace, local = context.expand_qname(element_qname)
        if namespace is None:
            # An unprefixed QName never names an instruction.
            return datatypes.FALSE
        if namespace == XSL_NAMESPACE:
            from amara.xslt.reader import ELEMENT_CLASSES
            found = local in ELEMENT_CLASSES
        else:
            found = expanded in context.transform.extension_elements
        if found:
            return datatypes.TRUE
        return datatypes.FALSE
    evaluate = evaluate_as_boolean
class function_available_function(builtin_function):
    """Function: <boolean> function-available(string)

    Tests whether a function with the given expanded-name is present
    in the current context's function library.
    """
    name = 'function-available'
    arguments = (datatypes.string,)
    return_type = datatypes.boolean

    def evaluate_as_boolean(self, context):
        (name_expr,) = self._args
        function_qname = name_expr.evaluate_as_string(context)
        expanded = context.expand_qname(function_qname)
        if expanded in context.functions:
            return datatypes.TRUE
        return datatypes.FALSE
    evaluate = evaluate_as_boolean
|
Amara
|
/Amara-2.0.0a6.tar.bz2/Amara-2.0.0a6/lib/xslt/functions/__init__.py
|
__init__.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.