repo: thijs/cl-sphinx-search
commit: 566117e0d0b73d0dd87ad627639150628a855af6
message: Rename method query -> run-query

diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 0489cd0..65e7198 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,1220 +1,1220 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(match-mode
:accessor match-mode
:initarg :match-mode
:initform +sph-match-all+
:documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of lists")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(geo-anchor
:accessor geo-anchor
:initarg :geo-anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(rank-mode
:accessor rank-mode
:initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute value overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
- @fun{query}, or add a number of queries using @fun{add-query} and
+ @fun{run-query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{set-id-range}
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{set-group-by}
@see{set-group-distinct}
@see{set-select}
@see{reset-filters}
@see{reset-group-by}
@see{reset-overrides}
@see{last-warning}
@see{max-query-time}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
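;; A minimal usage sketch of the two connection styles accepted by
;; set-server. The host, port, and socket path are illustrative values,
;; and example-connect-styles is a hypothetical helper, not part of this
;; library's API.
(defun example-connect-styles ()
  (let ((inet-client (make-instance 'sphinx-client))
        (local-client (make-instance 'sphinx-client)))
    (set-server inet-client :host "127.0.0.1" :port 3312)
    (set-server local-client :path "/var/run/searchd.sock")
    (values inet-client local-client)))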
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set the limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
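;; Hedged sketch of paging with set-limits (example-fetch-page is a
;; hypothetical helper): page 3 with 20 matches per page starts at
;; offset 40.
(defun example-fetch-page (client text page &optional (per-page 20))
  (set-limits client :offset (* (1- page) per-page) :limit per-page)
  (run-query client text))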
(defgeneric set-id-range (client min max)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[min]{minimum id to start searching from}
@arg[max]{maximum id to stop searching at}
@return{client}
@short{Set the id-range to search within (inclusive).}
Set the range of IDs within which to search. The range is inclusive, so
with [0, 450], documents with IDs 0 and 450 will both be found.
"))
(defmethod set-id-range ((client sphinx-client) min max)
(assert (and (numberp min) (numberp max)
(>= max min)))
(setf (min-id client) min)
(setf (max-id client) max)
client)
(defgeneric set-filter (client attribute values-list &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[values-list]{the numeric values to filter on}
@arg[exclude]{if set, exclude the given values}
@return{client}
@short{Sets the results to be filtered on the given attribute.}
@begin{pre}
(set-filter client \"filter_attr\" '(0 2 4 34 55 77))
(set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
@end{pre}
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that match the filter.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-filter ((client sphinx-client) attr values &key (exclude ()))
(assert (and (listp values) (> (length values) 0)))
(dolist (item values)
(assert (numberp item)))
(push `(,+sph-filter-values+ ,attr ,values ,(cond (exclude 1) (t 0))) (filters client))
client)
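;; Hedged sketch (hypothetical attribute names): successive set-filter*
;; calls narrow the result set, so a match must satisfy both conditions
;; below.
(defun example-combined-filters (client)
  (set-filter client "category_id" '(2 4 8))
  (set-filter-range client "price" 10 100 :exclude t))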
(defgeneric set-filter-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-range client \"filter_attr\" 45 99)
(set-filter-range client \"other_attr\" 2 8 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude ()))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
(defgeneric set-geo-anchor (client latitude-attribute latitude longitude-attribute longitude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[latitude-attribute]{the latitude attribute name}
@arg[latitude]{latitude in radians}
@arg[longitude-attribute]{the longitude attribute name}
@arg[longitude]{longitude in radians}
@return{client}
@short{Setup anchor point for geolocation.}
@begin{pre}
(set-geo-anchor client \"latitude_attr\" 45.231 \"longitude_attribute\" 4.5)
@end{pre}
Setup anchor point for using geosphere distance calculations in
filters and sorting. Distance will be computed with respect to
this point, and will be included in result output.
To actually use this to filter on results a certain distance from
the anchor point, use something like:
@begin{pre}
(set-filter-float-range sph \"@@geodist\" 0 5000)
@end{pre}
This will filter the results to be closer than 5 km from the anchor
point.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-geo-anchor ((client sphinx-client) lat-attr lat lon-attr lon)
(assert (and (stringp lat-attr) (stringp lon-attr) (numberp lat) (numberp lon)))
(setf (geo-anchor client) (list lat-attr lat lon-attr lon))
client)
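;; Hedged sketch (hypothetical helper and attribute names): set-geo-anchor
;; expects radians, so degree coordinates must be converted first; the
;; float-range filter on "@geodist" then keeps matches within roughly 5 km,
;; as in the docstring above.
(defun example-anchor-from-degrees (client lat-deg lon-deg)
  (flet ((radians (deg) (* deg (/ pi 180))))
    (set-geo-anchor client "latitude_attr" (radians lat-deg)
                    "longitude_attr" (radians lon-deg))
    (set-filter-float-range client "@geodist" 0.0 5000.0)))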
(defgeneric set-group-by (client attribute function &optional group-sort)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute name to group by}
@arg[function]{the grouping function to use}
@arg[group-sort]{the sorting clause for group-by}
@return{client}
@short{Set grouping options.}
@see{set-group-by}
@see{set-group-distinct}
@begin{pre}
(set-group-by client \"whatever_attr\" +sph-groupby-attr+ \"group asc\")
(set-group-by client \"date_attr\" +sph-groupby-day+)
@end{pre}
Sets attribute and function of results grouping.
In grouping mode, all matches are assigned to different groups based on
grouping function value. Each group keeps track of the total match
count, and the best match (in this group) according to current sorting
function. The final result set contains one best match per group, with
grouping function value and matches count attached.
@code{attribute} is any valid attribute. Use @fun{reset-group-by}
to disable grouping.
@code{function} is one of:
@begin{dl}
@dt[+sph-groupby-day+]{Group by day (assumes timestamp type attribute
of form YYYYMMDD)}
@dt[+sph-groupby-week+]{Group by week (assumes timestamp type attribute
of form YYYYNNN)}
@dt[+sph-groupby-month+]{Group by month (assumes timestamp type
attribute of form YYYYMM)}
@dt[+sph-groupby-year+]{Group by year (assumes timestamp type attribute
of form YYYY)}
@dt[+sph-groupby-attr+]{Group by attribute value}
@dt[+sph-groupby-attrpair+]{Group by two attributes, being the given
attribute and the attribute that immediately follows it in the sequence
of indexed attributes. The specified attribute may therefore not be the
last of the indexed attributes}
@end{dl}
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal
Sphinx attributes:
@begin{dl}
@dt[@id]{document ID}
@dt[@weight, @rank, @relevance]{match weight}
@dt[@group]{group by function value}
@dt[@count]{number of matches in group}
@end{dl}
The default mode is to sort by group-by value in descending order,
i.e. by \"@@group desc\".
In the result set, @code{total-found} contains the total number of
matching groups over the whole index.
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported
in @code{total-found} than actually present. @code{count} might
also be underestimated.
For example, if sorting by relevance and grouping by a \"published\"
attribute with +sph-groupby-day+ function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (i.e. recent days first).
"))
(defmethod set-group-by ((client sphinx-client) attr func &optional sort)
(assert (and (stringp attr) (stringp sort) (find func +sph-sort-functions+)))
(setf (group-by client) attr)
(setf (group-function client) func)
(setf (group-sort client) sort)
client)
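;; Hedged sketch (hypothetical attribute names): one best match per day,
;; with distinct author counts attached. Note that the assert above means
;; the optional sort clause must in practice be supplied as a string.
(defun example-group-per-day (client)
  (set-group-by client "date_attr" +sph-groupby-day+ "@group desc")
  (set-group-distinct client "author_id"))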
(defgeneric set-group-distinct (client attribute)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to use for count-distinct queries}
@return{client}
@short{Set count-distinct attribute for group-by queries.}
@see{set-group-by}
@see{set-group-distinct}
@see{reset-group-by}
"))
(defmethod set-group-distinct ((client sphinx-client) attribute)
(assert (stringp attribute))
(setf (group-distinct client) attribute)
client)
(defgeneric set-override (client attribute type values)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to override}
@arg[type]{the attribute type as defined in Sphinx config}
@arg[values]{an alist mapping document IDs to attribute values}
@return{client}
@short{Set attribute values overrides.}
There can be only one override per attribute.
@code{values} must be an alist that maps document IDs to attribute
values.
@begin{pre}
(set-override client \"test_attr\" +sph-attr-integer+ '((4314 . 3) (2443 . 2)))
@end{pre}
In the example above, for the document with ID 4314, Sphinx will see an
attribute value for the @code{attribute} called 'test_attr' of 3. And
for the document with ID 2443 it will see 2, while the rest will be what
it was when the indexer was last run.
"))
(defmethod set-override ((client sphinx-client) attribute type values)
(assert (and (stringp attribute) (find type +sph-attr-types+) (listp values)))
(push (cons attribute values) (overrides client))
client)
(defgeneric set-select (client select)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[select]{the select string}
@return{client}
@short{Set the select clause.}
Sets the select clause, listing specific attributes to fetch, and
expressions to compute and fetch. Clause syntax mimics SQL.
The select clause is very similar to the part of a typical SQL query
between @code{SELECT} and @code{FROM}. It lets you choose what
attributes (columns) to fetch, and also what expressions over the
columns to compute and fetch. A difference from SQL is that expressions
must always be aliased to a correct identifier (consisting of letters
and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
computation results can be returned under a 'normal' name in the result
set, used in other clauses, etc.
Everything else is basically identical to SQL. Star ('*') is supported.
Functions are supported. An arbitrary number of expressions is supported.
Computed expressions can be used for sorting, filtering, and grouping,
just as the regular attributes.
Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
using GROUP BY.
Examples:
@begin{pre}
(set-select sph \"*, (user_karma+ln(pageviews))*0.1 AS myweight\" )
(set-select sph \"exp_years, salary_gbp*{$gbp_usd_rate@} AS salary_usd, IF(age>40,1,0) AS over40\" )
(set-select sph \"*, AVG(price) AS avgprice\" )
@end{pre}
"))
(defmethod set-select ((client sphinx-client) select)
(assert (stringp select))
(setf (select client) select)
client)
(defgeneric reset-filters (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Reset the filters.}
Clear all filters, including the geolocation anchor point.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod reset-filters ((client sphinx-client))
(setf (filters client) ())
(setf (geo-anchor client) ())
client)
(defgeneric reset-group-by (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Clear all the group-by settings.}
@see{set-group-by}
@see{set-group-distinct}
@see{reset-group-by}
"))
(defmethod reset-group-by ((client sphinx-client))
(setf (group-by client) "")
(setf (group-function client) +sph-groupby-day+)
(setf (group-sort client) "@group desc")
(setf (group-distinct client) "")
client)
(defgeneric reset-overrides (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Clear all attribute value overrides.}
"))
(defmethod reset-overrides ((client sphinx-client))
(setf (overrides client) ())
client)
-(defgeneric query (client query &key index comment)
+(defgeneric run-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
- (query client \"test\")
+ (run-query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
-(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
+(defmethod run-query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
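;; Hedged sketch (hypothetical helper): run-query returns NIL on failure,
;; and the reason is then available through last-error, as described in
;; the class docstring.
(defun example-checked-query (client text)
  (let ((result (run-query client text)))
    (if result
        (gethash 'total-found result)
        (format t "query failed: ~a~%" (last-error client)))))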
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
- @see{query}
+ @see{run-query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
  (assert (> (length (reqs client)) 0))
  ;; Capture the request count before the queue is cleared below; the
  ;; response parser needs it.
  (let* ((n-requests (length (reqs client)))
         (requests (pack "Na*" n-requests (reqs client))))
    #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
    (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
      (setf (reqs client) ())
      (when (%connect client)
        (%send client data)
        (let ((response (%get-response client :client-version +ver-command-search+)))
          #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
          (when response
            (setf *response-length* (length response))
            (%parse-response response n-requests)))))))
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
- @see{query}
+ @see{run-query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((geo-anchor client)
(concatenate 'string
(pack "N/a*" (first (geo-anchor client)))
(pack "N/a*" (third (geo-anchor client)))
(%pack-float (second (geo-anchor client)))
(%pack-float (fourth (geo-anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
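;; Hedged sketch of the batching rationale above (hypothetical attribute
;; names): the same full-text query with two different group-by settings
;; travels to searchd in a single request, and run-queries returns one
;; result hash per added query.
(defun example-batched-group-bys (client text)
  (set-group-by client "site_id" +sph-groupby-attr+ "@count desc")
  (add-query client text)
  (set-group-by client "date_attr" +sph-groupby-day+ "@group desc")
  (add-query client text)
  (run-queries client))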
(defmethod %connect ((client sphinx-client))
#+SPHINX-SEARCH-DEBUG (format t "socket is: ~a~%" (%socket client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
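;; Protocol note on the handshake in %connect above: searchd first sends
;; its protocol version as a big-endian 32-bit integer, and the client
;; answers with its own version (here 1) before issuing any commands.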
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(setf (%socket client) ())
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (unpack "N" (subseq response 0 4))))
  (setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
  (subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
(nreverse results)))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
  ;; maphash discards its function's results, so write the packed entries
  ;; into a string stream instead of concatenating them.
  (when (hash-table-p overrides)
    (with-output-to-string (packed)
      (maphash
       #'(lambda (k entry)
           (declare (ignore k))
           (format packed "~a~a"
                   (pack "N/a*" (gethash 'attr entry))
                   (pack "NN" (gethash 'type entry)
                         (hash-table-count (gethash 'values entry))))
           (maphash
            #'(lambda (id v)
                (assert (and (numberp id) (numberp v)))
                (format packed "~a~a"
                        (pack "Q>" id)
                        (cond ((eql (gethash 'type entry) +sph-attr-float+)
                               (%pack-float v))
                              ((eql (gethash 'type entry) +sph-attr-bigint+)
                               (pack "q>" v))
                              (t
                               (pack "N" v)))))
            (gethash 'values entry)))
       overrides))))
(defun %pack-filters (filters)
(with-output-to-string (packed-filters)
(dolist (filter filters)
(let ((type (first filter))
(attr (second filter))
(last-el 3))
(format packed-filters "~a~a~a~a"
(pack "N/a*" attr)
(pack "N" type)
(cond ((eql type +sph-filter-values+)
       (%pack-list-signed-quads (third filter)))
      ((eql type +sph-filter-range+)
       ;; Advance last-el first so this branch returns the packed range.
       (incf last-el)
       (concatenate 'string
                    (pack "q>" (third filter))
                    (pack "q>" (fourth filter))))
      ((eql type +sph-filter-floatrange+)
       (incf last-el)
       (concatenate 'string
                    (%pack-float (third filter))
                    (%pack-float (fourth filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (nth last-el filter)))))))
(defun %pack-hash (hash-table)
  ;; maphash returns NIL, so accumulate the packed pairs into a string.
  (concatenate 'string
               (pack "N" (hash-table-count hash-table))
               (with-output-to-string (s)
                 (maphash #'(lambda (k v)
                              (format s "~a" (pack "N/a*N" k v)))
                          hash-table))))
(defun %pack-list-signed-quads (values-list)
(with-output-to-string (packed-list)
(format packed-list "~a" (pack "N" (length values-list)))
(dolist (value values-list)
(format packed-list "~a" (pack "q>" value)))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
diff --git a/doc/.atdoc.xml b/doc/.atdoc.xml
index a639340..891bdcf 100644
--- a/doc/.atdoc.xml
+++ b/doc/.atdoc.xml
@@ -1,148 +1,148 @@
<?xml version="1.0" encoding="UTF-8"?>
<documentation include-internal-symbols-p="yes" index-title="Sphinx Search API reference" css="index.css" heading="Common Lisp Sphinx Search API"><package name="cl-sphinx-search" id="cl-sphinx-search"><documentation-string>This package provides an interface to the search daemon (<em>searchd</em>) for <a a="http://www.sphinxsearch.com/">Sphinx</a>.<break/> <section section="About Sphinx"><break/>
From the site:<break/> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<break/>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<break/>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> </section><break/> <section section="Synopsis"> <pre><break/>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<break/> </pre> </section><break/> <section section="One class">
There is just one class:<break/> <aboutclass>sphinx-client</aboutclass> </section><break/> <section section="Methods">
Setting options/parameters:<break/> <aboutfun>set-server</aboutfun> <aboutfun>set-limits</aboutfun><break/>
- Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
+ Running queries:<break/> <aboutfun>run-query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a a="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<break/> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><break/>
Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/> This documentation was generated by <a a="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
- the documentation generation system written by David Lichteblau.<break/> </section></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-filter-float-range" name="set-filter-float-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt><elt>(exclude nil)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
+ the documentation generation system written by David Lichteblau.<break/> </section></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-filter-float-range" name="set-filter-float-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
(set-filter-float-range client "filter_attr" 45.231 99)
(set-filter-float-range client "other_attr" 1.32 55.0031 :exclude t) </pre><break/>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <code>min</code> and <code>max</code> (including <code>min</code> and <code>max</code>)
will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that fall within the
given range.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-filters" name="reset-filters" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Reset the filters.</short><break/>
- Clear all filters, including the geolocation anchor point.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt><elt>(index *)</elt><elt>(comment )</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
+ Clear all filters, including the geolocation anchor point.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><break/>
Add a query to the queue of batched queries.<break/> Batch queries enable <code>searchd</code> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<break/>
For instance, running exactly the same query with different group-by settings will enable <code>searchd</code> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<break/>
It returns the new length of the query queue, which is also the index
- of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-overrides" name="reset-overrides" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all attribute value overrides.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-select" name="set-select" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>select</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="select">the select string</arg> <return>client</return> <short>Set the select clause.</short><break/>
+ of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__run-query">run-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-overrides" name="reset-overrides" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all attribute value overrides.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-select" name="set-select" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>select</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="select">the select string</arg> <return>client</return> <short>Set the select clause.</short><break/>
Sets the select clause, listing specific attributes to fetch, and
expressions to compute and fetch. Clause syntax mimics SQL.<break/>
The select clause is very similar to the part of a typical SQL query between <code>SELECT</code> and <code>FROM</code>. It lets you choose what
attributes (columns) to fetch, and also what expressions over the
columns to compute and fetch. A difference from SQL is that expressions
must always be aliased to a correct identifier (consisting of letters
and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
computation results can be returned under a 'normal' name in the result
set, used in other clauses, etc.<break/>
Everything else is basically identical to SQL. Star ('*') is supported.
Functions are supported. Arbitrary amount of expressions is supported.
Computed expressions can be used for sorting, filtering, and grouping,
just as the regular attributes.<break/>
Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
using GROUP BY.<break/>
Examples:<break/> <pre>
(set-select sph "*, (user_karma+ln(pageviews))*0.1 AS myweight" )
(set-select sph "exp_years, salary_gbp*{$gbp_usd_rate} AS salary_usd, IF(age>40,1,0) AS over40" )
- (set-select sph "*, AVG(price) AS avgprice" ) </pre></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt><elt>(index *)</elt><elt>(comment )</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
- (query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
- It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
+ (set-select sph "*, AVG(price) AS avgprice" ) </pre></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
(add-query client "test")
(add-query client "word")
(run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
It returns a list of hashes containing the result of each query. Each hash
- has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt><elt>(offset 0)</elt><elt>(max 1000)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
+ has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__run-query">run-query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-query" name="run-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
+ (run-query client "test") </pre><break/> This method runs a single query through <code>searchd</code>.<break/>
+ It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
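A hypothetical pagination helper built on these limits (fetch-page is illustrative and not part of the package; run-query is documented in this package):<break/> <pre>
;; pages are numbered from 1; PAGE-SIZE matches per page
(defun fetch-page (client query page page-size)
  (set-limits client :offset (* (1- page) page-size) :limit page-size)
  (run-query client query)) </pre><break/>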
- Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-range" name="set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt><elt>(exclude nil)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
+ Set the limits on matches to return. The defaults are offset 0 and a maximum of 1000 matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-range" name="set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
(set-filter-range client "filter_attr" 45 99)
(set-filter-range client "other_attr" 2 8 :exclude t) </pre><break/>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <code>min</code> and <code>max</code> (including <code>min</code> and <code>max</code>)
will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that fall within the
given range.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code>.</return><break/>
- Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt><elt>(host localhost)</elt><elt>(port 3312)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
+ Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
(set-server client :host host :port port)
(set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-by" name="set-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>function</elt><elt>&optional</elt><elt>group-sort</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute name to group by</arg> <arg arg="function">the grouping function to use</arg> <arg arg="group-sort">the sorting clause for group-by</arg> <return>client</return> <short>Set grouping options.</short><break/> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see><break/> <pre>
(set-group-by client "whatever_attr" +sph-groupby-attr+ "group asc")
(set-group-by client "date_attr" +sph-groupby-day+) </pre><break/>
Sets the attribute and function used to group results.<break/>
In grouping mode, all matches are assigned to different groups based on
grouping function value. Each group keeps track of the total match
count, and the best match (in this group) according to the current sorting
function. The final result set contains one best match per group, with
grouping function value and matches count attached.<break/> <code>attribute</code> is any valid attribute. Use <fun id="cl-sphinx-search__fun__reset-group-by">reset-group-by</fun>
to disable grouping.<break/> <code>function</code> is one of:<break/> <dl> <dt dt="+sph-groupby-day+">Group by day (assumes timestamp type attribute of form YYYYMMDD)</dt> <dt dt="+sph-groupby-week+">Group by week (assumes timestamp type attribute of form YYYYNNN)</dt> <dt dt="+sph-groupby-month+">Group by month (assumes timestamp type attribute of form YYYYMM)</dt> <dt dt="+sph-groupby-year+">Group by year (assumes timestamp type attribute of form YYYY)</dt> <dt dt="+sph-groupby-attr+">Group by attribute value</dt> <dt dt="+sph-groupby-attrpair+">Group by two attributes, being the given
attribute and the attribute that immediately follows it in the sequence
of indexed attributes. The specified attribute therefore cannot be the last of the indexed attributes.</dt> </dl><break/>
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal
Sphinx attributes:<break/> <dl> <dt dt="@id">document ID</dt> <dt dt="@weight, @rank, @relevance">match weight</dt> <dt dt="@group">group by function value</dt> <dt dt="@count">number of matches in group</dt> </dl><break/>
The default mode is to sort by the group-by value in descending order,
i.e. by "@group desc".<break/> In the result set, <code>total-found</code> contains the total number of
matching groups over the whole index.<break/>
WARNING: grouping is done in fixed memory and its results are therefore
only approximate; more groups may be reported in <code>total-found</code> than are actually present, and <code>count</code> may
also be underestimated.<break/>
For example, if sorting by relevance and grouping by a "published"
attribute with +sph-groupby-day+ function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (i.e. recent days first).</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-distinct" name="set-group-distinct" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to use for count-distinct queries</arg> <return>client</return> <short>Set count-distinct attribute for group-by queries.</short><break/> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see> <see id="cl-sphinx-search__fun__reset-group-by">reset-group-by</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code>.</return><break/>
Get the last error message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a number; the max query time in milliseconds.</return><break/>
Get the max query time.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-id-range" name="set-id-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>min</elt><elt>max</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="min">minimum id to start searching from</arg> <arg arg="max">maximum id to stop searching at</arg> <return>client</return> <short>Set the id-range to search within (inclusive).</short><break/>
Set the range of IDs within which to search. The range is inclusive, so with
- [0, 450] both 0 and 450 id's will be found.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter" name="set-filter" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>values-list</elt><elt>&key</elt><elt>exclude</elt><elt>(exclude nil)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="values-list">the numeric values to filter on</arg> <arg arg="exclude">if set, exclude the given values</arg> <return>client</return> <short>Sets the results to be filtered on the given attribute.</short><break/> <pre>
+ [0, 450] both IDs 0 and 450 will be found.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter" name="set-filter" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>values-list</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="values-list">the numeric values to filter on</arg> <arg arg="exclude">if set, exclude the given values</arg> <return>client</return> <short>Sets the results to be filtered on the given attribute.</short><break/> <pre>
(set-filter client "filter_attr" '(0 2 4 34 55 77))
(set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><break/>
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that match the filter.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-group-by" name="reset-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all the group-by settings.</short><break/> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see> <see id="cl-sphinx-search__fun__reset-group-by">reset-group-by</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-geo-anchor" name="set-geo-anchor" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>latitude-attribute</elt><elt>latitude</elt><elt>longitude-attribute</elt><elt>longitude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="latitude-attribute">the latitude attribute name</arg> <arg arg="latitude">latitude in radians</arg> <arg arg="longitude-attribute">the longitude attribute name</arg> <arg arg="longitude">longitude in radians</arg> <return>client</return> <short>Setup anchor point for geolocation.</short><break/> <pre>
(set-geo-anchor client "latitude_attr" 45.231 "longitude_attribute" 4.5) </pre><break/>
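As part of a full query this might look like the following (a sketch; the attribute names, coordinates and query string are assumptions, with the coordinates in radians):<break/> <pre>
(let ((sph (make-instance 'sphinx-client)))
  (set-geo-anchor sph "lat_attr" 0.789 "lon_attr" 0.079)
  (set-filter-float-range sph "@geodist" 0 5000)
  (run-query sph "pizza")) </pre><break/>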
Set up the anchor point for using geosphere distance calculations in
filters and sorting. Distance will be computed with respect to
this point, and will be included in result output.<break/>
To filter results to within a certain distance of
the anchor point, use something like:<break/> <pre>
(set-filter-float-range sph "@geodist" 0 5000) </pre><break/>
This will restrict the results to within 5 km of the anchor
- point.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition></external-symbols><internal-symbols><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___set-filter-range" name="%set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>type</elt><elt>attr</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>(exclude nil)</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-functions+" name="+sph-sort-functions+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
+ point.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition></external-symbols><internal-symbols><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___set-filter-range" name="%set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>type</elt><elt>attr</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-functions+" name="+sph-sort-functions+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><break/>
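The error path mentioned below can be checked explicitly (a sketch; run-query and last-error are documented in this package, the index name "test1" is an assumption):<break/> <pre>
(let* ((sph (make-instance 'sphinx-client))
       (result (run-query sph "test" :index "test1")))
  (unless result
    (format t "query failed: ~a~%" (last-error sph)))) </pre><break/>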
The interface to the search daemon goes through this class.<break/>
Set the options and parameters of the search to be performed on an object
- of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__query">query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
+ of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__run-query">run-query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__set-id-range">set-id-range</see> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see> <see id="cl-sphinx-search__fun__set-select">set-select</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see> <see id="cl-sphinx-search__fun__reset-group-by">reset-group-by</see> <see id="cl-sphinx-search__fun__reset-overrides">reset-overrides</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see> <see id="cl-sphinx-search__fun__max-query-time">max-query-time</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__geo-anchor" name="geo-anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" 
package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__match-mode" 
name="match-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-list-signed-quads" name="%pack-list-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" 
name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__rank-mode" name="rank-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__set-override" name="set-override" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>type</elt><elt>values</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to override</arg> <arg arg="type">the attribute type as defined in Sphinx config</arg> <arg arg="values">an alist mapping document IDs to attribute values</arg> <return>client</return> <short>Set attribute values overrides.</short><break/>
There can be only one override per attribute.<break/> <code>values</code> must be an alist that maps document IDs to attribute
values.<break/> <pre>
(set-override client "test_attr" +sph-attr-integer+ '((4314 . 3) (2443 . 2))) </pre><break/>
In the example above, for the document with ID 4314, Sphinx will see a value of 3 for the <code>attribute</code> called 'test_attr', and
for the document with ID 2443 it will see 2, while all other documents keep the value
they had when the indexer was last run.</documentation-string></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
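For callers tracking this commit, the rename is mechanical and the argument list is unchanged (a sketch; the index name "test1" is an assumption):

    ;; before this commit
    (query client "test" :index "test1")
    ;; after this commit
    (run-query client "test" :index "test1")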
diff --git a/doc/index.html b/doc/index.html
index 7be880c..422908e 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -1,8 +1,8 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Sphinx Search API reference</title><link rel="stylesheet" type="text/css" href="index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded">
Index of packages:
</div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><h2 class="page-title"><a href="pages/cl-sphinx-search.html">
Package
- cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> </div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a 
href="pages/cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> </div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-query.html"><tt>run-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a 
href="pages/cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search.html b/doc/pages/cl-sphinx-search.html
index fa9b45d..a30ad31 100644
--- a/doc/pages/cl-sphinx-search.html
+++ b/doc/pages/cl-sphinx-search.html
@@ -1,33 +1,33 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package cl-sphinx-search</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
Package
cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> </div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
From the site:<br><br> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<br><br>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<br><br>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
There is just one class:<br><br> <div class="def"><a href="cl-sphinx-search__class__sphinx-client.html">
Class
sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="cl-sphinx-search__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
- Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path (host localhost) (port 3312))</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff (offset 0) (max 1000))</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
- Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment (index *) (comment ))</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment (index *) (comment ))</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
+ Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
+ Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__run-query.html">Function run-query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a href="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<br><br> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><br><br>
Also used was the Python API supplied with the Sphinx Search v0.9.9-rc2 source download, in the <tt>api/</tt> directory.<br><br> <b>Documentation</b><br><br> This documentation was generated by <a href="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
- the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-query.html"><tt>run-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
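Taken together, the renamed entry points above compose into a short end-to-end flow. A minimal sketch, using only the lambda lists documented on this page; the host, port, and query string are placeholder assumptions:

    (let ((sph (make-instance 'sphinx-client)))
      (set-server sph :host "localhost" :port 3312) ; INET socket, library defaults
      (set-limits sph :offset 0 :limit 20)          ; return the first 20 matches
      (run-query sph "test"))                       ; single query via the renamed function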
diff --git a/doc/pages/cl-sphinx-search__class__sphinx-client.html b/doc/pages/cl-sphinx-search__class__sphinx-client.html
index f541464..51c39ff 100644
--- a/doc/pages/cl-sphinx-search__class__sphinx-client.html
+++ b/doc/pages/cl-sphinx-search__class__sphinx-client.html
@@ -1,18 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Class sphinx-client</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Class sphinx-client</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Superclasses</h3><div class="indent"><tt style="color: #777777">common-lisp:standard-object</tt>, <tt style="color: #777777">sb-pcl::slot-object</tt>, <tt style="color: #777777">common-lisp:t</tt></div><h3>Documented Subclasses</h3><div class="indent">
None
</div><h3>Details<a name="details"></a></h3><div class="indent">The sphinx-search class.<br><br> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><br><br>
The interface to the search daemon goes through this class.<br><br>
Set options and settings of the search to be performed on an object
- of this class, and then have it perform one search by calling <a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, or add a number of queries using <a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a> and then calling <a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>.<br><br>
+ of this class, and then have it perform one search by calling <a href="cl-sphinx-search__fun__run-query.html"><tt>run-query</tt></a>, or add a number of queries using <a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a> and then calling <a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>.<br><br>
Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a> function.<br><br> </div></div></td><td valign="top" width="5%">
 &nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides)</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__fun__run-query.html"><tt>run-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides)</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
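The class page describes the contract: a result hash (or list of hashes) on success, otherwise an error retrievable with last-error. A sketch of that check, reusing the page's own localhost:3315 example settings:

    (let* ((sph (make-instance 'sphinx-client :host "localhost" :port 3315))
           (result (run-query sph "test")))
      (or result
          (format *error-output* "searchd error: ~a~%" (last-error sph))))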
diff --git a/doc/pages/cl-sphinx-search__fun___pack-array-signed-quads.html b/doc/pages/cl-sphinx-search__fun___pack-array-signed-quads.html
deleted file mode 100644
index b26791d..0000000
--- a/doc/pages/cl-sphinx-search__fun___pack-array-signed-quads.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %pack-array-signed-quads</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Function
- %pack-array-signed-quads</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%pack-array-signed-quads</tt> (<b>values-list</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun___set-filter-range.html b/doc/pages/cl-sphinx-search__fun___set-filter-range.html
index 6203bab..d4e675f 100644
--- a/doc/pages/cl-sphinx-search__fun___set-filter-range.html
+++ b/doc/pages/cl-sphinx-search__fun___set-filter-range.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %set-filter-range</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- %set-filter-range</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%set-filter-range</tt> (<b>client</b>&nbsp;<b>type</b>&nbsp;<b>attr</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>(exclude nil)</b>)</div><p style="color: red; font-weight: bold">
+ %set-filter-range</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%set-filter-range</tt> (<b>client</b>&nbsp;<b>type</b>&nbsp;<b>attr</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>exclude</b>)</div><p style="color: red; font-weight: bold">
No documentation string. Possibly unimplemented or incomplete.
</p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__add-query.html b/doc/pages/cl-sphinx-search__fun__add-query.html
index eedaeaf..8d1caf3 100644
--- a/doc/pages/cl-sphinx-search__fun__add-query.html
+++ b/doc/pages/cl-sphinx-search__fun__add-query.html
@@ -1,20 +1,20 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function add-query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- add-query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>add-query</tt> (<b>client</b>Â <b>query</b>Â <b>&key</b>Â <b>index</b>Â <b>comment</b>Â <b>(index *)</b>Â <b>(comment )</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">length of query queue</div><h3>Details<a name="details"></a></h3><div class="indent"> Add a query to a batch request.<br><br> <pre>
+ add-query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>add-query</tt> (<b>client</b>Â <b>query</b>Â <b>&key</b>Â <b>index</b>Â <b>comment</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">length of query queue</div><h3>Details<a name="details"></a></h3><div class="indent"> Add a query to a batch request.<br><br> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><br><br>
Add a query to the queue of batched queries.<br><br> Batch queries enable <tt>searchd</tt> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<br><br>
For instance, running exactly the same query with different group-by settings will enable <tt>searchd</tt> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<br><br>
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.<br><br> <br><br></div></div></td><td valign="top" width="5%">
 &nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-query.html"><tt>run-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
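add-query is documented to return the growing queue length, which also identifies the position of the query just added; assuming run-queries then returns one result hash per queued query in submission order, a batch run looks like this sketch:

    (let ((sph (make-instance 'sphinx-client)))
      (add-query sph "test")            ; => 1 per the docs (queue length after add)
      (add-query sph "word" :index "*") ; => 2
      (run-queries sph))                ; => two result hashes, in order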
diff --git a/doc/pages/cl-sphinx-search__fun__anchor.html b/doc/pages/cl-sphinx-search__fun__anchor.html
deleted file mode 100644
index 9ac4f8f..0000000
--- a/doc/pages/cl-sphinx-search__fun__anchor.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function anchor</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Function
- anchor</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>anchor</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__mode.html b/doc/pages/cl-sphinx-search__fun__mode.html
deleted file mode 100644
index e34f283..0000000
--- a/doc/pages/cl-sphinx-search__fun__mode.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function mode</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Function
- mode</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>mode</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__ranker.html b/doc/pages/cl-sphinx-search__fun__ranker.html
deleted file mode 100644
index e687a18..0000000
--- a/doc/pages/cl-sphinx-search__fun__ranker.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function ranker</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Function
- ranker</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>ranker</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__run-queries.html b/doc/pages/cl-sphinx-search__fun__run-queries.html
index eea27d3..95c9dcf 100644
--- a/doc/pages/cl-sphinx-search__fun__run-queries.html
+++ b/doc/pages/cl-sphinx-search__fun__run-queries.html
@@ -1,15 +1,15 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function run-queries</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
run-queries</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>run-queries</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">nil or a list of hashes</div><h3>Details<a name="details"></a></h3><div class="indent"> Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>.<br><br> <pre>
(add-query client "test")
(add-query client "word")
(run-queries client) </pre><br><br> Query <tt>searchd</tt> with the collected queries added with <tt>add-query</tt>.<br><br>
It returns a list of hashes containing the result of each query. Each hash
has the following keys: <dl><dt>attributes</dt><dd> : a hash-table containing attributes</dd><dt>fields</dt><dd> : a list of fields</dd><dt>matches</dt><dd> : a hash-table containing the matches</dd><dt>status</dt><dd> : the status returned by <tt>searchd</tt></dd><dt>status-message</dt><dd> : the status message returned by <tt>searchd</tt></dd><dt>time</dt><dd> : the time <tt>searchd</tt> took for the query</dd><dt>total</dt><dd> : the total matches returned</dd><dt>total-found</dt><dd> : the total number of matches found</dd><dt>words</dt><dd> : a hash-table containing the matching words with their statistics</dd></dl><br style="clear: both;"><br><br> <br><br></div></div></td><td valign="top" width="5%">
 &nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-query.html"><tt>run-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
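To make the key list above concrete, a sketch that walks the returned result hashes; that the documented keys are represented as Lisp keywords is an assumption here, so verify the exact key type against the source:

    (dolist (res (run-queries sph))   ; sph: a client with queries already queued
      (format t "status ~a: ~a of ~a matches in ~as~%"
              (gethash :status res)
              (gethash :total res)
              (gethash :total-found res)
              (gethash :time res)))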
diff --git a/doc/pages/cl-sphinx-search__fun__query.html b/doc/pages/cl-sphinx-search__fun__run-query.html
similarity index 54%
rename from doc/pages/cl-sphinx-search__fun__query.html
rename to doc/pages/cl-sphinx-search__fun__run-query.html
index 33d76f1..d9940df 100644
--- a/doc/pages/cl-sphinx-search__fun__query.html
+++ b/doc/pages/cl-sphinx-search__fun__run-query.html
@@ -1,12 +1,12 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function run-query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>query</tt> (<b>client</b>Â <b>query</b>Â <b>&key</b>Â <b>index</b>Â <b>comment</b>Â <b>(index *)</b>Â <b>(comment )</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">nil or a hash containing the query results</div><h3>Details<a name="details"></a></h3><div class="indent"> Run a query through <tt>searchd</tt>.<br><br> <pre>
- (query client "test") </pre><br><br> Query <tt>searchd</tt>. This method runs a single query through <tt>searchd</tt>.<br><br>
+ run-query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>run-query</tt> (<b>client</b>Â <b>query</b>Â <b>&key</b>Â <b>index</b>Â <b>comment</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">nil or a hash containing the query results</div><h3>Details<a name="details"></a></h3><div class="indent"> Run a query through <tt>searchd</tt>.<br><br> <pre>
+ (run-query client "test") </pre><br><br> Query <tt>searchd</tt>. This method runs a single query through <tt>searchd</tt>.<br><br>
It returns the results in a hash with the following keys: <dl><dt>attributes</dt><dd> : a hash-table containing attributes</dd><dt>fields</dt><dd> : a list of fields</dd><dt>matches</dt><dd> : a hash-table containing the matches</dd><dt>status</dt><dd> : the status returned by <tt>searchd</tt></dd><dt>status-message</dt><dd> : the status message returned by <tt>searchd</tt></dd><dt>time</dt><dd> : the time <tt>searchd</tt> took for the query</dd><dt>total</dt><dd> : the total matches returned</dd><dt>total-found</dt><dd> : the total number of matches found</dd><dt>words</dt><dd> : a hash-table containing the matching words with their statistics</dd></dl><br style="clear: both;"><br><br> <br><br></div></div></td><td valign="top" width="5%">
 &nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
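Since run-query returns either nil or a single result hash, the matches hash-table can be walked directly; as before, the keyword keys are an assumption of this sketch:

    (let ((res (run-query sph "test")))
      (when res
        (maphash (lambda (doc-id match)   ; document id -> match data
                   (format t "doc ~a => ~a~%" doc-id match))
                 (gethash :matches res))))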
diff --git a/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html b/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html
index 64d67e1..3f5fae9 100644
--- a/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html
+++ b/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html
@@ -1,18 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-filter-float-range</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-filter-float-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-float-range</tt> (<b>client</b>Â <b>attribute</b>Â <b>min</b>Â <b>max</b>Â <b>&key</b>Â <b>exclude</b>Â <b>(exclude nil)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
+ set-filter-float-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-float-range</tt> (<b>client</b>Â <b>attribute</b>Â <b>min</b>Â <b>max</b>Â <b>&key</b>Â <b>exclude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
(set-filter-float-range client "filter_attr" 45.231 99)
(set-filter-float-range client "other_attr" 1.32 55.0031 :exclude t) </pre><br><br>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <tt>min</tt> and <tt>max</tt> (including <tt>min</tt> and <tt>max</tt>)
will be returned.<br><br>
This may be called multiple times with different attributes to
select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that fall within the
given range.<br><br> </div></div></td><td valign="top" width="5%">
 &nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-filter-range.html b/doc/pages/cl-sphinx-search__fun__set-filter-range.html
index 12c7cb4..d1845cc 100644
--- a/doc/pages/cl-sphinx-search__fun__set-filter-range.html
+++ b/doc/pages/cl-sphinx-search__fun__set-filter-range.html
@@ -1,18 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-filter-range</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-filter-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-range</tt> (<b>client</b>Â <b>attribute</b>Â <b>min</b>Â <b>max</b>Â <b>&key</b>Â <b>exclude</b>Â <b>(exclude nil)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
+ set-filter-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-range</tt> (<b>client</b>Â <b>attribute</b>Â <b>min</b>Â <b>max</b>Â <b>&key</b>Â <b>exclude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
(set-filter-range client "filter_attr" 45 99)
(set-filter-range client "other_attr" 2 8 :exclude t) </pre><br><br>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <tt>min</tt> and <tt>max</tt> (including <tt>min</tt> and <tt>max</tt>)
will be returned.<br><br>
This may be called multiple times with different attributes to
select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that fall within the
given range.<br><br> </div></div></td><td valign="top" width="5%">
 &nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-filter.html b/doc/pages/cl-sphinx-search__fun__set-filter.html
index 3b80d3a..a86a9da 100644
--- a/doc/pages/cl-sphinx-search__fun__set-filter.html
+++ b/doc/pages/cl-sphinx-search__fun__set-filter.html
@@ -1,17 +1,17 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-filter</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-filter</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter</tt> (<b>client</b>Â <b>attribute</b>Â <b>values-list</b>Â <b>&key</b>Â <b>exclude</b>Â <b>(exclude nil)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>values-list</tt> -- the numeric values to filter on</li><li><tt>exclude</tt> -- if set, exclude the given values</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given attribute.<br><br> <pre>
+ set-filter</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter</tt> (<b>client</b>Â <b>attribute</b>Â <b>values-list</b>Â <b>&key</b>Â <b>exclude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>values-list</tt> -- the numeric values to filter on</li><li><tt>exclude</tt> -- if set, exclude the given values</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given attribute.<br><br> <pre>
(set-filter client "filter_attr" '(0 2 4 34 55 77))
(set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><br><br>
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.<br><br>
This may be called multiple times with different attributes to
select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that match the filter.<br><br> </div></div></td><td valign="top" width="5%">
 &nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
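All three filter functions above note that they may be called repeatedly to narrow on several attributes at once; a sketch of stacked filters ahead of a query, with attribute names invented for illustration:

    (set-filter client "group_id" '(1 2 3))              ; keep these groups
    (set-filter-range client "karma" 10 100)             ; integer range
    (set-filter-float-range client "geodist" 0.0 5000.0) ; float range
    (set-filter client "deleted" '(1) :exclude t)        ; drop deleted documents
    (run-query client "test")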
diff --git a/doc/pages/cl-sphinx-search__fun__set-limits.html b/doc/pages/cl-sphinx-search__fun__set-limits.html
index 37b777d..3524f0a 100644
--- a/doc/pages/cl-sphinx-search__fun__set-limits.html
+++ b/doc/pages/cl-sphinx-search__fun__set-limits.html
@@ -1,14 +1,14 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-limits</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-limits</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-limits</tt> (<b>client</b>Â <b>&key</b>Â <b>offset</b>Â <b>limit</b>Â <b>max</b>Â <b>cutoff</b>Â <b>(offset 0)</b>Â <b>(max 1000)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>offset</tt> -- the offset to start returning matches from</li><li><tt>limit</tt> -- how many matches to return starting from <tt>offset</tt></li><li><tt>max</tt> -- maximum number of matches to return</li><li><tt>cutoff</tt> -- the cutoff to stop searching at</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the offset, limit, cutoff and max matches to return.<br><br> <pre>
+ set-limits</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-limits</tt> (<b>client</b>Â <b>&key</b>Â <b>offset</b>Â <b>limit</b>Â <b>max</b>Â <b>cutoff</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>offset</tt> -- the offset to start returning matches from</li><li><tt>limit</tt> -- how many matches to return starting from <tt>offset</tt></li><li><tt>max</tt> -- maximum number of matches to return</li><li><tt>cutoff</tt> -- the cutoff to stop searching at</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the offset, limit, cutoff and max matches to return.<br><br> <pre>
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches) </pre><br><br>
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</div></div></td><td valign="top" width="5%">
 &nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
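offset and limit map naturally onto result paging; a hypothetical helper (the name and the 1-based page convention are this sketch's own, not part of the library):

    (defun goto-page (client page &optional (per-page 20))
      "Position CLIENT's result window on 1-based PAGE."
      (set-limits client
                  :offset (* (1- page) per-page)
                  :limit per-page))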
diff --git a/doc/pages/cl-sphinx-search__fun__set-server.html b/doc/pages/cl-sphinx-search__fun__set-server.html
index e69a5fb..0e0922d 100644
--- a/doc/pages/cl-sphinx-search__fun__set-server.html
+++ b/doc/pages/cl-sphinx-search__fun__set-server.html
@@ -1,15 +1,15 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-server</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-server</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-server</tt> (<b>client</b>Â <b>&key</b>Â <b>host</b>Â <b>port</b>Â <b>path</b>Â <b>(host localhost)</b>Â <b>(port 3312)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>host</tt> -- the host to connect to when using an INET socket</li><li><tt>port</tt> -- the port to connect to when using an INET socket</li><li><tt>path</tt> -- the path to the unix domain socket when not using INET</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the server host:port or path to connect to.<br><br> <pre>
+ set-server</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-server</tt> (<b>client</b>Â <b>&key</b>Â <b>host</b>Â <b>port</b>Â <b>path</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>host</tt> -- the host to connect to when using an INET socket</li><li><tt>port</tt> -- the port to connect to when using an INET socket</li><li><tt>path</tt> -- the path to the unix domain socket when not using INET</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the server host:port or path to connect to.<br><br> <pre>
(set-server client :host host :port port)
(set-server client :path unix-path) </pre><br><br> In the first form, sets the <tt>host</tt> (string) and <tt>port</tt> (integer)
details for the searchd server using a network (INET) socket.<br><br> In the second form, where <tt>unix-path</tt> is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.</div></div></td><td valign="top" width="5%">
 &nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
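The details above mention that the UNIX-socket path may optionally carry a unix:// prefix, which the page's example does not show; both calls below should be equivalent (the concrete socket path is an assumption):

    (set-server sph :path "/var/run/searchd.sock")
    (set-server sph :path "unix:///var/run/searchd.sock") ; same target, prefixed form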
diff --git a/doc/pages/cl-sphinx-search__variable__sph.html b/doc/pages/cl-sphinx-search__variable__sph.html
deleted file mode 100644
index 5abe322..0000000
--- a/doc/pages/cl-sphinx-search__variable__sph.html
+++ /dev/null
@@ -1,10 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable sph</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Variable
- sph</h2><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
diff --git a/package.lisp b/package.lisp
index affac69..ddbbe2f 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,104 +1,104 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
- #:query
+ #:run-query
#:add-query
#:run-queries
#:last-error
#:last-warning
#:set-id-range
#:set-filter
#:set-filter-range
#:set-filter-float-range
#:max-query-time
#:set-geo-anchor
#:set-group-by
#:set-group-distinct
#:set-select
#:reset-filters
#:reset-group-by
#:reset-overrides)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
- @aboutfun{query}
+ @aboutfun{run-query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
@begin[Acknowledgements]{section}
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
@a[http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/]{here}), which
itself says:
@begin{pre}
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
@end{pre}
Also used was the Python API supplied with the Sphinx Search v0.9.9-rc2
source download, in the @code{api/} directory.
@b{Documentation}
This documentation was generated by @a[http://www.lichteblau.com/atdoc/doc/]{atdoc},
the documentation generation system written by David Lichteblau.
@end{section}
"))
|
thijs/cl-sphinx-search
|
4c86983fbefa59108f23a455716c68c074b5e1d1
|
Leave overrides as hash-table for now
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 2b78553..0489cd0 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,703 +1,703 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(match-mode
:accessor match-mode
:initarg :match-mode
:initform +sph-match-all+
:documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of lists")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(geo-anchor
:accessor geo-anchor
:initarg :geo-anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(rank-mode
:accessor rank-mode
:initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
- :initform ()
+ :initform (make-hash-table)
:documentation "per-query attribute value overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{set-id-range}
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{set-group-by}
@see{set-group-distinct}
@see{set-select}
@see{reset-filters}
@see{reset-group-by}
@see{reset-overrides}
@see{last-warning}
@see{max-query-time}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric set-id-range (client min max)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[min]{minimum id to start searching from}
@arg[max]{maximum id to stop searching at}
@return{client}
@short{Set the id-range to search within (inclusive).}
Set the range of IDs within which to search. The range is inclusive:
with [0, 450], documents with IDs 0 and 450 will both be found.
"))
(defmethod set-id-range ((client sphinx-client) min max)
(assert (and (numberp min) (numberp max)
(>= max min)))
(setf (min-id client) min)
(setf (max-id client) max))
(defgeneric set-filter (client attribute values-list &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[values-list]{the numeric values to filter on}
@arg[exclude]{if set, exclude the given values}
@return{client}
@short{Sets the results to be filtered on the given attribute.}
@begin{pre}
(set-filter client \"filter_attr\" '(0 2 4 34 55 77))
(set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
@end{pre}
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that match the filter.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-filter ((client sphinx-client) attr values &key (exclude ()))
(assert (and (listp values) (> (length values) 0)))
(dolist (item values)
(assert (numberp item)))
(push `(,+sph-filter-values+ ,attr ,values ,(cond (exclude 1) (t 0))) (filters client))
client)
(defgeneric set-filter-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-range client \"filter_attr\" 45 99)
(set-filter-range client \"other_attr\" 2 8 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude ()))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
(defgeneric set-geo-anchor (client latitude-attribute latitude longitude-attribute longitude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[latitude-attribute]{the latitude attribute name}
@arg[latitude]{latitude in radians}
@arg[longitude-attribute]{the longitude attribute name}
@arg[longitude]{longitude in radians}
@return{client}
@short{Setup anchor point for geolocation.}
@begin{pre}
(set-geo-anchor client \"latitude_attr\" 45.231 \"longitude_attribute\" 4.5)
@end{pre}
Setup anchor point for using geosphere distance calculations in
filters and sorting. Distance will be computed with respect to
this point, and will be included in result output.
To actually use this to filter on results a certain distance from
the anchor point, use something like:
@begin{pre}
(set-filter-float-range sph \"@@geodist\" 0 5000)
@end{pre}
This will filter the results to be closer than 5 km from the anchor
point.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod set-geo-anchor ((client sphinx-client) lat-attr lat lon-attr lon)
(assert (and (stringp lat-attr) (stringp lon-attr) (numberp lat) (numberp lon)))
(setf (geo-anchor client) (list lat-attr lat lon-attr lon))
client)
(defgeneric set-group-by (client attribute function &optional group-sort)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute name to group by}
@arg[function]{the grouping function to use}
@arg[group-sort]{the sorting clause for group-by}
@return{client}
@short{Set grouping options.}
@see{set-group-by}
@see{set-group-distinct}
@begin{pre}
(set-group-by client \"whatever_attr\" +sph-groupby-attr+ \"group asc\")
(set-group-by client \"date_attr\" +sph-groupby-day+)
@end{pre}
Sets attribute and function of results grouping.
In grouping mode, all matches are assigned to different groups based on
grouping function value. Each group keeps track of the total match
count, and the best match (in this group) according to current sorting
function. The final result set contains one best match per group, with
grouping function value and matches count attached.
@code{attribute} is any valid attribute. Use @fun{reset-group-by}
to disable grouping.
@code{function} is one of:
@begin{dl}
@dt[+sph-groupby-day+]{Group by day (assumes timestamp type attribute
of form YYYYMMDD)}
@dt[+sph-groupby-week+]{Group by week (assumes timestamp type attribute
of form YYYYNNN)}
@dt[+sph-groupby-month+]{Group by month (assumes timestamp type
attribute of form YYYYMM)}
@dt[+sph-groupby-year+]{Group by year (assumes timestamp type attribute
of form YYYY)}
@dt[+sph-groupby-attr+]{Group by attribute value}
@dt[+sph-groupby-attrpair+]{Group by two attributes, being the given
attribute and the attribute that immediately follows it in the sequence
of indexed attributes. The specified attribute may therefore not be the
last of the indexed attributes}
@end{dl}
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal
Sphinx attributes:
@begin{dl}
@dt[@id]{document ID}
@dt[@weight, @rank, @relevance]{match weight}
@dt[@group]{group by function value}
@dt[@count]{number of matches in group}
@end{dl}
The default mode is to sort by group-by value in descending order,
ie. by \"@@group desc\".
In the results set, @code{total-found} contains the total amount of
matching groups over the whole index.
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported
in @code{total-found} than actually present. @code{count} might
also be underestimated.
For example, if sorting by relevance and grouping by a \"published\"
attribute with +sph-groupby-day+ function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (ie. recent days first).
"))
(defmethod set-group-by ((client sphinx-client) attr func &optional sort)
(assert (and (stringp attr) (stringp sort) (find func +sph-sort-functions+)))
(setf (group-by client) attr)
(setf (group-function client) func)
(setf (group-sort client) sort)
client)
(defgeneric set-group-distinct (client attribute)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to use for count-distinct queries}
@return{client}
@short{Set count-distinct attribute for group-by queries.}
@see{set-group-by}
@see{set-group-distinct}
@see{reset-group-by}
"))
(defmethod set-group-distinct ((client sphinx-client) attribute)
(assert (stringp attribute))
(setf (group-distinct client) attribute)
client)
(defgeneric set-override (client attribute type values)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to override}
@arg[type]{the attribute type as defined in Sphinx config}
@arg[values]{an alist mapping document IDs to attribute values}
@return{client}
@short{Set attribute values overrides.}
There can be only one override per attribute.
@code{values} must be an alist that maps document IDs to attribute
values.
@begin{pre}
(set-override client \"test_attr\" +sph-attr-integer+ '((4314 . 3) (2443 . 2)))
@end{pre}
In the example above, for the document with ID 4314, Sphinx will see an
attribute value for the @code{attribute} called 'test_attr' of 3. And
for the document with ID 2443 it will see 2, while the rest will be what
it was when the indexer was last run.
"))
(defmethod set-override ((client sphinx-client) attribute type values)
(assert (and (stringp attribute) (find type +sph-attr-types+) (listp values)))
(push (cons attribute values) (overrides client))
client)
(defgeneric set-select (client select)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[select]{the select string}
@return{client}
@short{Set the select clause.}
Sets the select clause, listing specific attributes to fetch, and
expressions to compute and fetch. Clause syntax mimics SQL.
The select clause is very similar to the part of a typical SQL query
between @code{SELECT} and @code{FROM}. It lets you choose what
attributes (columns) to fetch, and also what expressions over the
columns to compute and fetch. A difference from SQL is that expressions
must always be aliased to a correct identifier (consisting of letters
and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
computation results can be returned under a 'normal' name in the result
set, used in other clauses, etc.
Everything else is basically identical to SQL. Star ('*') is supported.
Functions are supported. An arbitrary number of expressions is supported.
Computed expressions can be used for sorting, filtering, and grouping,
just as the regular attributes.
Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
using GROUP BY.
Examples:
@begin{pre}
(set-select sph \"*, (user_karma+ln(pageviews))*0.1 AS myweight\" )
(set-select sph \"exp_years, salary_gbp*{$gbp_usd_rate@} AS salary_usd, IF(age>40,1,0) AS over40\" )
(set-select sph \"*, AVG(price) AS avgprice\" )
@end{pre}
"))
(defmethod set-select ((client sphinx-client) select)
(assert (stringp select))
(setf (select client) select)
client)
(defgeneric reset-filters (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Reset the filters.}
Clear all filters, including the geolocation anchor point.
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{set-geo-anchor}
@see{reset-filters}
"))
(defmethod reset-filters ((client sphinx-client))
(setf (filters client) ())
(setf (geo-anchor client) ())
client)
(defgeneric reset-group-by (client)
|
thijs/cl-sphinx-search
|
2f38603d166bc15ea82bf18a09d9703ca3ac63a9
|
Fixing docs after atdoc fix for escaping @'s
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 8e5d381..2b78553 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,1169 +1,1220 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(match-mode
:accessor match-mode
:initarg :match-mode
:initform +sph-match-all+
:documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of lists")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(geo-anchor
:accessor geo-anchor
:initarg :geo-anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(rank-mode
:accessor rank-mode
:initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute value overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
- @see{query}
- @see{add-query}
- @see{run-queries}
- @see{last-error}
- @see{last-warning}
@see{set-id-range}
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
+ @see{set-geo-anchor}
+ @see{set-group-by}
+ @see{set-group-distinct}
+ @see{set-select}
+ @see{reset-filters}
+ @see{reset-group-by}
+ @see{reset-overrides}
+ @see{last-warning}
@see{max-query-time}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric set-id-range (client min max)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[min]{minimum id to start searching from}
@arg[max]{maximum id to stop searching at}
@return{client}
@short{Set the id-range to search within (inclusive).}
Set the range of IDs within which to search. The range is inclusive:
with [0, 450], documents with IDs 0 and 450 will both be found.
"))
(defmethod set-id-range ((client sphinx-client) min max)
(assert (and (numberp min) (numberp max)
(>= max min)))
(setf (min-id client) min)
(setf (max-id client) max))
(defgeneric set-filter (client attribute values-list &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[values-list]{the numeric values to filter on}
@arg[exclude]{if set, exclude the given values}
@return{client}
@short{Sets the results to be filtered on the given attribute.}
@begin{pre}
(set-filter client \"filter_attr\" '(0 2 4 34 55 77))
(set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
@end{pre}
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that match the filter.
+
+
+ @see{set-filter}
+ @see{set-filter-range}
+ @see{set-filter-float-range}
+ @see{set-geo-anchor}
+ @see{reset-filters}
"))
(defmethod set-filter ((client sphinx-client) attr values &key (exclude ()))
(assert (and (listp values) (> (length values) 0)))
(dolist (item values)
(assert (numberp item)))
(push `(,+sph-filter-values+ ,attr ,values ,(cond (exclude 1) (t 0))) (filters client))
client)
(defgeneric set-filter-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-range client \"filter_attr\" 45 99)
(set-filter-range client \"other_attr\" 2 8 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
+
+
+ @see{set-filter}
+ @see{set-filter-range}
+ @see{set-filter-float-range}
+ @see{set-geo-anchor}
+ @see{reset-filters}
"))
(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
+
+
+ @see{set-filter}
+ @see{set-filter-range}
+ @see{set-filter-float-range}
+ @see{set-geo-anchor}
+ @see{reset-filters}
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude ()))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
(defgeneric set-geo-anchor (client latitude-attribute latitude longitude-attribute longitude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[latitude-attribute]{the latitude attribute name}
@arg[latitude]{latitude in radians}
@arg[longitude-attribute]{the longitude attribute name}
@arg[longitude]{longitude in radians}
@return{client}
@short{Setup anchor point for geolocation.}
@begin{pre}
(set-geo-anchor client \"latitude_attr\" 45.231 \"longitude_attribute\" 4.5)
@end{pre}
Setup anchor point for using geosphere distance calculations in
filters and sorting. Distance will be computed with respect to
this point, and will be included in result output.
To actually use this to filter on results a certain distance from
the anchor point, use something like:
@begin{pre}
- (set-filter-float-range sph \"geodist\" 0 5000)
+ (set-filter-float-range sph \"@@geodist\" 0 5000)
@end{pre}
This will filter the results to be closer than 5 km from the anchor
point.
+
+
+ @see{set-filter}
+ @see{set-filter-range}
+ @see{set-filter-float-range}
+ @see{set-geo-anchor}
+ @see{reset-filters}
"))
(defmethod set-geo-anchor ((client sphinx-client) lat-attr lat lon-attr lon)
(assert (and (stringp lat-attr) (stringp lon-attr) (numberp lat) (numberp lon)))
(setf (geo-anchor client) (list lat-attr lat lon-attr lon))
client)
(defgeneric set-group-by (client attribute function &optional group-sort)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute name to group by}
@arg[function]{the grouping function to use}
@arg[group-sort]{the sorting clause for group-by}
@return{client}
@short{Set grouping options.}
+ @see{set-group-by}
+ @see{set-group-distinct}
+
@begin{pre}
(set-group-by client \"whatever_attr\" +sph-groupby-attr+ \"group asc\")
(set-group-by client \"date_attr\" +sph-groupby-day+)
@end{pre}
Sets attribute and function of results grouping.
In grouping mode, all matches are assigned to different groups based on
grouping function value. Each group keeps track of the total match
count, and the best match (in this group) according to current sorting
function. The final result set contains one best match per group, with
grouping function value and matches count attached.
@code{attribute} is any valid attribute. Use @fun{reset-group-by}
to disable grouping.
@code{function} is one of:
@begin{dl}
@dt[+sph-groupby-day+]{Group by day (assumes timestamp type attribute
of form YYYYMMDD)}
@dt[+sph-groupby-week+]{Group by week (assumes timestamp type attribute
of form YYYYNNN)}
@dt[+sph-groupby-month+]{Group by month (assumes timestamp type
attribute of form YYYYMM)}
@dt[+sph-groupby-year+]{Group by year (assumes timestamp type attribute
of form YYYY)}
@dt[+sph-groupby-attr+]{Group by attribute value}
@dt[+sph-groupby-attrpair+]{Group by two attributes, being the given
attribute and the attribute that immediately follows it in the sequence
of indexed attributes. The specified attribute may therefore not be the
last of the indexed attributes}
@end{dl}
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal
Sphinx attributes:
@begin{dl}
- @dt[id]{document ID}
- @dt[weight, rank, relevance]{match weight}
- @dt[group]{group by function value}
- @dt[count]{number of matches in group}
+ @dt[@id]{document ID}
+ @dt[@weight, @rank, @relevance]{match weight}
+ @dt[@group]{group by function value}
+ @dt[@count]{number of matches in group}
@end{dl}
The default mode is to sort by group-by value in descending order,
- ie. by \"group desc\".
+ ie. by \"@@group desc\".
In the results set, @code{total-found} contains the total amount of
matching groups over the whole index.
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported
in @code{total-found} than actually present. @code{count} might
also be underestimated.
For example, if sorting by relevance and grouping by a \"published\"
attribute with +sph-groupby-day+ function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (ie. recent days first).
"))
(defmethod set-group-by ((client sphinx-client) attr func &optional (sort "@group desc"))
(assert (and (stringp attr) (stringp sort) (find func +sph-sort-functions+)))
(setf (group-by client) attr)
(setf (group-function client) func)
(setf (group-sort client) sort)
client)
(defgeneric set-group-distinct (client attribute)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to use for count-distinct queries}
@return{client}
@short{Set count-distinct attribute for group-by queries.}
+
+
+ @see{set-group-by}
+ @see{set-group-distinct}
+ @see{reset-group-by}
"))
(defmethod set-group-distinct ((client sphinx-client) attribute)
(assert (stringp attribute))
(setf (group-distinct client) attribute)
client)
(defgeneric set-override (client attribute type values)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to override}
@arg[type]{the attribute type as defined in Sphinx config}
@arg[values]{an alist mapping document IDs to attribute values}
@return{client}
@short{Set attribute values overrides.}
There can be only one override per attribute.
@code{values} must be an alist that maps document IDs to attribute
values.
@begin{pre}
(set-override client \"test_attr\" +sph-attr-integer+ '((4314 . 3) (2443 . 2)))
@end{pre}
In the example above, for the document with ID 4314, Sphinx will see an
attribute value for the @code{attribute} called 'test_attr' of 3. And
for the document with ID 2443 it will see 2, while the rest will be what
it was when the indexer was last run.
"))
(defmethod set-override ((client sphinx-client) attribute type values)
(assert (and (stringp attribute) (find type +sph-attr-types+) (listp values)))
(push (cons attribute values) (overrides client))
client)
(defgeneric set-select (client select)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[select]{the select string}
@return{client}
@short{Set the select clause.}
Sets the select clause, listing specific attributes to fetch, and
expressions to compute and fetch. Clause syntax mimics SQL.
The select clause is very similar to the part of a typical SQL query
between @code{SELECT} and @code{FROM}. It lets you choose what
attributes (columns) to fetch, and also what expressions over the
columns to compute and fetch. A difference from SQL is that expressions
must always be aliased to a correct identifier (consisting of letters
and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
computation results can be returned under a 'normal' name in the result
set, used in other clauses, etc.
Everything else is basically identical to SQL. Star ('*') is supported.
Functions are supported. An arbitrary number of expressions is supported.
Computed expressions can be used for sorting, filtering, and grouping,
just as the regular attributes.
Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
using GROUP BY.
Examples:
@begin{pre}
(set-select sph \"*, (user_karma+ln(pageviews))*0.1 AS myweight\" )
(set-select sph \"exp_years, salary_gbp*{$gbp_usd_rate@} AS salary_usd, IF(age>40,1,0) AS over40\" )
(set-select sph \"*, AVG(price) AS avgprice\" )
@end{pre}
"))
(defmethod set-select ((client sphinx-client) select)
(assert (stringp select))
(setf (select client) select)
client)
(defgeneric reset-filters (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Reset the filters.}
Clear all filters, including the geolocation anchor point.
+
+
+ @see{set-filter}
+ @see{set-filter-range}
+ @see{set-filter-float-range}
+ @see{set-geo-anchor}
+ @see{reset-filters}
"))
(defmethod reset-filters ((client sphinx-client))
(setf (filters client) ())
(setf (geo-anchor client) ())
client)
(defgeneric reset-group-by (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Clear all the group-by settings.}
+
+
+ @see{set-group-by}
+ @see{set-group-distinct}
+ @see{reset-group-by}
"))
(defmethod reset-group-by ((client sphinx-client))
(setf (group-by client) "")
(setf (group-function client) +sph-groupby-day+)
(setf (group-sort client) "@group desc")
(setf (group-distinct client) "")
client)
(defgeneric reset-overrides (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{client}
@short{Clear all attribute value overrides.}
"))
(defmethod reset-overrides ((client sphinx-client))
(setf (overrides client) ())
client)
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table mapping attribute names to their types}
@dt[fields]{a list of fields}
@dt[matches]{a list of hash-tables, one per matching document}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
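;; Usage sketch (the index name "test1" is hypothetical; a reachable
;; searchd is assumed).  The result keys are the symbols documented above:
;;
;; (let* ((sph (make-instance 'sphinx-client :host "localhost" :port 3312))
;;        (result (query sph "hello" :index "test1")))
;;   (if result
;;       (format t "~a of ~a matches in ~as~%"
;;               (gethash 'total result)
;;               (gethash 'total-found result)
;;               (gethash 'time result))
;;       (format t "query failed: ~a~%" (last-error sph))))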
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the queries collected via @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table mapping attribute names to their types}
@dt[fields]{a list of fields}
@dt[matches]{a list of hash-tables, one per matching document}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response n-requests)))))))
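;; Wire note on the request built above: a big-endian uint16 command
;; code, a uint16 command version, then a length-counted body ("N/a*")
;; whose first uint32 is the number of queued queries, followed by the
;; per-query blobs assembled by add-query.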
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible, and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((geo-anchor client)
(concatenate 'string
(pack "N/a*" (first (geo-anchor client)))
(pack "N/a*" (third (geo-anchor client)))
(%pack-float (second (geo-anchor client)))
(%pack-float (fourth (geo-anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
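;; Batching sketch: the same full-text query computed once server-side,
;; grouped two different ways (the index name "test1" is assumed; the
;; group sort argument is passed explicitly for clarity):
;;
;; (let ((sph (make-instance 'sphinx-client)))
;;   (set-group-by sph "published" +sph-groupby-day+ "@group desc")
;;   (add-query sph "lisp" :index "test1")
;;   (set-group-by sph "author_id" +sph-groupby-attr+ "@count desc")
;;   (add-query sph "lisp" :index "test1")
;;   (run-queries sph))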
(defmethod %connect ((client sphinx-client))
#+SPHINX-SEARCH-DEBUG (format t "socket is: ~a~%" (%socket client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
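;; Handshake, as implemented above: searchd greets with a 4-byte
;; big-endian protocol version; anything >= 1 is accepted, and the
;; client replies with its own version, (pack "N" 1), before sending
;; the actual command payload.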
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(setf (%socket client) ())
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
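;; Reply layout decoded above: an 8-byte header of two big-endian
;; uint16s (status, version) and a uint32 body length ("n2N").  On
;; +searchd-warning+ the body starts with a length-counted warning
;; string, which is why (+ 4 warn-length) characters are stripped
;; before the remainder is handed to the parser.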
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
(nreverse results)))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
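;; Each match in the returned list is an EQUAL hash-table keyed by the
;; strings "doc" and "weight", plus one entry per attribute name.
;; Consuming sketch (result as produced by query above):
;;
;; (dolist (m (gethash 'matches result))
;;   (format t "doc ~a weight ~a~%"
;;           (gethash "doc" m) (gethash "weight" m)))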
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
  ;; NOTE: maphash always returns NIL, so the packed entries are
  ;; accumulated in an output string instead of being handed to
  ;; CONCATENATE (which would silently drop them).
  (when (hash-table-p overrides)
    (with-output-to-string (packed)
      (maphash #'(lambda (k entry)
                   (declare (ignore k))
                   (format packed "~a~a"
                           (pack "N/a*" (gethash 'attr entry))
                           (pack "NN" (gethash 'type entry)
                                 (hash-table-count (gethash 'values entry))))
                   (maphash #'(lambda (id v)
                                (assert (and (numberp id) (numberp v)))
                                (format packed "~a~a"
                                        (pack "Q>" id)
                                        (cond ((eql (gethash 'type entry) +sph-attr-float+)
                                               (%pack-float v))
                                              ((eql (gethash 'type entry) +sph-attr-bigint+)
                                               (pack "q>" v))
                                              (t
                                               (pack "N" v)))))
                            (gethash 'values entry)))
               overrides))))
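;; A sketch of the `overrides' shape this function expects, inferred
;; from the accessors above (the attribute name and values are made up):
;;
;;   (let ((entry (make-hash-table))
;;         (vals (make-hash-table)))
;;     (setf (gethash 123 vals) 42
;;           (gethash 'attr entry) "group_id"
;;           (gethash 'type entry) +sph-attr-integer+
;;           (gethash 'values entry) vals)
;;     (let ((overrides (make-hash-table :test 'equal)))
;;       (setf (gethash "group_id" overrides) entry)
;;       (%pack-overrides overrides)))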
(defun %pack-filters (filters)
  (with-output-to-string (packed-filters)
    (dolist (filter filters)
      (let ((type (first filter))
            (attr (second filter))
            (last-el 3))
        (format packed-filters "~a~a~a~a"
                (pack "N/a*" attr)
                (pack "N" type)
                (cond ((eql type +sph-filter-values+)
                       (%pack-list-signed-quads (third filter)))
                      ((eql type +sph-filter-range+)
                       ;; Bump the exclude-flag index *before* packing so
                       ;; this clause returns the packed range, not the
                       ;; value of INCF.
                       (incf last-el)
                       (concatenate 'string
                                    (pack "q>" (third filter))
                                    (pack "q>" (fourth filter))))
                      ((eql type +sph-filter-floatrange+)
                       (incf last-el)
                       (concatenate 'string
                                    (%pack-float (third filter))
                                    (%pack-float (fourth filter))))
                      (t
                       (error "Unhandled filter type ~S" type)))
                (pack "N" (nth last-el filter)))))))
(defun %pack-hash (hash-table)
  ;; (when (hash-table-count ...)) was always true, since 0 is non-NIL
  ;; in Lisp, and maphash returns NIL anyway; write the packed pairs to
  ;; an output string instead.
  (with-output-to-string (packed)
    (format packed "~a" (pack "N" (hash-table-count hash-table)))
    (maphash #'(lambda (k v)
                 (format packed "~a" (pack "N/a*N" k v)))
             hash-table)))
(defun %pack-list-signed-quads (values-list)
(with-output-to-string (packed-list)
(format packed-list "~a" (pack "N" (length values-list)))
(dolist (value values-list)
(format packed-list "~a" (pack "q>" value)))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
diff --git a/doc/.atdoc.xml b/doc/.atdoc.xml
index fe29559..a639340 100644
--- a/doc/.atdoc.xml
+++ b/doc/.atdoc.xml
@@ -1,148 +1,148 @@
<?xml version="1.0" encoding="UTF-8"?>
<documentation include-internal-symbols-p="yes" index-title="Sphinx Search API reference" css="index.css" heading="Common Lisp Sphinx Search API"><package name="cl-sphinx-search" id="cl-sphinx-search"><documentation-string>This package provides an interface to the search daemon (<em>searchd</em>) for <a a="http://www.sphinxsearch.com/">Sphinx</a>.<break/> <section section="About Sphinx"><break/>
From the site:<break/> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<break/>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<break/>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> </section><break/> <section section="Synopsis"> <pre><break/>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<break/> </pre> </section><break/> <section section="One class">
There is just one class:<break/> <aboutclass>sphinx-client</aboutclass> </section><break/> <section section="Methods">
Setting options/parameters:<break/> <aboutfun>set-server</aboutfun> <aboutfun>set-limits</aboutfun><break/>
Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a a="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<break/> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><break/>
Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/> This documentation was generated by <a a="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
- the documentation generation system written by David Lichteblau.<break/> </section></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__reset-group-by" name="reset-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all the group-by settings.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-id-range" name="set-id-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>min</elt><elt>max</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="min">minimum id to start searching from</arg> <arg arg="max">maximum id to stop searching at</arg> <return>client</return> <short>Set the id-range to search within (inclusive).</short><break/>
- Set the range of id's within which to search. Range is inclusive, so setting
- [0, 450] both 0 and 450 id's will be found.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-geo-anchor" name="set-geo-anchor" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>latitude-attribute</elt><elt>latitude</elt><elt>longitude-attribute</elt><elt>longitude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="latitude-attribute">the latitude attribute name</arg> <arg arg="latitude">latitude in radians</arg> <arg arg="longitude-attribute">the longitude attribute name</arg> <arg arg="longitude">longitude in radians</arg> <return>client</return> <short>Setup anchor point for geolocation.</short><break/> <pre>
- (set-geo-anchor client "latitude_attr" 45.231 "longitude_attribute" 4.5) </pre><break/>
- Setup anchor point for using geosphere distance calculations in
- filters and sorting. Distance will be computed with respect to
- this point, and will be included in result output.<break/>
- To actually use this to filter on results a certain distance from
- the anchor point, use something like:<break/> <pre>
- (set-filter-float-range sph "geodist" 0 5000) </pre><break/>
- This will filter the results to be closer than 5 km from the anchor
- point.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-float-range" name="set-filter-float-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
+ the documentation generation system written by David Lichteblau.<break/> </section></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-filter-float-range" name="set-filter-float-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt><elt>(exclude nil)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
(set-filter-float-range client "filter_attr" 45.231 99)
(set-filter-float-range client "other_attr" 1.32 55.0031 :exclude t) </pre><break/>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <code>min</code> and <code>max</code> (including <code>min</code> and <code>max</code>)
will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that fall within the
- given range.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter" name="set-filter" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>values-list</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="values-list">the numeric values to filter on</arg> <arg arg="exclude">if set, exclude the given values</arg> <return>client</return> <short>Sets the results to be filtered on the given attribute.</short><break/> <pre>
- (set-filter client "filter_attr" '(0 2 4 34 55 77))
- (set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><break/>
- Sets the results to be filtered on the given attribute. Only
- results which have attributes matching the given (numeric)
- values will be returned.<break/>
- This may be called multiple times with different attributes to
- select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that match the filter.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
- (query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
- It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
- (add-query client "test")
- (add-query client "word")
- (run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
- It returns a list of hashes containing the result of each query. Each hash
- has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-filters" name="reset-filters" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Reset the filters.</short><break/>
- Clear all filters, including the geolocation anchor point.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
+ given range.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-filters" name="reset-filters" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Reset the filters.</short><break/>
+ Clear all filters, including the geolocation anchor point.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt><elt>(index *)</elt><elt>(comment )</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><break/>
Add a query to the queue of batched queries.<break/> Batch queries enable <code>searchd</code> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<break/>
For instance, running exactly the same query with different group-by settings will enable <code>searchd</code> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<break/>
It returns the new length of the query queue, which is also the index
- of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code>.</return><break/>
- Get the last error message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-overrides" name="reset-overrides" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all attribute value overrides.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a number; the max query time in milliseconds.</return><break/>
- Get the max query time.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
- (set-server client :host host :port port)
- (set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
- details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
- (optionally prefixed by 'unix://'), sets the client to access the
- searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-range" name="set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
+ of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-overrides" name="reset-overrides" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all attribute value overrides.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-select" name="set-select" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>select</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="select">the select string</arg> <return>client</return> <short>Set the select clause.</short><break/>
+ Sets the select clause, listing specific attributes to fetch, and
+ expressions to compute and fetch. Clause syntax mimics SQL.<break/>
+ The select clause is very similar to the part of a typical SQL query between <code>SELECT</code> and <code>FROM</code>. It lets you choose what
+ attributes (columns) to fetch, and also what expressions over the
+ columns to compute and fetch. A difference from SQL is that expressions
+ must always be aliased to a correct identifier (consisting of letters
+ and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
+ computation results can be returned under a 'normal' name in the result
+ set, used in other clauses, etc.<break/>
+ Everything else is basically identical to SQL. Star ('*') is supported.
+ Functions are supported. An arbitrary number of expressions is supported.
+ Computed expressions can be used for sorting, filtering, and grouping,
+ just like regular attributes.<break/>
+ Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
+ using GROUP BY.<break/>
+ Examples:<break/> <pre>
+ (set-select sph "*, (user_karma+ln(pageviews))*0.1 AS myweight" )
+ (set-select sph "exp_years, salary_gbp*{$gbp_usd_rate} AS salary_usd, IF(age>40,1,0) AS over40" )
+ (set-select sph "*, AVG(price) AS avgprice" ) </pre></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt><elt>(index *)</elt><elt>(comment )</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
+ (query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
+ It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
+ (add-query client "test")
+ (add-query client "word")
+ (run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
+ It returns a list of hashes containing the result of each query. Each hash
+ has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt><elt>(offset 0)</elt><elt>(max 1000)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
+ (set-limits client :limit limit)
+ (set-limits client :offset offset :limit limit)
+ (set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
+ Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-range" name="set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt><elt>(exclude nil)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
(set-filter-range client "filter_attr" 45 99)
(set-filter-range client "other_attr" 2 8 :exclude t) </pre><break/>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <code>min</code> and <code>max</code> (including <code>min</code> and <code>max</code>)
will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that fall within the
- given range.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code>.</return><break/>
- Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-by" name="set-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>function</elt><elt>&optional</elt><elt>group-sort</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute name to group by</arg> <arg arg="function">the grouping function to use</arg> <arg arg="group-sort">the sorting clause for group-by</arg> <return>client</return> <short>Set grouping options.</short><break/> <pre>
+ given range.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code>.</return><break/>
+ Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt><elt>(host localhost)</elt><elt>(port 3312)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
+ (set-server client :host host :port port)
+ (set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
+ details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
+ (optionally prefixed by 'unix://'), sets the client to access the
+ searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-by" name="set-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>function</elt><elt>&optional</elt><elt>group-sort</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute name to group by</arg> <arg arg="function">the grouping function to use</arg> <arg arg="group-sort">the sorting clause for group-by</arg> <return>client</return> <short>Set grouping options.</short><break/> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see><break/> <pre>
(set-group-by client "whatever_attr" +sph-groupby-attr+ "group asc")
(set-group-by client "date_attr" +sph-groupby-day+) </pre><break/>
Sets attribute and function of results grouping.<break/>
In grouping mode, all matches are assigned to different groups based on
grouping function value. Each group keeps track of the total match
count, and the best match (in this group) according to current sorting
function. The final result set contains one best match per group, with
grouping function value and matches count attached.<break/> <code>attribute</code> is any valid attribute. Use <fun id="cl-sphinx-search__fun__reset-group-by">reset-group-by</fun>
to disable grouping.<break/> <code>function</code> is one of:<break/> <dl> <dt dt="+sph-groupby-day+">Group by day (assumes timestamp type attribute of form YYYYMMDD)</dt> <dt dt="+sph-groupby-week+">Group by week (assumes timestamp type attribute of form YYYYNNN)</dt> <dt dt="+sph-groupby-month+">Group by month (assumes timestamp type attribute of form YYYYMM)</dt> <dt dt="+sph-groupby-year+">Group by year (assumes timestamp type attribute of form YYYY)</dt> <dt dt="+sph-groupby-attr+">Group by attribute value</dt> <dt dt="+sph-groupby-attrpair+">Group by two attributes, being the given
attribute and the attribute that immediately follows it in the sequence
of indexed attributes. The specified attribute may therefore not be the last of the indexed attributes</dt> </dl><break/>
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal
- Sphinx attributes:<break/> <dl> <dt dt="id">document ID</dt> <dt dt="weight, rank, relevance">match weight</dt> <dt dt="group">group by function value</dt> <dt dt="count">number of matches in group</dt> </dl><break/>
+ Sphinx attributes:<break/> <dl> <dt dt="@id">document ID</dt> <dt dt="@weight, @rank, @relevance">match weight</dt> <dt dt="@group">group by function value</dt> <dt dt="@count">number of matches in group</dt> </dl><break/>
The default mode is to sort by group-by value in descending order,
- ie. by "group desc".<break/> In the results set, <code>total-found</code> contains the total amount of
+ ie. by "@group desc".<break/> In the results set, <code>total-found</code> contains the total amount of
matching groups over the whole index.<break/>
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported in <code>total-found</code> than actually present. <code>count</code> might
also be underestimated.<break/>
For example, if sorting by relevance and grouping by a "published"
attribute with +sph-groupby-day+ function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
- and sorted by day number in descending order (ie. recent days first).</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-distinct" name="set-group-distinct" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to use for count-distinct queries</arg> <return>client</return> <short>Set count-distinct attribute for group-by queries.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
- (set-limits client :limit limit)
- (set-limits client :offset offset :limit limit)
- (set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
- Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-select" name="set-select" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>select</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="select">the select string</arg> <return>client</return> <short>Set the select clause.</short><break/>
- Sets the select clause, listing specific attributes to fetch, and
- expressions to compute and fetch. Clause syntax mimics SQL.<break/>
- The select clause is very similar to the part of a typical SQL query between <code>SELECT</code> and <code>FROM</code>. It lets you choose what
- attributes (columns) to fetch, and also what expressions over the
- columns to compute and fetch. A difference from SQL is that expressions
- must always be aliased to a correct identifier (consisting of letters
- and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
- computation results can be returned under a 'normal' name in the result
- set, used in other clauses, etc.<break/>
- Everything else is basically identical to SQL. Star ('*') is supported.
- Functions are supported. Arbitrary amount of expressions is supported.
- Computed expressions can be used for sorting, filtering, and grouping,
- just as the regular attributes.<break/>
- Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
- using GROUP BY.<break/>
- Examples:<break/> <pre>
- (set-select sph "*, (user_karma+ln(pageviews))*0.1 AS myweight" )
- (set-select sph "exp_years, salary_gbp*{$gbp_usd_rate} AS salary_usd, IF(age>40,1,0) AS over40" )
- (set-select sph "*, AVG(price) AS avgprice" ) </pre></documentation-string></function-definition></external-symbols><internal-symbols><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___set-filter-range" name="%set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>type</elt><elt>attr</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-functions+" name="+sph-sort-functions+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
+ and sorted by day number in descending order (ie. recent days first).</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-distinct" name="set-group-distinct" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to use for count-distinct queries</arg> <return>client</return> <short>Set count-distinct attribute for group-by queries.</short><break/> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see> <see id="cl-sphinx-search__fun__reset-group-by">reset-group-by</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code>.</return><break/>
+ Get the last error message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a number; the max query time in milliseconds.</return><break/>
+ Get the max query time.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-id-range" name="set-id-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>min</elt><elt>max</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="min">minimum id to start searching from</arg> <arg arg="max">maximum id to stop searching at</arg> <return>client</return> <short>Set the id-range to search within (inclusive).</short><break/>
+ Set the range of ids within which to search. The range is inclusive, so with
+ [0, 450] both 0 and 450 id's will be found.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter" name="set-filter" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>values-list</elt><elt>&key</elt><elt>exclude</elt><elt>(exclude nil)</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="values-list">the numeric values to filter on</arg> <arg arg="exclude">if set, exclude the given values</arg> <return>client</return> <short>Sets the results to be filtered on the given attribute.</short><break/> <pre>
+ (set-filter client "filter_attr" '(0 2 4 34 55 77))
+ (set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><break/>
+ Sets the results to be filtered on the given attribute. Only
+ results which have attributes matching the given (numeric)
+ values will be returned.<break/>
+ This may be called multiple times with different attributes to
+ select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that match the filter.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-group-by" name="reset-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all the group-by settings.</short><break/> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see> <see id="cl-sphinx-search__fun__reset-group-by">reset-group-by</see></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-geo-anchor" name="set-geo-anchor" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>latitude-attribute</elt><elt>latitude</elt><elt>longitude-attribute</elt><elt>longitude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="latitude-attribute">the latitude attribute name</arg> <arg arg="latitude">latitude in radians</arg> <arg arg="longitude-attribute">the longitude attribute name</arg> <arg arg="longitude">longitude in radians</arg> <return>client</return> <short>Setup anchor point for geolocation.</short><break/> <pre>
+ (set-geo-anchor client "latitude_attr" 45.231 "longitude_attribute" 4.5) </pre><break/>
+ Setup anchor point for using geosphere distance calculations in
+ filters and sorting. Distance will be computed with respect to
+ this point, and will be included in result output.<break/>
+ To actually use this to filter on results a certain distance from
+ the anchor point, use something like:<break/> <pre>
+ (set-filter-float-range sph "@geodist" 0 5000) </pre><break/>
+ This will filter the results to be closer than 5 km from the anchor
+ point.<break/> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see></documentation-string></function-definition></external-symbols><internal-symbols><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___set-filter-range" name="%set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>type</elt><elt>attr</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>(exclude nil)</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-functions+" name="+sph-sort-functions+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><break/>
The interface to the search daemon goes through this class.<break/>
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__query">query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
- Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see> <see id="cl-sphinx-search__fun__last-error">last-error</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see> <see id="cl-sphinx-search__fun__set-id-range">set-id-range</see> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__max-query-time">max-query-time</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__geo-anchor" name="geo-anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" 
name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__match-mode" name="match-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" 
name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-list-signed-quads" name="%pack-list-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition 
id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__rank-mode" name="rank-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__set-override" name="set-override" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>type</elt><elt>values</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to override</arg> <arg arg="type">the attribute type as defined in Sphinx config</arg> <arg arg="values">an alist mapping document IDs to attribute values</arg> <return>client</return> <short>Set attribute values overrides.</short><break/>
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__set-id-range">set-id-range</see> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__set-geo-anchor">set-geo-anchor</see> <see id="cl-sphinx-search__fun__set-group-by">set-group-by</see> <see id="cl-sphinx-search__fun__set-group-distinct">set-group-distinct</see> <see id="cl-sphinx-search__fun__set-select">set-select</see> <see id="cl-sphinx-search__fun__reset-filters">reset-filters</see> <see id="cl-sphinx-search__fun__reset-group-by">reset-group-by</see> <see id="cl-sphinx-search__fun__reset-overrides">reset-overrides</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see> <see id="cl-sphinx-search__fun__max-query-time">max-query-time</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__geo-anchor" name="geo-anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash"
package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__match-mode" 
name="match-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-list-signed-quads" name="%pack-list-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" 
name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__rank-mode" name="rank-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__set-override" name="set-override" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>type</elt><elt>values</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to override</arg> <arg arg="type">the attribute type as defined in Sphinx config</arg> <arg arg="values">an alist mapping document IDs to attribute values</arg> <return>client</return> <short>Set attribute values overrides.</short><break/>
There can be only one override per attribute.<break/> <code>values</code> must be an alist that maps document IDs to attribute
values.<break/> <pre>
(set-override client "test_attr" +sph-attr-integer+ '((4314 . 3) (2443 . 2))) </pre><break/>
In the example above, for the document with ID 4314, Sphinx will see a value of 3 for the <code>attribute</code> named 'test_attr', and
for the document with ID 2443 it will see 2; all other documents keep the
values they had when the indexer was last run.</documentation-string></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
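As a minimal usage sketch for the override machinery documented above: the snippet below queues an override and then runs a single query, so the override applies to that search. The index name "docs" is an illustrative assumption, and the host/port values simply restate the documented defaults.
<pre>
;; Hedged sketch: apply an attribute override, then run one query.
;; "docs" is a hypothetical index name, not part of the library.
(let ((client (make-instance 'sphinx-client :host "localhost" :port 3312)))
  ;; While this query runs, document 4314 is seen with test_attr = 3.
  (set-override client "test_attr" +sph-attr-integer+ '((4314 . 3)))
  (query client "test" :index "docs"))
</pre>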
diff --git a/doc/pages/cl-sphinx-search.html b/doc/pages/cl-sphinx-search.html
index 2306aa2..fa9b45d 100644
--- a/doc/pages/cl-sphinx-search.html
+++ b/doc/pages/cl-sphinx-search.html
@@ -1,33 +1,33 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package cl-sphinx-search</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
Package
cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> </div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
From the site:<br><br> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<br><br>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<br><br>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
There is just one class:<br><br> <div class="def"><a href="cl-sphinx-search__class__sphinx-client.html">
Class
sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="cl-sphinx-search__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
- Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
- Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
+ Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key (host "localhost") (port 3312) path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key (offset 0) limit (max 1000) cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
+ Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key (index "*") (comment ""))</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key (index "*") (comment ""))</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a href="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<br><br> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><br><br>
The Python API supplied with the Sphinx Search v0.9.9-rc2 source download, in the <tt>api/</tt> directory, was also used.<br><br> <b>Documentation</b><br><br> This documentation was generated by <a href="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
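To make the option setters listed above concrete, here is a minimal sketch that chains them before a single query. The limit value of 50 is illustrative; the other values restate the documented defaults (host "localhost", port 3312, offset 0, max 1000).
<pre>
;; Hedged sketch: configure the client, then search.
(let ((client (make-instance 'sphinx-client)))
  (set-server client :host "localhost" :port 3312)  ; documented defaults
  (set-limits client :offset 0 :limit 50 :max 1000) ; :limit 50 is illustrative
  (query client "test"))
</pre>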
diff --git a/doc/pages/cl-sphinx-search__class__sphinx-client.html b/doc/pages/cl-sphinx-search__class__sphinx-client.html
index 5c1c65c..f541464 100644
--- a/doc/pages/cl-sphinx-search__class__sphinx-client.html
+++ b/doc/pages/cl-sphinx-search__class__sphinx-client.html
@@ -1,18 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Class sphinx-client</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Class sphinx-client</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Superclasses</h3><div class="indent"><tt style="color: #777777">common-lisp:standard-object</tt>, <tt style="color: #777777">sb-pcl::slot-object</tt>, <tt style="color: #777777">common-lisp:t</tt></div><h3>Documented Subclasses</h3><div class="indent">
None
</div><h3>Details<a name="details"></a></h3><div class="indent">The sphinx-search class.<br><br> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><br><br>
The interface to the search daemon goes through this class.<br><br>
Set the options and parameters of the search on an object
of this class, then either perform one search by calling <a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, or queue a number of queries using <a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a> and run them by calling <a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>.<br><br>
- Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a> function.<br><br> </div></div></td><td valign="top" width="5%">
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a> function.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun___set-filter-range.html b/doc/pages/cl-sphinx-search__fun___set-filter-range.html
index d4e675f..6203bab 100644
--- a/doc/pages/cl-sphinx-search__fun___set-filter-range.html
+++ b/doc/pages/cl-sphinx-search__fun___set-filter-range.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %set-filter-range</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- %set-filter-range</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%set-filter-range</tt> (<b>client</b>&nbsp;<b>type</b>&nbsp;<b>attr</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>exclude</b>)</div><p style="color: red; font-weight: bold">
+ %set-filter-range</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%set-filter-range</tt> (<b>client</b>&nbsp;<b>type</b>&nbsp;<b>attr</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>(exclude nil)</b>)</div><p style="color: red; font-weight: bold">
No documentation string. Possibly unimplemented or incomplete.
</p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__add-query.html b/doc/pages/cl-sphinx-search__fun__add-query.html
index 1edbbdf..eedaeaf 100644
--- a/doc/pages/cl-sphinx-search__fun__add-query.html
+++ b/doc/pages/cl-sphinx-search__fun__add-query.html
@@ -1,20 +1,20 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function add-query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- add-query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>add-query</tt> (<b>client</b>&nbsp;<b>query</b>&nbsp;<b>&key</b>&nbsp;<b>index</b>&nbsp;<b>comment</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">length of query queue</div><h3>Details<a name="details"></a></h3><div class="indent"> Add a query to a batch request.<br><br> <pre>
+ add-query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>add-query</tt> (<b>client</b>&nbsp;<b>query</b>&nbsp;<b>&key</b>&nbsp;<b>(index "*")</b>&nbsp;<b>(comment "")</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">length of query queue</div><h3>Details<a name="details"></a></h3><div class="indent"> Add a query to a batch request.<br><br> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><br><br>
Add a query to the queue of batched queries.<br><br> Batch queries enable <tt>searchd</tt> to perform internal optimizations
where possible, and reduce network connection overhead in all cases.<br><br>
For instance, running exactly the same query with different group-by settings will enable <tt>searchd</tt> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<br><br>
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.<br><br> <br><br></div></div></td><td valign="top" width="5%">
&nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
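The batching behaviour described above can be sketched as follows: both queries go out in one network round trip when run-queries is called, and the return value is a list with one result hash per queued query. The :comment value is illustrative.
<pre>
;; Hedged sketch of the batch pattern: queue queries, then run once.
(let ((client (make-instance 'sphinx-client)))
  (add-query client "test" :index "*" :comment "first query") ; "*" is the default index
  (add-query client "word" :index "*")
  (run-queries client)) ; one round trip, a list of result hashes
</pre>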
diff --git a/doc/pages/cl-sphinx-search__fun__query.html b/doc/pages/cl-sphinx-search__fun__query.html
index fbd9e34..33d76f1 100644
--- a/doc/pages/cl-sphinx-search__fun__query.html
+++ b/doc/pages/cl-sphinx-search__fun__query.html
@@ -1,12 +1,12 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>query</tt> (<b>client</b>&nbsp;<b>query</b>&nbsp;<b>&key</b>&nbsp;<b>index</b>&nbsp;<b>comment</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">nil or a hash containing the query results</div><h3>Details<a name="details"></a></h3><div class="indent"> Run a query through <tt>searchd</tt>.<br><br> <pre>
+ query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>query</tt> (<b>client</b>&nbsp;<b>query</b>&nbsp;<b>&key</b>&nbsp;<b>(index "*")</b>&nbsp;<b>(comment "")</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">nil or a hash containing the query results</div><h3>Details<a name="details"></a></h3><div class="indent"> Run a query through <tt>searchd</tt>.<br><br> <pre>
(query client "test") </pre><br><br> Query <tt>searchd</tt>. This method runs a single query through <tt>searchd</tt>.<br><br>
It returns the results in a hash with the following keys: <dl><dt>attributes</dt><dd> : a hash-table containing attributes</dd><dt>fields</dt><dd> : a list of fields</dd><dt>matches</dt><dd> : a hash-table containing the matches</dd><dt>status</dt><dd> : the status returned by <tt>searchd</tt></dd><dt>status-message</dt><dd> : the status message returned by <tt>searchd</tt></dd><dt>time</dt><dd> : the time <tt>searchd</tt> took for the query</dd><dt>total</dt><dd> : the total matches returned</dd><dt>total-found</dt><dd> : the total number of matches found</dd><dt>words</dt><dd> : a hash-table containing the matching words with their statistics</dd></dl><br style="clear: both;"><br><br> <br><br></div></div></td><td valign="top" width="5%">
&nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
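A minimal sketch of consuming the documented result hash follows. The page above does not show how the keys are represented (strings, symbols or keywords), so the 'total-found key below is an assumption to verify against a real result.
<pre>
;; Hedged sketch: run one query and read a documented result key.
;; ASSUMPTION: keys are symbols such as 'total-found; adjust if your
;; build uses strings or keywords instead.
(let* ((client (make-instance 'sphinx-client))
       (result (query client "test")))
  (when result
    (gethash 'total-found result)))
</pre>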
diff --git a/doc/pages/cl-sphinx-search__fun__reset-filters.html b/doc/pages/cl-sphinx-search__fun__reset-filters.html
index 481ae0b..c9d69f6 100644
--- a/doc/pages/cl-sphinx-search__fun__reset-filters.html
+++ b/doc/pages/cl-sphinx-search__fun__reset-filters.html
@@ -1,11 +1,11 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function reset-filters</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
reset-filters</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reset-filters</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Reset the filters.<br><br>
- Clear all filters, including the geolocation anchor point.</div></div></td><td valign="top" width="5%">
+ Clear all filters, including the geolocation anchor point.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__reset-group-by.html b/doc/pages/cl-sphinx-search__fun__reset-group-by.html
index bfdaa15..4f3f4df 100644
--- a/doc/pages/cl-sphinx-search__fun__reset-group-by.html
+++ b/doc/pages/cl-sphinx-search__fun__reset-group-by.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function reset-group-by</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- reset-group-by</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reset-group-by</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Clear all the group-by settings.</div></div></td><td valign="top" width="5%">
+ reset-group-by</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reset-group-by</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Clear all the group-by settings.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html b/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html
index 6b073c9..64d67e1 100644
--- a/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html
+++ b/doc/pages/cl-sphinx-search__fun__set-filter-float-range.html
@@ -1,18 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-filter-float-range</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-filter-float-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-float-range</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>exclude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
+ set-filter-float-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-float-range</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>(exclude nil)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
(set-filter-float-range client "filter_attr" 45.231 99)
(set-filter-float-range client "other_attr" 1.32 55.0031 :exclude t) </pre><br><br>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <tt>min</tt> and <tt>max</tt> (including <tt>min</tt> and <tt>max</tt>)
will be returned.<br><br>
This may be called multiple times with different attributes to
select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that fall within the
- given range.</div></div></td><td valign="top" width="5%">
+ given range.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
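Since the documentation above notes that range filters may be stacked across attributes, a short sketch: both filters below constrain the same subsequent query. The attribute names "price" and "year" are illustrative, not part of the library.
<pre>
;; Hedged sketch: stack a float range and an integer range filter.
(let ((client (make-instance 'sphinx-client)))
  (set-filter-float-range client "price" 1.0 99.5)
  (set-filter-range client "year" 2000 2009)
  (query client "test")) ; both filters apply to this query
</pre>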
diff --git a/doc/pages/cl-sphinx-search__fun__set-filter-range.html b/doc/pages/cl-sphinx-search__fun__set-filter-range.html
index 5457652..12c7cb4 100644
--- a/doc/pages/cl-sphinx-search__fun__set-filter-range.html
+++ b/doc/pages/cl-sphinx-search__fun__set-filter-range.html
@@ -1,18 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-filter-range</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-filter-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-range</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>exclude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
+ set-filter-range</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter-range</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>min</b>&nbsp;<b>max</b>&nbsp;<b>&key</b>&nbsp;<b>(exclude nil)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>min</tt> -- start of the range to filter on</li><li><tt>max</tt> -- end of the range to filter on</li><li><tt>exclude</tt> -- if set, exclude the given range</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given range.<br><br> <pre>
(set-filter-range client "filter_attr" 45 99)
(set-filter-range client "other_attr" 2 8 :exclude t) </pre><br><br>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <tt>min</tt> and <tt>max</tt> (including <tt>min</tt> and <tt>max</tt>)
will be returned.<br><br>
This may be called multiple times with different attributes to
select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that fall within the
- given range.</div></div></td><td valign="top" width="5%">
+ given range.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-filter.html b/doc/pages/cl-sphinx-search__fun__set-filter.html
index 66f211d..3b80d3a 100644
--- a/doc/pages/cl-sphinx-search__fun__set-filter.html
+++ b/doc/pages/cl-sphinx-search__fun__set-filter.html
@@ -1,17 +1,17 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-filter</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-filter</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>values-list</b>&nbsp;<b>&key</b>&nbsp;<b>exclude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>values-list</tt> -- the numeric values to filter on</li><li><tt>exclude</tt> -- if set, exclude the given values</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given attribute.<br><br> <pre>
+ set-filter</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-filter</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>values-list</b>&nbsp;<b>&key</b>&nbsp;<b>(exclude nil)</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to filter on</li><li><tt>values-list</tt> -- the numeric values to filter on</li><li><tt>exclude</tt> -- if set, exclude the given values</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Sets the results to be filtered on the given attribute.<br><br> <pre>
(set-filter client "filter_attr" '(0 2 4 34 55 77))
(set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><br><br>
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.<br><br>
This may be called multiple times with different attributes to
- select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that match the filter.</div></div></td><td valign="top" width="5%">
+ select on multiple attributes.<br><br> If <tt>:exclude</tt> is set, excludes results that match the filter.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html b/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html
index ae1bf74..41a6bcb 100644
--- a/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html
+++ b/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html
@@ -1,19 +1,19 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-geo-anchor</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
set-geo-anchor</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-geo-anchor</tt> (<b>client</b>&nbsp;<b>latitude-attribute</b>&nbsp;<b>latitude</b>&nbsp;<b>longitude-attribute</b>&nbsp;<b>longitude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>latitude-attribute</tt> -- the latitude attribute name</li><li><tt>latitude</tt> -- latitude in radians</li><li><tt>longitude-attribute</tt> -- the longitude attribute name</li><li><tt>longitude</tt> -- longitude in radians</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Setup anchor point for geolocation.<br><br> <pre>
(set-geo-anchor client "latitude_attr" 45.231 "longitude_attribute" 4.5) </pre><br><br>
Setup anchor point for using geosphere distance calculations in
filters and sorting. Distance will be computed with respect to
this point, and will be included in result output.<br><br>
To actually use this to filter on results a certain distance from
the anchor point, use something like:<br><br> <pre>
- (set-filter-float-range sph "geodist" 0 5000) </pre><br><br>
+ (set-filter-float-range sph "@geodist" 0 5000) </pre><br><br>
This will filter the results to be closer than 5 km from the anchor
- point.</div></div></td><td valign="top" width="5%">
+ point.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-group-by.html b/doc/pages/cl-sphinx-search__fun__set-group-by.html
index 8f7d6fe..afdb81e 100644
--- a/doc/pages/cl-sphinx-search__fun__set-group-by.html
+++ b/doc/pages/cl-sphinx-search__fun__set-group-by.html
@@ -1,35 +1,35 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-group-by</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-group-by</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-group-by</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>function</b>&nbsp;<b>&optional</b>&nbsp;<b>group-sort</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute name to group by</li><li><tt>function</tt> -- the grouping function to use</li><li><tt>group-sort</tt> -- the sorting clause for group-by</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set grouping options.<br><br> <pre>
+ set-group-by</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-group-by</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>function</b>&nbsp;<b>&optional</b>&nbsp;<b>group-sort</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute name to group by</li><li><tt>function</tt> -- the grouping function to use</li><li><tt>group-sort</tt> -- the sorting clause for group-by</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set grouping options.<br><br> <br><br> <pre>
(set-group-by client "whatever_attr" +sph-groupby-attr+ "group asc")
(set-group-by client "date_attr" +sph-groupby-day+) </pre><br><br>
Sets attribute and function of results grouping.<br><br>
In grouping mode, all matches are assigned to different groups based on
grouping function value. Each group keeps track of the total match
count, and the best match (in this group) according to current sorting
function. The final result set contains one best match per group, with
grouping function value and matches count attached.<br><br> <tt>attribute</tt> is any valid attribute. Use <a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>
to disable grouping.<br><br> <tt>function</tt> is one of:<br><br> <dl><dt>+sph-groupby-day+</dt><dd> : Group by day (assumes timestamp type attribute of form YYYYMMDD)</dd><dt>+sph-groupby-week+</dt><dd> : Group by week (assumes timestamp type attribute of form YYYYNNN)</dd><dt>+sph-groupby-month+</dt><dd> : Group by month (assumes timestamp type attribute of form YYYYMM)</dd><dt>+sph-groupby-year+</dt><dd> : Group by year (assumes timestamp type attribute of form YYYY)</dd><dt>+sph-groupby-attr+</dt><dd> : Group by attribute value</dd><dt>+sph-groupby-attrpair+</dt><dd> : Group by two attributes, being the given
attribute and the attribute that immediately follows it in the sequence
of indexed attributes. The specified attribute may therefore not be the last of the indexed attributes</dd></dl><br style="clear: both;"><br><br>
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal
- Sphinx attributes:<br><br> <dl><dt>id</dt><dd> : document ID</dd><dt>weight, rank, relevance</dt><dd> : match weight</dd><dt>group</dt><dd> : group by function value</dd><dt>count</dt><dd> : number of matches in group</dd></dl><br style="clear: both;"><br><br>
+ Sphinx attributes:<br><br> <dl><dt>@id</dt><dd> : document ID</dd><dt>@weight, @rank, @relevance</dt><dd> : match weight</dd><dt>@group</dt><dd> : group by function value</dd><dt>@count</dt><dd> : number of matches in group</dd></dl><br style="clear: both;"><br><br>
The default mode is to sort by group-by value in descending order,
- ie. by "group desc".<br><br> In the results set, <tt>total-found</tt> contains the total amount of
+ ie. by "@group desc".<br><br> In the results set, <tt>total-found</tt> contains the total amount of
matching groups over the whole index.<br><br>
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported in <tt>total-found</tt> than actually present. <tt>count</tt> might
also be underestimated.<br><br>
For example, if sorting by relevance and grouping by a "published"
attribute with +sph-groupby-day+ function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (i.e. recent days first).</div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-group-distinct.html b/doc/pages/cl-sphinx-search__fun__set-group-distinct.html
index 1649693..d045555 100644
--- a/doc/pages/cl-sphinx-search__fun__set-group-distinct.html
+++ b/doc/pages/cl-sphinx-search__fun__set-group-distinct.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-group-distinct</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-group-distinct</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-group-distinct</tt> (<b>client</b>&nbsp;<b>attribute</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to use for count-distinct queries</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set count-distinct attribute for group-by queries.</div></div></td><td valign="top" width="5%">
+ set-group-distinct</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-group-distinct</tt> (<b>client</b>&nbsp;<b>attribute</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to use for count-distinct queries</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set count-distinct attribute for group-by queries.<br><br> </div></div></td><td valign="top" width="5%">
&nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-limits.html b/doc/pages/cl-sphinx-search__fun__set-limits.html
index 3524f0a..37b777d 100644
--- a/doc/pages/cl-sphinx-search__fun__set-limits.html
+++ b/doc/pages/cl-sphinx-search__fun__set-limits.html
@@ -1,14 +1,14 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-limits</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 Â
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-limits</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-limits</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>offset</b>&nbsp;<b>limit</b>&nbsp;<b>max</b>&nbsp;<b>cutoff</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>offset</tt> -- the offset to start returning matches from</li><li><tt>limit</tt> -- how many matches to return starting from <tt>offset</tt></li><li><tt>max</tt> -- maximum number of matches to return</li><li><tt>cutoff</tt> -- the cutoff to stop searching at</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the offset, limit, cutoff and max matches to return.<br><br> <pre>
+ set-limits</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-limits</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>(offset 0)</b>&nbsp;<b>limit</b>&nbsp;<b>(max 1000)</b>&nbsp;<b>cutoff</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>offset</tt> -- the offset to start returning matches from</li><li><tt>limit</tt> -- how many matches to return starting from <tt>offset</tt></li><li><tt>max</tt> -- maximum number of matches to return</li><li><tt>cutoff</tt> -- the cutoff to stop searching at</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the offset, limit, cutoff and max matches to return.<br><br> <pre>
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches) </pre><br><br>
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</div></div></td><td valign="top" width="5%">
&nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-server.html b/doc/pages/cl-sphinx-search__fun__set-server.html
index 0e0922d..e69a5fb 100644
--- a/doc/pages/cl-sphinx-search__fun__set-server.html
+++ b/doc/pages/cl-sphinx-search__fun__set-server.html
@@ -1,15 +1,15 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-server</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- set-server</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-server</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>host</b>&nbsp;<b>port</b>&nbsp;<b>path</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>host</tt> -- the host to connect to when using an INET socket</li><li><tt>port</tt> -- the port to connect to when using an INET socket</li><li><tt>path</tt> -- the path to the unix domain socket when not using INET</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the server host:port or path to connect to.<br><br> <pre>
+ set-server</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-server</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>(host "localhost")</b>&nbsp;<b>(port 3312)</b>&nbsp;<b>path</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>host</tt> -- the host to connect to when using an INET socket</li><li><tt>port</tt> -- the port to connect to when using an INET socket</li><li><tt>path</tt> -- the path to the unix domain socket when not using INET</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the server host:port or path to connect to.<br><br> <pre>
(set-server client :host host :port port)
(set-server client :path unix-path) </pre><br><br> In the first form, sets the <tt>host</tt> (string) and <tt>port</tt> (integer)
details for the searchd server using a network (INET) socket.<br><br> In the second form, where <tt>unix-path</tt> is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.</div></div></td><td valign="top" width="5%">
&nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
|
thijs/cl-sphinx-search
|
617180d7780d9cddfbdbb10d820aee9108a7a0fe
|
Add a number of methods; nearing workable state
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index c6a4f81..8e5d381 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,968 +1,1169 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(match-mode
:accessor match-mode
:initarg :match-mode
:initform +sph-match-all+
:documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of lists")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
- (anchor
- :accessor anchor
- :initarg :anchor
+ (geo-anchor
+ :accessor geo-anchor
+ :initarg :geo-anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(rank-mode
:accessor rank-mode
:initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
- :initform (make-hash-table)
- :documentation "per-query attribute values overrides")
+ :initform ()
+ :documentation "per-query attribute value overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then call @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{query}
@see{add-query}
@see{run-queries}
@see{last-error}
@see{last-warning}
@see{set-id-range}
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{max-query-time}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 7)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric set-id-range (client min max)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[min]{minimum id to start searching from}
@arg[max]{maximum id to stop searching at}
@return{client}
@short{Set the id-range to search within (inclusive).}
Set the range of IDs within which to search. The range is inclusive, so with
[0, 450] both IDs 0 and 450 will be found.
"))
(defmethod set-id-range ((client sphinx-client) min max)
(assert (and (numberp min) (numberp max)
(>= max min)))
(setf (min-id client) min)
(setf (max-id client) max)
client)
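;; Example (mirroring the docstring): match only document IDs in the
;; inclusive range [0, 450]:
;; (set-id-range client 0 450)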
(defgeneric set-filter (client attribute values-list &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[values-list]{the numeric values to filter on}
@arg[exclude]{if set, exclude the given values}
@return{client}
@short{Sets the results to be filtered on the given attribute.}
@begin{pre}
(set-filter client \"filter_attr\" '(0 2 4 34 55 77))
(set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
@end{pre}
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that match the filter.
"))
(defmethod set-filter ((client sphinx-client) attr values &key (exclude ()))
(assert (and (listp values) (> (length values) 0)))
(dolist (item values)
(assert (numberp item)))
(push `(,+sph-filter-values+ ,attr ,values ,(cond (exclude 1) (t 0))) (filters client))
client)
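;; For illustration: (set-filter client "filter_attr" '(0 2 4)) pushes a
;; list of the form (<value of +sph-filter-values+> "filter_attr" (0 2 4) 0)
;; onto (filters client); the last element becomes 1 when :exclude is set.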
-;; (let ((filter (make-hash-table)))
-;; (setf (gethash 'type filter) +sph-filter-values+)
-;; (setf (gethash 'attr filter) attr)
-;; (setf (gethash 'values filter) values)
-;; (setf (gethash 'exclude filter) (cond (exclude 1)
-;; (t 0)))
-;; (push filter (filters client))
-;; client))
-
(defgeneric set-filter-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-range client \"filter_attr\" 45 99)
(set-filter-range client \"other_attr\" 2 8 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
-;; (assert (and (numberp min) (numberp max) (>= max min)))
-;; (let ((filter (make-hash-table)))
-;; (setf (gethash 'type filter) +sph-filter-range+)
-;; (setf (gethash 'attr filter) attr)
-;; (setf (gethash 'min filter) min)
-;; (setf (gethash 'max filter) max)
-;; (setf (gethash 'exclude filter) (cond (exclude 1)
-;; (t 0)))
-;; (push filter (filters client))
-;; client))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude ()))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
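;; For illustration (the attribute name "year" is only an example):
;; (set-filter-range client "year" 2000 2009) pushes
;; (<value of +sph-filter-range+> "year" 2000 2009 0) onto (filters client).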
-;; (let ((filter (make-hash-table)))
-;; (setf (gethash 'type filter) type)
-;; (setf (gethash 'attr filter) attr)
-;; (setf (gethash 'min filter) min)
-;; (setf (gethash 'max filter) max)
-;; (setf (gethash 'exclude filter) (cond (exclude 1)
-;; (t 0)))
-;; (push filter (filters client))
-;; client))
-
-;; (defgeneric (client )
-;; (:documentation
-;; "@arg[client]{a @class{sphinx-client}}
-;; @arg[]{}
-;; @return{}
-;; @short{.}
-
-;; .
-;; "))
-
-;; (defmethod ((client sphinx-client) )
-;; )
+
+(defgeneric set-geo-anchor (client latitude-attribute latitude longitude-attribute longitude)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[latitude-attribute]{the latitude attribute name}
+ @arg[latitude]{latitude in radians}
+ @arg[longitude-attribute]{the longitude attribute name}
+ @arg[longitude]{longitude in radians}
+ @return{client}
+ @short{Setup anchor point for geolocation.}
+
+ @begin{pre}
+ (set-geo-anchor client \"latitude_attr\" 45.231 \"longitude_attribute\" 4.5)
+ @end{pre}
+
+ Setup anchor point for using geosphere distance calculations in
+ filters and sorting. Distance will be computed with respect to
+ this point, and will be included in result output.
+
+ To actually use this to filter on results a certain distance from
+ the anchor point, use something like:
+
+ @begin{pre}
+ (set-filter-float-range sph \"geodist\" 0 5000)
+ @end{pre}
+
+ This will filter the results to be closer than 5 km from the anchor
+ point.
+"))
+
+(defmethod set-geo-anchor ((client sphinx-client) lat-attr lat lon-attr lon)
+ (assert (and (stringp lat-attr) (stringp lon-attr) (numberp lat) (numberp lon)))
+ (setf (geo-anchor client) (list lat-attr lat lon-attr lon))
+ client)
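+;; The anchor is kept as (lat-attr lat lon-attr lon); add-query packs
+;; these four elements into the search request when an anchor is set.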
+
+
+(defgeneric set-group-by (client attribute function &optional group-sort)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[attribute]{the attribute name to group by}
+ @arg[function]{the grouping function to use}
+ @arg[group-sort]{the sorting clause for group-by}
+ @return{client}
+ @short{Set grouping options.}
+
+ @begin{pre}
+ (set-group-by client \"whatever_attr\" +sph-groupby-attr+ \"group asc\")
+ (set-group-by client \"date_attr\" +sph-groupby-day+)
+ @end{pre}
+
+ Sets attribute and function of results grouping.
+
+ In grouping mode, all matches are assigned to different groups based on
+ grouping function value. Each group keeps track of the total match
+ count, and the best match (in this group) according to current sorting
+ function. The final result set contains one best match per group, with
+ grouping function value and matches count attached.
+
+ @code{attribute} is any valid attribute. Use @fun{reset-group-by}
+ to disable grouping.
+
+ @code{function} is one of:
+
+ @begin{dl}
+ @dt[+sph-groupby-day+]{Group by day (assumes timestamp type attribute
+ of form YYYYMMDD)}
+ @dt[+sph-groupby-week+]{Group by week (assumes timestamp type attribute
+ of form YYYYNNN)}
+ @dt[+sph-groupby-month+]{Group by month (assumes timestamp type
+ attribute of form YYYYMM)}
+ @dt[+sph-groupby-year+]{Group by year (assumes timestamp type attribute
+ of form YYYY)}
+ @dt[+sph-groupby-attr+]{Group by attribute value}
+ @dt[+sph-groupby-attrpair+]{Group by two attributes, being the given
+ attribute and the attribute that immediately follows it in the sequence
+ of indexed attributes. The specified attribute may therefore not be the
+ last of the indexed attributes}
+ @end{dl}
+
+ Groups in the set of results can be sorted by any SQL-like sorting clause,
+ including both document attributes and the following special internal
+ Sphinx attributes:
+
+ @begin{dl}
+ @dt[id]{document ID}
+ @dt[weight, rank, relevance]{match weight}
+ @dt[group]{group by function value}
+ @dt[count]{number of matches in group}
+ @end{dl}
+
+ The default mode is to sort by group-by value in descending order,
+ ie. by \"group desc\".
+
+ In the results set, @code{total-found} contains the total amount of
+ matching groups over the whole index.
+
+ WARNING: grouping is done in fixed memory and thus its results
+ are only approximate; so there might be more groups reported
+ in @code{total-found} than actually present. @code{count} might
+ also be underestimated.
+
+ For example, if sorting by relevance and grouping by a \"published\"
+ attribute with +sph-groupby-day+ function, then the result set will
+ contain only the most relevant match for each day when there were any
+ matches published, with day number and per-day match count attached,
+ and sorted by day number in descending order (i.e. recent days first).
+"))
+
+(defmethod set-group-by ((client sphinx-client) attr func &optional (sort "@group desc"))
+ (assert (and (stringp attr) (stringp sort) (find func +sph-sort-functions+)))
+ (setf (group-by client) attr)
+ (setf (group-function client) func)
+ (setf (group-sort client) sort)
+ client)
+
+
+(defgeneric set-group-distinct (client attribute)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[attribute]{the attribute to use for count-distinct queries}
+ @return{client}
+ @short{Set count-distinct attribute for group-by queries.}
+"))
+
+(defmethod set-group-distinct ((client sphinx-client) attribute)
+ (assert (stringp attribute))
+ (setf (group-distinct client) attribute)
+ client)
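+;; Example (attribute name is illustrative):
+;; (set-group-distinct client "category_attr")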
+
+
+(defgeneric set-override (client attribute type values)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[attribute]{the attribute to override}
+ @arg[type]{the attribute type as defined in Sphinx config}
+ @arg[values]{an alist mapping document IDs to attribute values}
+ @return{client}
+ @short{Set attribute values overrides.}
+
+ There can be only one override per attribute.
+
+ @code{values} must be an alist that maps document IDs to attribute
+ values.
+
+ @begin{pre}
+ (set-override client \"test_attr\" +sph-attr-integer+ '((4314 . 3) (2443 . 2)))
+ @end{pre}
+
+ In the example above, for the document with ID 4314, Sphinx will see an
+ attribute value of 3 for the @code{attribute} called 'test_attr'. For
+ the document with ID 2443 it will see 2, while the rest keep the values
+ from when the indexer was last run.
+"))
+
+(defmethod set-override ((client sphinx-client) attribute type values)
+ (assert (and (stringp attribute) (find type +sph-attr-types+) (listp values)))
+ (push (cons attribute values) (overrides client))
+ client)
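+;; Each override is stored as (attribute . values-alist); for the
+;; docstring example: ("test_attr" . ((4314 . 3) (2443 . 2))).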
+
+
+(defgeneric set-select (client select)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[select]{the select string}
+ @return{client}
+ @short{Set the select clause.}
+
+ Sets the select clause, listing specific attributes to fetch, and
+ expressions to compute and fetch. Clause syntax mimics SQL.
+
+ The select clause is very similar to the part of a typical SQL query
+ between @code{SELECT} and @code{FROM}. It lets you choose what
+ attributes (columns) to fetch, and also what expressions over the
+ columns to compute and fetch. A difference from SQL is that expressions
+ must always be aliased to a correct identifier (consisting of letters
+ and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
+ computation results can be returned under a 'normal' name in the result
+ set, used in other clauses, etc.
+
+ Everything else is basically identical to SQL. Star ('*') is supported.
+ Functions are supported. An arbitrary number of expressions is
+ supported. Computed expressions can be used for sorting, filtering,
+ and grouping, just like regular attributes.
+
+ Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
+ using GROUP BY.
+
+ Examples:
+
+ @begin{pre}
+ (set-select sph \"*, (user_karma+ln(pageviews))*0.1 AS myweight\" )
+ (set-select sph \"exp_years, salary_gbp*{$gbp_usd_rate@} AS salary_usd, IF(age>40,1,0) AS over40\" )
+ (set-select sph \"*, AVG(price) AS avgprice\" )
+ @end{pre}
+"))
+
+(defmethod set-select ((client sphinx-client) select)
+ (assert (stringp select))
+ (setf (select client) select)
+ client)
+
+
+(defgeneric reset-filters (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{client}
+ @short{Reset the filters.}
+
+ Clear all filters, including the geolocation anchor point.
+"))
+
+(defmethod reset-filters ((client sphinx-client))
+ (setf (filters client) ())
+ (setf (geo-anchor client) ())
+ client)
+
+
+(defgeneric reset-group-by (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{client}
+ @short{Clear all the group-by settings.}
+"))
+
+(defmethod reset-group-by ((client sphinx-client))
+ (setf (group-by client) "")
+ (setf (group-function client) +sph-groupby-day+)
+ (setf (group-sort client) "@group desc")
+ (setf (group-distinct client) "")
+ client)
+
+
+(defgeneric reset-overrides (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{client}
+ @short{Clear all attribute value overrides.}
+"))
+
+(defmethod reset-overrides ((client sphinx-client))
+ (setf (overrides client) ())
+ client)
+
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
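;; Minimal usage sketch (result keys as per the docstring; the query
;; string is illustrative):
;; (let ((result (query client "test")))
;;   (if result
;;       (format t "~a of ~a matches~%"
;;               (gethash 'total result) (gethash 'total-found result))
;;       (format t "query failed: ~a~%" (last-error client))))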
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let* ((nreqs (length (reqs client)))
(requests (pack "Na*" nreqs (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
;; the request queue is cleared before sending, so keep the query
;; count in nreqs for parsing the multi-query response below
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response nreqs)))))))
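;; Batch sketch (query strings are illustrative); run-queries returns
;; one result hash per query added:
;; (progn
;;   (add-query client "test")
;;   (add-query client "word")
;;   (dolist (result (run-queries client))
;;     (format t "status ~a, found ~a~%"
;;             (gethash 'status result) (gethash 'total-found result))))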
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
where possible, and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
- (cond ((anchor client)
+ (cond ((geo-anchor client)
(concatenate 'string
- (pack "N/a*" (first (anchor client)))
- (pack "N/a*" (third (anchor client)))
- (%pack-float (second (anchor client)))
- (%pack-float (fourth (anchor client)))))
+ (pack "N/a*" (first (geo-anchor client)))
+ (pack "N/a*" (third (geo-anchor client)))
+ (%pack-float (second (geo-anchor client)))
+ (%pack-float (fourth (geo-anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
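
;; Batching sketch (illustrative; SPH is a sphinx-client as in the
;; docstring above): the same query text added with different group-by
;; settings lets searchd run the full-text pass only once. Assumes the
;; index has an attribute named "group_attr"; set-group-by is this API's
;; exported grouping setter.
;;
;;   (add-query sph "same query")
;;   (set-group-by sph "group_attr" +sph-groupby-attr+)
;;   (add-query sph "same query")
;;   (run-queries sph)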
(defmethod %connect ((client sphinx-client))
#+SPHINX-SEARCH-DEBUG (format t "socket is: ~a~%" (%socket client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
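  ;; Handshake: searchd first sends its protocol version as a 32-bit
  ;; big-endian integer; the client answers with its own version (1)
  ;; before issuing any commands.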
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
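    ;; Reply header (8 bytes): 16-bit status, 16-bit protocol version,
    ;; 32-bit body length, all in network byte order.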
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(setf (%socket client) ())
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
                   (if len
                       (setf (last-error client)
                             (format nil "failed to read searchd response (status=~a, ver=~a, len=~a, read=~a)"
                                     status version len done))
                       (setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
              (let ((warn-length (unpack "N" (subseq response 0 4))))
                (setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
                (subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
    (loop repeat n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
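
;; Traversal sketch (illustrative): given RESULT, one element of the list
;; returned by run-queries, the 'words entry maps each query word to a
;; hash with 'docs and 'hits counts.
;;
;;   (maphash #'(lambda (word info)
;;                (format t "~a: docs ~a, hits ~a~%"
;;                        word (gethash 'docs info) (gethash 'hits info)))
;;            (gethash 'words result))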
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
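
;; Access sketch (illustrative): each match hash built above is keyed by
;; the strings "doc" and "weight", plus one entry per attribute name.
;; RESULT is one element of the run-queries return value.
;;
;;   (dolist (match (gethash 'matches result))
;;     (format t "doc ~a, weight ~a~%"
;;             (gethash "doc" match) (gethash "weight" match)))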
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
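
;; %get-fields below reads the field-name list, which uses the same layout
;; as the attribute names above: a 32-bit count followed by length-prefixed
;; strings.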
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
  (when (hash-table-p overrides)
    ;; Accumulate into a stream: MAPHASH discards its return values, so the
    ;; packed entries must be written out explicitly.
    (with-output-to-string (packed)
      (maphash
       #'(lambda (k entry)
           (declare (ignore k))
           (format packed "~a~a"
                   (pack "N/a*" (gethash 'attr entry))
                   (pack "NN" (gethash 'type entry)
                         (hash-table-count (gethash 'values entry))))
           (maphash
            #'(lambda (id v)
                (assert (and (numberp id) (numberp v)))
                (format packed "~a~a"
                        (pack "Q>" id)
                        (cond ((eql (gethash 'type entry) +sph-attr-float+)
                               (%pack-float v))
                              ((eql (gethash 'type entry) +sph-attr-bigint+)
                               (pack "q>" v))
                              (t
                               (pack "N" v)))))
            (gethash 'values entry)))
       overrides))))
(defun %pack-filters (filters)
(with-output-to-string (packed-filters)
(dolist (filter filters)
(let ((type (first filter))
(attr (second filter))
(last-el 3))
(format packed-filters "~a~a~a~a"
(pack "N/a*" attr)
(pack "N" type)
(cond ((eql type +sph-filter-values+)
(%pack-list-signed-quads (third filter)))
((eql type +sph-filter-range+)
(concatenate 'string
(pack "q>" (third filter))
(pack "q>" (fourth filter)))
(incf last-el))
((eql type +sph-filter-floatrange+)
(concatenate 'string
(%pack-float (third filter))
(%pack-float (fourth filter)))
(incf last-el))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (nth last-el filter)))))))
(defun %pack-hash (hash-table)
  ;; Accumulate into a stream: MAPHASH discards its return values, so the
  ;; packed name/value pairs must be written out explicitly.
  (with-output-to-string (packed)
    (format packed "~a" (pack "N" (hash-table-count hash-table)))
    (maphash #'(lambda (k v)
                 (format packed "~a" (pack "N/a*N" k v)))
             hash-table)))
(defun %pack-list-signed-quads (values-list)
(with-output-to-string (packed-list)
(format packed-list "~a" (pack "N" (length values-list)))
(dolist (value values-list)
(format packed-list "~a" (pack "q>" value)))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
-
diff --git a/constants.lisp b/constants.lisp
index c974ad9..8ec0dcf 100644
--- a/constants.lisp
+++ b/constants.lisp
@@ -1,97 +1,104 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
;; known searchd commands
(defconstant +searchd-command-search+ 0)
(defconstant +searchd-command-excerpt+ 1)
(defconstant +searchd-command-update+ 2)
(defconstant +searchd-command-keywords+ 3)
(defconstant +searchd-command-persist+ 4)
;; current client-side command implementation versions
(defconstant +ver-command-search+ #x116)
(defconstant +ver-command-excerpt+ #x100)
(defconstant +ver-command-update+ #x101)
(defconstant +ver-command-keywords+ #x100)
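;; version words encode major.minor as (major << 8) | minor,
;; e.g. #x116 is version 1.22 of the search command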
;; known searchd status codes
(defconstant +searchd-ok+ 0)
(defconstant +searchd-error+ 1)
(defconstant +searchd-retry+ 2)
(defconstant +searchd-warning+ 3)
;; known match modes
(defconstant +sph-match-all+ 0)
(defconstant +sph-match-any+ 1)
(defconstant +sph-match-phrase+ 2)
(defconstant +sph-match-boolean+ 3)
(defconstant +sph-match-extended+ 4)
(defconstant +sph-match-fullscan+ 5)
(defconstant +sph-match-extended2+ 6)
;; known ranking modes (extended2 mode only)
(defconstant +sph-rank-proximity-bm25+ 0) ;; default mode, phrase proximity major factor and BM25 minor one
(defconstant +sph-rank-bm25+ 1) ;; statistical mode, BM25 ranking only (faster but worse quality)
(defconstant +sph-rank-none+ 2) ;; no ranking, all matches get a weight of 1
(defconstant +sph-rank-wordcount+ 3) ;; simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
;; known sort modes
(defconstant +sph-sort-relevance+ 0)
(defconstant +sph-sort-attr-desc+ 1)
(defconstant +sph-sort-attr-asc+ 2)
(defconstant +sph-sort-time-segments+ 3)
(defconstant +sph-sort-extended+ 4)
(defconstant +sph-sort-expr+ 5)
;; known filter types
(defconstant +sph-filter-values+ 0)
(defconstant +sph-filter-range+ 1)
(defconstant +sph-filter-floatrange+ 2)
;; known attribute types
(defconstant +sph-attr-none+ 0)
(defconstant +sph-attr-integer+ 1)
(defconstant +sph-attr-timestamp+ 2)
(defconstant +sph-attr-ordinal+ 3)
(defconstant +sph-attr-bool+ 4)
(defconstant +sph-attr-float+ 5)
(defconstant +sph-attr-bigint+ 6)
;; SPH_ATTR_MULTI = 0X40000000L
(defconstant +sph-attr-multi+ #x40000000)
;; SPH_ATTR_TYPES = (SPH_ATTR_NONE,
;; SPH_ATTR_INTEGER,
;; SPH_ATTR_TIMESTAMP,
;; SPH_ATTR_ORDINAL,
;; SPH_ATTR_BOOL,
;; SPH_ATTR_FLOAT,
;; SPH_ATTR_BIGINT,
;; SPH_ATTR_MULTI)
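;; DEFCONSTANT with a non-EQL value (e.g. a fresh list) is undefined on
;; redefinition, and SBCL signals an error when such a file is reloaded;
;; the idiom below keeps reloads idempotent.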
(defmacro define-constant (name value &optional doc)
`(defconstant ,name (if (boundp ',name) (symbol-value ',name) ,value)
,@(when doc (list doc))))
(define-constant +sph-attr-types+ (list +sph-attr-none+
+sph-attr-integer+
+sph-attr-timestamp+
+sph-attr-ordinal+
+sph-attr-bool+
+sph-attr-float+
+sph-attr-bigint+
+sph-attr-multi+))
;; known grouping functions
(defconstant +sph-groupby-day+ 0)
(defconstant +sph-groupby-week+ 1)
(defconstant +sph-groupby-month+ 2)
(defconstant +sph-groupby-year+ 3)
(defconstant +sph-groupby-attr+ 4)
(defconstant +sph-groupby-attrpair+ 5)
+(define-constant +sph-sort-functions+ (list +sph-groupby-day+
+ +sph-groupby-week+
+ +sph-groupby-month+
+ +sph-groupby-year+
+ +sph-groupby-attr+
+ +sph-groupby-attrpair+))
+
diff --git a/doc/.atdoc.xml b/doc/.atdoc.xml
index cc1e640..fe29559 100644
--- a/doc/.atdoc.xml
+++ b/doc/.atdoc.xml
@@ -1,88 +1,148 @@
<?xml version="1.0" encoding="UTF-8"?>
<documentation include-internal-symbols-p="yes" index-title="Sphinx Search API reference" css="index.css" heading="Common Lisp Sphinx Search API"><package name="cl-sphinx-search" id="cl-sphinx-search"><documentation-string>This package provides an interface to the search daemon (<em>searchd</em>) for <a a="http://www.sphinxsearch.com/">Sphinx</a>.<break/> <section section="About Sphinx"><break/>
From the site:<break/> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<break/>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<break/>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> </section><break/> <section section="Synopsis"> <pre><break/>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<break/> </pre> </section><break/> <section section="One class">
There is just one class:<break/> <aboutclass>sphinx-client</aboutclass> </section><break/> <section section="Methods">
Setting options/parameters:<break/> <aboutfun>set-server</aboutfun> <aboutfun>set-limits</aboutfun><break/>
Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a a="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<break/> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><break/>
Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/> This documentation was generated by <a a="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
- the documentation generation system written by David Lichteblau.<break/> </section><break/></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-filter-float-range" name="set-filter-float-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
+ the documentation generation system written by David Lichteblau.<break/> </section></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__reset-group-by" name="reset-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all the group-by settings.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-id-range" name="set-id-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>min</elt><elt>max</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="min">minimum id to start searching from</arg> <arg arg="max">maximum id to stop searching at</arg> <return>client</return> <short>Set the id-range to search within (inclusive).</short><break/>
+ Set the range of ids within which to search. The range is inclusive, so setting
+ [0, 450] means both ids 0 and 450 will be found.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-geo-anchor" name="set-geo-anchor" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>latitude-attribute</elt><elt>latitude</elt><elt>longitude-attribute</elt><elt>longitude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="latitude-attribute">the latitude attribute name</arg> <arg arg="latitude">latitude in radians</arg> <arg arg="longitude-attribute">the longitude attribute name</arg> <arg arg="longitude">longitude in radians</arg> <return>client</return> <short>Set up the anchor point for geolocation.</short><break/> <pre>
+ (set-geo-anchor client "latitude_attr" 45.231 "longitude_attribute" 4.5) </pre><break/>
+ Set up the anchor point for geosphere distance calculations in
+ filters and sorting. Distance will be computed with respect to
+ this point, and will be included in result output.<break/>
+ To actually use this to filter on results a certain distance from
+ the anchor point, use something like:<break/> <pre>
+ (set-filter-float-range sph "geodist" 0 5000) </pre><break/>
+ This will filter the results to be closer than 5 km from the anchor
+ point.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-float-range" name="set-filter-float-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
(set-filter-float-range client "filter_attr" 45.231 99)
(set-filter-float-range client "other_attr" 1.32 55.0031 :exclude t) </pre><break/>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <code>min</code> and <code>max</code> (including <code>min</code> and <code>max</code>)
will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that fall within the
- given range.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
- (set-server client :host host :port port)
- (set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
- details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
- (optionally prefixed by 'unix://'), sets the client to access the
- searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
+ given range.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter" name="set-filter" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>values-list</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="values-list">the numeric values to filter on</arg> <arg arg="exclude">if set, exclude the given values</arg> <return>client</return> <short>Sets the results to be filtered on the given attribute.</short><break/> <pre>
+ (set-filter client "filter_attr" '(0 2 4 34 55 77))
+ (set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><break/>
+ Sets the results to be filtered on the given attribute. Only
+ results which have attributes matching the given (numeric)
+ values will be returned.<break/>
+ This may be called multiple times with different attributes to
+ select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that match the filter.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
(query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
- It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
- (set-limits client :limit limit)
- (set-limits client :offset offset :limit limit)
- (set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
- Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
+ It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
(add-query client "test")
(add-query client "word")
(run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
It returns a list of hashes containing the result of each query. Each hash
- has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a number; the max query time in milliseconds.</return><break/>
- Get the max query time.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
+ has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-filters" name="reset-filters" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Reset the filters.</short><break/>
+ Clear all filters, including the geolocation anchor point.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><break/>
Add a query to the queue of batched queries.<break/> Batch queries enable <code>searchd</code> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<break/>
For instance, running exactly the same query with different group-by settings will enable <code>searchd</code> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<break/>
It returns the new length of the query queue, which is also the index
- of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-range" name="set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
+ of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code>.</return><break/>
+ Get the last error message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__reset-overrides" name="reset-overrides" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>client</return> <short>Clear all attribute value overrides.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a number; the max query time in milliseconds.</return><break/>
+ Get the max query time.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
+ (set-server client :host host :port port)
+ (set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
+ details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
+ (optionally prefixed by 'unix://'), sets the client to access the
+ searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter-range" name="set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="min">start of the range to filter on</arg> <arg arg="max">end of the range to filter on</arg> <arg arg="exclude">if set, exclude the given range</arg> <return>client</return> <short>Sets the results to be filtered on the given range.</short><break/> <pre>
(set-filter-range client "filter_attr" 45 99)
(set-filter-range client "other_attr" 2 8 :exclude t) </pre><break/>
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between <code>min</code> and <code>max</code> (including <code>min</code> and <code>max</code>)
will be returned.<break/>
This may be called multiple times with different attributes to
select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that fall within the
- given range.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code>.</return><break/>
- Get the last error message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code>.</return><break/>
- Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-id-range" name="set-id-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>min</elt><elt>max</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="min">minimum id to start searching from</arg> <arg arg="max">maximum id to stop searching at</arg> <return>client</return> <short>Set the id-range to search within (inclusive).</short><break/>
- Set the range of id's within which to search. Range is inclusive, so setting
- [0, 450] both 0 and 450 id's will be found.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-filter" name="set-filter" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>values-list</elt><elt>&key</elt><elt>exclude</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to filter on</arg> <arg arg="values-list">the numeric values to filter on</arg> <arg arg="exclude">if set, exclude the given values</arg> <return>client</return> <short>Sets the results to be filtered on the given attribute.</short><break/> <pre>
- (set-filter client "filter_attr" '(0 2 4 34 55 77))
- (set-filter client "other_attr" '(8 4 2 11) :exclude t) </pre><break/>
- Sets the results to be filtered on the given attribute. Only
- results which have attributes matching the given (numeric)
- values will be returned.<break/>
- This may be called multiple times with different attributes to
- select on multiple attributes.<break/> If <code>:exclude</code> is set, excludes results that match the filter.</documentation-string></function-definition></external-symbols><internal-symbols><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___set-filter-range" name="%set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>type</elt><elt>attr</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
+ given range.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code>.</return><break/>
+ Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-by" name="set-group-by" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>function</elt><elt>&optional</elt><elt>group-sort</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute name to group by</arg> <arg arg="function">the grouping function to use</arg> <arg arg="group-sort">the sorting clause for group-by</arg> <return>client</return> <short>Set grouping options.</short><break/> <pre>
+ (set-group-by client "whatever_attr" +sph-groupby-attr+ "group asc")
+ (set-group-by client "date_attr" +sph-groupby-day+) </pre><break/>
+ Sets the attribute and function for grouping results.<break/>
+ In grouping mode, all matches are assigned to different groups based on
+ grouping function value. Each group keeps track of the total match
+ count, and the best match (in this group) according to current sorting
+ function. The final result set contains one best match per group, with
+ grouping function value and matches count attached.<break/> <code>attribute</code> is any valid attribute. Use <fun id="cl-sphinx-search__fun__reset-group-by">reset-group-by</fun>
+ to disable grouping.<break/> <code>function</code> is one of:<break/> <dl> <dt dt="+sph-groupby-day+">Group by day (assumes timestamp type attribute of form YYYYMMDD)</dt> <dt dt="+sph-groupby-week+">Group by week (assumes timestamp type attribute of form YYYYNNN)</dt> <dt dt="+sph-groupby-month+">Group by month (assumes timestamp type attribute of form YYYYMM)</dt> <dt dt="+sph-groupby-year+">Group by year (assumes timestamp type attribute of form YYYY)</dt> <dt dt="+sph-groupby-attr+">Group by attribute value</dt> <dt dt="+sph-groupby-attrpair+">Group by two attributes, being the given
+ attribute and the attribute that immediately follows it in the sequence
+ of indexed attributes. The specified attribute may therefore not be the last of the indexed attributes</dt> </dl><break/>
+ Groups in the set of results can be sorted by any SQL-like sorting clause,
+ including both document attributes and the following special internal
+ Sphinx attributes:<break/> <dl> <dt dt="id">document ID</dt> <dt dt="weight, rank, relevance">match weight</dt> <dt dt="group">group by function value</dt> <dt dt="count">number of matches in group</dt> </dl><break/>
+ The default mode is to sort by group-by value in descending order,
+ ie. by "group desc".<break/> In the results set, <code>total-found</code> contains the total amount of
+ matching groups over the whole index.<break/>
+ WARNING: grouping is done in fixed memory and thus its results
+ are only approximate; so there might be more groups reported in <code>total-found</code> than actually present. <code>count</code> might
+ also be underestimated.<break/>
+ For example, if sorting by relevance and grouping by a "published"
+ attribute with +sph-groupby-day+ function, then the result set will
+ contain only the most relevant match for each day when there were any
+ matches published, with day number and per-day match count attached,
+ and sorted by day number in descending order (ie. recent days first).</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-group-distinct" name="set-group-distinct" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to use for count-distinct queries</arg> <return>client</return> <short>Set count-distinct attribute for group-by queries.</short></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
+ (set-limits client :limit limit)
+ (set-limits client :offset offset :limit limit)
+ (set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
+ Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-select" name="set-select" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>select</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="select">the select string</arg> <return>client</return> <short>Set the select clause.</short><break/>
+ Sets the select clause, listing specific attributes to fetch, and
+ expressions to compute and fetch. Clause syntax mimics SQL.<break/>
+ The select clause is very similar to the part of a typical SQL query between <code>SELECT</code> and <code>FROM</code>. It lets you choose what
+ attributes (columns) to fetch, and also what expressions over the
+ columns to compute and fetch. A difference from SQL is that expressions
+ must always be aliased to a correct identifier (consisting of letters
+ and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
+ computation results can be returned under a 'normal' name in the result
+ set, used in other clauses, etc.<break/>
+ Everything else is basically identical to SQL. Star ('*') is supported.
+ Functions are supported. An arbitrary number of expressions is supported.
+ Computed expressions can be used for sorting, filtering, and grouping,
+ just like regular attributes.<break/>
+ Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
+ using GROUP BY.<break/>
+ Examples:<break/> <pre>
+ (set-select sph "*, (user_karma+ln(pageviews))*0.1 AS myweight" )
+ (set-select sph "exp_years, salary_gbp*{$gbp_usd_rate} AS salary_usd, IF(age>40,1,0) AS over40" )
+ (set-select sph "*, AVG(price) AS avgprice" ) </pre></documentation-string></function-definition></external-symbols><internal-symbols><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___set-filter-range" name="%set-filter-range" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>type</elt><elt>attr</elt><elt>min</elt><elt>max</elt><elt>&key</elt><elt>exclude</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-functions+" name="+sph-sort-functions+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><break/>
The interface to the search daemon goes through this class.<break/>
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__query">query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
- Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see> <see id="cl-sphinx-search__fun__last-error">last-error</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see> <see id="cl-sphinx-search__fun__set-id-range">set-id-range</see> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__max-query-time">max-query-time</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" 
package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__anchor" name="anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__match-mode" name="match-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-list-signed-quads" name="%pack-list-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" 
name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__rank-mode" name="rank-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see> <see id="cl-sphinx-search__fun__last-error">last-error</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see> <see id="cl-sphinx-search__fun__set-id-range">set-id-range</see> <see id="cl-sphinx-search__fun__set-filter">set-filter</see> <see id="cl-sphinx-search__fun__set-filter-range">set-filter-range</see> <see id="cl-sphinx-search__fun__set-filter-float-range">set-filter-float-range</see> <see id="cl-sphinx-search__fun__max-query-time">max-query-time</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__geo-anchor" name="geo-anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" 
name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__match-mode" name="match-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" 
name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-list-signed-quads" name="%pack-list-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition 
id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__rank-mode" name="rank-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__set-override" name="set-override" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>attribute</elt><elt>type</elt><elt>values</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="attribute">the attribute to override</arg> <arg arg="type">the attribute type as defined in Sphinx config</arg> <arg arg="values">an alist mapping document IDs to attribute values</arg> <return>client</return> <short>Set attribute values overrides.</short><break/>
+ There can be only one override per attribute.<break/> <code>values</code> must be an alist that maps document IDs to attribute
+ values.<break/> <pre>
+ (set-override client "test_attr" +sph-attr-integer+ '((4314 . 3) (2443 . 2))) </pre><break/>
+ In the example above, Sphinx will see a value of 3 for the <code>attribute</code> 'test_attr' of the document with ID 4314, and
+ a value of 2 for the document with ID 2443, while all other documents
+ keep the values from the last indexer run.</documentation-string></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
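
A minimal sketch of the two workflows described in the class documentation
above (single query vs. queued batch), assuming a searchd reachable at the
default host/port and a hypothetical index named "test1"; the lambda lists
follow the generated documentation, and the NIL-on-failure convention is an
assumption suggested by the last-error note:

    ;; One-shot search: returns a result hash, or (assumed) NIL on
    ;; failure, in which case last-error holds the message.
    (let ((sph (make-instance 'sphinx-client)))
      (set-server sph :host "localhost" :port 3312)
      (set-limits sph :offset 0 :limit 10)
      (or (query sph "test" :index "test1")
          (last-error sph)))

    ;; Batch search: queue several queries with add-query, then send
    ;; them to searchd in one round-trip; returns a list of result hashes.
    (let ((sph (make-instance 'sphinx-client)))
      (add-query sph "first query" :index "test1")
      (add-query sph "second query" :index "test1")
      (run-queries sph))
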
diff --git a/doc/index.html b/doc/index.html
index 64836c1..7be880c 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -1,8 +1,8 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Sphinx Search API reference</title><link rel="stylesheet" type="text/css" href="index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded">
Index of packages:
</div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><h2 class="page-title"><a href="pages/cl-sphinx-search.html">
Package
- cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a 
href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> </div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a 
href="pages/cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search.html b/doc/pages/cl-sphinx-search.html
index 4a9b87c..2306aa2 100644
--- a/doc/pages/cl-sphinx-search.html
+++ b/doc/pages/cl-sphinx-search.html
@@ -1,33 +1,33 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package cl-sphinx-search</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
Package
- cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
+ cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> </div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
From the site:<br><br> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<br><br>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<br><br>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
There is just one class:<br><br> <div class="def"><a href="cl-sphinx-search__class__sphinx-client.html">
Class
sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="cl-sphinx-search__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a href="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<br><br> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><br><br>
Also used was the Python API supplied with the Sphinx Search v0.9.9-rc2 source code download, in the <tt>api/</tt> directory.<br><br> <b>Documentation</b><br><br> This documentation was generated by <a href="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
- the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-filters.html"><tt>reset-filters</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__reset-overrides.html"><tt>reset-overrides</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter.html"><tt>set-filter</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-float-range.html"><tt>set-filter-float-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-filter-range.html"><tt>set-filter-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-geo-anchor.html"><tt>set-geo-anchor</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-by.html"><tt>set-group-by</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-group-distinct.html"><tt>set-group-distinct</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-id-range.html"><tt>set-id-range</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-select.html"><tt>set-select</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__geo-anchor.html b/doc/pages/cl-sphinx-search__fun__geo-anchor.html
new file mode 100644
index 0000000..b04dafa
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__geo-anchor.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function geo-anchor</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ geo-anchor</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>geo-anchor</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__reset-filters.html b/doc/pages/cl-sphinx-search__fun__reset-filters.html
new file mode 100644
index 0000000..481ae0b
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__reset-filters.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function reset-filters</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ reset-filters</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reset-filters</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Reset the filters.<br><br>
+ Clear all filters, including the geolocation anchor point.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__reset-group-by.html b/doc/pages/cl-sphinx-search__fun__reset-group-by.html
new file mode 100644
index 0000000..bfdaa15
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__reset-group-by.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function reset-group-by</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ reset-group-by</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reset-group-by</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Clear all the group-by settings.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__reset-overrides.html b/doc/pages/cl-sphinx-search__fun__reset-overrides.html
new file mode 100644
index 0000000..3dd98bf
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__reset-overrides.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function reset-overrides</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ reset-overrides</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reset-overrides</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Clear all attribute value overrides.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
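
A short sketch pairing set-override with reset-overrides, reusing the alist
from the set-override documentation above; the query string and the index
name "test1" are hypothetical:

    (let ((sph (make-instance 'sphinx-client)))
      ;; Pretend test_attr is 3 for document 4314 and 2 for document 2443.
      (set-override sph "test_attr" +sph-attr-integer+
                    '((4314 . 3) (2443 . 2)))
      (query sph "test" :index "test1")
      ;; Drop all overrides again so later queries see the indexed values.
      (reset-overrides sph))
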
diff --git a/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html b/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html
new file mode 100644
index 0000000..ae1bf74
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__set-geo-anchor.html
@@ -0,0 +1,19 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-geo-anchor</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ set-geo-anchor</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-geo-anchor</tt> (<b>client</b>&nbsp;<b>latitude-attribute</b>&nbsp;<b>latitude</b>&nbsp;<b>longitude-attribute</b>&nbsp;<b>longitude</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>latitude-attribute</tt> -- the latitude attribute name</li><li><tt>latitude</tt> -- latitude in radians</li><li><tt>longitude-attribute</tt> -- the longitude attribute name</li><li><tt>longitude</tt> -- longitude in radians</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set up the anchor point for geolocation.<br><br> <pre>
+ (set-geo-anchor client "latitude_attr" 45.231 "longitude_attribute" 4.5) </pre><br><br>
+ Set up an anchor point for geosphere distance calculations in
+ filters and sorting. Distances will be computed with respect to
+ this point and included in the result output.<br><br>
+ To filter results to within a certain distance of the anchor
+ point, use something like:<br><br> <pre>
+ (set-filter-float-range sph "geodist" 0 5000) </pre><br><br>
+ This keeps only results within 5 km (5000 m) of the anchor
+ point.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
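
Putting the pieces above together, a sketch of anchoring at a point and
keeping only nearby matches; the attribute names "lat" and "lon", the index
"places", and the query string are hypothetical, coordinates are converted
to radians as the documentation requires, and "geodist" is taken to be in
meters (so 5000 = 5 km, per the example above):

    (let ((sph (make-instance 'sphinx-client)))
      ;; 45.231 degrees N, 4.5 degrees E, converted to radians.
      (set-geo-anchor sph "lat" 0.78944 "lon" 0.07854)
      ;; Keep only matches within 5 km of the anchor point.
      (set-filter-float-range sph "geodist" 0.0 5000.0)
      (query sph "coffee" :index "places"))
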
diff --git a/doc/pages/cl-sphinx-search__fun__set-group-by.html b/doc/pages/cl-sphinx-search__fun__set-group-by.html
new file mode 100644
index 0000000..8f7d6fe
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__set-group-by.html
@@ -0,0 +1,35 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-group-by</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ set-group-by</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-group-by</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>function</b>&nbsp;<b>&optional</b>&nbsp;<b>group-sort</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute name to group by</li><li><tt>function</tt> -- the grouping function to use</li><li><tt>group-sort</tt> -- the sorting clause for group-by</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set grouping options.<br><br> <pre>
+ (set-group-by client "whatever_attr" +sph-groupby-attr+ "group asc")
+ (set-group-by client "date_attr" +sph-groupby-day+) </pre><br><br>
+ Sets the attribute and function used for grouping results.<br><br>
+ In grouping mode, all matches are assigned to different groups based on
+ grouping function value. Each group keeps track of the total match
+ count, and the best match (in this group) according to current sorting
+ function. The final result set contains one best match per group, with
+ grouping function value and matches count attached.<br><br> <tt>attribute</tt> is any valid attribute. Use <a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a>
+ to disable grouping.<br><br> <tt>function</tt> is one of:<br><br> <dl><dt>+sph-groupby-day+</dt><dd> : Group by day (assumes timestamp type attribute of form YYYYMMDD)</dd><dt>+sph-groupby-week+</dt><dd> : Group by week (assumes timestamp type attribute of form YYYYNNN)</dd><dt>+sph-groupby-month+</dt><dd> : Group by month (assumes timestamp type attribute of form YYYYMM)</dd><dt>+sph-groupby-year+</dt><dd> : Group by year (assumes timestamp type attribute of form YYYY)</dd><dt>+sph-groupby-attr+</dt><dd> : Group by attribute value</dd><dt>+sph-groupby-attrpair+</dt><dd> : Group by two attributes, being the given
+ attribute and the attribute that immediately follows it in the sequence
+ of indexed attributes. The specified attribute therefore cannot be the last of the indexed attributes.</dd></dl><br style="clear: both;"><br><br>
+ Groups in the set of results can be sorted by any SQL-like sorting clause,
+ including both document attributes and the following special internal
+ Sphinx attributes:<br><br> <dl><dt>id</dt><dd> : document ID</dd><dt>weight, rank, relevance</dt><dd> : match weight</dd><dt>group</dt><dd> : group by function value</dd><dt>count</dt><dd> : number of matches in group</dd></dl><br style="clear: both;"><br><br>
+ The default mode is to sort by group-by value in descending order,
+ i.e. by "group desc".<br><br> In the result set, <tt>total-found</tt> contains the total number of
+ matching groups over the whole index.<br><br>
+ WARNING: grouping is done in fixed memory and thus its results
+ are only approximate; so there might be more groups reported in <tt>total-found</tt> than actually present. <tt>count</tt> might
+ also be underestimated.<br><br>
+ For example, if sorting by relevance and grouping by a "published"
+ attribute with +sph-groupby-day+ function, then the result set will
+ contain only the most relevant match for each day when there were any
+ matches published, with day number and per-day match count attached,
+ and sorted by day number in descending order (i.e. recent days first).</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__reset-group-by.html"><tt>reset-group-by</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-group-distinct.html b/doc/pages/cl-sphinx-search__fun__set-group-distinct.html
new file mode 100644
index 0000000..1649693
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__set-group-distinct.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-group-distinct</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ set-group-distinct</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-group-distinct</tt> (<b>client</b>&nbsp;<b>attribute</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to use for count-distinct queries</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set count-distinct attribute for group-by queries.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-override.html b/doc/pages/cl-sphinx-search__fun__set-override.html
new file mode 100644
index 0000000..c1d88d9
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__set-override.html
@@ -0,0 +1,16 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-override</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ set-override</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-override</tt> (<b>client</b>&nbsp;<b>attribute</b>&nbsp;<b>type</b>&nbsp;<b>values</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>attribute</tt> -- the attribute to override</li><li><tt>type</tt> -- the attribute type as defined in Sphinx config</li><li><tt>values</tt> -- an alist mapping document IDs to attribute values</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set attribute value overrides.<br><br>
+ There can be only one override per attribute.<br><br> <tt>values</tt> must be an alist that maps document IDs to attribute
+ values.<br><br> <pre>
+ (set-override client "test_attr" +sph-attr-integer+ '((4314 . 3) (2443 . 2))) </pre><br><br>
+ In the example above, for the document with ID 4314, Sphinx will see a value of 3 for the <tt>attribute</tt> called 'test_attr', and
+ for the document with ID 2443 it will see 2, while all other documents
+ keep the values they had when the indexer was last run.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__set-select.html b/doc/pages/cl-sphinx-search__fun__set-select.html
new file mode 100644
index 0000000..3ecb01f
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__set-select.html
@@ -0,0 +1,29 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-select</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ set-select</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-select</tt> (<b>client</b>&nbsp;<b>select</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>select</tt> -- the select string</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the select clause.<br><br>
+ Sets the select clause, listing specific attributes to fetch, and
+ expressions to compute and fetch. Clause syntax mimics SQL.<br><br>
+ The select clause is very similar to the part of a typical SQL query between <tt>SELECT</tt> and <tt>FROM</tt>. It lets you choose what
+ attributes (columns) to fetch, and also what expressions over the
+ columns to compute and fetch. A difference from SQL is that expressions
+ must always be aliased to a correct identifier (consisting of letters
+ and digits) using the 'AS' keyword. Sphinx enforces aliases so that the
+ computation results can be returned under a 'normal' name in the result
+ set, used in other clauses, etc.<br><br>
+ Everything else is basically identical to SQL. Star ('*') is supported.
+ Functions are supported. An arbitrary number of expressions is supported.
+ Computed expressions can be used for sorting, filtering, and grouping,
+ just like regular attributes.<br><br>
+ Aggregate functions (AVG(), MIN(), MAX(), SUM()) are supported when
+ using GROUP BY.<br><br>
+ Examples:<br><br> <pre>
+ (set-select sph "*, (user_karma+ln(pageviews))*0.1 AS myweight" )
+ (set-select sph "exp_years, salary_gbp*{$gbp_usd_rate} AS salary_usd, IF(age>40,1,0) AS over40" )
+ (set-select sph "*, AVG(price) AS avgprice" ) </pre></div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__variable__+sph-sort-functions+.html b/doc/pages/cl-sphinx-search__variable__+sph-sort-functions+.html
new file mode 100644
index 0000000..3d4a416
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__variable__+sph-sort-functions+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-functions+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-functions+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/package.lisp b/package.lisp
index 1c4bbe4..affac69 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,100 +1,104 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
#:query
#:add-query
#:run-queries
#:last-error
#:last-warning
#:set-id-range
#:set-filter
#:set-filter-range
#:set-filter-float-range
- #:max-query-time)
+ #:max-query-time
+ #:set-geo-anchor
+ #:set-group-by
+ #:set-group-distinct
+ #:set-select
+ #:reset-filters
+ #:reset-group-by
+ #:reset-overrides)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
@aboutfun{query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
@begin[Acknowledgements]{section}
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
@a[http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/]{here}), which
itself says:
@begin{pre}
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
@end{pre}
Also used was the Python API supplied with the source code
download of Sphinx Search v0.9.9-rc2, in the @code{api/} directory.
@b{Documentation}
This documentation was generated by @a[http://www.lichteblau.com/atdoc/doc/]{atdoc},
the documentation generation system written by David Lichteblau.
@end{section}
-
-
"))
-
diff --git a/perl-ex/Sphinx.pm b/perl-ex/Sphinx.pm
index 20702fd..1929dca 100644
--- a/perl-ex/Sphinx.pm
+++ b/perl-ex/Sphinx.pm
@@ -1510,680 +1510,680 @@ the same keys as the hash returned by L<Query>, plus:
=item * error
Errors, if any, for this query.
=item * warnings
Any warnings associated with the query.
=back
=cut
sub RunQueries {
my $self = shift;
unless (@{$self->{_reqs}}) {
$self->_Error("no queries defined, issue AddQuery() first");
return;
}
my $fp = $self->_Connect() or do { $self->{_reqs} = []; return };
##################
# send query, get response
##################
my $nreqs = @{$self->{_reqs}};
my $req = pack("Na*", $nreqs, join("", @{$self->{_reqs}}));
#$req = pack ( "nnN/a*", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, $req); # add header
my $reqa = pack ( "nn", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH );
print STDERR "runqueries req header:\n";
print STDERR Dump($reqa) . "\n";
print STDERR 'len req: ' . length( $req ) . "\n";
$req = $reqa . pack ( "N/a*", $req); # add header
print STDERR "runqueries sending command:\n";
print STDERR Dump($req) . "\n";
$self->_Send($fp, $req);
$self->{_reqs} = [];
my $response = $self->_GetResponse ( $fp, VER_COMMAND_SEARCH );
return unless $response;
print STDERR "runqueries: got response:\n";
print STDERR Dump($response) . "\n";
##################
# parse response
##################
my $p = 0;
my $max = length($response); # Protection from broken response
my @results;
for (my $ires = 0; $ires < $nreqs; $ires++) {
my $result = {}; # Empty hash ref
push(@results, $result);
$result->{matches} = []; # Empty array ref
$result->{error} = "";
$result->{warnings} = "";
# extract status
my $status = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
if ($status != SEARCHD_OK) {
my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $message = substr ( $response, $p, $len ); $p += $len;
if ($status == SEARCHD_WARNING) {
$result->{warning} = $message;
} else {
$result->{error} = $message;
next;
}
}
# read schema
my @fields;
my (%attrs, @attr_list);
my $nfields = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
while ( $nfields-->0 && $p<$max ) {
my $len = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
push(@fields, substr ( $response, $p, $len )); $p += $len;
}
$result->{"fields"} = \@fields;
my $nattrs = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
while ( $nattrs-->0 && $p<$max ) {
my $len = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
my $attr = substr ( $response, $p, $len ); $p += $len;
my $type = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
$attrs{$attr} = $type;
push(@attr_list, $attr);
}
$result->{"attrs"} = \%attrs;
# read match count
my $count = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
my $id64 = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
# read matches
while ( $count-->0 && $p<$max ) {
my $data = {};
if ($id64) {
$data->{doc} = $self->_sphUnpackU64(substr($response, $p, 8)); $p += 8;
$data->{weight} = unpack("N*", substr($response, $p, 4)); $p += 4;
} else {
( $data->{doc}, $data->{weight} ) = unpack("N*N*", substr($response,$p,8));
$p += 8;
}
foreach my $attr (@attr_list) {
if ($attrs{$attr} == SPH_ATTR_BIGINT) {
$data->{$attr} = $self->_sphUnpackI64(substr($response, $p, 8)); $p += 8;
next;
}
if ($attrs{$attr} == SPH_ATTR_FLOAT) {
my $uval = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
$data->{$attr} = [ unpack("f*", pack("L", $uval)) ];
next;
}
my $val = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
if ($attrs{$attr} & SPH_ATTR_MULTI) {
my $nvalues = $val;
$data->{$attr} = [];
while ($nvalues-->0 && $p < $max) {
$val = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
push(@{$data->{$attr}}, $val);
}
} else {
$data->{$attr} = $val;
}
}
push(@{$result->{matches}}, $data);
}
my $words;
($result->{total}, $result->{total_found}, $result->{time}, $words) = unpack("N*N*N*N*", substr($response, $p, 16));
$result->{time} = sprintf ( "%.3f", $result->{"time"}/1000 );
$p += 16;
while ( $words-->0 && $p < $max) {
my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
$p += 4;
my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
$p += $len;
my ($docs, $hits) = unpack ("N*N*", substr($response, $p, 8));
$p += 8;
$result->{words}{$word} = {
"docs" => $docs,
"hits" => $hits
};
}
}
return \@results;
}
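# A minimal batched-query sketch (assumes a connected object in $sph and a
# running searchd; the index name "test1" is hypothetical):
#
#   $sph->AddQuery("first term");
#   $sph->AddQuery("second term", "test1");
#   my $results = $sph->RunQueries();
#   printf "query 0: %d matches\n", scalar @{$results->[0]{matches}} if $results;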
=head2 BuildExcerpts
$excerpts = $sph->BuildExcerpts($docs, $index, $words, $opts)
Generate document excerpts for the specified documents.
=over 4
=item docs
An array reference of strings which represent the document
contents
=item index
A string specifying the index whose settings will be used
for stemming, lexing and case folding
=item words
A string which contains the words to highlight
=item opts
A hash which contains additional optional highlighting parameters:
=over 4
=item before_match - a string to insert before a set of matching words, default is "<b>"
=item after_match - a string to insert after a set of matching words, default is "</b>"
=item chunk_separator - a string to insert between excerpts chunks, default is " ... "
=item limit - max excerpt size in symbols (codepoints), default is 256
=item around - how many words to highlight around each match, default is 5
=item exact_phrase - whether to highlight exact phrase matches only, default is false
=item single_passage - whether to extract single best passage only, default is false
=item use_boundaries
=item weight_order
=back
=back
Returns undef on failure.
Returns an array ref of string excerpts on success.
=cut
sub BuildExcerpts {
my ($self, $docs, $index, $words, $opts) = @_;
$opts ||= {};
croak("BuildExcepts() called with incorrect parameters")
unless (ref($docs) eq 'ARRAY'
&& defined($index)
&& defined($words)
&& ref($opts) eq 'HASH');
my $fp = $self->_Connect() or return;
##################
# fixup options
##################
$opts->{"before_match"} ||= "<b>";
$opts->{"after_match"} ||= "</b>";
$opts->{"chunk_separator"} ||= " ... ";
$opts->{"limit"} ||= 256;
$opts->{"around"} ||= 5;
$opts->{"exact_phrase"} ||= 0;
$opts->{"single_passage"} ||= 0;
$opts->{"use_boundaries"} ||= 0;
$opts->{"weight_order"} ||= 0;
##################
# build request
##################
# v.1.0 req
my $req;
my $flags = 1; # remove spaces
$flags |= 2 if ( $opts->{"exact_phrase"} );
$flags |= 4 if ( $opts->{"single_passage"} );
$flags |= 8 if ( $opts->{"use_boundaries"} );
$flags |= 16 if ( $opts->{"weight_order"} );
$req = pack ( "NN", 0, $flags ); # mode=0, flags=$flags
$req .= pack ( "N/a*", $index ); # req index
$req .= pack ( "N/a*", $self->{_string_encoder}->($words)); # req words
# options
$req .= pack ( "N/a*", $opts->{"before_match"});
$req .= pack ( "N/a*", $opts->{"after_match"});
$req .= pack ( "N/a*", $opts->{"chunk_separator"});
$req .= pack ( "N", int($opts->{"limit"}) );
$req .= pack ( "N", int($opts->{"around"}) );
# documents
$req .= pack ( "N", scalar(@$docs) );
foreach my $doc (@$docs) {
croak('BuildExcerpts: Found empty document in $docs') unless ($doc);
$req .= pack("N/a*", $self->{_string_encoder}->($doc));
}
##########################
# send query, get response
##########################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, $req); # add header
print STDERR "sending:\n";
print STDERR Dump($req) . "\n";
$self->_Send($fp, $req);
my $response = $self->_GetResponse($fp, VER_COMMAND_EXCERPT);
return unless $response;
my ($pos, $i) = (0, 0);
my $res = []; # Empty array ref
my $rlen = length($response);
for ( $i=0; $i< scalar(@$docs); $i++ ) {
my $len = unpack ( "N*", substr ( $response, $pos, 4 ) );
$pos += 4;
if ( $pos+$len > $rlen ) {
$self->_Error("incomplete reply");
return;
}
push(@$res, $self->{_string_decoder}->( substr ( $response, $pos, $len ) ));
$pos += $len;
}
return $res;
}
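# A usage sketch (the index name "posts" is hypothetical; options may be
# omitted to get the defaults listed above):
#
#   my $excerpts = $sph->BuildExcerpts(
#       [ "some document body text" ], "posts", "document",
#       { around => 3, limit => 120 });
#   print $excerpts->[0], "\n" if $excerpts;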
=head2 BuildKeywords
$results = $sph->BuildKeywords($query, $index, $hits)
Generate keyword list for a given query
Returns undef on failure.
Returns an array of hashes, where each hash describes a word in the query with the following keys:
=over 4
=item * tokenized
Tokenised term from query
=item * normalized
Normalised term from query
=item * docs
Number of docs in which word was found (if $hits is true)
=item * hits
Number of occurrences of word (if $hits is true)
=back
=cut
sub BuildKeywords {
my ( $self, $query, $index, $hits ) = @_;
my $fp = $self->_Connect() or return;
# v.1.0 req
my $req = pack("N/a*", $self->{_string_encoder}->($query) );
$req .= pack("N/a*", $index);
$req .= pack("N", $self->{_string_encoder}->($hits) );
##################
# send query, get response
##################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, $req);
$self->_Send($fp, $req);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_KEYWORDS );
return unless $response;
##################
# parse response
##################
my $p = 0;
my @res;
my $rlen = length($response);
my $nwords = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
for (my $i=0; $i < $nwords; $i++ ) {
my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $tokenized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
$len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $normalized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
my %data = ( tokenized => $tokenized, normalized => $normalized );
if ($hits) {
( $data{docs}, $data{hits} ) = unpack("N*N*", substr($response,$p,8));
$p += 8;
}
push(@res, \%data);
}
if ( $p > $rlen ) {
$self->_Error("incomplete reply");
return;
}
return \@res;
}
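# A usage sketch (the index name "test1" is hypothetical; pass a true third
# argument to also fetch per-keyword docs/hits statistics):
#
#   my $keywords = $sph->BuildKeywords("running shoes", "test1", 1);
#   printf "%s => %s (%d docs)\n", $_->{tokenized}, $_->{normalized}, $_->{docs}
#       for @$keywords;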
=head2 EscapeString
$escaped = $sph->EscapeString('abcde!@#$%')
Inserts backslash before all non-word characters in the given string.
=cut
sub EscapeString {
my $self = shift;
return quotemeta(shift);
}
=head2 UpdateAttributes
$sph->UpdateAttributes($index, \@attrs, \%values);
$sph->UpdateAttributes($index, \@attrs, \%values, $mva);
Update specified attributes on specified documents
=over 4
=item index
Name of the index to be updated
=item attrs
Array of attribute name strings
=item values
A hash mapping document IDs to arrays of new attribute values
=back
Returns number of actually updated documents (0 or more) on success
Returns undef on failure
Usage example:
$sph->UpdateAttributes("test1", [ qw/group_id/ ], { 1 => [ 456 ] });
=cut
sub UpdateAttributes {
my ($self, $index, $attrs, $values, $mva ) = @_;
croak("index is not defined") unless (defined $index);
croak("attrs must be an array") unless ref($attrs) eq "ARRAY";
for my $attr (@$attrs) {
croak("attribute is not defined") unless (defined $attr);
}
croak("values must be a hashref") unless ref($values) eq "HASH";
for my $id (keys %$values) {
my $entry = $values->{$id};
croak("value id $id is not numeric") unless ($id =~ /$num_re/);
croak("value entry must be an array") unless ref($entry) eq "ARRAY";
croak("size of values must match size of attrs") unless @$entry == @$attrs;
for my $v (@$entry) {
if ($mva) {
croak("multi-valued entry $v is not an array") unless ref($v) eq 'ARRAY';
for my $vv (@$v) {
croak("array entry value $vv is not an integer") unless ($vv =~ /^(\d+)$/o);
}
} else {
croak("entry value $v is not an integer") unless ($v =~ /^(\d+)$/o);
}
}
}
## build request
my $req = pack ( "N/a*", $index);
$req .= pack ( "N", scalar @$attrs );
for my $attr (@$attrs) {
$req .= pack ( "N/a*", $attr)
. pack("N", $mva ? 1 : 0);
}
$req .= pack ( "N", scalar keys %$values );
foreach my $id (keys %$values) {
my $entry = $values->{$id};
$req .= $self->_sphPackU64($id);
if ($mva) {
for my $v ( @$entry ) {
$req .= pack ( "N", @$v );
for my $vv (@$v) {
$req .= pack ("N", $vv);
}
}
} else {
for my $v ( @$entry ) {
$req .= pack ( "N", $v );
}
}
}
## connect, send query, get response
my $fp = $self->_Connect() or return;
$req = pack ( "nnN/a*", SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, $req); ## add header
send ( $fp, $req, 0);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_UPDATE );
return unless $response;
## parse response
my ($updated) = unpack ( "N*", substr ( $response, 0, 4 ) );
return $updated;
}
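# A sketch of the multi-value (MVA) form: each attribute value is itself an
# array ref, and the trailing flag marks the update as MVA (the index and
# attribute names are hypothetical):
#
#   $sph->UpdateAttributes("test1", [qw/tag_ids/], { 12 => [ [3, 5, 7] ] }, 1);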
=head2 Open
$sph->Open()
Opens a persistent connection for subsequent queries.
To reduce the network connection overhead of making Sphinx queries, you can call
$sph->Open(), then run any number of queries, and call $sph->Close() when
finished.
Returns 1 on success, 0 on failure.
- =cut
+=cut
- sub Open {
- my $self = shift;
+sub Open {
+ my $self = shift;
- if ($self->{_socket}) {
- $self->_Error("already connected");
- return 0;
- }
- my $fp = $self->_Connect() or return 0;
+ if ($self->{_socket}) {
+ $self->_Error("already connected");
+ return 0;
+ }
+ my $fp = $self->_Connect() or return 0;
- my $req = pack("nnNN", SEARCHD_COMMAND_PERSIST, 0, 4, 1);
- $self->_Send($fp, $req) or return 0;
+ my $req = pack("nnNN", SEARCHD_COMMAND_PERSIST, 0, 4, 1);
+ $self->_Send($fp, $req) or return 0;
- $self->{_socket} = $fp;
- return 1;
- }
+ $self->{_socket} = $fp;
+ return 1;
+}
=head2 Close
$sph->Close()
Closes a persistent connection.
Returns 1 on success, 0 on failure.
=cut
sub Close {
my $self = shift;
if (! $self->{_socket}) {
$self->_Error("not connected");
return 0;
}
close($self->{_socket});
$self->{_socket} = undef;
return 1;
}
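# A persistent-connection sketch: open once, reuse the socket for several
# queries, then close (GetLastError() is the module's error accessor):
#
#   $sph->Open() or die $sph->GetLastError();
#   my $r1 = $sph->Query("first");
#   my $r2 = $sph->Query("second");
#   $sph->Close();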
=head2 Status
$status = $sph->Status()
Queries searchd status, and returns a hash of status variable name and value pairs.
Returns undef on failure.
=cut
sub Status {
my $self = shift;
my $fp = $self->_Connect() or return;
my $req = pack("nnNN", SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, 1 ); # len=4, body=1
$self->_Send($fp, $req) or return;
my $response = $self->_GetResponse ( $fp, VER_COMMAND_STATUS );
return unless $response;
my $p = 0;
my ($rows, $cols) = unpack("N*N*", substr ( $response, $p, 8 ) ); $p += 8;
return {} unless $rows && $cols;
my %res;
for (1 .. $rows ) {
my @entry;
for ( 1 .. $cols) {
my $len = unpack("N*", substr ( $response, $p, 4 ) ); $p += 4;
push(@entry, $len ? substr ( $response, $p, $len ) : ""); $p += $len;
}
if ($cols <= 2) {
$res{$entry[0]} = $entry[1];
} else {
my $name = shift @entry;
$res{$name} = \@entry;
}
}
return \%res;
}
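# A usage sketch; in the common two-column case each status variable maps
# to a single scalar value:
#
#   my $status = $sph->Status();
#   print "$_: $status->{$_}\n" for sort keys %$status;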
=head1 SEE ALSO
L<http://www.sphinxsearch.com>
=head1 NOTES
There is (or was) a bundled Sphinx.pm in the contrib area of the Sphinx source
distribution, which was used as the starting point of Sphinx::Search.
Maintenance of that version appears to have lapsed at sphinx-0.9.7, so many of
the newer API calls are not available there. Sphinx::Search is mostly
compatible with the old Sphinx.pm except:
=over 4
=item On failure, Sphinx::Search returns undef rather than 0 or -1.
=item Sphinx::Search 'Set' functions are cascadable, e.g. you can do
Sphinx::Search->new
->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms")
=back
Sphinx::Search also provides documentation and unit tests, which were the main
motivations for branching from the earlier work.
=head1 AUTHOR
Jon Schutz
=head1 BUGS
Please report any bugs or feature requests to
C<bug-sphinx-search at rt.cpan.org>, or through the web interface at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Sphinx-Search>.
I will be notified, and then you'll automatically be notified of progress on
your bug as I make changes.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Sphinx::Search
You can also look for information at:
=over 4
=item * AnnoCPAN: Annotated CPAN documentation
L<http://annocpan.org/dist/Sphinx-Search>
=item * CPAN Ratings
L<http://cpanratings.perl.org/d/Sphinx-Search>
=item * RT: CPAN's request tracker
L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Sphinx-Search>
=item * Search CPAN
L<http://search.cpan.org/dist/Sphinx-Search>
=back
=head1 ACKNOWLEDGEMENTS
This module is based on Sphinx.pm (not deployed to CPAN) for Sphinx version
0.9.7-rc1, by Len Kranendonk, which was in turn based on the Sphinx PHP API.
=head1 COPYRIGHT & LICENSE
Copyright 2007 Jon Schutz, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License.
=cut
1;
|
thijs/cl-sphinx-search
|
43f391dfd56e4219c6bbb34fc9db231dca7cd346
|
Setting filters actually seems to work
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index a0660cf..c6a4f81 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,964 +1,968 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
-(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
+(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
(defvar *response-length* ())
-
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(match-mode
:accessor match-mode
:initarg :match-mode
:initform +sph-match-all+
:documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of lists")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(rank-mode
:accessor rank-mode
:initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{query}
@see{add-query}
@see{run-queries}
@see{last-error}
@see{last-warning}
@see{set-id-range}
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{max-query-time}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric set-id-range (client min max)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[min]{minimum id to start searching from}
@arg[max]{maximum id to stop searching at}
@return{client}
@short{Set the id-range to search within (inclusive).}
Set the range of IDs within which to search. The range is inclusive, so with
[0, 450] documents with IDs 0 and 450 will both be found.
"))
(defmethod set-id-range ((client sphinx-client) min max)
(assert (and (numberp min) (numberp max)
(>= max min)))
  (setf (min-id client) min)
  (setf (max-id client) max)
  client)
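;; A usage sketch: restrict matching to documents with IDs between
;; 100 and 200 (inclusive):
;;
;;   (set-id-range sph 100 200)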
(defgeneric set-filter (client attribute values-list &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[values-list]{the numeric values to filter on}
@arg[exclude]{if set, exclude the given values}
@return{client}
@short{Sets the results to be filtered on the given attribute.}
@begin{pre}
(set-filter client \"filter_attr\" '(0 2 4 34 55 77))
(set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
@end{pre}
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that match the filter.
"))
-(defmethod set-filter ((client sphinx-client) attr values &key (exclude nil))
+(defmethod set-filter ((client sphinx-client) attr values &key (exclude ()))
(assert (and (listp values) (> (length values) 0)))
(dolist (item values)
(assert (numberp item)))
(push `(,+sph-filter-values+ ,attr ,values ,(cond (exclude 1) (t 0))) (filters client))
client)
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) +sph-filter-values+)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'values filter) values)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
(defgeneric set-filter-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-range client \"filter_attr\" 45 99)
(set-filter-range client \"other_attr\" 2 8 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
-(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude nil))
+(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
;; (assert (and (numberp min) (numberp max) (>= max min)))
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) +sph-filter-range+)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
-(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude nil))
+(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude ()))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
-(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude nil))
+(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude ()))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
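;; Filters accumulate across calls; a sketch combining a value filter with
;; an exclusive float range (the attribute names are hypothetical):
;;
;;   (set-filter sph "group_id" '(1 2 3))
;;   (set-filter-float-range sph "price" 9.95 99.95 :exclude t)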
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) type)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
;; (defgeneric (client )
;; (:documentation
;; "@arg[client]{a @class{sphinx-client}}
;; @arg[]{}
;; @return{}
;; @short{.}
;; .
;; "))
;; (defmethod ((client sphinx-client) )
;; )
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
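;; A single-query sketch (assumes searchd on localhost:3312; the index
;; name "test1" is hypothetical):
;;
;;   (let* ((sph (make-instance 'sphinx-client))
;;          (result (query sph "hello world" :index "test1")))
;;     (when result
;;       (gethash 'total-found result)))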
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
  (let* ((nreqs (length (reqs client)))
         (requests (pack "Na*" nreqs (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
            (%parse-response response nreqs)))))))
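;; Wire-format note: the batch body is the request count packed as a
;; 32-bit big-endian integer ("N") followed by the concatenated per-query
;; payloads from add-query; the whole body is then wrapped in the searchd
;; header ("nnN/a*": command word, version word, length-prefixed body).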
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
- (%pack-float (last (anchor client)))))
+ (%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defmethod %connect ((client sphinx-client))
+ #+SPHINX-SEARCH-DEBUG (format t "socket is: ~a~%" (%socket client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
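;; Handshake note: on connect, searchd first sends its protocol version as
;; a 32-bit integer; the client checks that it is at least 1 and replies
;; with its own version (1) before sending any commands.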
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
+ (setf (%socket client) ())
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
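;; Response layout handled by %get-response-status above: a 32-bit
;; big-endian status word; for any non-OK status a length-prefixed
;; message follows immediately, i.e. [status][msg-len][msg-bytes],
;; while an OK status carries no message and parsing resumes at p.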
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(declare (ignore k))
(concatenate 'string
(pack "N/a*" (gethash 'attr entry))
(pack "NN" (gethash 'type entry) (hash-table-count (gethash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (gethash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (gethash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(gethash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(with-output-to-string (packed-filters)
(dolist (filter filters)
(let ((type (first filter))
- (attr (second filter)))
+ (attr (second filter))
+ (last-el 3))
(format packed-filters "~a~a~a~a"
- (pack "N/a*" attr)
- (pack "N" type)
- (cond ((eql type +sph-filter-values+)
- (%pack-list-signed-quads (third filter)))
- ((eql type +sph-filter-range+)
- (concatenate 'string
- (pack "q>" (third filter))
- (pack "q>" (fourth filter))))
- ((eql type +sph-filter-floatrange+)
- (concatenate 'string
- (%pack-float (third filter))
- (%pack-float (fourth filter))))
- (t
- (error "Unhandled filter type ~S" type)))
- (pack "N" (last filter)))))))
+ (pack "N/a*" attr)
+ (pack "N" type)
+ (cond ((eql type +sph-filter-values+)
+ (%pack-list-signed-quads (third filter)))
+                      ((eql type +sph-filter-range+)
+                       (incf last-el)
+                       (concatenate 'string
+                         (pack "q>" (third filter))
+                         (pack "q>" (fourth filter))))
+                      ((eql type +sph-filter-floatrange+)
+                       (incf last-el)
+                       (concatenate 'string
+                         (%pack-float (third filter))
+                         (%pack-float (fourth filter))))
+ (t
+ (error "Unhandled filter type ~S" type)))
+ (pack "N" (nth last-el filter)))))))
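;; Illustrative filter shapes, as pushed onto (filters client) by
;; set-filter and %set-filter-range in this file; this is why last-el
;; starts at 3 and is bumped for the two range filter types:
;;   (list +sph-filter-values+ "attr" '(1 2 3) 0) ; exclude flag at (nth 3 filter)
;;   (list +sph-filter-range+ "attr" 10 20 0)     ; exclude flag at (nth 4 filter)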
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-list-signed-quads (values-list)
(with-output-to-string (packed-list)
(format packed-list "~a" (pack "N" (length values-list)))
(dolist (value values-list)
(format packed-list "~a" (pack "q>" value)))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
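;; Illustrative packing notes on the two helpers above (not a protocol
;; spec, just what they emit): %pack-list-signed-quads writes a 32-bit
;; element count followed by each value as a signed big-endian 64-bit
;; quad, and %pack-float reinterprets the IEEE-754 single-float bits as
;; an unsigned 32-bit integer sent big-endian, e.g.
;;   (%pack-float 1.0) ; packs the bit pattern #x3F800000 as four bytes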
|
thijs/cl-sphinx-search
|
81e125ea1d0f9fd5f0af2eac41184a829f043b51
|
Getting some results...
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 04740e4..a0660cf 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -415,550 +415,550 @@
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) +sph-filter-range+)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude nil))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude nil))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) type)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
;; (defgeneric (client )
;; (:documentation
;; "@arg[client]{a @class{sphinx-client}}
;; @arg[]{}
;; @return{}
;; @short{.}
;; .
;; "))
;; (defmethod ((client sphinx-client) )
;; )
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
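;; A minimal usage sketch for the method above (assumes a reachable
;; searchd with the default host/port; "books" is a hypothetical index):
;;   (let* ((client (make-instance 'sphinx-client))
;;          (result (query client "hello" :index "books")))
;;     (when result
;;       (format t "~a of ~a matches~%"
;;               (gethash 'total result) (gethash 'total-found result))
;;       (dolist (match (gethash 'matches result))
;;         (format t "doc: ~a weight: ~a~%"
;;                 (gethash "doc" match) (gethash "weight" match)))))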
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response (length (reqs client)))))))))
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batched queries enable @code{searchd} to perform internal optimizations
where possible, and they reduce network connection overhead in all cases.
For instance, running exactly the same query with different group-by
settings lets @code{searchd} perform the expensive full-text search and
ranking operations only once, while computing multiple group-by results
from that single pass.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (last (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
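;; Batched usage sketch (same hypothetical client and index as in the
;; query example above); one round trip serves all queued queries:
;;   (add-query client "first query")
;;   (add-query client "second query" :index "books")
;;   (dolist (result (run-queries client))
;;     (format t "status ~a: ~a~%"
;;             (gethash 'status result) (gethash 'status-message result)))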
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "received version number: ~a~%" v)
(%socket client)))))
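;; Handshake as implemented above: searchd greets with its protocol
;; version as a 32-bit big-endian integer; anything below 1 closes the
;; socket and records an error, otherwise the client answers with
;; (pack "N" 1) to announce its own protocol version and the socket is
;; handed back for the command payload.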
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "received bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
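;; Reply header read above: 8 bytes unpacked as "n2N", i.e. two 16-bit
;; words (status, protocol version) followed by a 32-bit body length,
;; after which the body is read in chunks until len bytes have arrived.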
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 to n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(declare (ignore k))
(concatenate 'string
(pack "N/a*" (gethash 'attr entry))
(pack "NN" (gethash 'type entry) (hash-table-count (gethash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (gethash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (gethash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(gethash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(with-output-to-string (packed-filters)
(dolist (filter filters)
(let ((type (first filter))
(attr (second filter)))
- (concatenate 'string
+ (format packed-filters "~a~a~a~a"
(pack "N/a*" attr)
(pack "N" type)
(cond ((eql type +sph-filter-values+)
(%pack-list-signed-quads (third filter)))
((eql type +sph-filter-range+)
(concatenate 'string
(pack "q>" (third filter))
(pack "q>" (fourth filter))))
((eql type +sph-filter-floatrange+)
(concatenate 'string
(%pack-float (third filter))
(%pack-float (fourth filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (last filter)))))))
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-list-signed-quads (values-list)
(with-output-to-string (packed-list)
(format packed-list "~a" (pack "N" (length values-list)))
(dolist (value values-list)
(format packed-list "~a" (pack "q>" value)))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
d8e9f9ceae91b3c0de2886d1fdd6ee749861e00c
|
No errors anymore, but not giving correct results...
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 5169781..04740e4 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -411,562 +411,554 @@
(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude nil))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
;; (assert (and (numberp min) (numberp max) (>= max min)))
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) +sph-filter-range+)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude nil))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude nil))
(assert (and (numberp min) (numberp max) (>= max min)))
(push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) type)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
;; (defgeneric (client )
;; (:documentation
;; "@arg[client]{a @class{sphinx-client}}
;; @arg[]{}
;; @return{}
;; @short{.}
;; .
;; "))
;; (defmethod ((client sphinx-client) )
;; )
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response (length (reqs client)))))))))
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batched queries enable @code{searchd} to perform internal optimizations
where possible, and they reduce network connection overhead in all cases.
For instance, running exactly the same query with different group-by
settings lets @code{searchd} perform the expensive full-text search and
ranking operations only once, while computing multiple group-by results
from that single pass.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (last (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "received version number: ~a~%" v)
(%socket client)))))
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "received bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 to n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(declare (ignore k))
(concatenate 'string
(pack "N/a*" (gethash 'attr entry))
(pack "NN" (gethash 'type entry) (hash-table-count (gethash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (gethash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (gethash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(gethash 'values entry))))
overrides)))
(defun %pack-filters (filters)
- (map 'string #'(lambda (filter)
- (let ((type (first filter))
- (attr (second filter)))
- (concatenate 'string
- (pack "N/a*" attr)
- (pack "N" type)
- (cond ((eql type +sph-filter-values+)
- (%pack-list-signed-quads (third filter)))
- ((eql type +sph-filter-range+)
- (concatenate 'string
- (pack "q>" (third filter))
- (pack "q>" (fourth filter))))
- ((eql type +sph-filter-floatrange+)
- (concatenate 'string
- (%pack-float (third filter))
- (%pack-float (fourth filter))))
- (t
- (error "Unhandled filter type ~S" type)))
- (pack "N" (last filter)))))
- filters))
-
-
-;; (when (hash-table-p filter)
-;; (concatenate 'string
-;; (pack "N/a*" (gethash 'attr filter))
-;; (let ((type (gethash 'type filter)))
-;; (concatenate 'string
-;; (pack "N" type)
+ (with-output-to-string (packed-filters)
+ (dolist (filter filters)
+ (let ((type (first filter))
+ (attr (second filter)))
+ (concatenate 'string
+ (pack "N/a*" attr)
+ (pack "N" type)
+ (cond ((eql type +sph-filter-values+)
+ (%pack-list-signed-quads (third filter)))
+ ((eql type +sph-filter-range+)
+ (concatenate 'string
+ (pack "q>" (third filter))
+ (pack "q>" (fourth filter))))
+ ((eql type +sph-filter-floatrange+)
+ (concatenate 'string
+ (%pack-float (third filter))
+ (%pack-float (fourth filter))))
+ (t
+ (error "Unhandled filter type ~S" type)))
+ (pack "N" (last filter)))))))
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-list-signed-quads (values-list)
- (concatenate 'string
- (pack "N" (length values-list))
- (map 'string #'(lambda (value)
- (pack "q>" value)) values-list)))
+ (with-output-to-string (packed-list)
+ (format packed-list "~a" (pack "N" (length values-list)))
+ (dolist (value values-list)
+ (format packed-list "~a" (pack "q>" value)))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
063765235deb665c1b941d037225584f1d7be03f
|
Filters using simple list structure
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index a968a30..5169781 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,969 +1,972 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
+(declaim (optimize (debug 3) (safety 3) (speed 0) (space 0)))
+
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how many records to skip from the start of the result set (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how many records to return from the result set, starting at offset (default is 20)")
(match-mode
:accessor match-mode
:initarg :match-mode
:initform +sph-match-all+
:documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (default is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of lists")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; a fixed-length list of the form '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(rank-mode
:accessor rank-mode
:initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{query}
@see{add-query}
@see{run-queries}
@see{last-error}
@see{last-warning}
@see{set-id-range}
@see{set-filter}
@see{set-filter-range}
@see{set-filter-float-range}
@see{max-query-time}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric set-id-range (client min max)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[min]{minimum id to start searching from}
@arg[max]{maximum id to stop searching at}
@return{client}
@short{Set the id-range to search within (inclusive).}
Set the range of ids within which to search. The range is inclusive, so
with [0, 450] documents with ids 0 and 450 will both be found.
"))
(defmethod set-id-range ((client sphinx-client) min max)
(assert (and (numberp min) (numberp max)
(>= max min)))
(setf (min-id client) min)
(setf (max-id client) max))
(defgeneric set-filter (client attribute values-list &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[values-list]{the numeric values to filter on}
@arg[exclude]{if set, exclude the given values}
@return{client}
@short{Sets the results to be filtered on the given attribute.}
@begin{pre}
(set-filter client \"filter_attr\" '(0 2 4 34 55 77))
(set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
@end{pre}
Sets the results to be filtered on the given attribute. Only
results which have attributes matching the given (numeric)
values will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that match the filter.
"))
(defmethod set-filter ((client sphinx-client) attr values &key (exclude nil))
(assert (and (listp values) (> (length values) 0)))
(dolist (item values)
(assert (numberp item)))
- (push '(+sph-filter-values+ attr values (cond (exclude 1) (t 0))) (filters client))
+ (push `(,+sph-filter-values+ ,attr ,values ,(cond (exclude 1) (t 0))) (filters client))
client)
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) +sph-filter-values+)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'values filter) values)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
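;; Layout note: each call pushes a plain list onto (filters client)
;; holding the evaluated filter type, the attribute name, the values
;; list and the exclude flag (1 or 0), in that order; %pack-filters
;; serializes these lists for the wire.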
(defgeneric set-filter-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-range client \"filter_attr\" 45 99)
(set-filter-range client \"other_attr\" 2 8 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude nil))
(%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
;; (assert (and (numberp min) (numberp max) (>= max min)))
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) +sph-filter-range+)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
(defgeneric set-filter-float-range (client attribute min max &key exclude)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[attribute]{the attribute to filter on}
@arg[min]{start of the range to filter on}
@arg[max]{end of the range to filter on}
@arg[exclude]{if set, exclude the given range}
@return{client}
@short{Sets the results to be filtered on the given range.}
@begin{pre}
(set-filter-float-range client \"filter_attr\" 45.231 99)
(set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
@end{pre}
Sets the results to be filtered on a range of values for the given
attribute. Only those records where the attribute value is between
@code{min} and @code{max} (including @code{min} and @code{max})
will be returned.
This may be called multiple times with different attributes to
select on multiple attributes.
If @code{:exclude} is set, excludes results that fall within the
given range.
"))
(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude nil))
(%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude nil))
(assert (and (numberp min) (numberp max) (>= max min)))
- (push '(type attr min max (cond (exclude 1) (t 0))) (filters client))
+ (push `(,type ,attr ,min ,max ,(cond (exclude 1) (t 0))) (filters client))
client)
;; (let ((filter (make-hash-table)))
;; (setf (gethash 'type filter) type)
;; (setf (gethash 'attr filter) attr)
;; (setf (gethash 'min filter) min)
;; (setf (gethash 'max filter) max)
;; (setf (gethash 'exclude filter) (cond (exclude 1)
;; (t 0)))
;; (push filter (filters client))
;; client))
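;; Range filters use the same list layout but carry min and max in
;; place of a values list: (type attr min max exclude). Sketch with
;; assumed attribute names and bounds:
;; (set-filter-range client "price" 10 100)
;; (set-filter-float-range client "lat" 52.0 53.5 :exclude t)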
;; (defgeneric (client )
;; (:documentation
;; "@arg[client]{a @class{sphinx-client}}
;; @arg[]{}
;; @return{}
;; @short{.}
;; .
;; "))
;; (defmethod ((client sphinx-client) )
;; )
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
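;; Round-trip sketch; the index name "articles" is an assumption:
;; (let ((result (query client "hello world" :index "articles")))
;; (when result
;; (format t "~a of ~a matches~%"
;; (gethash 'total result) (gethash 'total-found result))))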
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client)))
;; save the request count now; (reqs client) is cleared below,
;; before the response is parsed
(n-requests (length (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response n-requests)))))))
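;; Framing note: "nnN/a*" writes a 16-bit command, a 16-bit protocol
;; version and a 32-bit length-prefixed payload, the counterpart of
;; the "n2N" reply header that %get-response reads back.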
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations
where possible, and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will let @code{searchd} perform the expensive
full-text search and ranking operations only once, while computing
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
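;; Batch sketch: group-by settings are packed into the request at
;; add-query time, so the same query can be queued under two groupings
;; (the attribute names are assumptions):
;; (setf (group-by client) "site_id")
;; (add-query client "lisp")
;; (setf (group-by client) "author_id")
;; (add-query client "lisp")
;; (run-queries client) ; => a list of two result hashes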
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
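;; Header note: every searchd reply opens with 8 bytes unpacked as
;; "n2N" -- a 16-bit status, a 16-bit version and a 32-bit body
;; length -- and the read loop above keeps going until that many body
;; bytes have arrived.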
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
(nreverse results)))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
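;; Record layout note: each match starts with a document id (8 bytes
;; when the id-64 flag is set, otherwise 4), then a 4-byte weight,
;; then one value per entry in attribute-names: 8 bytes for bigints,
;; 4 bytes for floats and plain ints, and for multi-value attributes
;; a 4-byte count followed by that many 4-byte values.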
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
;; maphash always returns nil, so the packed pieces are collected
;; explicitly and concatenated at the end
(when (hash-table-p overrides)
(let ((packed ()))
(maphash #'(lambda (k entry)
(declare (ignore k))
(push (pack "N/a*" (gethash 'attr entry)) packed)
(push (pack "NN" (gethash 'type entry) (hash-table-count (gethash 'values entry))) packed)
(maphash #'(lambda (id v)
(assert (and (numberp id) (numberp v)))
(push (concatenate 'string
(pack "Q>" id)
(cond ((eql (gethash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (gethash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v))))
packed))
(gethash 'values entry)))
overrides)
(apply #'concatenate 'string (nreverse packed)))))
(defun %pack-filters (filters)
;; each filter is a plain list: (type attr values exclude) for value
;; filters, (type attr min max exclude) for range filters
(apply #'concatenate 'string
(mapcar #'(lambda (filter)
(let ((type (first filter))
(attr (second filter)))
(concatenate 'string
(pack "N/a*" attr)
(pack "N" type)
- (cond
+ (cond ((eql type +sph-filter-values+)
+ (%pack-list-signed-quads (third filter)))
+ ((eql type +sph-filter-range+)
+ (concatenate 'string
+ (pack "q>" (third filter))
+ (pack "q>" (fourth filter))))
+ ((eql type +sph-filter-floatrange+)
+ (concatenate 'string
+ (%pack-float (third filter))
+ (%pack-float (fourth filter))))
+ (t
+ (error "Unhandled filter type ~S" type)))
+ (pack "N" (last filter)))))
+ filters))
- (when (hash-table-p filter)
- (concatenate 'string
- (pack "N/a*" (gethash 'attr filter))
- (let ((type (gethash 'type filter)))
- (concatenate 'string
- (pack "N" type)
- (cond ((eql type +sph-filter-values+)
- (%pack-list-signed-quads (gethash 'values filter)))
- ((eql type +sph-filter-range+)
- (concatenate 'string (pack "q>" (gethash 'min filter))
- (pack "q>" (gethash 'max filter))))
- ((eql type +sph-filter-floatrange+)
- (concatenate 'string (%pack-float (gethash 'min filter))
- (%pack-float (gethash 'max filter))))
- (t
- (error "Unhandled filter type ~S" type)))
- (pack "N" (gethash 'exclude filter)))))))
- filters))
+;; (when (hash-table-p filter)
+;; (concatenate 'string
+;; (pack "N/a*" (gethash 'attr filter))
+;; (let ((type (gethash 'type filter)))
+;; (concatenate 'string
+;; (pack "N" type)
(defun %pack-hash (hash-table)
;; collect the packed key/value pairs explicitly; maphash itself
;; always returns nil
(let ((packed ()))
(maphash #'(lambda (k v)
(push (pack "N/a*N" k v) packed))
hash-table)
(apply #'concatenate 'string
(pack "N" (hash-table-count hash-table))
(nreverse packed))))
(defun %pack-list-signed-quads (values-list)
(apply #'concatenate 'string
(pack "N" (length values-list))
(mapcar #'(lambda (value)
(pack "q>" value))
values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
d5f9ccb0fefa89bd54f63717231e0d8df4a27e39
|
Fleshing out the interface
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 1677173..d72cf17 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,794 +1,945 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}.}
Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}.}
Get the last warning message sent by searchd.
"))
(defgeneric max-query-time (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Get the max query time.
"))
(defgeneric (setf max-query-time) (max-time client)
(:documentation
"@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
@arg[client]{a @class{sphinx-client}}
@return{a number; the max query time in milliseconds.}
Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
- (mode
- :accessor mode
- :initarg :mode
+ (match-mode
+ :accessor match-mode
+ :initarg :match-mode
:initform +sph-match-all+
- :documentation "query matching mode (default is +sph-match-all+)")
+ :documentation "query matching match-mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
- (ranker
- :accessor ranker
- :initarg :ranker
+ (rank-mode
+ :accessor rank-mode
+ :initarg :rank-mode
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{last-warning}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set the limit of matches to return. Defaults to offset 0 and a maximum of 1000 matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
+(defgeneric set-id-range (client min max)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[min]{minimum id to start searching from}
+ @arg[max]{maximum id to stop searching at}
+ @return{client}
+ @short{Set the id-range to search within (inclusive).}
+
+ Set the range of ids within which to search. The range is inclusive, so
+ with a range of [0, 450], documents with id 0 and id 450 will both be found.
+"))
+
+(defmethod set-id-range ((client sphinx-client) min max)
+ (assert (and (numberp min) (numberp max)
+ (>= max min)))
+ (setf (min-id client) min)
+ (setf (max-id client) max))
+
+
+(defgeneric set-filter (client attribute values-list &key exclude)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[attribute]{the attribute to filter on}
+ @arg[values-list]{the numeric values to filter on}
+ @arg[exclude]{if set, exclude the given values}
+ @return{client}
+ @short{Sets the results to be filtered on the given attribute.}
+
+ @begin{pre}
+ (set-filter client \"filter_attr\" '(0 2 4 34 55 77))
+ (set-filter client \"other_attr\" '(8 4 2 11) :exclude t)
+ @end{pre}
+
+ Sets the results to be filtered on the given attribute. Only
+ results which have attributes matching the given (numeric)
+ values will be returned.
+
+ This may be called multiple times with different attributes to
+ select on multiple attributes.
+
+ If @code{:exclude} is set, excludes results that match the filter.
+"))
+
+(defmethod set-filter ((client sphinx-client) attr values &key (exclude nil))
+ (assert (and (listp values) (> (length values) 0)))
+ (dolist (item values)
+ (assert (numberp item)))
+ (let ((filter (make-hash-table)))
+ (setf (gethash 'type filter) +sph-filter-values+)
+ (setf (gethash 'attr filter) attr)
+ (setf (gethash 'values filter) values)
+ (setf (gethash 'exclude filter) (cond (exclude 1)
+ (t 0)))
+ (push filter (filters client))
+ client))
+
+(defgeneric set-filter-range (client attribute min max &key exclude)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[attribute]{the attribute to filter on}
+ @arg[min]{start of the range to filter on}
+ @arg[max]{end of the range to filter on}
+ @arg[exclude]{if set, exclude the given range}
+ @return{client}
+ @short{Sets the results to be filtered on the given range.}
+
+ @begin{pre}
+ (set-filter-range client \"filter_attr\" 45 99)
+ (set-filter-range client \"other_attr\" 2 8 :exclude t)
+ @end{pre}
+
+ Sets the results to be filtered on a range of values for the given
+ attribute. Only those records where the attribute value is between
+ @code{min} and @code{max} (including @code{min} and @code{max})
+ will be returned.
+
+ This may be called multiple times with different attributes to
+ select on multiple attributes.
+
+ If @code{:exclude} is set, excludes results that fall within the
+ given range.
+"))
+
+(defmethod set-filter-range ((client sphinx-client) attr min max &key (exclude nil))
+ (%set-filter-range client +sph-filter-range+ attr min max :exclude exclude))
+
+;; (assert (and (numberp min) (numberp max) (>= max min)))
+;; (let ((filter (make-hash-table)))
+;; (setf (gethash 'type filter) +sph-filter-range+)
+;; (setf (gethash 'attr filter) attr)
+;; (setf (gethash 'min filter) min)
+;; (setf (gethash 'max filter) max)
+;; (setf (gethash 'exclude filter) (cond (exclude 1)
+;; (t 0)))
+;; (push filter (filters client))
+;; client))
+
+(defgeneric set-filter-float-range (client attribute min max &key exclude)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[attribute]{the attribute to filter on}
+ @arg[min]{start of the range to filter on}
+ @arg[max]{end of the range to filter on}
+ @arg[exclude]{if set, exclude the given range}
+ @return{client}
+ @short{Sets the results to be filtered on the given range.}
+
+ @begin{pre}
+ (set-filter-float-range client \"filter_attr\" 45.231 99)
+ (set-filter-float-range client \"other_attr\" 1.32 55.0031 :exclude t)
+ @end{pre}
+
+ Sets the results to be filtered on a range of values for the given
+ attribute. Only those records where the attribute value is between
+ @code{min} and @code{max} (including @code{min} and @code{max})
+ will be returned.
+
+ This may be called multiple times with different attributes to
+ select on multiple attributes.
+
+ If @code{:exclude} is set, excludes results that fall within the
+ given range.
+"))
+
+(defmethod set-filter-float-range ((client sphinx-client) attr min max &key (exclude nil))
+ (%set-filter-range client +sph-filter-floatrange+ attr min max :exclude exclude))
+
+(defmethod %set-filter-range ((client sphinx-client) type attr min max &key (exclude nil))
+ (assert (and (numberp min) (numberp max) (>= max min)))
+ (let ((filter (make-hash-table)))
+ (setf (gethash 'type filter) type)
+ (setf (gethash 'attr filter) attr)
+ (setf (gethash 'min filter) min)
+ (setf (gethash 'max filter) max)
+ (setf (gethash 'exclude filter) (cond (exclude 1)
+ (t 0)))
+ (push filter (filters client))
+ client))
+
+;; (defgeneric (client )
+;; (:documentation
+;; "@arg[client]{a @class{sphinx-client}}
+;; @arg[]{}
+;; @return{}
+;; @short{.}
+
+;; .
+;; "))
+
+;; (defmethod ((client sphinx-client) )
+;; )
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(when (%connect client)
(%send client data)
(let ((response (%get-response client :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response (length (reqs client)))))))))
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations
where possible, and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will let @code{searchd} perform the expensive
full-text search and ranking operations only once, while computing
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
- (pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
+ (pack "NNNNN" (offset client) (limit client) (match-mode client) (rank-mode client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 to n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(declare (ignore k))
(concatenate 'string
(pack "N/a*" (gethash 'attr entry))
(pack "NN" (gethash 'type entry) (hash-table-count (gethash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (gethash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (gethash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(gethash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(map 'string #'(lambda (filter)
(when (hash-table-p filter)
(concatenate 'string
(pack "N/a*" (gethash 'attr filter))
(let ((type (gethash 'type filter)))
(concatenate 'string
(pack "N" type)
(cond ((eql type +sph-filter-values+)
- (%pack-array-signed-quads (gethash 'values filter)))
+ (%pack-list-signed-quads (gethash 'values filter)))
((eql type +sph-filter-range+)
(concatenate 'string (pack "q>" (gethash 'min filter))
(pack "q>" (gethash 'max filter))))
((eql type +sph-filter-floatrange+)
(concatenate 'string (%pack-float (gethash 'min filter))
(%pack-float (gethash 'max filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (gethash 'exclude filter)))))))
filters))
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
-(defun %pack-array-signed-quads (values-list)
+(defun %pack-list-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map 'string #'(lambda (value)
(pack "q>" value)) values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
diff --git a/package.lisp b/package.lisp
index 2e00e7a..239c76a 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,96 +1,97 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
#:query
#:add-query
#:run-queries
#:last-error
#:last-warning
+ #:set-id-range
#:max-query-time)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
@aboutfun{query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
@begin[Acknowledgements]{section}
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
@a[http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/]{here}), which
itself says:
@begin{pre}
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
@end{pre}
Also used was the Python API supplied with the source code
download of Sphinx Search v0.9.9-rc2, in the @code{api/} directory.
@b{Documentation}
This documentation was generated by @a[http://www.lichteblau.com/atdoc/doc/]{atdoc},
the documentation generation system written by David Lichteblau.
@end{section}
"))
|
thijs/cl-sphinx-search
|
45efde1c7d5e20f22c6b586f341d543916762bc6
|
Old documentation changes
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 15e8fc5..1677173 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,540 +1,559 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
- @return{a string; the last error message returned from the @code{searchd}}
+ @return{a string; the last error message returned from the @code{searchd}.}
- Get the last error message sent by searchd
+ Get the last error message sent by searchd.
"))
(defgeneric last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
- @return{a string; the last warning message returned from the @code{searchd}}
+ @return{a string; the last warning message returned from the @code{searchd}.}
- Get the last warning message sent by searchd
+ Get the last warning message sent by searchd.
+"))
+
+
+(defgeneric max-query-time (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a number; the max query time in milliseconds.}
+
+ Get the max query time.
+"))
+
+
+(defgeneric (setf max-query-time) (max-time client)
+ (:documentation
+ "@arg[max-time]{the max query time in milliseconds Sphinx is allowed to take}
+ @arg[client]{a @class{sphinx-client}}
+ @return{a number; the max query time in milliseconds.}
+
+ Set the max query time to max-time in milliseconds.
"))
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{last-warning}
"))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
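         ;; Strip an optional "unix://" prefix; the second slash is kept, so
         ;; "unix://var/run/searchd.sock" yields the path "/var/run/searchd.sock".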
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set the limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
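;; Usage sketch (not part of the library): consuming the result hash returned
;; by QUERY via the documented keys; assumes a searchd reachable at the
;; default host/port.
#|
(let* ((sph (make-instance 'sphinx-client))
       (result (query sph "test")))
  (if result
      (format t "~a of ~a matches in ~as~%"
              (gethash 'total result)
              (gethash 'total-found result)
              (gethash 'time result))
      (format t "query failed: ~a~%" (last-error sph))))
|#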
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
  (assert (> (length (reqs client)) 0))
  ;; Remember the number of queued requests before the queue is cleared below;
  ;; %parse-response needs it to know how many result sets to read.
  (let* ((n-requests (length (reqs client)))
         (requests (pack "Na*" n-requests (reqs client))))
    #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
    ;; Frame the batch: 16-bit command, 16-bit command version, then the
    ;; 32-bit length-prefixed request payload ("nnN/a*").
    (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
      (setf (reqs client) ())
      (when (%connect client)
        (%send client data)
        (let ((response (%get-response client :client-version +ver-command-search+)))
          #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
          (when response
            (setf *response-length* (length response))
            (%parse-response response n-requests)))))))
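;; Usage sketch (not part of the library): each element of the list returned
;; by RUN-QUERIES is a result hash with the keys documented above; SPH is a
;; sphinx-client with queries already queued via ADD-QUERY.
#|
(dolist (result (run-queries sph))
  (format t "status ~a: ~a of ~a matches~%"
          (gethash 'status result)
          (gethash 'total result)
          (gethash 'total-found result)))
|#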
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations
where possible, and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
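;; Usage sketch (not part of the library): the same query batched under two
;; group-by settings; each ADD-QUERY snapshots the client's current settings,
;; and the results come back in queue order.  The attribute names here are
;; hypothetical.
#|
(let ((sph (make-instance 'sphinx-client)))
  (setf (group-by sph) "group_id")
  (add-query sph "test")
  (setf (group-by sph) "date_added")
  (add-query sph "test")
  (run-queries sph))   ; => a list of two result hashes
|#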
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
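  ;; Protocol handshake: searchd first sends its protocol version as a
  ;; network-order 32-bit integer; the client replies with its own version (1).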
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
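  ;; Every searchd reply starts with an 8-byte header: status and version as
  ;; 16-bit shorts followed by the body length as a 32-bit integer ("n2N").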
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defun %parse-response (response n-requests)
(let ((p 0)
diff --git a/doc/.atdoc.xml b/doc/.atdoc.xml
index 8769b0c..8b946e3 100644
--- a/doc/.atdoc.xml
+++ b/doc/.atdoc.xml
@@ -1,62 +1,63 @@
<?xml version="1.0" encoding="UTF-8"?>
<documentation include-internal-symbols-p="yes" index-title="Sphinx Search API reference" css="index.css" heading="Common Lisp Sphinx Search API"><package name="cl-sphinx-search" id="cl-sphinx-search"><documentation-string>This package provides an interface to the search daemon (<em>searchd</em>) for <a a="http://www.sphinxsearch.com/">Sphinx</a>.<break/> <section section="About Sphinx"><break/>
From the site:<break/> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<break/>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<break/>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> </section><break/> <section section="Synopsis"> <pre><break/>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<break/> </pre> </section><break/> <section section="One class">
There is just one class:<break/> <aboutclass>sphinx-client</aboutclass> </section><break/> <section section="Methods">
Setting options/parameters:<break/> <aboutfun>set-server</aboutfun> <aboutfun>set-limits</aboutfun><break/>
Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a a="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<break/> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><break/>
Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/> This documentation was generated by <a a="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
the documentation generation system written by David Lichteblau.<break/> </section><break/></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
- Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code></return><break/>
- Get the last warning message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
+ Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code>.</return><break/>
+ Get the last warning message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
(set-server client :host host :port port)
(set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
- searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
+ searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a number; the max query time in milliseconds.</return><break/>
+ Get the max query time.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><break/>
Add a query to the queue of batched queries.<break/> Batch queries enable <code>searchd</code> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<break/>
For instance, running exactly the same query with different group-by settings will enable <code>searchd</code> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<break/>
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
(query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
- It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code></return><break/>
- Get the last error message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
+ It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code>.</return><break/>
+ Get the last error message sent by searchd.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
(add-query client "test")
(add-query client "word")
(run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
It returns a list of hashes containing the result of each query. Each hash
- has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition></external-symbols><internal-symbols><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
+ has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition></external-symbols><internal-symbols><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><break/>
The interface to the search daemon goes through this class.<break/>
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__query">query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
- Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__anchor" name="anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__mode" name="mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-array-signed-quads" name="%pack-array-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition 
id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__ranker" name="ranker" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__anchor" name="anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__sph" name="sph" package="cl-sphinx-search"/><variable-definition 
id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__mode" name="mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-array-signed-quads" name="%pack-array-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" 
name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__ranker" name="ranker" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
diff --git a/doc/index.html b/doc/index.html
index 6691bfe..605f59c 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -1,8 +1,8 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Sphinx Search API reference</title><link rel="stylesheet" type="text/css" href="index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded">
Index of packages:
</div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><h2 class="page-title"><a href="pages/cl-sphinx-search.html">
Package
- cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search.html b/doc/pages/cl-sphinx-search.html
index 115db0f..f8a40e6 100644
--- a/doc/pages/cl-sphinx-search.html
+++ b/doc/pages/cl-sphinx-search.html
@@ -1,33 +1,33 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package cl-sphinx-search</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
Package
cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
From the site:<br><br> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<br><br>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<br><br>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
There is just one class:<br><br> <div class="def"><a href="cl-sphinx-search__class__sphinx-client.html">
Class
sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="cl-sphinx-search__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a href="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<br><br> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><br><br>
Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <tt>api/</tt> directory.<br><br> <b>Documentation</b><br><br> This documentation was generated by <a href="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
- the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__max-query-time.html"><tt>max-query-time</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__last-error.html b/doc/pages/cl-sphinx-search__fun__last-error.html
index 270337d..624cdcf 100644
--- a/doc/pages/cl-sphinx-search__fun__last-error.html
+++ b/doc/pages/cl-sphinx-search__fun__last-error.html
@@ -1,11 +1,11 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function last-error</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- last-error</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-error</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last error message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
- Get the last error message sent by searchd</div></div></td><td valign="top" width="5%">
+ last-error</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-error</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last error message returned from the <tt>searchd</tt>.</div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the last error message sent by searchd.</div></div></td><td valign="top" width="5%">
&nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__last-warning.html b/doc/pages/cl-sphinx-search__fun__last-warning.html
index db58358..e9bcf53 100644
--- a/doc/pages/cl-sphinx-search__fun__last-warning.html
+++ b/doc/pages/cl-sphinx-search__fun__last-warning.html
@@ -1,11 +1,11 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function last-warning</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- last-warning</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-warning</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last warning message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
- Get the last warning message sent by searchd</div></div></td><td valign="top" width="5%">
+ last-warning</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-warning</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last warning message returned from the <tt>searchd</tt>.</div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the last warning message sent by searchd.</div></div></td><td valign="top" width="5%">
&nbsp;
</td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__max-query-time.html b/doc/pages/cl-sphinx-search__fun__max-query-time.html
index 1921818..fd0d068 100644
--- a/doc/pages/cl-sphinx-search__fun__max-query-time.html
+++ b/doc/pages/cl-sphinx-search__fun__max-query-time.html
@@ -1,10 +1,11 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function max-query-time</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- max-query-time</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>max-query-time</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
+ max-query-time</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>max-query-time</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a number; the max query time in milliseconds.</div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the max query time.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__variable__sph.html b/doc/pages/cl-sphinx-search__variable__sph.html
new file mode 100644
index 0000000..5abe322
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__variable__sph.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable sph</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Variable
+ sph</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/package.lisp b/package.lisp
index af4bac9..2e00e7a 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,95 +1,96 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
#:query
#:add-query
#:run-queries
#:last-error
- #:last-warning)
+ #:last-warning
+ #:max-query-time)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
@aboutfun{query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
@begin[Acknowledgements]{section}
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
@a[http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/]{here}), which
itself says:
@begin{pre}
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
@end{pre}
Also used was the api for python which was supplied with the source code
download for Sphinx Search v0.9.9-rc2, in the @code{api/} directory.
@b{Documentation}
This documentation was generated by @a[http://www.lichteblau.com/atdoc/doc/]{atdoc},
the documentation generation system written by David Lichteblau.
@end{section}
"))
|
thijs/cl-sphinx-search
|
5dca6618211fd691c5aa4514328cb9aea46d7b19
|
Moved generics to top
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 1bde9e2..15e8fc5 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,734 +1,734 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
+(defvar *response-length* ())
+
+
+(defmacro adv-p (n)
+ `(setf p (+ p ,n)))
+
+
+(defgeneric last-error (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a string; the last error message returned from the @code{searchd}}
+
+ Get the last error message sent by searchd
+"))
+
+
+(defgeneric last-warning (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a string; the last warning message returned from the @code{searchd}}
+
+ Get the last warning message sent by searchd
+"))
+
+
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
@see{last-warning}
"))
-(defvar *response-length* ())
-
-
-(defmacro adv-p (n)
- `(setf p (+ p ,n)))
-
-
-(defgeneric last-error (client)
- (:documentation
- "@arg[client]{a @class{sphinx-client}}
- @return{a string; the last error message returned from the @code{searchd}}
-
- Get the last error message sent by searchd
-"))
-
-
-(defgeneric last-warning (client)
- (:documentation
- "@arg[client]{a @class{sphinx-client}}
- @return{a string; the last warning message returned from the @code{searchd}}
-
- Get the last warning message sent by searchd
-"))
-
-
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
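;; Usage sketch (hypothetical host and socket path, not part of the
;; library): SET-SERVER returns the client, configured for either an
;; INET or a UNIX domain socket.
#+nil
(let ((sph (make-instance 'sphinx-client)))
  (set-server sph :host "search.example.com" :port 3312)
  (set-server sph :path "/var/run/searchd/searchd.sock"))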
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
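;; A hypothetical pagination helper built on SET-LIMITS (a sketch, not
;; part of the library): zero-based page n of size k starts at offset n*k.
#+nil
(defun set-page (client page per-page)
  (set-limits client :offset (* page per-page) :limit per-page))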
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a list of hash-tables, one per matching document}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
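;; Sketch of consuming the hash returned by QUERY, using the keys
;; documented above (assumes a searchd reachable at the default
;; localhost:3312).
#+nil
(let* ((sph (make-instance 'sphinx-client))
       (result (query sph "test")))
  (when result
    (format t "~a of ~a matches in ~a sec~%"
            (gethash 'total result)
            (gethash 'total-found result)
            (gethash 'time result))
    (dolist (match (gethash 'matches result))
      (format t "doc ~a, weight ~a~%"
              (gethash "doc" match)
              (gethash "weight" match)))))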
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a list of hash-tables, one per matching document}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
  (assert (> (length (reqs client)) 0))
  (let* ((n-requests (length (reqs client)))
         ;; "a*" packs a single string, so join the queued request strings.
         (requests (pack "Na*" n-requests (format nil "~{~a~}" (reqs client)))))
    #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
    (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
      (setf (reqs client) ())
      (when (%connect client)
        (%send client data)
        (let ((response (%get-response client :client-version +ver-command-search+)))
          #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
          (when response
            (setf *response-length* (length response))
            ;; Use the count captured above: the queue was cleared a few
            ;; lines up, so (length (reqs client)) would always be 0 here.
            (%parse-response response n-requests)))))))
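;; Batching sketch: each ADD-QUERY returns the new queue length, and
;; RUN-QUERIES returns one result hash per queued query, in query order
;; (assumes a searchd reachable at the default localhost:3312).
#+nil
(let ((sph (make-instance 'sphinx-client)))
  (add-query sph "test")
  (add-query sph "word")
  (dolist (result (run-queries sph))
    (format t "status ~a, found ~a~%"
            (gethash 'status result)
            (gethash 'total-found result))))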
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
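;; Notes on the cl-pack templates used above (a sketch of the wire
;; format, mirroring how this file already uses them): "N" packs a
;; 32-bit big-endian unsigned integer, "N/a*" a 32-bit length prefix
;; followed by that many bytes, and "Q>"/"q>" 64-bit big-endian
;; unsigned/signed integers.
#+nil
(pack "N/a*" "test")   ; => a 4-byte length (4) followed by "test"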
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
            (if len
                (setf (last-error client)
                      (format nil "failed to read searchd response (status=~a, ver=~a, len=~a, read=~a)"
                              status version len done))
                (setf (last-error client) "received zero-sized searchd response"))
            '())
           ((eql status +searchd-warning+)
            ;; The warning text is length-prefixed: a 4-byte length, then
            ;; the text itself; the real payload follows it.
            (let ((warn-length (unpack "N" (subseq response 0 4))))
              (setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
              (subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
           ((not (eql status +searchd-ok+))
            (setf (last-error client)
                  (format nil "unknown status code: ~a" status))
            '())
           (t
            (when (< version client-version)
              (setf (last-warning client)
                    (format nil "searchd v.~a is older than client's v.~a, some options might not work"
                            version client-version)))
response))))))
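;; Round-trip sketch of the 8-byte reply header read above: "n2N" is
;; two 16-bit big-endian words (status, version) followed by a 32-bit
;; big-endian payload length.
#+nil
(multiple-value-bind (status version len)
    (unpack "n2N" (pack "n2N" +searchd-ok+ +ver-command-search+ 0))
  (list status version len))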
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
(nreverse results)))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
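;; Self-contained sketch of the field-list layout parsed above: a
;; 32-bit count, then each field name as a 32-bit length followed by
;; that many bytes.
#+nil
(let* ((payload (concatenate 'string
                             (pack "N" 2)
                             (pack "N/a*" "title")
                             (pack "N/a*" "body")))
       (*response-length* (length payload)))
  (%get-fields payload 0))   ; => ("title" "body"), 21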
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
  (when (hash-table-p overrides)
    ;; MAPHASH always returns nil, so collect the packed bytes explicitly.
    (with-output-to-string (s)
      (maphash #'(lambda (k entry)
                   (declare (ignore k))
                   (write-string (pack "N/a*" (gethash 'attr entry)) s)
                   (write-string (pack "NN" (gethash 'type entry)
                                       (hash-table-count (gethash 'values entry)))
                                 s)
                   (maphash #'(lambda (id v)
                                (assert (and (numberp id) (numberp v)))
                                (write-string (pack "Q>" id) s)
                                (write-string
                                 (cond ((eql (gethash 'type entry) +sph-attr-float+)
                                        (%pack-float v))
                                       ((eql (gethash 'type entry) +sph-attr-bigint+)
                                        (pack "q>" v))
                                       (t
                                        (pack "N" v)))
                                 s))
                            (gethash 'values entry)))
               overrides))))
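;; Hypothetical construction sketch of the overrides table read above:
;; each entry is a hash-table with 'attr, 'type and 'values slots, the
;; last mapping document ids to replacement values ("group_id" is an
;; assumed attribute name).
#+nil
(let ((entry (make-hash-table))
      (vals (make-hash-table))
      (overrides (make-hash-table)))
  (setf (gethash 123 vals) 42)
  (setf (gethash 'attr entry) "group_id"
        (gethash 'type entry) +sph-attr-integer+
        (gethash 'values entry) vals
        (gethash "group_id" overrides) entry)
  (%pack-overrides overrides))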
(defun %pack-filters (filters)
|
thijs/cl-sphinx-search
|
3e89e9fd78b2d7cd8b544132d9f7028145295e31
|
Small wording change
|
diff --git a/doc/.atdoc.xml b/doc/.atdoc.xml
index d694e3b..8769b0c 100644
--- a/doc/.atdoc.xml
+++ b/doc/.atdoc.xml
@@ -1,63 +1,62 @@
<?xml version="1.0" encoding="UTF-8"?>
<documentation include-internal-symbols-p="yes" index-title="Sphinx Search API reference" css="index.css" heading="Common Lisp Sphinx Search API"><package name="cl-sphinx-search" id="cl-sphinx-search"><documentation-string>This package provides an interface to the search daemon (<em>searchd</em>) for <a a="http://www.sphinxsearch.com/">Sphinx</a>.<break/> <section section="About Sphinx"><break/>
From the site:<break/> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<break/>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<break/>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> </section><break/> <section section="Synopsis"> <pre><break/>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<break/> </pre> </section><break/> <section section="One class">
There is just one class:<break/> <aboutclass>sphinx-client</aboutclass> </section><break/> <section section="Methods">
Setting options/parameters:<break/> <aboutfun>set-server</aboutfun> <aboutfun>set-limits</aboutfun><break/>
Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a a="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<break/> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><break/>
- Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/>
- This documentation was generated by atdoc, the documentation generation
- system written by David Lichteblau and found <a a="http://www.lichteblau.com/atdoc/doc/">here</a>.<break/> </section><break/></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
+ Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/> This documentation was generated by <a a="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
+ the documentation generation system written by David Lichteblau.<break/> </section><break/></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code></return><break/>
Get the last warning message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
(set-server client :host host :port port)
(set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><break/>
Add a query to the queue of batched queries.<break/> Batch queries enable <code>searchd</code> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<break/>
For instance, running exactly the same query with different group-by settings will enable <code>searchd</code> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<break/>
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
(query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code></return><break/>
Get the last error message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
(add-query client "test")
(add-query client "word")
(run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
It returns a list of hashes containing the result of each query. Each hash
has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition></external-symbols><internal-symbols><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
(let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
(add-query sph "test")
(run-queries sph)) </pre><break/>
The interface to the search daemon goes through this class.<break/>
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__query">query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__anchor" name="anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__mode" name="mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-array-signed-quads" name="%pack-array-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition 
id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__ranker" name="ranker" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search.html b/doc/pages/cl-sphinx-search.html
index 8b0e461..115db0f 100644
--- a/doc/pages/cl-sphinx-search.html
+++ b/doc/pages/cl-sphinx-search.html
@@ -1,34 +1,33 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package cl-sphinx-search</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
Package
cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
From the site:<br><br> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<br><br>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<br><br>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
There is just one class:<br><br> <div class="def"><a href="cl-sphinx-search__class__sphinx-client.html">
Class
sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="cl-sphinx-search__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a href="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
itself says:<br><br> <pre>
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API. </pre><br><br>
- Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <tt>api/</tt> directory.<br><br> <b>Documentation</b><br><br>
- This documentation was generated by atdoc, the documentation generation
- system written by David Lichteblau and found <a href="http://www.lichteblau.com/atdoc/doc/">here</a>.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ Also used was the api for python which was supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <tt>api/</tt> directory.<br><br> <b>Documentation</b><br><br> This documentation was generated by <a href="http://www.lichteblau.com/atdoc/doc/">atdoc</a>,
+ the documentation generation system written by David Lichteblau.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/package.lisp b/package.lisp
index 11ddaee..af4bac9 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,95 +1,95 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
#:query
#:add-query
#:run-queries
#:last-error
#:last-warning)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
@aboutfun{query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
@begin[Acknowledgements]{section}
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
@a[http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/]{here}), which
itself says:
@begin{pre}
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
@end{pre}
Also used was the api for python which was supplied with the source code
download for Sphinx Search v0.9.9-rc2, in the @code{api/} directory.
@b{Documentation}
- This documentation was generated by atdoc, the documentation generation
- system written by David Lichteblau and found
- @a[http://www.lichteblau.com/atdoc/doc/]{here}.
+ This documentation was generated by @a[http://www.lichteblau.com/atdoc/doc/]{atdoc},
+ the documentation generation system written by David Lichteblau.
+
@end{section}
"))
|
thijs/cl-sphinx-search
|
a2bf7e21e70da75a514efc19a93701aacfd6e032
|
Use accessors for last-error and -warning; regen docs
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 4678644..1bde9e2 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,781 +1,775 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(%encoding
:accessor %encoding
:initarg :encoding
:initform :utf-8
:documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
@begin{pre}
(let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
- that can be retrieved with the @fun{get-last-error} function.
+ that can be retrieved with the @fun{last-error} function.
@see{set-server}
@see{set-limits}
- @see{get-last-warning}
+ @see{last-warning}
"))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
+(defgeneric last-error (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a string; the last error message returned from the @code{searchd}}
+
+ Get the last error message sent by searchd
+"))
+
+
+(defgeneric last-warning (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a string; the last warning message returned from the @code{searchd}}
+
+ Get the last warning message sent by searchd
+"))
+
+
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (and (>= (length path) 7)
           (string= path "unix://" :end1 7))
  (setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
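A short usage sketch of the two connection styles; host, port and socket path are illustrative values:

;; Connect over TCP, or over a local UNIX-domain socket.
(let ((sph (make-instance 'sphinx-client)))
  (set-server sph :host "127.0.0.1" :port 3312)
  ;; or:
  (set-server sph :path "/tmp/searchd.sock"))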
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
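For example, requesting the second page of twenty results while capping the server-side result set (values are illustrative):

;; Page 2 of 20-per-page results, at most 1000 matches kept by searchd.
(let ((sph (make-instance 'sphinx-client)))
  (set-limits sph :offset 20 :limit 20 :max 1000))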
-(defgeneric get-last-error (client)
- (:documentation
- "@arg[client]{a @class{sphinx-client}}
- @return{a string; the last error message returned from the @code{searchd}}
-
- Get the last error message sent by searchd
-"))
-
-(defmethod get-last-error ((client sphinx-client))
- (last-error client))
-
-
-(defgeneric get-last-warning (client)
- (:documentation
- "@arg[client]{a @class{sphinx-client}}
- @return{a string; the last warning message returned from the @code{searchd}}
-
- Get the last warning message sent by searchd
-"))
-
-(defmethod get-last-warning ((client sphinx-client))
- (last-warning client))
-
-
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
  (assert (eql (length (reqs client)) 0))
  (add-query client query :index index :comment comment)
  (let ((result (car (run-queries client))))
    (when result
      (let ((status (gethash 'status result)))
        (setf (status client) status)
        ;; record the daemon's message as a warning on +searchd-warning+,
        ;; as an error on any other non-ok status
        (cond ((eql status +searchd-warning+)
               (setf (last-warning client) (gethash 'status-message result)))
              ((not (eql status +searchd-ok+))
               (setf (last-error client) (gethash 'status-message result))))
        (when (or (eql status +searchd-ok+)
                  (eql status +searchd-warning+))
          result)))))
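A usage sketch reading the returned hash; the index name is hypothetical:

;; Run a single query and inspect the result hash.
(let* ((sph (make-instance 'sphinx-client))
       (result (query sph "test" :index "articles")))
  (if result
      (format t "~a of ~a matches in ~a sec~%"
              (gethash 'total result)
              (gethash 'total-found result)
              (gethash 'time result))
      (format t "query failed: ~a~%" (last-error sph))))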
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
  (assert (> (length (reqs client)) 0))
  ;; capture the request count up front: (reqs client) is cleared before
  ;; the response comes back, so measuring it afterwards always gives 0
  (let* ((n-requests (length (reqs client)))
         (requests (pack "Na*" n-requests (reqs client))))
    #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
    (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
      (setf (reqs client) ())
      (when (%connect client)
        (%send client data)
        (let ((response (%get-response client :client-version +ver-command-search+)))
          #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
          (when response
            (setf *response-length* (length response))
            (%parse-response response n-requests)))))))
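A batching sketch: two queued queries go out in one round-trip, and each element of the returned list is a result hash as described in the docstring:

;; Queue two queries, send them in one network round-trip, then walk
;; the per-query result hashes.
(let ((sph (make-instance 'sphinx-client)))
  (add-query sph "test")
  (add-query sph "test" :comment "second slot, same text")
  (dolist (result (run-queries sph))
    (format t "status ~a: ~a matches~%"
            (gethash 'status result)
            (gethash 'total result))))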
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
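Since add-query snapshots the client's current settings into the packed request, options changed between calls affect only later queued queries. A sketch (the attribute name "date_added" is hypothetical):

;; Each add-query call freezes the current client state into the
;; request, so per-query options can differ within one batch.
(let ((sph (make-instance 'sphinx-client)))
  (add-query sph "test")                          ; default relevance sort
  (setf (sort-mode sph) +sph-sort-attr-desc+
        (sort-by sph) "date_added")
  (add-query sph "test")                          ; same text, attribute sort
  (run-queries sph))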
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed")
())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
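The handshake %connect performs is: read the server's 32-bit protocol version, reject anything below 1, then answer with the client's own version (1). A stand-alone sketch against a raw socket, mirroring the iolib and cl-pack calls above (host and port illustrative):

;; Minimal handshake sketch: read searchd's 4-byte version, answer
;; with our own version 1, keep the socket on success.
(let* ((sock (sockets:make-socket :address-family :internet :type :stream
                                  :remote-host "localhost" :remote-port 3312))
       (raw (sockets:receive-from sock :size 4))
       (v (unpack "N" (octets-to-string
                       (coerce raw '(vector (unsigned-byte 8)))
                       :encoding :latin-1))))
  (when (>= v 1)
    (sockets:send-to sock (string-to-octets (pack "N" 1) :encoding :latin-1))
    sock))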
(defmethod %read-from ((client sphinx-client) size)
(let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
    (setf (last-error client)
          (format nil "failed to read searchd response (status=~a, ver=~a, len=~a, read=~a)"
                  status version len done))
    (setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
 ;; warn-length already includes the 4-byte length prefix, so the
 ;; message ends at warn-length and the payload starts right after it
 (let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
   (setf (last-warning client) (subseq response 4 warn-length))
   (subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
  (setf (last-warning client)
        (format nil "searchd v.~a.~a is older than client's v.~a.~a, some options might not work"
                (ash version -8) (logand version #xff)
                (ash client-version -8) (logand client-version #xff))))
response))))))
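Every searchd reply begins with the same fixed 8-byte header that %get-response reads: 16-bit status, 16-bit command version, 32-bit body length, all big-endian. A sketch of decoding just that header (the helper name is hypothetical):

;; Decode the 8-byte searchd reply header: status, command version,
;; and the length of the body that follows.
(defun decode-reply-header (raw)
  (multiple-value-bind (status version len) (unpack "n2N" (subseq raw 0 8))
    (list :status status :version version :length len)))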
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 1 to n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
(nreverse results)))
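The 'words table built above maps each matched term to a hash with 'docs and 'hits counts. A sketch of reading it back out of one result (the helper name is hypothetical):

;; Print per-word statistics from a result hash built by %parse-response.
(defun print-word-stats (result)
  (maphash #'(lambda (word info)
               (format t "~a: ~a docs, ~a hits~%"
                       word (gethash 'docs info) (gethash 'hits info)))
           (gethash 'words result)))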
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
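;; Decodes the attribute block: [N nattrs], then per attribute
;; [N name-length][name][N type].  Returns the name->type hash-table, the
;; attribute names in wire order, and the offset just past the block.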
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
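;; Decodes the field block: [N nfields], then per field [N length][name].
;; Returns the field names in wire order and the offset past the block.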
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
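;; Reads the per-request status word.  A non-OK status is followed by a
;; length-prefixed message, returned as the third value; on OK the message
;; defaults to "ok".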
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
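;; The request is assembled as a string whose character codes are the raw
;; octets, so encoding it as latin-1 maps every character to the identical
;; byte on the wire.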
(defmethod %send ((client sphinx-client) data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
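;; Packs attribute overrides: per entry, a length-prefixed attribute name,
;; the attribute type, the number of (document-id -> value) pairs, then each
;; pair as a 64-bit id and a type-dependent value.  A minimal sketch of
;; building one entry for this packer -- the attribute name "group_id" and
;; the values are made up for illustration:
;;   (let ((entry (make-hash-table))
;;         (vals (make-hash-table)))
;;     (setf (gethash 123 vals) 7)   ; override doc 123 with value 7
;;     (setf (gethash 'attr entry) "group_id")
;;     (setf (gethash 'type entry) +sph-attr-integer+)
;;     (setf (gethash 'values entry) vals)
;;     entry)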
(defun %pack-overrides (overrides)
  (when (hash-table-p overrides)
    (with-output-to-string (s)
      (maphash
       #'(lambda (k entry)
           (declare (ignore k))
           (write-string (pack "N/a*" (gethash 'attr entry)) s)
           (write-string (pack "NN" (gethash 'type entry)
                               (hash-table-count (gethash 'values entry)))
                         s)
           (maphash
            #'(lambda (id v)
                (assert (and (numberp id) (numberp v)))
                (write-string (pack "Q>" id) s)
                (write-string
                 (cond ((eql (gethash 'type entry) +sph-attr-float+)
                        (%pack-float v))
                       ((eql (gethash 'type entry) +sph-attr-bigint+)
                        (pack "q>" v))
                       (t
                        (pack "N" v)))
                 s))
            (gethash 'values entry)))
       overrides))))
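;; Packs the filter list: per filter, a length-prefixed attribute name, the
;; filter type, a type-dependent payload (a signed-quad value list, a 64-bit
;; min/max range, or a float min/max range), and an exclude flag.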
(defun %pack-filters (filters)
  (apply #'concatenate 'string
         (mapcar
          #'(lambda (filter)
              (when (hash-table-p filter)
                (concatenate 'string
                             (pack "N/a*" (gethash 'attr filter))
                             (let ((type (gethash 'type filter)))
                               (concatenate 'string
                                            (pack "N" type)
                                            (cond ((eql type +sph-filter-values+)
                                                   (%pack-array-signed-quads (gethash 'values filter)))
                                                  ((eql type +sph-filter-range+)
                                                   (concatenate 'string
                                                                (pack "q>" (gethash 'min filter))
                                                                (pack "q>" (gethash 'max filter))))
                                                  ((eql type +sph-filter-floatrange+)
                                                   (concatenate 'string
                                                                (%pack-float (gethash 'min filter))
                                                                (%pack-float (gethash 'max filter))))
                                                  (t
                                                   (error "Unhandled filter type ~S" type)))
                                            (pack "N" (gethash 'exclude filter)))))))
          filters)))
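;; Packs a hash-table as [N count] followed by each key as a
;; length-prefixed string and each value as a 32-bit integer.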
(defun %pack-hash (hash-table)
  (concatenate 'string
               (pack "N" (hash-table-count hash-table))
               (with-output-to-string (s)
                 (maphash #'(lambda (k v)
                              (write-string (pack "N/a*N" k v) s))
                          hash-table))))
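;; Packs a list of integers as [N count] followed by each element as a
;; signed big-endian 64-bit quad.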
(defun %pack-array-signed-quads (values-list)
  (concatenate 'string
               (pack "N" (length values-list))
               (apply #'concatenate 'string
                      (mapcar #'(lambda (value) (pack "q>" value))
                              values-list))))
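;; Reinterprets the IEEE-754 bits of a single-float as an unsigned 32-bit
;; integer and packs them in network byte order, e.g. (%pack-float 1.0)
;; yields the octets of #x3F800000.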
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
diff --git a/doc/.atdoc.xml b/doc/.atdoc.xml
index 2e1b494..d694e3b 100644
--- a/doc/.atdoc.xml
+++ b/doc/.atdoc.xml
@@ -1,52 +1,63 @@
<?xml version="1.0" encoding="UTF-8"?>
<documentation include-internal-symbols-p="yes" index-title="Sphinx Search API reference" css="index.css" heading="Common Lisp Sphinx Search API"><package name="cl-sphinx-search" id="cl-sphinx-search"><documentation-string>This package provides an interface to the search daemon (<em>searchd</em>) for <a a="http://www.sphinxsearch.com/">Sphinx</a>.<break/> <section section="About Sphinx"><break/>
From the site:<break/> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<break/>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<break/>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> </section><break/> <section section="Synopsis"> <pre><break/>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<break/> </pre> </section><break/> <section section="One class">
There is just one class:<break/> <aboutclass>sphinx-client</aboutclass> </section><break/> <section section="Methods">
Setting options/parameters:<break/> <aboutfun>set-server</aboutfun> <aboutfun>set-limits</aboutfun><break/>
- Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
+ Running queries:<break/> <aboutfun>query</aboutfun> <aboutfun>add-query</aboutfun> <aboutfun>run-queries</aboutfun><break/> </section><break/> <section section="Acknowledgements">
+ This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a a="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
+ itself says:<break/> <pre>
+ This module is based on Sphinx.pm (not deployed to CPAN) for
+ Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
+ based on the Sphinx PHP API. </pre><break/>
+ Also used was the Python API supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <code>api/</code> directory.<break/> <b>Documentation</b><break/>
+ This documentation was generated by atdoc, the documentation generation
+ system written by David Lichteblau and found <a a="http://www.lichteblau.com/atdoc/doc/">here</a>.<break/> </section><break/></documentation-string><external-symbols><function-definition id="cl-sphinx-search__fun__set-limits" name="set-limits" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>offset</elt><elt>limit</elt><elt>max</elt><elt>cutoff</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="offset">the offset to start returning matches from</arg> <arg arg="limit">how many matches to return starting from <code>offset</code></arg> <arg arg="max">maximum number of matches to return</arg> <arg arg="cutoff">the cutoff to stop searching at</arg> <return>client</return> <short>Set the offset, limit, cutoff and max matches to return.</short><break/> <pre>
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches) </pre><break/>
- Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__get-last-error" name="get-last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code></return><break/>
- Get the last error message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
+ Set limit of matches to return. Defaults to offset 0 and 1000 max matches.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code></return><break/>
+ Get the last warning message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__set-server" name="set-server" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>host</elt><elt>port</elt><elt>path</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="host">the host to connect to when using an INET socket</arg> <arg arg="port">the port to connect to when using an INET socket</arg> <arg arg="path">the path to the unix domain socket when not using INET</arg> <return>client</return> <short>Set the server host:port or path to connect to.</short><break/> <pre>
(set-server client :host host :port port)
(set-server client :path unix-path) </pre><break/> In the first form, sets the <code>host</code> (string) and <code>port</code> (integer)
details for the searchd server using a network (INET) socket.<break/> In the second form, where <code>unix-path</code> is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__add-query" name="add-query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>length of query queue</return> <short>Add a query to a batch request.</short><break/> <pre>
(add-query client "test")
(add-query client "word" :index "*")
(run-queries client) </pre><break/>
Add a query to the queue of batched queries.<break/> Batch queries enable <code>searchd</code> to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.<break/>
For instance, running exactly the same query with different group-by settings will enable <code>searchd</code> to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.<break/>
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.<break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__query" name="query" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>query</elt><elt>&key</elt><elt>index</elt><elt>comment</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <arg arg="query">the query to run through <code>searchd</code></arg> <arg arg="index">the index to use; defaults to "*"</arg> <arg arg="comment">a comment describing this query; default none</arg> <return>nil or a hash containing the query results</return> <short>Run a query through <code>searchd</code>.</short><break/> <pre>
(query client "test") </pre><break/> Query <code>searchd</code>. This method runs a single query through <code>searchd</code>.<break/>
- It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__get-last-warning" name="get-last-warning" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last warning message returned from the <code>searchd</code></return><break/>
- Get the last warning message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
+ It returns the results in a hash with the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__add-query">add-query</see> <see id="cl-sphinx-search__fun__run-queries">run-queries</see><break/></documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>a string; the last error message returned from the <code>searchd</code></return><break/>
+ Get the last error message sent by searchd</documentation-string></function-definition><function-definition id="cl-sphinx-search__fun__run-queries" name="run-queries" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list><documentation-string><arg arg="client">a <class id="cl-sphinx-search__class__sphinx-client">sphinx-client</class></arg> <return>nil or a list of hashes</return> <short>Run the queries added with <code>add-query</code> through <code>searchd</code>.</short><break/> <pre>
(add-query client "test")
(add-query client "word")
(run-queries client) </pre><break/> Query <code>searchd</code> with the collected queries added with <code>add-query</code>.<break/>
It returns a list of hashes containing the result of each query. Each hash
- has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition></external-symbols><internal-symbols><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__last-warning" name="last-warning" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/>
+ has the following keys: <dl> <dt dt="attributes">a hash-table containing attributes</dt> <dt dt="fields">a list of fields</dt> <dt dt="matches">a hash-table containing the matches</dt> <dt dt="status">the status returned by <code>searchd</code></dt> <dt dt="status-message">the status message returned by <code>searchd</code></dt> <dt dt="time">the time <code>searchd</code> took for the query</dt> <dt dt="total">the total matches returned</dt> <dt dt="total-found">the total number of matches found</dt> <dt dt="words">a hash-table containing the matching words with their statistics</dt> </dl><break/> <see id="cl-sphinx-search__fun__query">query</see> <see id="cl-sphinx-search__fun__add-query">add-query</see><break/></documentation-string></function-definition></external-symbols><internal-symbols><function-definition id="cl-sphinx-search__fun___pack-filters" name="%pack-filters" package="cl-sphinx-search"><lambda-list><elt>filters</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-extended+" name="+sph-match-extended+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-overrides" name="%pack-overrides" package="cl-sphinx-search"><lambda-list><elt>overrides</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___socket" name="%socket" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___port" name="%port" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__sort-mode" name="sort-mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-expr+" name="+sph-sort-expr+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__select" name="select" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-match-any+" name="+sph-match-any+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-by" name="group-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-bool+" name="+sph-attr-bool+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-matches" name="max-matches" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><class-definition id="cl-sphinx-search__class__sphinx-client" name="sphinx-client" package="cl-sphinx-search"><cpl><superclass status="EXTERNAL" name="standard-object" package="common-lisp"/><superclass status="INTERNAL" name="slot-object" package="sb-pcl"/><superclass status="EXTERNAL" name="t" package="common-lisp"/></cpl><subclasses/><documentation-string><short>The sphinx-search class.</short><break/> <pre>
+ (let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
+ (add-query sph "test")
+ (run-queries sph)) </pre><break/>
The interface to the search daemon goes through this class.<break/>
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling <fun id="cl-sphinx-search__fun__query">query</fun>, or add a number of queries using <fun id="cl-sphinx-search__fun__add-query">add-query</fun> and then calling <fun id="cl-sphinx-search__fun__run-queries">run-queries</fun>.<break/>
- Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__get-last-error">get-last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__get-last-warning">get-last-warning</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__anchor" name="anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>fp</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>socket</elt><elt>size</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response-status" name="%get-response-status" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__mode" name="mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-function" name="group-function" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-array-signed-quads" name="%pack-array-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>fp</elt><elt>client-version</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__last-error" name="last-error" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition 
id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__ranker" name="ranker" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <fun id="cl-sphinx-search__fun__last-error">last-error</fun> function.<break/> <see id="cl-sphinx-search__fun__set-server">set-server</see> <see id="cl-sphinx-search__fun__set-limits">set-limits</see> <see id="cl-sphinx-search__fun__last-warning">last-warning</see></documentation-string></class-definition><function-definition id="cl-sphinx-search__fun___host" name="%host" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-float" name="%pack-float" package="cl-sphinx-search"><lambda-list><elt>float-value</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-keywords+" name="+searchd-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-asc+" name="+sph-sort-attr-asc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-time-segments+" name="+sph-sort-time-segments+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___encoding" name="%encoding" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-week+" name="+sph-groupby-week+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-values+" name="+sph-filter-values+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-year+" name="+sph-groupby-year+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-float+" name="+sph-attr-float+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-attr-desc+" name="+sph-sort-attr-desc+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-excerpt+" name="+searchd-command-excerpt+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___pack-hash" name="%pack-hash" package="cl-sphinx-search"><lambda-list><elt>hash-table</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__group-sort" name="group-sort" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-bm25+" name="+sph-rank-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-all+" name="+sph-match-all+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-bigint+" name="+sph-attr-bigint+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__anchor" name="anchor" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-proximity-bm25+" name="+sph-rank-proximity-bm25+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-ok+" name="+searchd-ok+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-ordinal+" name="+sph-attr-ordinal+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__retry-count" name="retry-count" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__cutoff" name="cutoff" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__retry-delay" name="retry-delay" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-command-update+" name="+searchd-command-update+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-integer+" name="+sph-attr-integer+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__max-id" name="max-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___send" name="%send" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>data</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-types+" name="+sph-attr-types+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-fields" name="%get-fields" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__max-query-time" name="max-query-time" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-retry+" name="+searchd-retry+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-none+" name="+sph-attr-none+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-attr-timestamp+" name="+sph-attr-timestamp+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__min-id" name="min-id" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___parse-response" name="%parse-response" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>n-requests</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___read-from" name="%read-from" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>size</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+searchd-warning+" name="+searchd-warning+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-excerpt+" name="+ver-command-excerpt+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-boolean+" name="+sph-match-boolean+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-phrase+" name="+sph-match-phrase+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__status" name="status" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-day+" name="+sph-groupby-day+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___path" name="%path" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__field-weights" name="field-weights" 
package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__filters" name="filters" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-update+" name="+ver-command-update+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__group-distinct" name="group-distinct" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-none+" name="+sph-rank-none+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__limit" name="limit" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__mode" name="mode" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attr+" name="+sph-groupby-attr+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-month+" name="+sph-groupby-month+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-matches" name="%get-matches" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>attribute-names</elt><elt>attributes</elt><elt>start</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable___response-length_" name="*response-length*" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+ver-command-search+" name="+ver-command-search+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun___get-attributes" name="%get-attributes" package="cl-sphinx-search"><lambda-list><elt>response</elt><elt>start</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__offset" name="offset" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___pack-array-signed-quads" name="%pack-array-signed-quads" package="cl-sphinx-search"><lambda-list><elt>values-list</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__define-constant" name="define-constant" package="cl-sphinx-search"><lambda-list><elt>name</elt><elt>value</elt><elt>&optional</elt><elt>doc</elt></lambda-list></macro-definition><function-definition id="cl-sphinx-search__fun__overrides" name="overrides" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun___get-response" name="%get-response" package="cl-sphinx-search"><lambda-list><elt>client</elt><elt>&key</elt><elt>client-version</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+ver-command-keywords+" name="+ver-command-keywords+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-extended2+" name="+sph-match-extended2+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__sort-by" name="sort-by" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-filter-floatrange+" name="+sph-filter-floatrange+" package="cl-sphinx-search"/><function-definition 
id="cl-sphinx-search__fun___connect" name="%connect" package="cl-sphinx-search"><lambda-list><elt>client</elt></lambda-list></function-definition><variable-definition id="cl-sphinx-search__variable__+sph-rank-wordcount+" name="+sph-rank-wordcount+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-groupby-attrpair+" name="+sph-groupby-attrpair+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__reqs" name="reqs" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__weights" name="weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><function-definition id="cl-sphinx-search__fun__ranker" name="ranker" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition><macro-definition id="cl-sphinx-search__fun__adv-p" name="adv-p" package="cl-sphinx-search"><lambda-list><elt>n</elt></lambda-list></macro-definition><variable-definition id="cl-sphinx-search__variable__+sph-attr-multi+" name="+sph-attr-multi+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-persist+" name="+searchd-command-persist+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-match-fullscan+" name="+sph-match-fullscan+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-extended+" name="+sph-sort-extended+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-command-search+" name="+searchd-command-search+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-filter-range+" name="+sph-filter-range+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+searchd-error+" name="+searchd-error+" package="cl-sphinx-search"/><variable-definition id="cl-sphinx-search__variable__+sph-sort-relevance+" name="+sph-sort-relevance+" package="cl-sphinx-search"/><function-definition id="cl-sphinx-search__fun__index-weights" name="index-weights" package="cl-sphinx-search"><lambda-list><elt>object</elt></lambda-list></function-definition></internal-symbols></package></documentation>
\ No newline at end of file
diff --git a/doc/index.html b/doc/index.html
index db20174..6691bfe 100644
--- a/doc/index.html
+++ b/doc/index.html
@@ -1,8 +1,8 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Sphinx Search API reference</title><link rel="stylesheet" type="text/css" href="index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
&nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded">
Index of packages:
</div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><h2 class="page-title"><a href="pages/cl-sphinx-search.html">
Package
- cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> </div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__get-last-error.html"><tt>get-last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__get-last-warning.html"><tt>get-last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ cl-sphinx-search</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e3">Methods</a></li><li><a href="pages/cl-sphinx-search.html#d0d0e0e0e0e4">Acknowledgements</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">cl-sphinx-search:</span></tt></span></td><td valign="top"><a href="pages/cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search.html b/doc/pages/cl-sphinx-search.html
index ad5bc54..8b0e461 100644
--- a/doc/pages/cl-sphinx-search.html
+++ b/doc/pages/cl-sphinx-search.html
@@ -1,26 +1,34 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package cl-sphinx-search</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
Package
- cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> </div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
+ cl-sphinx-search</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> <br><br> <br><br></div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><a href="#d0d0e0e0e0e4" style="font-weight: bold">Acknowledgements</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
From the site:<br><br> <pre>
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.<br><br>
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).<br><br>
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
(let ((sph (make-instance 'sphinx-client)))
(add-query sph "test")
(run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
There is just one class:<br><br> <div class="def"><a href="cl-sphinx-search__class__sphinx-client.html">
Class
sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="cl-sphinx-search__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
Setting options/parameters:<br><br> <div class="def"><a href="cl-sphinx-search__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="cl-sphinx-search__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="cl-sphinx-search__fun__set-limits.html#details">...</a></div><br><br><br>
- Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__get-last-error.html"><tt>get-last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__get-last-warning.html"><tt>get-last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
+ Running queries:<br><br> <div class="def"><a href="cl-sphinx-search__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="cl-sphinx-search__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="cl-sphinx-search__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="cl-sphinx-search__fun__run-queries.html#details">...</a></div><br><br><br> <h2><a name="d0d0e0e0e0e4"></a>Acknowledgements</h2>
+ This port is based on Sphinx.pm version 0.22 (deployed to CPAN <a href="http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/">here</a>), which
+ itself says:<br><br> <pre>
+ This module is based on Sphinx.pm (not deployed to CPAN) for
+ Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
+ based on the Sphinx PHP API. </pre><br><br>
+ Also used was the Python API supplied with the source code download for Sphinx Search v0.9.9-rc2, in the <tt>api/</tt> directory.<br><br> <b>Documentation</b><br><br>
+ This documentation was generated by atdoc, the documentation generation
+ system written by David Lichteblau and found <a href="http://www.lichteblau.com/atdoc/doc/">here</a>.<br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__class__sphinx-client.html b/doc/pages/cl-sphinx-search__class__sphinx-client.html
index e4757ba..ac3b51e 100644
--- a/doc/pages/cl-sphinx-search__class__sphinx-client.html
+++ b/doc/pages/cl-sphinx-search__class__sphinx-client.html
@@ -1,15 +1,18 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Class sphinx-client</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Class sphinx-client</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Superclasses</h3><div class="indent"><tt style="color: #777777">common-lisp:standard-object</tt>, <tt style="color: #777777">sb-pcl::slot-object</tt>, <tt style="color: #777777">common-lisp:t</tt></div><h3>Documented Subclasses</h3><div class="indent">
None
- </div><h3>Details<a name="details"></a></h3><div class="indent">The sphinx-search class.<br><br>
+ </div><h3>Details<a name="details"></a></h3><div class="indent">The sphinx-search class.<br><br> <pre>
+ (let ((sph (make-instance 'sphinx-client :host "localhost" :port 3315)))
+ (add-query sph "test")
+ (run-queries sph)) </pre><br><br>
The interface to the search daemon goes through this class.<br><br>
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling <a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a>, or add a number of queries using <a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a> and then calling <a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a>.<br><br>
- Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <a href="cl-sphinx-search__fun__get-last-error.html"><tt>get-last-error</tt></a> function.<br><br> </div></div></td><td valign="top" width="5%">
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a> function.<br><br> </div></div></td><td valign="top" width="5%">
 &nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__get-last-error.html"><tt>get-last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__get-last-warning.html"><tt>get-last-warning</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-error.html"><tt>last-error</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="cl-sphinx-search__fun__last-warning.html"><tt>last-warning</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun___encoding.html b/doc/pages/cl-sphinx-search__fun___encoding.html
new file mode 100644
index 0000000..04f45aa
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun___encoding.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %encoding</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Function
+ %encoding</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%encoding</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun___get-response.html b/doc/pages/cl-sphinx-search__fun___get-response.html
index cda9572..f8a8efd 100644
--- a/doc/pages/cl-sphinx-search__fun___get-response.html
+++ b/doc/pages/cl-sphinx-search__fun___get-response.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %get-response</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- %get-response</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-response</tt> (<b>client</b>Â <b>&key</b>Â <b>fp</b>Â <b>client-version</b>)</div><p style="color: red; font-weight: bold">
+ %get-response</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-response</tt> (<b>client</b>Â <b>&key</b>Â <b>client-version</b>)</div><p style="color: red; font-weight: bold">
No documentation string. Possibly unimplemented or incomplete.
</p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun___read-from.html b/doc/pages/cl-sphinx-search__fun___read-from.html
index 93a9469..4796627 100644
--- a/doc/pages/cl-sphinx-search__fun___read-from.html
+++ b/doc/pages/cl-sphinx-search__fun___read-from.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %read-from</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- %read-from</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%read-from</tt> (<b>socket</b>Â <b>size</b>)</div><p style="color: red; font-weight: bold">
+ %read-from</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%read-from</tt> (<b>client</b>Â <b>size</b>)</div><p style="color: red; font-weight: bold">
No documentation string. Possibly unimplemented or incomplete.
</p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun___send.html b/doc/pages/cl-sphinx-search__fun___send.html
index 427634f..fbea323 100644
--- a/doc/pages/cl-sphinx-search__fun___send.html
+++ b/doc/pages/cl-sphinx-search__fun___send.html
@@ -1,10 +1,10 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %send</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- %send</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%send</tt> (<b>client</b>Â <b>&key</b>Â <b>fp</b>Â <b>data</b>)</div><p style="color: red; font-weight: bold">
+ %send</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%send</tt> (<b>client</b>Â <b>data</b>)</div><p style="color: red; font-weight: bold">
No documentation string. Possibly unimplemented or incomplete.
</p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__define-constant.html b/doc/pages/cl-sphinx-search__fun__define-constant.html
new file mode 100644
index 0000000..20bb6ff
--- /dev/null
+++ b/doc/pages/cl-sphinx-search__fun__define-constant.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Macro define-constant</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
+ Macro
+ define-constant</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>define-constant</tt> (<b>name</b>Â <b>value</b>Â <b>&optional</b>Â <b>doc</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__get-last-error.html b/doc/pages/cl-sphinx-search__fun__get-last-error.html
deleted file mode 100644
index 26f4930..0000000
--- a/doc/pages/cl-sphinx-search__fun__get-last-error.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function get-last-error</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Function
- get-last-error</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>get-last-error</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last error message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
- Get the last error message sent by searchd</div></div></td><td valign="top" width="5%">
- &nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__get-last-warning.html b/doc/pages/cl-sphinx-search__fun__get-last-warning.html
deleted file mode 100644
index 70d5b5c..0000000
--- a/doc/pages/cl-sphinx-search__fun__get-last-warning.html
+++ /dev/null
@@ -1,11 +0,0 @@
-<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
-<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function get-last-warning</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
- &nbsp;&nbsp;
- <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
- Package:
- <a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
- Function
- get-last-warning</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>get-last-warning</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last warning message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
- Get the last warning message sent by searchd</div></div></td><td valign="top" width="5%">
- &nbsp;
- </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__last-error.html b/doc/pages/cl-sphinx-search__fun__last-error.html
index f82cbf3..270337d 100644
--- a/doc/pages/cl-sphinx-search__fun__last-error.html
+++ b/doc/pages/cl-sphinx-search__fun__last-error.html
@@ -1,10 +1,11 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function last-error</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- last-error</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-error</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
+ last-error</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-error</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last error message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the last error message sent by searchd</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/cl-sphinx-search__fun__last-warning.html b/doc/pages/cl-sphinx-search__fun__last-warning.html
index 67ce588..db58358 100644
--- a/doc/pages/cl-sphinx-search__fun__last-warning.html
+++ b/doc/pages/cl-sphinx-search__fun__last-warning.html
@@ -1,10 +1,11 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function last-warning</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
 &nbsp;
<b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
Package:
<a href="cl-sphinx-search.html">cl-sphinx-search</a></p><h2 class="page-title">
Function
- last-warning</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-warning</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
- No documentation string. Possibly unimplemented or incomplete.
- </p></div></div></body></html>
\ No newline at end of file
+ last-warning</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-warning</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last warning message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the last warning message sent by searchd</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="cl-sphinx-search__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/package.lisp b/package.lisp
index 7f41e89..11ddaee 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,70 +1,95 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
+
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
#:query
#:add-query
#:run-queries
- #:get-last-error
- #:get-last-warning)
+ #:last-error
+ #:last-warning)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
@aboutfun{query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
+
+ @begin[Acknowledgements]{section}
+ This port is based on Sphinx.pm version 0.22 (deployed to CPAN
+ @a[http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/]{here}), which
+ itself says:
+
+ @begin{pre}
+ This module is based on Sphinx.pm (not deployed to CPAN) for
+ Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
+ based on the Sphinx PHP API.
+ @end{pre}
+
+   Also used was the Python API supplied with the source code
+   download for Sphinx Search v0.9.9-rc2, in the @code{api/} directory.
+
+ @b{Documentation}
+
+ This documentation was generated by atdoc, the documentation generation
+ system written by David Lichteblau and found
+ @a[http://www.lichteblau.com/atdoc/doc/]{here}.
+
+ @end{section}
+
+
"))
|
thijs/cl-sphinx-search
|
6e8345a0c35cc95be824931502563ddde80c7b35
|
Added online doc location on thijso.com
|
diff --git a/README.md b/README.md
index 1982072..98569b3 100644
--- a/README.md
+++ b/README.md
@@ -1,45 +1,45 @@
Common Lisp Sphinx Search API client
===========
-See docs in `doc/`
+See docs in `doc/` or online [here](http://thijso.com/cl-spinx-search/doc/index.html).
### UNFINISHED & UNTESTED
This is a rather blunt port of the Perl API found on CPAN, at
[Sphinx-Search-0.22](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/).
It is mostly unfinished (the only finished parts are described in the
documentation found in `doc/`), and most definitely **untested**. So
use at your own risk.
### SPHINX VERSION COMPATIBILITY
This version of cl-sphinx-search is for Sphinx 0.9.9-rc2 and later.
Please note that this version of cl-sphinx-search is guaranteed
**not** to work with incompatible versions of Sphinx.
### ACKNOWLEDGEMENTS
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
[here](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/)), which
itself says:
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
Also used was the Python API supplied with the source code
download for Sphinx Search v0.9.9-rc2, in the `api/` directory.
### COPYRIGHT & LICENSE
Copyright 2009 M.L. Oppermann, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the MIT license, see the file `LICENSE`, and found
[here](http://www.opensource.org/licenses/mit-license.php).
|
thijs/cl-sphinx-search
|
36bf0b3f2aec05a62caf0b30252b171426231f17
|
Added compatibility notice to README
|
diff --git a/README.md b/README.md
index 52edd91..1982072 100644
--- a/README.md
+++ b/README.md
@@ -1,37 +1,45 @@
Common Lisp Sphinx Search API client
===========
See docs in `doc/`
### UNFINISHED & UNTESTED
This is a rather blunt port of the Perl API found on CPAN, at
[Sphinx-Search-0.22](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/).
It is mostly unfinished (the only finished parts are described in the
documentation found in `doc/`), and most definitely **untested**. So
use at your own risk.
+### SPHINX VERSION COMPATIBILITY
+
+This version of cl-sphinx-search is for Sphinx 0.9.9-rc2 and later.
+
+Please note that this version of cl-sphinx-search is guaranteed
+**not** to work with incompatible versions of Sphinx.
+
+
### ACKNOWLEDGEMENTS
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
[here](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/)), which
itself says:
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
Also used was the Python API supplied with the source code
download for Sphinx Search v0.9.9-rc2, in the `api/` directory.
### COPYRIGHT & LICENSE
Copyright 2009 M.L. Oppermann, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the MIT license, see the file `LICENSE`, and found
[here](http://www.opensource.org/licenses/mit-license.php).
|
thijs/cl-sphinx-search
|
8d68f8ac9aa75eebf882c0dd9a803175e62ab35b
|
Encoding & cleanup socket passing
|
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index c502f20..4678644 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,770 +1,781 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
+ (%encoding
+ :accessor %encoding
+ :initarg :encoding
+ :initform :utf-8
+ :documentation "the encoding used; utf-8 or latin-1 for sbcs")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
+ @begin{pre}
+ (let ((sph (make-instance 'sphinx-client :host \"localhost\" :port 3315)))
+ (add-query sph \"test\")
+ (run-queries sph))
+ @end{pre}
+
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{get-last-error} function.
@see{set-server}
@see{set-limits}
@see{get-last-warning}
"))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
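A short usage sketch of both forms; the host, port, and socket path are placeholders:

;; INET socket: host and port
(set-server client :host "search.example.com" :port 3312)
;; UNIX-domain socket; the unix:// prefix is optional
(set-server client :path "unix:///var/run/searchd.sock")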
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) (limit 20) (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
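For instance, fetching the second page of ten results might look like this sketch (the numbers are illustrative):

;; skip the first 10 matches, return the next 10, scan at most 1000
(set-limits client :offset 10 :limit 10 :max 1000)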
(defgeneric get-last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}}
Get the last error message sent by searchd
"))
(defmethod get-last-error ((client sphinx-client))
(last-error client))
(defgeneric get-last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}}
Get the last warning message sent by searchd
"))
(defmethod get-last-warning ((client sphinx-client))
(last-warning client))
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a list of hash-tables, one per match}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
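The keys of the returned hash are symbols, so a caller could inspect a result along these lines (a reachable searchd and a populated index are assumed; note that per %get-matches the 'matches entry is a list of hash-tables keyed by strings):

(let ((result (query client "test")))
  (when result
    (format t "~a of ~a matches in ~a sec~%"
            (gethash 'total result)
            (gethash 'total-found result)
            (gethash 'time result))
    (dolist (match (gethash 'matches result))
      (format t "doc ~a, weight ~a~%"
              (gethash "doc" match)
              (gethash "weight" match)))))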
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a list of hash-tables, one per match}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let* ((n-requests (length (reqs client)))
       (requests (pack "Na*" n-requests (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
- (let ((fp (%connect client)))
- (when fp
- (%send client :fp fp :data data)
- (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
- #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
- (when response
- (setf *response-length* (length response))
- (%parse-response response (length (reqs client))))))))))
+ (when (%connect client)
+ (%send client data)
+ (let ((response (%get-response client :client-version +ver-command-search+)))
+ #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
+ (when response
+ (setf *response-length* (length response))
+            (%parse-response response n-requests)))))))
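Putting it together, a batched run might look like this sketch (the index names are placeholders):

(add-query client "test" :index "articles")
(add-query client "test" :index "comments")
(dolist (result (run-queries client))
  (format t "status ~a: ~a found~%"
          (gethash 'status result)
          (gethash 'total-found result)))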
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
- (pack "N/a*" query)
+ (pack "N/a*" (octets-to-string (string-to-octets query :encoding (%encoding client)) :encoding :latin-1))
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
- #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
+ #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req :encoding (%encoding client)))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
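The octets-to-string/string-to-octets round trip in the query packing above exists because cl-pack works on strings: the query is first encoded to octets in the client's encoding, and each octet is then read back as one latin-1 character, so the packed payload carries the raw bytes unchanged. A standalone sketch of the idea with babel:

;; "é" encodes to two octets in UTF-8; reading those octets back as
;; latin-1 yields a two-character string whose char-codes are the octets
(octets-to-string (string-to-octets "é" :encoding :utf-8)
                  :encoding :latin-1)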
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
- (let ((v (unpack "N*" (%read-from (%socket client) 4))))
+ (let ((v (unpack "N*" (%read-from client 4))))
(if (< v 1)
(progn
(close (%socket client))
- (setf (last-error client) "connection to socket failed"))
+ (setf (last-error client) "connection to socket failed")
+ ())
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
-(defun %read-from (socket size)
- (let ((rec (sockets:receive-from socket :size size)))
+(defmethod %read-from ((client sphinx-client) size)
+ (let ((rec (sockets:receive-from (%socket client) :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
-(defmethod %get-response ((client sphinx-client) &key fp client-version)
- (multiple-value-bind (status version len) (unpack "n2N" (%read-from fp 8))
+(defmethod %get-response ((client sphinx-client) &key client-version)
+ (multiple-value-bind (status version len) (unpack "n2N" (%read-from client 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
- (let ((chunk (%read-from fp left)))
+ (let ((chunk (%read-from client left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
- (close fp)
+ (close (%socket client))
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
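For orientation, the 8-byte reply header unpacked at the top of %get-response can be fabricated the same way; this sketch uses a zero-length body:

(multiple-value-bind (status version len)
    (unpack "n2N" (pack "nnN" +searchd-ok+ +ver-command-search+ 0))
  (format t "status ~a, version ~a, body length ~a~%" status version len))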
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
(nreverse results)))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
-(defmethod %send ((client sphinx-client) &key fp data)
- #+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" fp)
+(defmethod %send ((client sphinx-client) data)
+ #+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
- (sockets:send-to fp (string-to-octets data :encoding :latin-1)))
+ (sockets:send-to (%socket client) (string-to-octets data :encoding :latin-1)))
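;; Hypothetical call site after the signature change (CLIENT is assumed
;; to be an already-connected sphinx-client): the payload is now passed
;; positionally and the socket is taken from the client itself.
(defun %send-search-request (client requests)
  (%send client (pack "nnN/a*" +searchd-command-search+
                      +ver-command-search+ requests)))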
(defun %pack-overrides (overrides)
  ;; maphash returns nil, so the packed entries have to be collected
  ;; explicitly and concatenated at the end
  (when (hash-table-p overrides)
    (let ((packed ()))
      (maphash
       #'(lambda (k entry)
           (declare (ignore k))
           (let ((values-packed ()))
             (maphash #'(lambda (id v)
                          (assert (and (numberp id) (numberp v)))
                          (push (concatenate 'string
                                             (pack "Q>" id)
                                             (cond ((eql (gethash 'type entry) +sph-attr-float+)
                                                    (%pack-float v))
                                                   ((eql (gethash 'type entry) +sph-attr-bigint+)
                                                    (pack "q>" v))
                                                   (t
                                                    (pack "N" v))))
                                values-packed))
                      (gethash 'values entry))
             (push (apply #'concatenate 'string
                          (pack "N/a*" (gethash 'attr entry))
                          (pack "NN" (gethash 'type entry)
                                (hash-table-count (gethash 'values entry)))
                          (nreverse values-packed))
                   packed)))
       overrides)
      (apply #'concatenate 'string (nreverse packed)))))
(defun %pack-filters (filters)
  ;; map with result type 'string would require the function to return
  ;; characters; collect the packed strings and concatenate them instead
  (apply #'concatenate 'string
         (mapcar #'(lambda (filter)
                     (when (hash-table-p filter)
                       (concatenate 'string
                                    (pack "N/a*" (gethash 'attr filter))
                                    (let ((type (gethash 'type filter)))
                                      (concatenate 'string
                                                   (pack "N" type)
                                                   (cond ((eql type +sph-filter-values+)
                                                          (%pack-array-signed-quads (gethash 'values filter)))
                                                         ((eql type +sph-filter-range+)
                                                          (concatenate 'string (pack "q>" (gethash 'min filter))
                                                                       (pack "q>" (gethash 'max filter))))
                                                         ((eql type +sph-filter-floatrange+)
                                                          (concatenate 'string (%pack-float (gethash 'min filter))
                                                                       (%pack-float (gethash 'max filter))))
                                                         (t
                                                          (error "Unhandled filter type ~S" type)))
                                                   ;; exclude is a boolean; pack it as 0/1
                                                   (pack "N" (if (gethash 'exclude filter) 1 0)))))))
                 filters)))
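;; A concrete illustration of the hash layout %pack-filters expects,
;; building a single values filter; attribute name and values are made
;; up for the example.
(defun %example-values-filter ()
  (let ((filter (make-hash-table)))
    (setf (gethash 'attr filter) "group_id"
          (gethash 'type filter) +sph-filter-values+
          (gethash 'values filter) (list 1 2 3)
          (gethash 'exclude filter) nil)
    (%pack-filters (list filter))))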
(defun %pack-hash (hash-table)
  ;; collect the packed entries; maphash itself returns nil
  (apply #'concatenate 'string
         (pack "N" (hash-table-count hash-table))
         (let ((packed ()))
           (maphash #'(lambda (k v)
                        (push (pack "N/a*N" k v) packed))
                    hash-table)
           (nreverse packed))))
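;; Illustrative use of %pack-hash for an index-weight table; the index
;; names are made up.  The wire format is a count followed by one
;; "N/a*N" (name, value) pair per entry.
(defun %example-index-weights ()
  (let ((weights (make-hash-table :test 'equal)))
    (setf (gethash "index_main" weights) 10
          (gethash "index_delta" weights) 1)
    (%pack-hash weights)))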
(defun %pack-array-signed-quads (values-list)
  ;; same fix as %pack-filters: concatenate the packed strings rather
  ;; than mapping into a string of characters
  (apply #'concatenate 'string
         (pack "N" (length values-list))
         (mapcar #'(lambda (value)
                     (pack "q>" value))
                 values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
23486b8a1e8866a9101e250c3e8c26eabf11474f
|
Fix small typo in README
|
diff --git a/README.md b/README.md
index 9ae52da..52edd91 100644
--- a/README.md
+++ b/README.md
@@ -1,37 +1,37 @@
Common Lisp Sphinx Search API client
===========
-See docs in `docs/`
+See docs in `doc/`
### UNFINISHED & UNTESTED
This is a rather blunt port of the perl API found on CPAN, at
[Sphinx-Search-0.22](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/).
It is mostly unfinished (the only done parts are described in the
-documentation found in `docs/`), and most definitely **untested**. So
+documentation found in `doc/`), and most definitely **untested**. So
use at your own risk.
### ACKNOWLEDGEMENTS
This port is based on Sphinx.pm version 0.22 (deployed to CPAN
[here](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/)), which
itself says:
This module is based on Sphinx.pm (not deployed to CPAN) for
Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
based on the Sphinx PHP API.
Also used was the api for python which was supplied with the source code
download for Sphinx Search v0.9.9-rc2, in the `api/` directory.
### COPYRIGHT & LICENSE
Copyright 2009 M.L. Oppermann, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the MIT license, see the file `LICENSE`, and found
[here](http://www.opensource.org/licenses/mit-license.php).
|
thijs/cl-sphinx-search
|
dc987f2c58f449d8ba700eac445cab515ab5b90b
|
Stop SBCL from throwing errors for defconstant of a list
|
diff --git a/constants.lisp b/constants.lisp
index f07f140..c974ad9 100644
--- a/constants.lisp
+++ b/constants.lisp
@@ -1,93 +1,97 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
;; known searchd commands
(defconstant +searchd-command-search+ 0)
(defconstant +searchd-command-excerpt+ 1)
(defconstant +searchd-command-update+ 2)
(defconstant +searchd-command-keywords+ 3)
(defconstant +searchd-command-persist+ 4)
;; current client-side command implementation versions
(defconstant +ver-command-search+ #x116)
(defconstant +ver-command-excerpt+ #x100)
(defconstant +ver-command-update+ #x101)
(defconstant +ver-command-keywords+ #x100)
;; known searchd status codes
(defconstant +searchd-ok+ 0)
(defconstant +searchd-error+ 1)
(defconstant +searchd-retry+ 2)
(defconstant +searchd-warning+ 3)
;; known match modes
(defconstant +sph-match-all+ 0)
(defconstant +sph-match-any+ 1)
(defconstant +sph-match-phrase+ 2)
(defconstant +sph-match-boolean+ 3)
(defconstant +sph-match-extended+ 4)
(defconstant +sph-match-fullscan+ 5)
(defconstant +sph-match-extended2+ 6)
;; known ranking modes (extended2 mode only)
(defconstant +sph-rank-proximity-bm25+ 0) ;; default mode, phrase proximity major factor and BM25 minor one
(defconstant +sph-rank-bm25+ 1) ;; statistical mode, BM25 ranking only (faster but worse quality)
(defconstant +sph-rank-none+ 2) ;; no ranking, all matches get a weight of 1
(defconstant +sph-rank-wordcount+ 3) ;; simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
;; known sort modes
(defconstant +sph-sort-relevance+ 0)
(defconstant +sph-sort-attr-desc+ 1)
(defconstant +sph-sort-attr-asc+ 2)
(defconstant +sph-sort-time-segments+ 3)
(defconstant +sph-sort-extended+ 4)
(defconstant +sph-sort-expr+ 5)
;; known filter types
(defconstant +sph-filter-values+ 0)
(defconstant +sph-filter-range+ 1)
(defconstant +sph-filter-floatrange+ 2)
;; known attribute types
(defconstant +sph-attr-none+ 0)
(defconstant +sph-attr-integer+ 1)
(defconstant +sph-attr-timestamp+ 2)
(defconstant +sph-attr-ordinal+ 3)
(defconstant +sph-attr-bool+ 4)
(defconstant +sph-attr-float+ 5)
(defconstant +sph-attr-bigint+ 6)
;; SPH_ATTR_MULTI = 0X40000000L
(defconstant +sph-attr-multi+ #x40000000)
;; SPH_ATTR_TYPES = (SPH_ATTR_NONE,
;; SPH_ATTR_INTEGER,
;; SPH_ATTR_TIMESTAMP,
;; SPH_ATTR_ORDINAL,
;; SPH_ATTR_BOOL,
;; SPH_ATTR_FLOAT,
;; SPH_ATTR_BIGINT,
;; SPH_ATTR_MULTI)
-(defconstant +sph-attr-types+ (list +sph-attr-none+
- +sph-attr-integer+
- +sph-attr-timestamp+
- +sph-attr-ordinal+
- +sph-attr-bool+
- +sph-attr-float+
- +sph-attr-bigint+
- +sph-attr-multi+))
+(defmacro define-constant (name value &optional doc)
+ `(defconstant ,name (if (boundp ',name) (symbol-value ',name) ,value)
+ ,@(when doc (list doc))))
+
+(define-constant +sph-attr-types+ (list +sph-attr-none+
+ +sph-attr-integer+
+ +sph-attr-timestamp+
+ +sph-attr-ordinal+
+ +sph-attr-bool+
+ +sph-attr-float+
+ +sph-attr-bigint+
+ +sph-attr-multi+))
;; known grouping functions
(defconstant +sph-groupby-day+ 0)
(defconstant +sph-groupby-week+ 1)
(defconstant +sph-groupby-month+ 2)
(defconstant +sph-groupby-year+ 3)
(defconstant +sph-groupby-attr+ 4)
(defconstant +sph-groupby-attrpair+ 5)
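;; Background for the define-constant macro above: the standard leaves
;; the consequences undefined when a DEFCONSTANT form is re-evaluated
;; with a value that is not EQL to the old one, and SBCL signals an
;; error in that case.  Reloading this file builds a fresh list each
;; time, and two fresh lists are never EQL:
;;
;;   (defconstant +example+ (list 1 2))  ; first load: fine
;;   (defconstant +example+ (list 1 2))  ; reload: error under SBCL
;;
;; Reusing the already-bound value sidesteps this; alexandria ships the
;; same DEFINE-CONSTANT idiom.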
|
thijs/cl-sphinx-search
|
36e53819b25cc73d8a04b09d653086e598c9be36
|
Add licensing text to all source code files
|
diff --git a/cl-sphinx-search-test.asd b/cl-sphinx-search-test.asd
index 2986c79..6e55793 100644
--- a/cl-sphinx-search-test.asd
+++ b/cl-sphinx-search-test.asd
@@ -1,34 +1,35 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package :cl-user)
(defpackage #:cl-sphinx-search-test-asd
(:use :cl :asdf))
(asdf:operate 'asdf:load-op :fiveam)
(asdf:operate 'asdf:load-op :alexandria)
(asdf:operate 'asdf:load-op :iolib.sockets)
(in-package :cl-sphinx-search-test-asd)
(defsystem #:cl-sphinx-search-test
:name "CL-SPHINX-SEARCH-TEST"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description "Test suite for CL-SPHINX-SEARCH"
:long-description "this is the test suite system for CL-SPHINX-SEARCH"
:serial t
:components ((:module "test"
:serial t
:components ((:file "package")
(:file "echo-server")
(:file "test"))))
:depends-on (:iolib.sockets
:cl-pack
:alexandria
:babel
:cl-sphinx-search))
diff --git a/cl-sphinx-search.asd b/cl-sphinx-search.asd
index 50ad6c1..08f1386 100644
--- a/cl-sphinx-search.asd
+++ b/cl-sphinx-search.asd
@@ -1,28 +1,29 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search-asd
(:use :cl :asdf))
(asdf:operate 'asdf:load-op :ieee-floats)
(asdf:operate 'asdf:load-op :cl-pack)
(in-package #:cl-sphinx-search-asd)
(defsystem #:cl-sphinx-search
:name "CL-SPHINX-SEARCH"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description ""
:long-description "CL-SPHINX-SEARCH is the Common Lisp connection layer to Sphinx Search <http://sphinxsearch.com/>"
:serial t
:components ((:file "package")
(:file "constants")
(:file "cl-sphinx-search"))
:depends-on (:iolib.sockets
:cl-pack
:babel))
diff --git a/cl-sphinx-search.lisp b/cl-sphinx-search.lisp
index 07ad6c0..c502f20 100644
--- a/cl-sphinx-search.lisp
+++ b/cl-sphinx-search.lisp
@@ -1,513 +1,514 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "list of requests for batched query runs"))
(:documentation
"@short{The sphinx-search class.}
The interface to the search daemon goes through this class.
Set options and settings of the search to be performed on an object
of this class, and then have it perform one search by calling
@fun{query}, or add a number of queries using @fun{add-query} and
then calling @fun{run-queries}.
Either get a result hash or a list of result hashes back, or an error
that can be retrieved with the @fun{get-last-error} function.
@see{set-server}
@see{set-limits}
@see{get-last-warning}
"))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defgeneric set-server (client &key host port path)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[host]{the host to connect to when using an INET socket}
@arg[port]{the port to connect to when using an INET socket}
@arg[path]{the path to the unix domain socket when not using INET}
@return{client}
@short{Set the server host:port or path to connect to.}
@begin{pre}
(set-server client :host host :port port)
(set-server client :path unix-path)
@end{pre}
In the first form, sets the @code{host} (string) and @code{port} (integer)
details for the searchd server using a network (INET) socket.
In the second form, where @code{unix-path} is a local filesystem path
(optionally prefixed by 'unix://'), sets the client to access the
searchd server via a local (UNIX domain) socket at the specified path.
"))
(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
(defgeneric set-limits (client &key offset limit max cutoff)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[offset]{the offset to start returning matches from}
@arg[limit]{how many matches to return starting from @code{offset}}
@arg[max]{maximum number of matches to return}
@arg[cutoff]{the cutoff to stop searching at}
@return{client}
@short{Set the offset, limit, cutoff and max matches to return.}
@begin{pre}
(set-limits client :limit limit)
(set-limits client :offset offset :limit limit)
(set-limits client :offset offset :limit limit :max max-matches)
@end{pre}
Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
"))
(defmethod set-limits ((client sphinx-client) &key (offset 0) (limit 20) (max 1000) cutoff) ; limit defaults to the class initform; the assert below requires a number
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
(defgeneric get-last-error (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last error message returned from the @code{searchd}}
Get the last error message sent by searchd
"))
(defmethod get-last-error ((client sphinx-client))
(last-error client))
(defgeneric get-last-warning (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{a string; the last warning message returned from the @code{searchd}}
Get the last warning message sent by searchd
"))
(defmethod get-last-warning ((client sphinx-client))
(last-warning client))
(defgeneric query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{nil or a hash containing the query results}
@short{Run a query through @code{searchd}.}
@begin{pre}
(query client \"test\")
@end{pre}
Query @code{searchd}. This method runs a single query through @code{searchd}.
It returns the results in a hash with the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{add-query}
@see{run-queries}
"))
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
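;; A hedged end-to-end sketch of this single-query path, assuming a
;; searchd reachable on the default port; the index name "test1" is
;; made up for the example.
(defun example-single-query ()
  (let ((sph (make-instance 'sphinx-client)))
    (set-server sph :host "localhost" :port 3312)
    (let ((result (query sph "hello" :index "test1")))
      (if result
          (format t "matched ~a documents~%" (gethash 'total-found result))
          (format t "query failed: ~a~%" (get-last-error sph))))))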
(defgeneric run-queries (client)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@return{nil or a list of hashes}
@short{Run the queries added with @code{add-query} through @code{searchd}.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\")
(run-queries client)
@end{pre}
Query @code{searchd} with the collected queries added with @code{add-query}.
It returns a list of hashes containing the result of each query. Each hash
has the following keys:
@begin{dl}
@dt[attributes]{a hash-table containing attributes}
@dt[fields]{a list of fields}
@dt[matches]{a hash-table containing the matches}
@dt[status]{the status returned by @code{searchd}}
@dt[status-message]{the status message returned by @code{searchd}}
@dt[time]{the time @code{searchd} took for the query}
@dt[total]{the total matches returned}
@dt[total-found]{the total number of matches found}
@dt[words]{a hash-table containing the matching words with their statistics}
@end{dl}
@see{query}
@see{add-query}
"))
(defmethod run-queries ((client sphinx-client))
  (assert (> (length (reqs client)) 0))
  (let* ((nreqs (length (reqs client)))
         ;; cl-pack's "a*" takes a single string, so join the queued
         ;; request strings before packing
         (requests (pack "Na*" nreqs (format nil "~{~a~}" (reqs client)))))
    #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
    (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
      (setf (reqs client) ())
      (let ((fp (%connect client)))
        (when fp
          (%send client :fp fp :data data)
          (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
            #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
            (when response
              (setf *response-length* (length response))
              ;; use the request count captured before the queue was
              ;; cleared; (reqs client) is empty by now
              (%parse-response response nreqs))))))))
(defgeneric add-query (client query &key index comment)
(:documentation
"@arg[client]{a @class{sphinx-client}}
@arg[query]{the query to run through @code{searchd}}
@arg[index]{the index to use; defaults to \"*\"}
@arg[comment]{a comment describing this query; default none}
@return{length of query queue}
@short{Add a query to a batch request.}
@begin{pre}
(add-query client \"test\")
(add-query client \"word\" :index \"*\")
(run-queries client)
@end{pre}
Add a query to the queue of batched queries.
Batch queries enable @code{searchd} to perform internal optimizations,
if possible; and reduce network connection overhead in all cases.
For instance, running exactly the same query with different
group-by settings will enable @code{searchd} to perform expensive
full-text search and ranking operation only once, but compute
multiple group-by results from its output.
It returns the new length of the query queue, which is also the index
of the newly added query in the queue.
@see{query}
@see{run-queries}
"))
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (fourth (anchor client))))) ; lon is the element itself; LAST would return a list
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defun %read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
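;; Note on the encoding: latin-1 is byte-transparent, mapping every
;; octet to the character with the same code, so cl-pack's string-based
;; packing and unpacking stays lossless.  An illustrative check:
(defun %latin-1-roundtrip-p ()
  (let ((octets (coerce #(0 127 128 255) '(vector (unsigned-byte 8)))))
    (equalp octets
            (string-to-octets (octets-to-string octets :encoding :latin-1)
                              :encoding :latin-1)))) ; => T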
(defmethod %get-response ((client sphinx-client) &key fp client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from fp 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from fp left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close fp)
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
diff --git a/constants.lisp b/constants.lisp
index 2645233..f07f140 100644
--- a/constants.lisp
+++ b/constants.lisp
@@ -1,92 +1,93 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search)
;; known searchd commands
(defconstant +searchd-command-search+ 0)
(defconstant +searchd-command-excerpt+ 1)
(defconstant +searchd-command-update+ 2)
(defconstant +searchd-command-keywords+ 3)
(defconstant +searchd-command-persist+ 4)
;; current client-side command implementation versions
(defconstant +ver-command-search+ #x116)
(defconstant +ver-command-excerpt+ #x100)
(defconstant +ver-command-update+ #x101)
(defconstant +ver-command-keywords+ #x100)
;; known searchd status codes
(defconstant +searchd-ok+ 0)
(defconstant +searchd-error+ 1)
(defconstant +searchd-retry+ 2)
(defconstant +searchd-warning+ 3)
;; known match modes
(defconstant +sph-match-all+ 0)
(defconstant +sph-match-any+ 1)
(defconstant +sph-match-phrase+ 2)
(defconstant +sph-match-boolean+ 3)
(defconstant +sph-match-extended+ 4)
(defconstant +sph-match-fullscan+ 5)
(defconstant +sph-match-extended2+ 6)
;; known ranking modes (extended2 mode only)
(defconstant +sph-rank-proximity-bm25+ 0) ;; default mode, phrase proximity major factor and BM25 minor one
(defconstant +sph-rank-bm25+ 1) ;; statistical mode, BM25 ranking only (faster but worse quality)
(defconstant +sph-rank-none+ 2) ;; no ranking, all matches get a weight of 1
(defconstant +sph-rank-wordcount+ 3) ;; simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
;; known sort modes
(defconstant +sph-sort-relevance+ 0)
(defconstant +sph-sort-attr-desc+ 1)
(defconstant +sph-sort-attr-asc+ 2)
(defconstant +sph-sort-time-segments+ 3)
(defconstant +sph-sort-extended+ 4)
(defconstant +sph-sort-expr+ 5)
;; known filter types
(defconstant +sph-filter-values+ 0)
(defconstant +sph-filter-range+ 1)
(defconstant +sph-filter-floatrange+ 2)
;; known attribute types
(defconstant +sph-attr-none+ 0)
(defconstant +sph-attr-integer+ 1)
(defconstant +sph-attr-timestamp+ 2)
(defconstant +sph-attr-ordinal+ 3)
(defconstant +sph-attr-bool+ 4)
(defconstant +sph-attr-float+ 5)
(defconstant +sph-attr-bigint+ 6)
;; SPH_ATTR_MULTI = 0X40000000L
(defconstant +sph-attr-multi+ #x40000000)
;; SPH_ATTR_TYPES = (SPH_ATTR_NONE,
;; SPH_ATTR_INTEGER,
;; SPH_ATTR_TIMESTAMP,
;; SPH_ATTR_ORDINAL,
;; SPH_ATTR_BOOL,
;; SPH_ATTR_FLOAT,
;; SPH_ATTR_BIGINT,
;; SPH_ATTR_MULTI)
(defconstant +sph-attr-types+ (list +sph-attr-none+
+sph-attr-integer+
+sph-attr-timestamp+
+sph-attr-ordinal+
+sph-attr-bool+
+sph-attr-float+
+sph-attr-bigint+
+sph-attr-multi+))
;; known grouping functions
(defconstant +sph-groupby-day+ 0)
(defconstant +sph-groupby-week+ 1)
(defconstant +sph-groupby-month+ 2)
(defconstant +sph-groupby-year+ 3)
(defconstant +sph-groupby-attr+ 4)
(defconstant +sph-groupby-attrpair+ 5)
diff --git a/package.lisp b/package.lisp
index 46f4b8f..7f41e89 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,69 +1,70 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search
(:use :cl :iolib.sockets :babel :cl-pack)
(:export #:set-server
#:set-limits
#:query
#:add-query
#:run-queries
#:get-last-error
#:get-last-warning)
(:documentation
"This package provides an interface to the search daemon (@em{searchd})
for @a[http://www.sphinxsearch.com/]{Sphinx}.
@begin[About Sphinx]{section}
From the site:
@begin{pre}
Sphinx is a full-text search engine, distributed under GPL version 2.
Commercial license is also available for embedded use.
Generally, it's a standalone search engine, meant to provide fast,
size-efficient and relevant fulltext search functions to other applications.
Sphinx was specially designed to integrate well with SQL databases and
scripting languages. Currently built-in data sources support fetching data
either via direct connection to MySQL or PostgreSQL, or using XML pipe
mechanism (a pipe to indexer in special XML-based format which Sphinx
recognizes).
As for the name, Sphinx is an acronym which is officially decoded as
SQL Phrase Index. Yes, I know about CMU's Sphinx project.
@end{pre}
@end{section}
@begin[Synopsis]{section}
@begin{pre}
(let ((sph (make-instance 'sphinx-client)))
(add-query sph \"test\")
(run-queries sph))
@end{pre}
@end{section}
@begin[One class]{section}
There is just one class:
@aboutclass{sphinx-client}
@end{section}
@begin[Methods]{section}
Setting options/parameters:
@aboutfun{set-server}
@aboutfun{set-limits}
Running queries:
@aboutfun{query}
@aboutfun{add-query}
@aboutfun{run-queries}
@end{section}
"))
diff --git a/test/echo-server.lisp b/test/echo-server.lisp
index 242def5..e0769bf 100644
--- a/test/echo-server.lisp
+++ b/test/echo-server.lisp
@@ -1,113 +1,114 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; indent-tabs-mode: nil -*-
;;;
;;; --- Multiplexer example, adapted from Juho Snellman's version for SBCL
;;; which is available at http://jsnell.iki.fi/tmp/echo-server.lisp.
;;;
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search-test)
(defparameter *port* 9999)
(defvar *event-base* nil)
(defvar *sockets* ()) ; held as a list; see add-socket / remove-socket below
(defvar *counter* 0)
(defun add-socket (socket)
(push socket *sockets*))
(defun remove-socket (socket)
(removef *sockets* socket))
(defun close-socket (socket)
(let ((fd (iolib.sockets:socket-os-fd socket)))
(ignore-errors (iomux:remove-fd-handlers *event-base* fd))
(remove-socket socket)
(close socket)))
(defun make-echoer (stream id disconnector)
(lambda (fd event exception)
(declare (ignore fd event exception))
(handler-case
(let ((line (read-line stream)))
(cond ((string= line "quit")
(funcall disconnector))
(t
(format t "~A: ~A~%" id line)
(sockets:send-to stream #(1 2 3 4 5 6 7 8))
(finish-output stream)
(format stream "~A: ~A~%" id line)
(with-open-file (stream "/tmp/echo-server.log" :direction :output :if-exists :append
:element-type '(unsigned-byte 8))
(write-sequence (string-to-octets line) stream))
(ignore-some-conditions (iolib.streams:hangup)
(finish-output stream)))))
(end-of-file ()
(funcall disconnector)))))
(defun make-disconnector (socket id)
(lambda ()
(format t "~A: closing~%" id)
(close-socket socket)))
(defun serve (socket id)
(iomux:set-io-handler *event-base*
(iolib.sockets:socket-os-fd socket)
:read
(make-echoer socket id
(make-disconnector socket id))))
(defun make-listener-handler (socket)
(lambda (fd event exception)
(declare (ignore fd exception))
(block nil
(when (eql :timeout event)
(warn "Got a server timeout!")
(return))
(let ((client (iolib.sockets:accept-connection socket)))
(when client
(setf (iolib.streams:fd-non-blocking client) t)
(add-socket client)
(sockets:send-to client #(80 70 60 50))
(finish-output client)
(incf *counter*)
(format t "Accepted client ~A~%" *counter*)
(serve client *counter*))))))
(defun start-echo-server (host port)
(let ((socket
(iolib.sockets:make-socket :connect :passive :address-family :internet :type :stream
:local-host host :local-port port
:backlog 5 :reuse-address t
:external-format '(:latin-1 :eol-style :crlf) :ipv6 nil)))
(setf *counter* 0
*sockets* nil)
(unwind-protect-case ()
(progn
(setf (iolib.streams:fd-non-blocking socket) t)
(add-socket socket)
(iomux:set-io-handler *event-base*
(iolib.sockets:socket-os-fd socket)
:read
(make-listener-handler socket)
:timeout 15))
(:abort (close socket)))
socket))
(defun close-all-sockets ()
(map 'nil #'close-socket *sockets*))
(defun run-server (&key (host iolib.sockets:+ipv4-unspecified+)
(port *port*) (new-process t) (timeout 10))
(flet ((%run-server ()
(unwind-protect
(progn
(setf *event-base* (make-instance 'iomux:event-base))
(with-open-stream (sock (start-echo-server host port))
(iomux:event-dispatch *event-base* :timeout timeout)))
(close-all-sockets)
(close *event-base*))))
(let ((iolib.sockets:*ipv6* nil))
(if new-process
(bt:make-thread #'%run-server)
(%run-server)))))
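;; Illustrative usage (values are the defaults shown above): start the
;; echo server in a background thread, then let the event dispatch
;; timeout wind it down.
;; (run-server :port 9999 :new-process t :timeout 10)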
diff --git a/test/package.lisp b/test/package.lisp
index 14c8034..147c98c 100644
--- a/test/package.lisp
+++ b/test/package.lisp
@@ -1,13 +1,14 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-user)
(defpackage #:cl-sphinx-search-test
(:nicknames :sphinx-search-api-test)
(:use :cl
:fiveam
:cl-pack
:babel
:iolib.sockets
:alexandria
:cl-sphinx-search))
diff --git a/test/test.lisp b/test/test.lisp
index 9bc6b05..8e2f58e 100644
--- a/test/test.lisp
+++ b/test/test.lisp
@@ -1,11 +1,12 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
+;;;; See the LICENSE file for licensing information.
(in-package #:cl-sphinx-search-test)
(5am:def-suite sphinx-search-api-tests
:description "Top-level test suite for sphinx-search-api")
(5am:in-suite sphinx-search-api-tests)
|
thijs/cl-sphinx-search
|
8e26c28808b5e3b90389ef4de7ddcca055610bcc
|
Added README for github & license file
|
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..12071ef
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,23 @@
+(This is the MIT license as taken from
+http://www.opensource.org/licenses/mit-license.php)
+
+Copyright (c) 2009 M.L. Oppermann
+
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/README b/README
deleted file mode 100644
index 9aaecc5..0000000
--- a/README
+++ /dev/null
@@ -1,6 +0,0 @@
-Common Lisp Sphinx Search API
-=============================
-
-See docs in ./docs/
-
-
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..9ae52da
--- /dev/null
+++ b/README.md
@@ -0,0 +1,37 @@
+Common Lisp Sphinx Search API client
+===========
+
+See docs in `docs/`
+
+
+### UNFINISHED & UNTESTED
+
+This is a rather blunt port of the perl API found on CPAN, at
+[Sphinx-Search-0.22](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/).
+
+It is mostly unfinished (the only done parts are described in the
+documentation found in `docs/`), and most definitely **untested**. So
+use at your own risk.
+
+
+### ACKNOWLEDGEMENTS
+
+This port is based on Sphinx.pm version 0.22 (deployed to CPAN
+[here](http://search.cpan.org/~jjschutz/Sphinx-Search-0.22/)), which
+itself says:
+
+ This module is based on Sphinx.pm (not deployed to CPAN) for
+ Sphinx version 0.9.7-rc1, by Len Kranendonk, which was in turn
+ based on the Sphinx PHP API.
+
+Also used was the api for python which was supplied with the source code
+download for Sphinx Search v0.9.9-rc2, in the `api/` directory.
+
+
+### COPYRIGHT & LICENSE
+
+Copyright 2009 M.L. Oppermann, all rights reserved.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the MIT license, see the file `LICENSE`, and found
+[here](http://www.opensource.org/licenses/mit-license.php).
diff --git a/perl-ex/PerlAPI.pm b/perl-ex/Sphinx.pm
similarity index 100%
rename from perl-ex/PerlAPI.pm
rename to perl-ex/Sphinx.pm
|
thijs/cl-sphinx-search
|
1c78e596697f8f6f236a9c65c6443de20f471508
|
Added docs generated by atdoc
|
diff --git a/doc/header.gif b/doc/header.gif
new file mode 100644
index 0000000..8fa7847
Binary files /dev/null and b/doc/header.gif differ
diff --git a/doc/index.css b/doc/index.css
new file mode 100644
index 0000000..e065098
--- /dev/null
+++ b/doc/index.css
@@ -0,0 +1,206 @@
+div.sidebar {
+ float: right;
+ min-width: 15%;
+ padding: 0pt 5pt 5pt 5pt;
+ font-family: verdana, arial;
+}
+
+a {
+ text-decoration: none;
+ color: black;
+ border-bottom: 1px solid #0070a0;
+}
+
+.nonlink {
+ border-bottom: 1px solid white;
+ border-top: 1px solid white;
+ border-left: 1px solid white;
+ border-right: 1px solid white;
+ padding-top: 1px;
+ padding-bottom: 1px;
+}
+
+.sidebar a {
+ border-top: 1px solid #eeeeee;
+ border-left: 1px solid #eeeeee;
+ border-right: 1px solid #eeeeee;
+}
+
+#headerlink {
+ border: none;
+}
+
+#headerlink:hover {
+ border: none;
+}
+
+body {
+ color: #000000;
+ background-color: #ffffff;
+ margin: 0 0 0 0;
+/*
+ margin-top: 2em;
+ margin-right: 20pt;
+ margin-bottom: 10%;
+ */
+ font-family: verdana, arial;
+ font-size: 8pt;
+}
+
+.main {
+ margin-top: 20px;
+ margin-left: 40px;
+}
+
+.padded {
+ padding-left: 30px;
+}
+
+.padded h1,h2 {
+ margin-left: -30px;
+}
+
+h2 {
+ color: #0070a0;
+}
+
+.page-title {
+ color: black;
+}
+
+h3 {
+ background-color: #f4f4f4;
+ padding-top: 4px;
+ padding-bottom: 4px;
+ border-bottom: 1px solid #80d8fd;
+ color: #000000;
+ width: 90%;
+ margin-top: 2em;
+ margin-left: -3px;
+ padding-left: 3px;
+ font-weight: bold;
+}
+
+h4 {
+}
+
+.grau {
+ padding-top: 1em;
+}
+
+pre {
+ background-color: #eeeeee;
+ border: solid 1px #d0d0d0;
+ padding: 1em;
+ margin-right: 10%;
+}
+
+.code {
+ border: solid 1px #d0d0d0;
+ padding: 1em;
+ margin-right: 10%;
+}
+
+.indent {
+ margin-left: 20px;
+ padding-bottom: 1em;
+ width: 88%;
+}
+
+.def {
+ padding: 1px 1px 1px 1px;
+ margin-bottom: 1px;
+ font-weight: bold;
+ margin-right: 40px;
+}
+
+.nomargin {
+ margin-bottom: 0;
+ margin-top: 0;
+}
+
+.noindent {
+ margin-left: -30px;
+ padding-bottom: 1em;
+}
+
+#header table {
+ width: 95%;
+ position: absolute;
+ bottom: 10px;
+ margin-right: 1em;
+}
+
+#header {
+ background: url(header.gif);
+ position: relative; /* so that the table is relativ to this */
+ width: 100%;
+ height: 70px;
+ font-family: verdana, arial;
+ font-size: 12pt;
+ padding-bottom: 1px;
+}
+
+#sp-package-list {
+ /* ... */
+}
+
+#sp-about-packages {
+ /* ... */
+}
+
+.sp-lambda-list {
+ width: 90%;
+ background-color: #f4f4f4;
+ padding: 3px 3px 3px 3px;
+}
+
+.sp-definition {
+ width: 90%;
+ border: 1px solid #cccccc;
+ padding: 3px 3px 3px 3px;
+}
+
+.sp-definition-body {
+ padding-left: 10%;
+ padding-bottom: 2em;
+}
+
+.sp-definition-body ul {
+ margin-top: 0;
+ margin-bottom: 0;
+}
+
+.sp-return {
+}
+
+.sph3 {
+ padding-top: 1em;
+ font-weight: bold;
+}
+
+dl {
+ width: 75em;
+ margin: .6em 0 1em;
+ padding: 0;
+}
+
+dt {
+ display: list-item;
+ width: 12em;
+ float: left;
+ margin: 0 0 0 2em;
+ padding: .25em;
+ font-weight: bold;
+}
+
+/* commented backslash hack for mac-ie5 \*/
+dt { clear: both; }
+/* end hack */
+
+dd {
+ float: left;
+ width: 50em;
+ margin: 0 0 0 0;
+ padding: .25em;
+}
diff --git a/doc/index.html b/doc/index.html
new file mode 100644
index 0000000..7f139d0
--- /dev/null
+++ b/doc/index.html
@@ -0,0 +1,8 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Sphinx Search API reference</title><link rel="stylesheet" type="text/css" href="index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded">
+ Index of packages:
+ </div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><h2 class="page-title"><a href="pages/com.oppermannen.sphinx-search-api.html">
+ Package
+ com.oppermannen.sphinx-search-api</a></h2><div style="left: 100px"><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> </div><div class="indent"><p><i>About this package:</i></p><ul><li><a href="pages/com.oppermannen.sphinx-search-api.html#d0d0e0e0e0e0">About Sphinx</a></li><li><a href="pages/com.oppermannen.sphinx-search-api.html#d0d0e0e0e0e1">Synopsis</a></li><li><a href="pages/com.oppermannen.sphinx-search-api.html#d0d0e0e0e0e2">One class</a></li><li><a href="pages/com.oppermannen.sphinx-search-api.html#d0d0e0e0e0e3">Methods</a></li></ul></div></div></div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__get-last-error.html"><tt>get-last-error</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__get-last-warning.html"><tt>get-last-warning</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top" align="right" nowrap><span class="nonlink"><tt><span style="color: #777777">com.oppermannen.sphinx-search-api:</span></tt></span></td><td valign="top"><a href="pages/com.oppermannen.sphinx-search-api__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api.html b/doc/pages/com.oppermannen.sphinx-search-api.html
new file mode 100644
index 0000000..f7e0599
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Package com.oppermannen.sphinx-search-api</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><h1>
+ Package
+ com.oppermannen.sphinx-search-api</h1><div class="indent">This package provides an interface to the search daemon (<i>searchd</i>) for <a href="http://www.sphinxsearch.com/">Sphinx</a>.<br><br> <br><br> <br><br> <br><br> </div></div><table cellspacing="0" cellpadding="0"><tr><td valign="top" width="60%"><div class="padded"><div style="margin-left: -30px"><h3>About This Package</h3></div><a href="#d0d0e0e0e0e0" style="font-weight: bold">About Sphinx</a><br><a href="#d0d0e0e0e0e1" style="font-weight: bold">Synopsis</a><br><a href="#d0d0e0e0e0e2" style="font-weight: bold">One class</a><br><a href="#d0d0e0e0e0e3" style="font-weight: bold">Methods</a><br><br><h2><a name="d0d0e0e0e0e0"></a>About Sphinx</h2><br><br>
+ From the site:<br><br> <pre>
+ Sphinx is a full-text search engine, distributed under GPL version 2.
+ Commercial license is also available for embedded use.<br><br>
+ Generally, it's a standalone search engine, meant to provide fast,
+ size-efficient and relevant fulltext search functions to other applications.
+ Sphinx was specially designed to integrate well with SQL databases and
+ scripting languages. Currently built-in data sources support fetching data
+ either via direct connection to MySQL or PostgreSQL, or using XML pipe
+ mechanism (a pipe to indexer in special XML-based format which Sphinx
+ recognizes).<br><br>
+ As for the name, Sphinx is an acronym which is officially decoded as
+ SQL Phrase Index. Yes, I know about CMU's Sphinx project. </pre> <h2><a name="d0d0e0e0e0e1"></a>Synopsis</h2> <pre><br><br>
+ (let ((sph (make-instance 'sphinx-client)))
+ (add-query sph "test")
+ (run-queries sph))<br><br> </pre> <h2><a name="d0d0e0e0e0e2"></a>One class</h2>
+ There is just one class:<br><br> <div class="def"><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html">
+ Class
+ sphinx-client</a></div><div style="margin-left: 3em">The sphinx-search class. <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html#details">...</a></div><br> <h2><a name="d0d0e0e0e0e3"></a>Methods</h2>
+ Setting options/parameters:<br><br> <div class="def"><a href="com.oppermannen.sphinx-search-api__fun__set-server.html">Function set-server (client &key host port path)</a></div><div style="margin-left: 3em">Set the server host:port or path to connect to. <a href="com.oppermannen.sphinx-search-api__fun__set-server.html#details">...</a></div><br> <div class="def"><a href="com.oppermannen.sphinx-search-api__fun__set-limits.html">Function set-limits (client &key offset limit max cutoff)</a></div><div style="margin-left: 3em">Set the offset, limit, cutoff and max matches to return. <a href="com.oppermannen.sphinx-search-api__fun__set-limits.html#details">...</a></div><br><br><br>
+ Running queries:<br><br> <div class="def"><a href="com.oppermannen.sphinx-search-api__fun__query.html">Function query (client query &key index comment)</a></div><div style="margin-left: 3em">Run a query through <tt>searchd</tt>. <a href="com.oppermannen.sphinx-search-api__fun__query.html#details">...</a></div><br> <div class="def"><a href="com.oppermannen.sphinx-search-api__fun__add-query.html">Function add-query (client query &key index comment)</a></div><div style="margin-left: 3em">Add a query to a batch request. <a href="com.oppermannen.sphinx-search-api__fun__add-query.html#details">...</a></div><br> <div class="def"><a href="com.oppermannen.sphinx-search-api__fun__run-queries.html">Function run-queries (client)</a></div><div style="margin-left: 3em">Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>. <a href="com.oppermannen.sphinx-search-api__fun__run-queries.html#details">...</a></div><br><br><br> </div></td><td valign="top"><h3><a name="index"></a>Exported Symbol Index</h3><table cellspacing="0" cellpadding="0"><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__add-query.html"><tt>add-query</tt></a>, function</td></tr><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__get-last-error.html"><tt>get-last-error</tt></a>, function</td></tr><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__get-last-warning.html"><tt>get-last-warning</tt></a>, function</td></tr><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__query.html"><tt>query</tt></a>, function</td></tr><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__run-queries.html"><tt>run-queries</tt></a>, function</td></tr><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__set-limits.html"><tt>set-limits</tt></a>, function</td></tr><tr><td valign="top"><a href="com.oppermannen.sphinx-search-api__fun__set-server.html"><tt>set-server</tt></a>, function</td></tr></table></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__class__sphinx-client.html b/doc/pages/com.oppermannen.sphinx-search-api__class__sphinx-client.html
new file mode 100644
index 0000000..4d2d849
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__class__sphinx-client.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Class sphinx-client</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Class sphinx-client</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Superclasses</h3><div class="indent"><tt style="color: #777777">common-lisp:standard-object</tt>, <tt style="color: #777777">sb-pcl::slot-object</tt>, <tt style="color: #777777">common-lisp:t</tt></div><h3>Documented Subclasses</h3><div class="indent">
+ None
+ </div><h3>Details<a name="details"></a></h3><div class="indent">The sphinx-search class.<br><br>
+ The interface to the search daemon goes through this class.<br><br>
+ Set options and settings of the search to be performed on an object
+ of this class, and then have it perform one search by calling <a href="com.oppermannen.sphinx-search-api__fun__query.html"><tt>query</tt></a>, or add a number of queries using <a href="com.oppermannen.sphinx-search-api__fun__add-query.html"><tt>add-query</tt></a> and then calling <a href="com.oppermannen.sphinx-search-api__fun__run-queries.html"><tt>run-queries</tt></a>.<br><br>
+ Either get a result hash or a list of result hashes back, or an error that can be retrieved with the <a href="com.oppermannen.sphinx-search-api__fun__get-last-error.html"><tt>get-last-error</tt></a> function.<br><br> </div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__run-queries.html"><tt>run-queries</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__get-last-error.html"><tt>get-last-error</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__set-server.html"><tt>set-server</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__set-limits.html"><tt>set-limits</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__get-last-warning.html"><tt>get-last-warning</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___connect.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___connect.html
new file mode 100644
index 0000000..2c86fdb
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___connect.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %connect</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %connect</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%connect</tt> (<b>client</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___get-attributes.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-attributes.html
new file mode 100644
index 0000000..ae40e89
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-attributes.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %get-attributes</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %get-attributes</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-attributes</tt> (<b>response</b>&nbsp;<b>start</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___get-fields.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-fields.html
new file mode 100644
index 0000000..76c6517
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-fields.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %get-fields</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %get-fields</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-fields</tt> (<b>response</b>&nbsp;<b>start</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___get-matches.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-matches.html
new file mode 100644
index 0000000..95e82de
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-matches.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %get-matches</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %get-matches</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-matches</tt> (<b>response</b>&nbsp;<b>attribute-names</b>&nbsp;<b>attributes</b>&nbsp;<b>start</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___get-response-status.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-response-status.html
new file mode 100644
index 0000000..7c37d41
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-response-status.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %get-response-status</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %get-response-status</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-response-status</tt> (<b>response</b>&nbsp;<b>start</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___get-response.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-response.html
new file mode 100644
index 0000000..703b545
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___get-response.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %get-response</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %get-response</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%get-response</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>fp</b>&nbsp;<b>client-version</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___host.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___host.html
new file mode 100644
index 0000000..4da60f6
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___host.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %host</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %host</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%host</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-array-signed-quads.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-array-signed-quads.html
new file mode 100644
index 0000000..95fa8fc
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-array-signed-quads.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %pack-array-signed-quads</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %pack-array-signed-quads</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%pack-array-signed-quads</tt> (<b>values-list</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-filters.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-filters.html
new file mode 100644
index 0000000..833156e
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-filters.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %pack-filters</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %pack-filters</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%pack-filters</tt> (<b>filters</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-float.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-float.html
new file mode 100644
index 0000000..10b4c21
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-float.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %pack-float</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %pack-float</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%pack-float</tt> (<b>float-value</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-hash.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-hash.html
new file mode 100644
index 0000000..c07117b
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-hash.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %pack-hash</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %pack-hash</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%pack-hash</tt> (<b>hash-table</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-overrides.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-overrides.html
new file mode 100644
index 0000000..fbeefa1
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___pack-overrides.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %pack-overrides</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %pack-overrides</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%pack-overrides</tt> (<b>overrides</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___parse-response.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___parse-response.html
new file mode 100644
index 0000000..a841c0d
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___parse-response.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %parse-response</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %parse-response</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%parse-response</tt> (<b>response</b>&nbsp;<b>n-requests</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___path.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___path.html
new file mode 100644
index 0000000..34dbd87
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___path.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %path</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %path</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%path</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___port.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___port.html
new file mode 100644
index 0000000..994749a
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___port.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %port</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %port</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%port</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___read-from.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___read-from.html
new file mode 100644
index 0000000..4800877
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___read-from.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %read-from</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %read-from</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%read-from</tt> (<b>socket</b>&nbsp;<b>size</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___send.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___send.html
new file mode 100644
index 0000000..22f004b
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___send.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %send</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %send</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%send</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>fp</b>&nbsp;<b>data</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun___socket.html b/doc/pages/com.oppermannen.sphinx-search-api__fun___socket.html
new file mode 100644
index 0000000..049815c
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun___socket.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function %socket</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ %socket</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>%socket</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__add-query.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__add-query.html
new file mode 100644
index 0000000..1434ad3
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__add-query.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function add-query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ add-query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>add-query</tt> (<b>client</b>&nbsp;<b>query</b>&nbsp;<b>&key</b>&nbsp;<b>index</b>&nbsp;<b>comment</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">length of query queue</div><h3>Details<a name="details"></a></h3><div class="indent"> Add a query to a batch request.<br><br> <pre>
+ (add-query client "test")
+ (add-query client "word" :index "*")
+ (run-queries client) </pre><br><br>
+ Add a query to the queue of batched queries.<br><br> Batching queries enables <tt>searchd</tt> to perform internal optimizations
+ where possible, and reduces network connection overhead in all cases.<br><br>
+ For instance, running exactly the same query with different group-by settings lets <tt>searchd</tt> perform the expensive
+ full-text search and ranking operations only once, but compute
+ multiple group-by results from that single output.<br><br>
+ <tt>add-query</tt> returns the new length of the query queue, which is also the index
+ of the newly added query in the queue.<br><br> <br><br></div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
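To make the batching behaviour above concrete, a hedged sketch follows. It assumes an already-connected client and that the group-by accessor on the client is setf-able; the attribute names and that usage are illustrative assumptions, not documented API:

    ;; Hedged sketch: queue the same full-text query under two group-by
    ;; settings, then execute the whole batch in one network round-trip.
    ;; (setf (group-by ...)) being supported is an assumption here.
    (setf (group-by client) "group_id")
    (add-query client "cats dogs" :index "test1" :comment "by group")
    (setf (group-by client) "date_added")
    (add-query client "cats dogs" :index "test1" :comment "by day")
    ;; RUN-QUERIES sends the queue and returns one result hash per query.
    (run-queries client)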
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__adv-p.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__adv-p.html
new file mode 100644
index 0000000..5249e88
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__adv-p.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Macro adv-p</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Macro
+ adv-p</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>adv-p</tt> (<b>n</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__anchor.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__anchor.html
new file mode 100644
index 0000000..7e24373
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__anchor.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function anchor</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ anchor</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>anchor</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__cutoff.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__cutoff.html
new file mode 100644
index 0000000..3e425e3
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__cutoff.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function cutoff</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ cutoff</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>cutoff</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__field-weights.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__field-weights.html
new file mode 100644
index 0000000..fa68f2a
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__field-weights.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function field-weights</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ field-weights</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>field-weights</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__filters.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__filters.html
new file mode 100644
index 0000000..656611a
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__filters.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function filters</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ filters</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>filters</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__get-last-error.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__get-last-error.html
new file mode 100644
index 0000000..7dcb908
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__get-last-error.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function get-last-error</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ get-last-error</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>get-last-error</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last error message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the last error message sent by <tt>searchd</tt>.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__get-last-warning.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__get-last-warning.html
new file mode 100644
index 0000000..6e6539c
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__get-last-warning.html
@@ -0,0 +1,11 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function get-last-warning</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ get-last-warning</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>get-last-warning</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">a string; the last warning message returned from the <tt>searchd</tt></div><h3>Details<a name="details"></a></h3><div class="indent"> <br><br>
+ Get the last warning message sent by <tt>searchd</tt>.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__group-by.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-by.html
new file mode 100644
index 0000000..6410c87
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-by.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function group-by</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ group-by</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>group-by</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__group-distinct.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-distinct.html
new file mode 100644
index 0000000..878ab32
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-distinct.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function group-distinct</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ group-distinct</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>group-distinct</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__group-function.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-function.html
new file mode 100644
index 0000000..a4dcf29
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-function.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function group-function</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ group-function</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>group-function</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__group-sort.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-sort.html
new file mode 100644
index 0000000..f25da98
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__group-sort.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function group-sort</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ group-sort</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>group-sort</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__index-weights.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__index-weights.html
new file mode 100644
index 0000000..f15e06c
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__index-weights.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function index-weights</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ index-weights</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>index-weights</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__last-error.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__last-error.html
new file mode 100644
index 0000000..8e21f52
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__last-error.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function last-error</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ last-error</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-error</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__last-warning.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__last-warning.html
new file mode 100644
index 0000000..89193e6
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__last-warning.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function last-warning</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ last-warning</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>last-warning</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__limit.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__limit.html
new file mode 100644
index 0000000..4250dd0
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__limit.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function limit</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ limit</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>limit</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__max-id.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__max-id.html
new file mode 100644
index 0000000..68d2181
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__max-id.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function max-id</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ max-id</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>max-id</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__max-matches.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__max-matches.html
new file mode 100644
index 0000000..bda3a2f
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__max-matches.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function max-matches</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ max-matches</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>max-matches</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__max-query-time.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__max-query-time.html
new file mode 100644
index 0000000..109f7fb
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__max-query-time.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function max-query-time</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ max-query-time</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>max-query-time</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__min-id.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__min-id.html
new file mode 100644
index 0000000..726179e
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__min-id.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function min-id</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ min-id</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>min-id</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__mode.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__mode.html
new file mode 100644
index 0000000..a5bb518
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__mode.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function mode</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ mode</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>mode</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__offset.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__offset.html
new file mode 100644
index 0000000..0331383
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__offset.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function offset</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ offset</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>offset</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__overrides.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__overrides.html
new file mode 100644
index 0000000..b502ca2
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__overrides.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function overrides</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ overrides</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>overrides</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__query.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__query.html
new file mode 100644
index 0000000..87f30cb
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__query.html
@@ -0,0 +1,12 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function query</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ query</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>query</tt> (<b>client</b>&nbsp;<b>query</b>&nbsp;<b>&key</b>&nbsp;<b>index</b>&nbsp;<b>comment</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>query</tt> -- the query to run through <tt>searchd</tt></li><li><tt>index</tt> -- the index to use; defaults to "*"</li><li><tt>comment</tt> -- a comment describing this query; default none</li></ul></div><h3>Return Value</h3><div class="indent">nil or a hash containing the query results</div><h3>Details<a name="details"></a></h3><div class="indent"> Run a query through <tt>searchd</tt>.<br><br> <pre>
+ (query client "test") </pre><br><br> This method runs a single query through <tt>searchd</tt> and waits for the result.<br><br>
+ It returns the results in a hash with the following keys: <dl><dt>attributes</dt><dd> : a hash-table containing attributes</dd><dt>fields</dt><dd> : a list of fields</dd><dt>matches</dt><dd> : a hash-table containing the matches</dd><dt>status</dt><dd> : the status returned by <tt>searchd</tt></dd><dt>status-message</dt><dd> : the status message returned by <tt>searchd</tt></dd><dt>time</dt><dd> : the time <tt>searchd</tt> took for the query</dd><dt>total</dt><dd> : the total matches returned</dd><dt>total-found</dt><dd> : the total number of matches found</dd><dt>words</dt><dd> : a hash-table containing the matching words with their statistics</dd></dl><br style="clear: both;"><br><br> <br><br></div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__add-query.html"><tt>add-query</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__run-queries.html"><tt>run-queries</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
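A minimal usage sketch for the page above, assuming the default host/port of the sphinx-client class and assuming the result hash is keyed by keywords (the page does not pin down the key representation):

    ;; Sketch only: :total and :total-found as keyword keys are assumptions.
    (let* ((client (make-instance 'sphinx-client :host "localhost" :port 3312))
           (result (query client "test" :index "*")))
      (when result
        (format t "~a of ~a matches returned~%"
                (gethash :total result)
                (gethash :total-found result))))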
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__ranker.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__ranker.html
new file mode 100644
index 0000000..d2054bf
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__ranker.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function ranker</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ ranker</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>ranker</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__reqs.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__reqs.html
new file mode 100644
index 0000000..9944446
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__reqs.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function reqs</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ reqs</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>reqs</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__retry-count.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__retry-count.html
new file mode 100644
index 0000000..d7d5b50
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__retry-count.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function retry-count</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ retry-count</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>retry-count</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__retry-delay.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__retry-delay.html
new file mode 100644
index 0000000..023fa48
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__retry-delay.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function retry-delay</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ retry-delay</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>retry-delay</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__run-queries.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__run-queries.html
new file mode 100644
index 0000000..c5118ba
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__run-queries.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function run-queries</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ run-queries</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>run-queries</tt> (<b>client</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li></ul></div><h3>Return Value</h3><div class="indent">nil or a list of hashes</div><h3>Details<a name="details"></a></h3><div class="indent"> Run the queries added with <tt>add-query</tt> through <tt>searchd</tt>.<br><br> <pre>
+ (add-query client "test")
+ (add-query client "word")
+ (run-queries client) </pre><br><br> This method sends all queries collected with <tt>add-query</tt> to <tt>searchd</tt> in a single batch.<br><br>
+ It returns a list of hashes containing the result of each query. Each hash
+ has the following keys: <dl><dt>attributes</dt><dd> : a hash-table containing attributes</dd><dt>fields</dt><dd> : a list of fields</dd><dt>matches</dt><dd> : a hash-table containing the matches</dd><dt>status</dt><dd> : the status returned by <tt>searchd</tt></dd><dt>status-message</dt><dd> : the status message returned by <tt>searchd</tt></dd><dt>time</dt><dd> : the time <tt>searchd</tt> took for the query</dd><dt>total</dt><dd> : the total matches returned</dd><dt>total-found</dt><dd> : the total number of matches found</dd><dt>words</dt><dd> : a hash-table containing the matching words with their statistics</dd></dl><br style="clear: both;"><br><br> <br><br></div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__query.html"><tt>query</tt></a></td></tr><tr><td><a href="com.oppermannen.sphinx-search-api__fun__add-query.html"><tt>add-query</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
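A minimal sketch of the batched form documented above, under the same assumptions (default client settings, keyword result keys):

    ;; Sketch only: collect two queries, send them together, inspect each result.
    (let ((client (make-instance 'sphinx-client :host "localhost" :port 3312)))
      (add-query client "test")
      (add-query client "word")
      (dolist (result (run-queries client))
        (format t "status ~a: ~a matches found~%"
                (gethash :status result)
                (gethash :total-found result))))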
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__select.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__select.html
new file mode 100644
index 0000000..f442f37
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__select.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function select</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ select</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>select</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__set-limits.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__set-limits.html
new file mode 100644
index 0000000..ac67bd0
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__set-limits.html
@@ -0,0 +1,14 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-limits</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ set-limits</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-limits</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>offset</b>&nbsp;<b>limit</b>&nbsp;<b>max</b>&nbsp;<b>cutoff</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>offset</tt> -- the offset to start returning matches from</li><li><tt>limit</tt> -- how many matches to return starting from <tt>offset</tt></li><li><tt>max</tt> -- maximum number of matches to return</li><li><tt>cutoff</tt> -- the cutoff to stop searching at</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the offset, limit, cutoff and max matches to return.<br><br> <pre>
+ (set-limits client :limit limit)
+ (set-limits client :offset offset :limit limit)
+ (set-limits client :offset offset :limit limit :max max-matches) </pre><br><br>
+ Set the limit on how many matches to return. Defaults to an offset of 0 and a maximum of 1000 matches.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
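A minimal paging sketch built on the page above; the page size of 20 is arbitrary, chosen only for illustration:

    ;; Sketch only: fetch three consecutive pages of 20 matches each.
    (let ((client (make-instance 'sphinx-client :host "localhost" :port 3312)))
      (loop for page from 0 below 3
            do (set-limits client :offset (* page 20) :limit 20)
               (query client "test")))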
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__set-server.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__set-server.html
new file mode 100644
index 0000000..9818a1a
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__set-server.html
@@ -0,0 +1,15 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function set-server</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ set-server</h2></div><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="top" width="60%"><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>set-server</tt> (<b>client</b>&nbsp;<b>&key</b>&nbsp;<b>host</b>&nbsp;<b>port</b>&nbsp;<b>path</b>)</div><h3>Arguments</h3><div class="indent"><ul><li><tt>client</tt> -- a <a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></li><li><tt>host</tt> -- the host to connect to when using an INET socket</li><li><tt>port</tt> -- the port to connect to when using an INET socket</li><li><tt>path</tt> -- the path to the unix domain socket when not using INET</li></ul></div><h3>Return Value</h3><div class="indent">client</div><h3>Details<a name="details"></a></h3><div class="indent"> Set the server host:port or path to connect to.<br><br> <pre>
+ (set-server client :host host :port port)
+ (set-server client :path unix-path) </pre><br><br> In the first form, sets the <tt>host</tt> (string) and <tt>port</tt> (integer)
+ details for the searchd server using a network (INET) socket.<br><br> In the second form, where <tt>unix-path</tt> is a local filesystem path
+ (optionally prefixed by 'unix://'), sets the client to access the
+ searchd server via a local (UNIX domain) socket at the specified path.</div></div></td><td valign="top" width="5%">
+ &nbsp;
+ </td><td valign="top" width="35%"><h3>See also</h3><div class="indent"><table cellspacing="0" cellpadding="0"><tr><td><a href="com.oppermannen.sphinx-search-api__class__sphinx-client.html"><tt>sphinx-client</tt></a></td></tr></table></div></td></tr></table></div></body></html>
\ No newline at end of file
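A minimal sketch of the two connection forms documented above; the socket path is a placeholder:

    ;; Sketch only: INET form first, then the UNIX domain socket form.
    (let ((client (make-instance 'sphinx-client)))
      (set-server client :host "localhost" :port 3312)    ; network (INET) socket
      (set-server client :path "/var/run/searchd.sock"))  ; local socket; 'unix://' prefix optional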
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__sort-by.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__sort-by.html
new file mode 100644
index 0000000..3b5424d
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__sort-by.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function sort-by</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ sort-by</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>sort-by</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__sort-mode.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__sort-mode.html
new file mode 100644
index 0000000..afc7c15
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__sort-mode.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function sort-mode</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ sort-mode</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>sort-mode</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__status.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__status.html
new file mode 100644
index 0000000..29d3da7
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__status.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function status</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ status</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>status</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__fun__weights.html b/doc/pages/com.oppermannen.sphinx-search-api__fun__weights.html
new file mode 100644
index 0000000..ddb6c9a
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__fun__weights.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Function weights</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Function
+ weights</h2></div><div class="padded"><h3>Lambda List</h3><div class="indent"><tt>weights</tt> (<b>object</b>)</div><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-excerpt+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-excerpt+.html
new file mode 100644
index 0000000..4296450
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-excerpt+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-command-excerpt+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-command-excerpt+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-keywords+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-keywords+.html
new file mode 100644
index 0000000..2b43f80
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-keywords+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-command-keywords+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-command-keywords+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-persist+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-persist+.html
new file mode 100644
index 0000000..54514c6
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-persist+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-command-persist+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-command-persist+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-search+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-search+.html
new file mode 100644
index 0000000..d03b6f3
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-search+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-command-search+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-command-search+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-update+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-update+.html
new file mode 100644
index 0000000..4a265f4
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-command-update+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-command-update+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-command-update+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-error+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-error+.html
new file mode 100644
index 0000000..639d229
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-error+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-error+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-error+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-ok+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-ok+.html
new file mode 100644
index 0000000..a3dff20
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-ok+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-ok+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-ok+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-retry+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-retry+.html
new file mode 100644
index 0000000..7535663
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-retry+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-retry+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-retry+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-warning+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-warning+.html
new file mode 100644
index 0000000..7f98206
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+searchd-warning+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +searchd-warning+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +searchd-warning+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-bigint+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-bigint+.html
new file mode 100644
index 0000000..cb9cbb0
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-bigint+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-bigint+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-bigint+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-bool+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-bool+.html
new file mode 100644
index 0000000..77997af
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-bool+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-bool+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-bool+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-float+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-float+.html
new file mode 100644
index 0000000..6cea57e
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-float+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-float+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-float+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-integer+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-integer+.html
new file mode 100644
index 0000000..420c95b
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-integer+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-integer+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-integer+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-multi+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-multi+.html
new file mode 100644
index 0000000..480ede2
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-multi+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-multi+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-multi+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-none+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-none+.html
new file mode 100644
index 0000000..4ea379b
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-none+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-none+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-none+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-ordinal+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-ordinal+.html
new file mode 100644
index 0000000..a4bbea5
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-ordinal+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-ordinal+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-ordinal+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-timestamp+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-timestamp+.html
new file mode 100644
index 0000000..82757d2
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-timestamp+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-timestamp+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-timestamp+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-types+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-types+.html
new file mode 100644
index 0000000..07c21b0
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-attr-types+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-attr-types+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-attr-types+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-floatrange+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-floatrange+.html
new file mode 100644
index 0000000..c1a2d52
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-floatrange+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-filter-floatrange+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-filter-floatrange+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-range+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-range+.html
new file mode 100644
index 0000000..4c79fa0
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-range+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-filter-range+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-filter-range+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-values+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-values+.html
new file mode 100644
index 0000000..6326a1c
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-filter-values+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-filter-values+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-filter-values+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-attr+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-attr+.html
new file mode 100644
index 0000000..d0232ea
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-attr+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-groupby-attr+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-groupby-attr+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-attrpair+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-attrpair+.html
new file mode 100644
index 0000000..dc8575c
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-attrpair+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-groupby-attrpair+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-groupby-attrpair+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-day+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-day+.html
new file mode 100644
index 0000000..7b0489d
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-day+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-groupby-day+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-groupby-day+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-month+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-month+.html
new file mode 100644
index 0000000..de536d8
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-month+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-groupby-month+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-groupby-month+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-week+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-week+.html
new file mode 100644
index 0000000..b2a7e60
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-week+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-groupby-week+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-groupby-week+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-year+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-year+.html
new file mode 100644
index 0000000..e717e03
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-groupby-year+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-groupby-year+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-groupby-year+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-all+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-all+.html
new file mode 100644
index 0000000..afcefa8
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-all+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-all+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-all+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-any+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-any+.html
new file mode 100644
index 0000000..d82e162
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-any+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-any+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-any+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-boolean+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-boolean+.html
new file mode 100644
index 0000000..730bbba
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-boolean+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-boolean+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-boolean+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-extended+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-extended+.html
new file mode 100644
index 0000000..7165224
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-extended+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-extended+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-extended+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-extended2+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-extended2+.html
new file mode 100644
index 0000000..6ef61a0
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-extended2+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-extended2+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-extended2+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-fullscan+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-fullscan+.html
new file mode 100644
index 0000000..0265a03
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-fullscan+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-fullscan+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-fullscan+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-phrase+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-phrase+.html
new file mode 100644
index 0000000..252ac9f
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-match-phrase+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-match-phrase+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-match-phrase+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-bm25+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-bm25+.html
new file mode 100644
index 0000000..879bbae
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-bm25+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-rank-bm25+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-rank-bm25+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-none+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-none+.html
new file mode 100644
index 0000000..c22bca2
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-none+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-rank-none+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-rank-none+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-proximity-bm25+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-proximity-bm25+.html
new file mode 100644
index 0000000..258cd10
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-proximity-bm25+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-rank-proximity-bm25+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-rank-proximity-bm25+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-wordcount+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-wordcount+.html
new file mode 100644
index 0000000..49588e4
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-rank-wordcount+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-rank-wordcount+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-rank-wordcount+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-attr-asc+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-attr-asc+.html
new file mode 100644
index 0000000..22d4bbc
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-attr-asc+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-attr-asc+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-attr-asc+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-attr-desc+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-attr-desc+.html
new file mode 100644
index 0000000..5fd9d73
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-attr-desc+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-attr-desc+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-attr-desc+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-expr+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-expr+.html
new file mode 100644
index 0000000..19f5110
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-expr+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-expr+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-expr+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-extended+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-extended+.html
new file mode 100644
index 0000000..9db3901
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-extended+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-extended+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-extended+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-relevance+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-relevance+.html
new file mode 100644
index 0000000..6a61026
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-relevance+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-relevance+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-relevance+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-time-segments+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-time-segments+.html
new file mode 100644
index 0000000..cf72a95
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+sph-sort-time-segments+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +sph-sort-time-segments+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +sph-sort-time-segments+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-excerpt+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-excerpt+.html
new file mode 100644
index 0000000..192b65f
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-excerpt+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +ver-command-excerpt+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +ver-command-excerpt+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-keywords+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-keywords+.html
new file mode 100644
index 0000000..feeb904
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-keywords+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +ver-command-keywords+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +ver-command-keywords+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-search+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-search+.html
new file mode 100644
index 0000000..f1bcc2b
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-search+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +ver-command-search+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +ver-command-search+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-update+.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-update+.html
new file mode 100644
index 0000000..98eaacc
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__+ver-command-update+.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable +ver-command-update+</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ +ver-command-update+</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable___debug_.html b/doc/pages/com.oppermannen.sphinx-search-api__variable___debug_.html
new file mode 100644
index 0000000..db15c3f
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable___debug_.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable *debug*</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ *debug*</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable___response-length_.html b/doc/pages/com.oppermannen.sphinx-search-api__variable___response-length_.html
new file mode 100644
index 0000000..bb3acb9
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable___response-length_.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable *response-length*</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ *response-length*</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
diff --git a/doc/pages/com.oppermannen.sphinx-search-api__variable__sph.html b/doc/pages/com.oppermannen.sphinx-search-api__variable__sph.html
new file mode 100644
index 0000000..b270902
--- /dev/null
+++ b/doc/pages/com.oppermannen.sphinx-search-api__variable__sph.html
@@ -0,0 +1,10 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
+<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"><title>Variable sph</title><link rel="stylesheet" type="text/css" href="../index.css"><meta http-equiv="Content-Type" content="text/html; charset=UTF-8"></head><body><div id="header"><table cellspacing="0" cellpadding="0" width="100%"><tr><td valign="center">
+ &nbsp;&nbsp;
+ <b> Common Lisp Sphinx Search API</b></td><td valign="center" align="right"><b>API documentation</b></td></tr></table></div><div class="main"><div class="padded"><p class="noindent">
+ Package:
+ <a href="com.oppermannen.sphinx-search-api.html">com.oppermannen.sphinx-search-api</a></p><h2 class="page-title">
+ Variable
+ sph</h2><p style="color: red; font-weight: bold">
+ No documentation string. Possibly unimplemented or incomplete.
+ </p></div></div></body></html>
\ No newline at end of file
|
thijs/cl-sphinx-search
|
47d001ad9f5d4647730bb31e4c7a8b1786366f9b
|
Added documentation to exported functions and package def (atdoc style); some small fixes
|
diff --git a/package.lisp b/package.lisp
index 00c684b..6c88986 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,9 +1,70 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api
(:nicknames "sphinx-search-api")
(:use :cl :iolib.sockets :babel :cl-pack)
- (:export #:bla))
+ (:export #:set-server
+ #:set-limits
+ #:query
+ #:add-query
+ #:run-queries
+ #:get-last-error
+ #:get-last-warning)
+ (:documentation
+ "This package provides an interface to the search daemon (@em{searchd})
+ for @a[http://www.sphinxsearch.com/]{Sphinx}.
+
+ @begin[About Sphinx]{section}
+
+ From the site:
+
+ @begin{pre}
+ Sphinx is a full-text search engine, distributed under GPL version 2.
+ Commercial license is also available for embedded use.
+
+ Generally, it's a standalone search engine, meant to provide fast,
+ size-efficient and relevant fulltext search functions to other applications.
+ Sphinx was specially designed to integrate well with SQL databases and
+ scripting languages. Currently built-in data sources support fetching data
+ either via direct connection to MySQL or PostgreSQL, or using XML pipe
+ mechanism (a pipe to indexer in special XML-based format which Sphinx
+ recognizes).
+
+ As for the name, Sphinx is an acronym which is officially decoded as
+ SQL Phrase Index. Yes, I know about CMU's Sphinx project.
+ @end{pre}
+ @end{section}
+
+ @begin[Synopsis]{section}
+ @begin{pre}
+
+ (let ((sph (make-instance 'sphinx-client)))
+ (add-query sph \"test\")
+ (run-queries sph))
+
+ @end{pre}
+ @end{section}
+
+ @begin[One class]{section}
+ There is just one class:
+
+ @aboutclass{sphinx-client}
+ @end{section}
+
+ @begin[Methods]{section}
+ Setting options/parameters:
+
+ @aboutfun{set-server}
+ @aboutfun{set-limits}
+
+ Running queries:
+
+ @aboutfun{query}
+ @aboutfun{add-query}
+ @aboutfun{run-queries}
+
+ @end{section}
+"))
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 14ea4ba..ee63eaa 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,622 +1,769 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(status
:accessor status
:initarg :status
:initform ()
:documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
- :documentation "requests array for multi-query")))
+ :documentation "list of requests for batched query runs"))
+ (:documentation
+ "@short{The sphinx-search class.}
+
+ The interface to the search daemon goes through this class.
+
+ Set options and settings of the search to be performed on an object
+ of this class, and then have it perform one search by calling
+ @fun{query}, or add a number of queries using @fun{add-query} and
+ then calling @fun{run-queries}.
+
+   You get back either a single result hash or a list of result hashes;
+   on error, the message can be retrieved with the @fun{get-last-error} function.
+
+ @see{set-server}
+ @see{set-limits}
+ @see{get-last-warning}
+"))
(defvar *response-length* ())
(defmacro adv-p (n)
`(setf p (+ p ,n)))
-(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
- "Method 'set-server'
+(defgeneric set-server (client &key host port path)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[host]{the host to connect to when using an INET socket}
+ @arg[port]{the port to connect to when using an INET socket}
+ @arg[path]{the path to the unix domain socket when not using INET}
+ @return{client}
+ @short{Set the server host:port or path to connect to.}
- (set-server sph :host host :port port)
- (set-server sph :path unix-path)
+ @begin{pre}
+ (set-server client :host host :port port)
+ (set-server client :path unix-path)
+ @end{pre}
-In the first form, sets the host (string) and port (integer) details for the
-searchd server using a network (INET) socket.
+ In the first form, sets the @code{host} (string) and @code{port} (integer)
+ details for the searchd server using a network (INET) socket.
-In the second form, where :path is a local filesystem path (optionally prefixed
-by 'unix://'), sets the client to access the searchd server via a local (UNIX
-domain) socket at the specified path.
+ In the second form, where @code{unix-path} is a local filesystem path
+ (optionally prefixed by 'unix://'), sets the client to access the
+ searchd server via a local (UNIX domain) socket at the specified path.
+"))
-Returns sph.
-"
+(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
(cond (path
(assert (stringp path))
(when (string= path "unix://" :start1 0 :end1 7)
(setf path (subseq path 6)))
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
(setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
(assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ())))
client)
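
For illustration, a minimal usage sketch of both connection forms; the
host, port, and socket path below are placeholder values chosen for the
example, not anything mandated by searchd:

;; Sketch (illustrative values): an INET socket, then a UNIX-domain
;; socket. Each call returns the client itself.
(let ((sph (make-instance 'sphinx-client)))
  (set-server sph :host "127.0.0.1" :port 3312)
  (set-server sph :path "/tmp/searchd.sock"))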
-(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
- "Method 'set-limits'
+(defgeneric set-limits (client &key offset limit max cutoff)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[offset]{the offset to start returning matches from}
+ @arg[limit]{how many matches to return starting from @code{offset}}
+ @arg[max]{maximum number of matches to return}
+ @arg[cutoff]{the cutoff to stop searching at}
+ @return{client}
+ @short{Set the offset, limit, cutoff and max matches to return.}
- (set-limits sph :limit limit)
- (set-limits sph :offset offset :limit limit)
- (set-limits sph :offset offset :limit limit :max max-matches)
+ @begin{pre}
+ (set-limits client :limit limit)
+ (set-limits client :offset offset :limit limit)
+ (set-limits client :offset offset :limit limit :max max-matches)
+ @end{pre}
-Set limit of matches to return. Defaults to offset 0 and 1000 max matches.
+  Set the limit of matches to return. Defaults to offset 0 and 1000 max matches.
+"))
-Returns sph.
-"
+(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff))
client)
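
A usage sketch with illustrative numbers: fetch the second page of ten
results while capping the search at 500 candidate matches:

;; Sketch: offset 10, page size 10, at most 500 matches considered.
(let ((sph (make-instance 'sphinx-client)))
  (set-limits sph :offset 10 :limit 10 :max 500))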
+(defgeneric get-last-error (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a string; the last error message returned from the @code{searchd}}
+
+ Get the last error message sent by searchd
+"))
+
(defmethod get-last-error ((client sphinx-client))
- "Get the last error message sent by searchd"
(last-error client))
+(defgeneric get-last-warning (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{a string; the last warning message returned from the @code{searchd}}
+
+ Get the last warning message sent by searchd
+"))
+
(defmethod get-last-warning ((client sphinx-client))
- "Get the last warning message sent by searchd"
(last-warning client))
+(defgeneric query (client query &key index comment)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[query]{the query to run through @code{searchd}}
+ @arg[index]{the index to use; defaults to \"*\"}
+ @arg[comment]{a comment describing this query; default none}
+ @return{nil or a hash containing the query results}
+ @short{Run a query through @code{searchd}.}
+
+ @begin{pre}
+ (query client \"test\")
+ @end{pre}
+
+  This method runs a single query through @code{searchd}.
+
+ It returns the results in a hash with the following keys:
+ @begin{dl}
+ @dt[attributes]{a hash-table containing attributes}
+ @dt[fields]{a list of fields}
+ @dt[matches]{a hash-table containing the matches}
+ @dt[status]{the status returned by @code{searchd}}
+ @dt[status-message]{the status message returned by @code{searchd}}
+ @dt[time]{the time @code{searchd} took for the query}
+ @dt[total]{the total matches returned}
+ @dt[total-found]{the total number of matches found}
+ @dt[words]{a hash-table containing the matching words with their statistics}
+ @end{dl}
+
+ @see{add-query}
+ @see{run-queries}
+
+"))
+
(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
(assert (eql (length (reqs client)) 0))
(add-query client query :index index :comment comment)
(let* ((result (car (run-queries client))))
(when result
(setf (last-error client) (gethash 'status-message result))
(setf (last-warning client) (gethash 'status-message result))
(let ((status (gethash 'status result)))
(setf (status client) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
result)))))
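
Assuming a searchd instance is reachable at the configured address, this
sketch runs one query and reads a few of the documented result keys (the
query string and index name are illustrative):

;; Sketch: run a single query and inspect the result hash.
(let* ((sph (make-instance 'sphinx-client))
       (result (query sph "test" :index "*")))
  (when result
    (format t "found ~a matches in ~a seconds~%"
            (gethash 'total-found result) (gethash 'time result))
    ;; 'words maps each matched word to a hash with 'docs and 'hits.
    (maphash (lambda (word stats)
               (format t "  ~a: ~a docs, ~a hits~%"
                       word (gethash 'docs stats) (gethash 'hits stats)))
             (gethash 'words result))))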
+(defgeneric run-queries (client)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @return{nil or a list of hashes}
+ @short{Run the queries added with @code{add-query} through @code{searchd}.}
+
+ @begin{pre}
+ (add-query client \"test\")
+ (add-query client \"word\")
+ (run-queries client)
+ @end{pre}
+
+ Query @code{searchd} with the collected queries added with @code{add-query}.
+
+ It returns a list of hashes containing the result of each query. Each hash
+ has the following keys:
+ @begin{dl}
+ @dt[attributes]{a hash-table containing attributes}
+ @dt[fields]{a list of fields}
+ @dt[matches]{a hash-table containing the matches}
+ @dt[status]{the status returned by @code{searchd}}
+ @dt[status-message]{the status message returned by @code{searchd}}
+ @dt[time]{the time @code{searchd} took for the query}
+ @dt[total]{the total matches returned}
+ @dt[total-found]{the total number of matches found}
+ @dt[words]{a hash-table containing the matching words with their statistics}
+ @end{dl}
+
+ @see{query}
+ @see{add-query}
+
+"))
+
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
  (let* ((n-requests (length (reqs client)))
         (requests (pack "Na*" n-requests (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(let ((fp (%connect client)))
(when fp
(%send client :fp fp :data data)
(let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
              (%parse-response response n-requests))))))))
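
A sketch of the batched form, again assuming a reachable searchd and
illustrative query strings:

;; Sketch: queue two queries, then run both in a single round-trip.
(let ((sph (make-instance 'sphinx-client)))
  (add-query sph "test")
  (add-query sph "word")
  (dolist (result (run-queries sph))
    (format t "status ~a: ~a of ~a matches returned~%"
            (gethash 'status result)
            (gethash 'total result)
            (gethash 'total-found result))))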
+(defgeneric add-query (client query &key index comment)
+ (:documentation
+ "@arg[client]{a @class{sphinx-client}}
+ @arg[query]{the query to run through @code{searchd}}
+ @arg[index]{the index to use; defaults to \"*\"}
+ @arg[comment]{a comment describing this query; default none}
+ @return{length of query queue}
+ @short{Add a query to a batch request.}
+
+ @begin{pre}
+ (add-query client \"test\")
+ (add-query client \"word\" :index \"*\")
+ (run-queries client)
+ @end{pre}
+
+ Add a query to the queue of batched queries.
+
+ Batch queries enable @code{searchd} to perform internal optimizations,
+  if possible, and reduce network connection overhead in all cases.
+
+ For instance, running exactly the same query with different
+ group-by settings will enable @code{searchd} to perform expensive
+ full-text search and ranking operation only once, but compute
+ multiple group-by results from its output.
+
+  It returns the new length of the query queue, which is also the
+  (1-based) position of the newly added query in the queue.
+
+ @see{query}
+ @see{run-queries}
+
+"))
+
(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
                         (%pack-float (fourth (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
#+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
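
The request above is assembled with cl-pack format strings in the Perl
pack/unpack style: "N" packs an unsigned 32-bit big-endian integer, and
"N/a*" packs a string prefixed by its length as an "N". A minimal sketch
of that building block:

;; Sketch: (pack "N/a*" "test") produces four length bytes
;; (0 0 0 4) followed by the characters of "test".
(pack "N/a*" "test")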
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (%read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
- ;;(finish-output (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defun %read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key fp client-version)
(multiple-value-bind (status version len) (unpack "n2N" (%read-from fp 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (%read-from fp left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close fp)
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
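
The eight-byte reply header unpacked with "n2N" above consists of two
16-bit words (status and version) followed by a 32-bit body length, all
big-endian. A self-contained sketch with illustrative values:

;; Sketch: decode a reply header claiming status 0, version #x113,
;; and a 100-byte body (the numbers are made up, not protocol facts).
(multiple-value-bind (status version len)
    (unpack "n2N" (pack "nnN" 0 #x113 100))
  (list status version len))  ; => (0 275 100)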
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
    (loop for i below n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) &key fp data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" fp)
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to fp (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
+ (declare (ignore k))
(concatenate 'string
- (pack "N/a*" (get-hash 'attr entry))
- (pack "NN" (get-hash 'type entry) (hash-table-count (get-hash 'values entry)))
+ (pack "N/a*" (gethash 'attr entry))
+ (pack "NN" (gethash 'type entry) (hash-table-count (gethash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
- (cond ((eql (get-hash 'type entry) +sph-attr-float+)
+ (cond ((eql (gethash 'type entry) +sph-attr-float+)
(%pack-float v))
- ((eql (get-hash 'type entry) +sph-attr-bigint+)
+ ((eql (gethash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
- (get-hash 'values entry))))
+ (gethash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(map 'string #'(lambda (filter)
(when (hash-table-p filter)
(concatenate 'string
(pack "N/a*" (gethash 'attr filter))
(let ((type (gethash 'type filter)))
(concatenate 'string
(pack "N" type)
(cond ((eql type +sph-filter-values+)
- (%pack-array-signed-quads (get-hash 'values filter)))
+ (%pack-array-signed-quads (gethash 'values filter)))
((eql type +sph-filter-range+)
- (concatenate 'string (pack "q>" (get-hash 'min filter))
- (pack "q>" (get-hash 'max filter))))
+ (concatenate 'string (pack "q>" (gethash 'min filter))
+ (pack "q>" (gethash 'max filter))))
((eql type +sph-filter-floatrange+)
- (concatenate 'string (%pack-float (get-hash 'min filter))
- (%pack-float (get-hash 'max filter))))
+ (concatenate 'string (%pack-float (gethash 'min filter))
+ (%pack-float (gethash 'max filter))))
(t
(error "Unhandled filter type ~S" type)))
- (pack "N" (get-hash 'exclude filter)))))))
+ (pack "N" (gethash 'exclude filter)))))))
filters))
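;; A minimal sketch (not from the original source) of the filter
;; hash-table shape %pack-filters above consumes; the attribute name
;; and values are hypothetical:
;;
;;   (let ((f (make-hash-table)))
;;     (setf (gethash 'attr f) "group_id")
;;     (setf (gethash 'type f) +sph-filter-values+)
;;     (setf (gethash 'values f) '(1 2 3))
;;     (setf (gethash 'exclude f) 0)
;;     (push f (filters client)))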
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map 'string #'(lambda (value)
(pack "q>" value)) values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
f87d50f1458e0c828c1ffb2406cf87c7fc8db8cd
|
Move stuff around and add some docs
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 0bbf3fd..14ea4ba 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,567 +1,622 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
+ (status
+ :accessor status
+ :initarg :status
+ :initform ()
+ :documentation "status of last query")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
-(defmethod set-server ((client sphinx-client) &key host port)
- #+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s" host port)
- (assert (stringp host))
- (cond ((string= host "/" :start1 0 :end1 1)
- (setf (%path client) host)
- (setf (%host client) ())
- (setf (%port client) ()))
- ((string= host "unix://" :start1 0 :end1 7)
- (setf (%path client) (subseq host 6 (length host)))
+(defvar *response-length* ())
+
+
+(defmacro adv-p (n)
+ `(setf p (+ p ,n)))
+
+
+(defmethod set-server ((client sphinx-client) &key (host "localhost") (port 3312) path)
+ "Method 'set-server'
+
+ (set-server sph :host host :port port)
+ (set-server sph :path unix-path)
+
+In the first form, sets the host (string) and port (integer) details for the
+searchd server using a network (INET) socket.
+
+In the second form, where :path is a local filesystem path (optionally prefixed
+by 'unix://'), sets the client to access the searchd server via a local (UNIX
+domain) socket at the specified path.
+
+Returns sph.
+"
+ (cond (path
+ (assert (stringp path))
+ (when (string= path "unix://" :start1 0 :end1 7)
+ (setf path (subseq path 6)))
+ #+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s~%" path)
+ (setf (%path client) path)
(setf (%host client) ())
(setf (%port client) ()))
(t
- #+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s" host port)
+ #+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s~%" host port)
+ (assert (stringp host))
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
- (setf (%path client) ()))))
+ (setf (%path client) ())))
+ client)
+
+
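+;; Usage sketch (illustrative, not part of the source): given a
+;; sphinx-client instance sph, both forms return sph; the host, port
+;; and socket path below are hypothetical.
+;;
+;;   (set-server sph :host "localhost" :port 3312)
+;;   (set-server sph :path "/var/run/searchd.sock")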
+(defmethod set-limits ((client sphinx-client) &key (offset 0) limit (max 1000) cutoff)
+ "Method 'set-limits'
+
+ (set-limits sph :limit limit)
+ (set-limits sph :offset offset :limit limit)
+ (set-limits sph :offset offset :limit limit :max max-matches)
+
+Set the offset and limit of matches to return. Defaults to offset 0 and a
+maximum of 1000 matches.
+
+Returns sph.
+"
+ (assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
+ (assert (and (numberp max) (>= max 0)))
+ (setf (offset client) offset)
+ (setf (limit client) limit)
+ (when (> max 0)
+ (setf (max-matches client) max))
+ (when (and cutoff (>= cutoff 0))
+ (setf (cutoff client) cutoff))
+ client)
+
+
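+;; Usage sketch (illustrative): fetch matches 20-29, capping the
+;; server-side result window at 1000 matches.
+;;
+;;   (set-limits sph :offset 20 :limit 10 :max 1000)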
+(defmethod get-last-error ((client sphinx-client))
+ "Get the last error message sent by searchd"
+ (last-error client))
+
+
+(defmethod get-last-warning ((client sphinx-client))
+ "Get the last warning message sent by searchd"
+ (last-warning client))
+
+
+(defmethod query ((client sphinx-client) query &key (index "*") (comment ""))
+ (assert (eql (length (reqs client)) 0))
+ (add-query client query :index index :comment comment)
+ (let* ((result (car (run-queries client))))
+ (when result
+ (setf (last-error client) (gethash 'status-message result))
+ (setf (last-warning client) (gethash 'status-message result))
+ (let ((status (gethash 'status result)))
+ (setf (status client) status)
+ (when (or (eql status +searchd-ok+)
+ (eql status +searchd-warning+))
+ result)))))
+
+
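+;; Usage sketch (hypothetical index name and query string): query
+;; queues one request via add-query, sends it with run-queries, and
+;; returns the result hash-table, or NIL on error.
+;;
+;;   (let ((result (query sph "some words" :index "test1")))
+;;     (when result
+;;       (gethash 'total-found result)))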
+(defmethod run-queries ((client sphinx-client))
+ (assert (> (length (reqs client)) 0))
+ (let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
+ #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
+ (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
+ (setf (reqs client) ())
+ (let ((fp (%connect client)))
+ (when fp
+ (%send client :fp fp :data data)
+ (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
+ #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
+ (when response
+ (setf *response-length* (length response))
+ (%parse-response response (length (reqs client))))))))))
+
+
+(defmethod add-query ((client sphinx-client) query &key (index "*") (comment ""))
+ (let ((req (concatenate 'string
+ (pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
+ (pack "N/a*" (sort-by client))
+ (pack "N/a*" query)
+ (pack "N*" (length (weights client)) (weights client))
+ (pack "N/a*" index)
+ (pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
+ (pack "N" (length (filters client)))
+ (%pack-filters (filters client))
+ (pack "NN/a*" (group-function client) (group-by client))
+ (pack "N" (max-matches client))
+ (pack "N/a*" (group-sort client))
+ (pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
+ (pack "N/a*" (group-distinct client))
+ (cond ((anchor client)
+ (concatenate 'string
+ (pack "N/a*" (first (anchor client)))
+ (pack "N/a*" (third (anchor client)))
+ (%pack-float (second (anchor client)))
+ (%pack-float (last (anchor client)))))
+ (t
+ (pack "N" 0)))
+ (%pack-hash (index-weights client))
+ (pack "N" (max-query-time client))
+ (%pack-hash (field-weights client))
+ (pack "N/a*" comment)
+ (pack "N" (hash-table-count (overrides client)))
+ (%pack-overrides (overrides client))
+ (pack "N/a*" (if (select client)
+ (select client)
+ "")))))
+ #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
+ (setf (reqs client) (append (reqs client) (list req))))
+ (length (reqs client)))
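+;; Multi-query sketch (hypothetical indexes): add-query only queues a
+;; request; run-queries sends the whole batch in one round-trip and
+;; returns the parsed results, one hash-table per query.
+;;
+;;   (add-query sph "foo" :index "test1")
+;;   (add-query sph "bar" :index "test2")
+;;   (run-queries sph)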
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
- (let ((v (unpack "N*" (read-from (%socket client) 4))))
+ (let ((v (unpack "N*" (%read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
;;(finish-output (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
-(defun read-from (socket size)
+
+(defun %read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
+
(defmethod %get-response ((client sphinx-client) &key fp client-version)
- (multiple-value-bind (status version len) (unpack "n2N" (read-from fp 8))
+ (multiple-value-bind (status version len) (unpack "n2N" (%read-from fp 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
- (let ((chunk (read-from fp left)))
+ (let ((chunk (%read-from fp left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close fp)
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
-(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
- (assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
- (assert (and (numberp max) (>= max 0)))
- (setf (offset client) offset)
- (setf (limit client) limit)
- (when (> max 0)
- (setf (max-matches client) max))
- (when (and cutoff (>= cutoff 0))
- (setf (cutoff client) cutoff)))
-
-
-(defvar *response-length* ())
-
-(defmethod run-queries ((client sphinx-client))
- (assert (> (length (reqs client)) 0))
- (let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
- #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
- (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
- (setf (reqs client) ())
- (let ((fp (%connect client)))
- (when fp
- (%send client :fp fp :data data)
- (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
- #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
- (when response
- (setf *response-length* (length response))
- (%parse-response response (length (reqs client))))))))))
-
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 to n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
-(defmacro adv-p (n)
- `(setf p (+ p ,n)))
-
-
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) &key fp data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" fp)
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
- (sockets:send-to fp (string-to-octets data :encoding :latin-1))
- ;;(finish-output fp)
- )
-
-
-(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
- (let ((req (concatenate 'string
- (pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
- (pack "N/a*" (sort-by client))
- (pack "N/a*" query)
- (pack "N*" (length (weights client)) (weights client))
- (pack "N/a*" index)
- (pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
- (pack "N" (length (filters client)))
- (%pack-filters (filters client))
- (pack "NN/a*" (group-function client) (group-by client))
- (pack "N" (max-matches client))
- (pack "N/a*" (group-sort client))
- (pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
- (pack "N/a*" (group-distinct client))
- (cond ((anchor client)
- (concatenate 'string
- (pack "N/a*" (first (anchor client)))
- (pack "N/a*" (third (anchor client)))
- (%pack-float (second (anchor client)))
- (%pack-float (last (anchor client)))))
- (t
- (pack "N" 0)))
- (%pack-hash (index-weights client))
- (pack "N" (max-query-time client))
- (%pack-hash (field-weights client))
- (pack "N/a*" comment)
- (pack "N" (hash-table-count (overrides client)))
- (%pack-overrides (overrides client))
- (pack "N/a*" (if (select client)
- (select client)
- "")))))
- #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
- (setf (reqs client) (append (reqs client) (list req))))
- (length (reqs client)))
+ (sockets:send-to fp (string-to-octets data :encoding :latin-1)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(concatenate 'string
(pack "N/a*" (get-hash 'attr entry))
(pack "NN" (get-hash 'type entry) (hash-table-count (get-hash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (get-hash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (get-hash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(get-hash 'values entry))))
overrides)))
+
(defun %pack-filters (filters)
(map 'string #'(lambda (filter)
(when (hash-table-p filter)
(concatenate 'string
(pack "N/a*" (gethash 'attr filter))
(let ((type (gethash 'type filter)))
(concatenate 'string
(pack "N" type)
(cond ((eql type +sph-filter-values+)
(%pack-array-signed-quads (get-hash 'values filter)))
((eql type +sph-filter-range+)
(concatenate 'string (pack "q>" (get-hash 'min filter))
(pack "q>" (get-hash 'max filter))))
((eql type +sph-filter-floatrange+)
(concatenate 'string (%pack-float (get-hash 'min filter))
(%pack-float (get-hash 'max filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (get-hash 'exclude filter)))))))
filters))
-
-
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map 'string #'(lambda (value)
(pack "q>" value)) values-list)))
+
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
-
|
thijs/cl-sphinx-search
|
aa908351bf092bd0a51ac6e1262e104e6ae3d654
|
indentation
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index fc3f0ff..0bbf3fd 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,567 +1,567 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (%path client) host)
(setf (%host client) ())
(setf (%port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (%path client) (subseq host 6 (length host)))
(setf (%host client) ())
(setf (%port client) ()))
(t
#+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s" host port)
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
- :local-filename (namestring (%path client)))))
+ :local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
;;(finish-output (%socket client))
#+SPHINX-SEARCH-DEBUG (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
#+SPHINX-SEARCH-DEBUG (format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
#+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key fp client-version)
(multiple-value-bind (status version len) (unpack "n2N" (read-from fp 8))
#+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
#+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (read-from fp left)))
#+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
#+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close fp)
(let ((done (length response)))
#+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
(subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
(defvar *response-length* ())
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
#+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(let ((fp (%connect client)))
(when fp
(%send client :fp fp :data data)
(let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
#+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
(setf *response-length* (length response))
(%parse-response response (length (reqs client))))))))))
(defun %parse-response (response n-requests)
(let ((p 0)
(results ()))
(loop for i from 0 to n-requests
do
(multiple-value-bind (status new-p message) (%get-response-status response p)
(let ((result (make-hash-table)))
(setf p new-p)
(setf (gethash 'status-message result) message)
(setf (gethash 'status result) status)
(when (or (eql status +searchd-ok+)
(eql status +searchd-warning+))
(let ((attribute-names ()))
(multiple-value-bind (fields new-p) (%get-fields response p)
(setf p new-p)
(setf (gethash 'fields result) fields))
#+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
(setf p new-p)
(setf (gethash 'attributes result) attributes)
(setf attribute-names attr-names))
#+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
(setf p new-p)
(setf (gethash 'matches result) matches))
#+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
(multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
(adv-p 16)
#+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
(setf (gethash 'total result) total)
(setf (gethash 'total-found result) total-found)
(let ((time-str (with-output-to-string (s)
(format s "~,8f" (/ time 1000)))))
(setf (gethash 'time result) time-str))
(let ((words (make-hash-table :test 'equal)))
(dotimes (n word-count)
(let* ((len (unpack "N*" (subseq response p (+ p 4))))
(word (subseq response (+ p 4) (+ p 4 len)))
(docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
(hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
(word-info (make-hash-table)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
#+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
#+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
#+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
#+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
(adv-p (+ len 12))
(setf (gethash 'docs word-info) docs)
(setf (gethash 'hits word-info) hits)
(setf (gethash word words) word-info)
(when (> p *response-length*)
(return))))
(setf (gethash 'words result) words)))))
(push result results))))
results))
(defun %get-matches (response attribute-names attributes start)
(let ((count (unpack "N*" (subseq response start (+ start 4))))
(id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
(p (+ start 8))
(matches ()))
#+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
#+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
(dotimes (i count)
(let ((data (make-hash-table :test 'equal)))
(cond ((not (eql id-64 0))
(setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
(adv-p 8)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4))
(t
(setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)
(setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
(adv-p 4)))
#+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
(dolist (attr attribute-names)
(cond ((eql (gethash attr attributes) +sph-attr-bigint+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
(setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
(adv-p 8))
((eql (gethash attr attributes) +sph-attr-float+)
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
(let* ((uval (unpack "N*" (subseq response p (+ p 4))))
(tmp (pack "L" uval))
(floats (multiple-value-list (unpack "f*" tmp))))
(adv-p 4)
(setf (gethash attr data) floats)))
(t
(let ((val (unpack "N*" (subseq response p (+ p 4)))))
(adv-p 4)
#+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
(cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
(let ((vals ()))
(dotimes (i val)
(push (unpack "N*" (subseq response p (+ p 4))) vals)
(adv-p 4)
(when (> p *response-length*)
(return)))
#+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
(setf (gethash attr data) (nreverse vals))))
(t
#+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
(setf (gethash attr data) val)))))))
(push data matches)))
#+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
(values (nreverse matches) p)))
(defun %get-attributes (response start)
(let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
(p (+ start 4))
(attribute-names ())
(attributes (make-hash-table :test 'equal)))
#+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
(dotimes (i nattrs)
(let ((len (unpack "N*" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
(adv-p 4)
(let ((attr-name (subseq response p (+ p len))))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
#+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
(adv-p len)
(setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
#+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
(adv-p 4)
(push attr-name attribute-names)
(when (> p *response-length*)
(return)))))
#+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
(values attributes (nreverse attribute-names) p)))
(defun %get-fields (response start)
(let ((nfields (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4))
(fields ()))
#+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
#+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
#+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
(dotimes (i nfields)
(let ((len (unpack "N" (subseq response p (+ p 4)))))
#+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
(adv-p 4)
(push (subseq response p (+ p len)) fields)
(adv-p len)
(when (> p *response-length*)
(return))))
#+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
(values (nreverse fields) p)))
(defmacro adv-p (n)
`(setf p (+ p ,n)))
(defun %get-response-status (response start)
(let ((status (unpack "N" (subseq response start (+ start 4))))
(p (+ start 4)))
(cond ((not (eql status +searchd-ok+))
(let ((len (unpack "N" (subseq response p (+ p 4)))))
(setf p (+ p 4))
(let ((message (subseq response p (+ p len))))
(values status (+ p len) message))))
(t
(values status p "ok")))))
(defmethod %send ((client sphinx-client) &key fp data)
#+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" fp)
#+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
#+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to fp (string-to-octets data :encoding :latin-1))
;;(finish-output fp)
-)
+ )
(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
- (let ((req (concatenate 'string
+ (let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (last (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
- #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
- (setf (reqs client) (append (reqs client) (list req))))
- (length (reqs client)))
+ #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
+ (setf (reqs client) (append (reqs client) (list req))))
+ (length (reqs client)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(concatenate 'string
(pack "N/a*" (get-hash 'attr entry))
(pack "NN" (get-hash 'type entry) (hash-table-count (get-hash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (get-hash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (get-hash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(get-hash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(map 'string #'(lambda (filter)
(when (hash-table-p filter)
(concatenate 'string
(pack "N/a*" (gethash 'attr filter))
(let ((type (gethash 'type filter)))
(concatenate 'string
(pack "N" type)
(cond ((eql type +sph-filter-values+)
(%pack-array-signed-quads (get-hash 'values filter)))
((eql type +sph-filter-range+)
(concatenate 'string (pack "q>" (get-hash 'min filter))
(pack "q>" (get-hash 'max filter))))
((eql type +sph-filter-floatrange+)
(concatenate 'string (%pack-float (get-hash 'min filter))
(%pack-float (get-hash 'max filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (get-hash 'exclude filter)))))))
filters))
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map 'string #'(lambda (value)
- (pack "q>" value)) values-list)))
+ (pack "q>" value)) values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
d23896bdda240820dc47d14ac5576e25607478b6
|
Working run-queries; getting same results as from perlapi
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 93b7df7..fc3f0ff 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,413 +1,567 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
-
(defmethod set-server ((client sphinx-client) &key host port)
- (format t "set-server -> ~s : ~s" host port)
+ #+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (%path client) host)
(setf (%host client) ())
(setf (%port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (%path client) (subseq host 6 (length host)))
(setf (%host client) ())
(setf (%port client) ()))
(t
- (format t "set-server -> ~s : ~s" host port)
+ #+SPHINX-SEARCH-DEBUG (format t "set-server -> ~s : ~s" host port)
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
;;(finish-output (%socket client))
-          (format t "received version number: ~a~%" v)
+          #+SPHINX-SEARCH-DEBUG (format t "received version number: ~a~%" v)
(%socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
-    (format t "received bytes: ~a~%" rec)
+    #+SPHINX-SEARCH-DEBUG (format t "received bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
- (format t "octets-to-string gives: ~a~%" res)
+ #+SPHINX-SEARCH-DEBUG (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key fp client-version)
(multiple-value-bind (status version len) (unpack "n2N" (read-from fp 8))
- (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
+ #+SPHINX-SEARCH-DEBUG (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
- (format t "left: ~a~%" left)
+ #+SPHINX-SEARCH-DEBUG (format t "left: ~a~%" left)
(let ((chunk (read-from fp left)))
- (format t "chunk: ~a~%" chunk)
- (format t "chunk length: ~a~%" (length chunk))
+ #+SPHINX-SEARCH-DEBUG (format t "chunk: ~a~%" chunk)
+ #+SPHINX-SEARCH-DEBUG (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
- (setf response (concatenate 'vector response chunk))
+ (setf response (concatenate 'string response chunk))
(setf left (- left (length chunk))))
(return))))
(close fp)
(let ((done (length response)))
- (format t "got response of length: ~a~%raw response: ~a~%" done response)
+ #+SPHINX-SEARCH-DEBUG (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (unpack "N" (subseq response 0 4))))
- (setf (last-warning client) (subseq response 4 warn-length))
- (subseq response warn-length)))
+ (setf (last-warning client) (subseq response 4 (+ 4 warn-length)))
+ (subseq response (+ 4 warn-length))))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit (max 0) cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
+(defvar *response-length* ())
+
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((n-requests (length (reqs client)))
      (requests (pack "Na*" (length (reqs client)) (reqs client))))
- (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
+ #+SPHINX-SEARCH-DEBUG (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(let ((fp (%connect client)))
(when fp
(%send client :fp fp :data data)
(let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
- (format t "run-queries response: ~a~%" response)
+ #+SPHINX-SEARCH-DEBUG (format t "run-queries response: ~a~%" response)
(when response
- (%parse-response response))))))))
+ (setf *response-length* (length response))
+              (%parse-response response n-requests))))))))
-(defmethod %parse-response ((client sphinx-client) response n-requests)
- (let ((results ())
- (p 0))
+
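+;; Layout of a searchd search reply, per query (as decoded below): a
+;; 32-bit status word; an error/warning message when the status is not
+;; OK; the field-name list; the attribute table; the match list; then
+;; total, total-found, query time and word count, followed by per-word
+;; docs/hits statistics.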
+(defun %parse-response (response n-requests)
+ (let ((p 0)
+ (results ()))
(loop for i from 0 below n-requests
- (do
- (let ((status (unpack "N" (subseq response p 4))))
- (setf p (+ p 4))
- (cond ((not (eql status +searchd-ok+))
- (let ((len (unpack "N" (subseq response p 4))))
- (setf p (+ p 4))
- (let ((message (subseq response p len)))
- (setf p (+ p len))
- (cond ((eql status +searchd-warning+)
- (setf (gethash 'warning result) message))
- (t
- (setf (gethash 'error result) message)
- (
- )
+ do
+ (multiple-value-bind (status new-p message) (%get-response-status response p)
+ (let ((result (make-hash-table)))
+ (setf p new-p)
+ (setf (gethash 'status-message result) message)
+ (setf (gethash 'status result) status)
+ (when (or (eql status +searchd-ok+)
+ (eql status +searchd-warning+))
+ (let ((attribute-names ()))
+ (multiple-value-bind (fields new-p) (%get-fields response p)
+ (setf p new-p)
+ (setf (gethash 'fields result) fields))
+ #+SPHINX-SEARCH-DEBUG (format t "after get-fields:~% p: ~a~% rest: ~a~%" p (subseq response p))
+ (multiple-value-bind (attributes attr-names new-p) (%get-attributes response p)
+ (setf p new-p)
+ (setf (gethash 'attributes result) attributes)
+ (setf attribute-names attr-names))
+ #+SPHINX-SEARCH-DEBUG (format t "after get-attributes:~% p: ~a~% rest: ~a~%" p (subseq response p))
+ (multiple-value-bind (matches new-p) (%get-matches response attribute-names (gethash 'attributes result) p)
+ (setf p new-p)
+ (setf (gethash 'matches result) matches))
+ #+SPHINX-SEARCH-DEBUG (format t "after get-matches:~% p: ~a~% rest: ~a~%" p (subseq response p))
+ (multiple-value-bind (total total-found time word-count) (unpack "N*N*N*N*" (subseq response p (+ p 16)))
+ (adv-p 16)
+ #+SPHINX-SEARCH-DEBUG (format t "total: ~a~%total-found: ~a~%time: ~a~%word-count: ~a~%" total total-found time word-count)
+ (setf (gethash 'total result) total)
+ (setf (gethash 'total-found result) total-found)
+ (let ((time-str (with-output-to-string (s)
+ (format s "~,8f" (/ time 1000)))))
+ (setf (gethash 'time result) time-str))
+ (let ((words (make-hash-table :test 'equal)))
+ (dotimes (n word-count)
+ (let* ((len (unpack "N*" (subseq response p (+ p 4))))
+ (word (subseq response (+ p 4) (+ p 4 len)))
+ (docs (unpack "N*" (subseq response (+ p 4 len) (+ p 4 len 4))))
+ (hits (unpack "N*" (subseq response (+ p 8 len) (+ p 8 len 4))))
+ (word-info (make-hash-table)))
+ #+SPHINX-SEARCH-DEBUG (format t "len: ~a~%p: ~a~%" *response-length* p)
+ #+SPHINX-SEARCH-DEBUG (format t "rest: '~a'~%" (subseq response p))
+ #+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response p (+ p 4)))
+ #+SPHINX-SEARCH-DEBUG (format t "len: ~a~%" len)
+ #+SPHINX-SEARCH-DEBUG (format t "subseq: ~a~%" (subseq response (+ p 4) (+ p 4 len)))
+ #+SPHINX-SEARCH-DEBUG (format t "word: ~a~%docs: ~a~%hits: ~a~%" word docs hits)
+ (adv-p (+ len 12))
+ (setf (gethash 'docs word-info) docs)
+ (setf (gethash 'hits word-info) hits)
+ (setf (gethash word words) word-info)
+ (when (> p *response-length*)
+ (return))))
+ (setf (gethash 'words result) words)))))
+ (push result results))))
+  (nreverse results)))
+
+
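+;; Each match record holds a document id (64-bit when the id-64 flag is
+;; set, otherwise 32-bit) and a 32-bit weight, followed by one value per
+;; attribute: 8 bytes for bigints, a packed 32-bit float, or a
+;; count-prefixed list for multi-value attributes.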
+(defun %get-matches (response attribute-names attributes start)
+ (let ((count (unpack "N*" (subseq response start (+ start 4))))
+ (id-64 (unpack "N*" (subseq response (+ start 4) (+ start 4 4))))
+ (p (+ start 8))
+ (matches ()))
+ #+SPHINX-SEARCH-DEBUG (format t "get-matches:~% start: ~a~% rest: ~a~%" start (subseq response start))
+ #+SPHINX-SEARCH-DEBUG (format t " count: ~a~% id-64: ~a~%" count id-64)
+ (dotimes (i count)
+ (let ((data (make-hash-table :test 'equal)))
+ (cond ((not (eql id-64 0))
+ (setf (gethash "doc" data) (unpack "Q>" (subseq response p (+ p 8))))
+ (adv-p 8)
+ (setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
+ (adv-p 4))
+ (t
+ (setf (gethash "doc" data) (unpack "N*" (subseq response p (+ p 4))))
+ (adv-p 4)
+ (setf (gethash "weight" data) (unpack "N*" (subseq response p (+ p 4))))
+ (adv-p 4)))
+ #+SPHINX-SEARCH-DEBUG (format t " -> doc: ~a~% -> weight: ~a~%" (gethash "doc" data) (gethash "weight" data))
+ (dolist (attr attribute-names)
+ (cond ((eql (gethash attr attributes) +sph-attr-bigint+)
+ #+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is bigint~%" attr)
+ (setf (gethash attr data) (unpack "q>" (subseq response p (+ p 8))))
+ (adv-p 8))
+ ((eql (gethash attr attributes) +sph-attr-float+)
+ #+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is float~%" attr)
+ (let* ((uval (unpack "N*" (subseq response p (+ p 4))))
+ (tmp (pack "L" uval))
+ (floats (multiple-value-list (unpack "f*" tmp))))
+ (adv-p 4)
+ (setf (gethash attr data) floats)))
+ (t
+ (let ((val (unpack "N*" (subseq response p (+ p 4)))))
+ (adv-p 4)
+ #+SPHINX-SEARCH-DEBUG (format t " -> attr '~a': val: ~a~%" attr val)
+ (cond ((not (eql (logand +sph-attr-multi+ (gethash attr attributes)) 0))
+ #+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is multival~%" attr)
+ (let ((vals ()))
+ (dotimes (i val)
+ (push (unpack "N*" (subseq response p (+ p 4))) vals)
+ (adv-p 4)
+ (when (> p *response-length*)
+ (return)))
+ #+SPHINX-SEARCH-DEBUG (format t " -> vals: ~a~%" vals)
+ (setf (gethash attr data) (nreverse vals))))
+ (t
+ #+SPHINX-SEARCH-DEBUG (format t " -> attribute '~a' is other: val = ~a~%" attr val)
+ (setf (gethash attr data) val)))))))
+ (push data matches)))
+ #+SPHINX-SEARCH-DEBUG (format t " -> matches: ~a~%" matches)
+ (values (nreverse matches) p)))
+
+
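+;; Attribute table layout: a 32-bit count, then for each attribute a
+;; length-prefixed name followed by its 32-bit type code.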
+(defun %get-attributes (response start)
+ (let ((nattrs (unpack "N*" (subseq response start (+ start 4))))
+ (p (+ start 4))
+ (attribute-names ())
+ (attributes (make-hash-table :test 'equal)))
+ #+SPHINX-SEARCH-DEBUG (format t "get-attributes:~% nattrs: ~a~%" nattrs)
+ (dotimes (i nattrs)
+ (let ((len (unpack "N*" (subseq response p (+ p 4)))))
+ #+SPHINX-SEARCH-DEBUG (format t " attr: ~a~% -> len: ~a~%" i len)
+ (adv-p 4)
+ (let ((attr-name (subseq response p (+ p len))))
+ #+SPHINX-SEARCH-DEBUG (format t " -> attr-name subseq: ~a~%" (subseq response p (+ p len)))
+ #+SPHINX-SEARCH-DEBUG (format t " -> attr-name: ~a~%" attr-name)
+ (adv-p len)
+ (setf (gethash attr-name attributes) (unpack "N*" (subseq response p (+ p 4))))
+ #+SPHINX-SEARCH-DEBUG (format t " -> attributes{~a}: ~a~%" attr-name (gethash attr-name attributes))
+ (adv-p 4)
+ (push attr-name attribute-names)
+ (when (> p *response-length*)
+ (return)))))
+ #+SPHINX-SEARCH-DEBUG (format t " attribute-names: ~a~%" attribute-names)
+ (values attributes (nreverse attribute-names) p)))
+
+
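+;; Field list layout: a 32-bit count followed by length-prefixed names.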
+(defun %get-fields (response start)
+ (let ((nfields (unpack "N" (subseq response start (+ start 4))))
+ (p (+ start 4))
+ (fields ()))
+ #+SPHINX-SEARCH-DEBUG (format t "get-fields:~%")
+ #+SPHINX-SEARCH-DEBUG (format t " subseq starting at ~a: '~a'~%" start (subseq response start (+ start 4)))
+ #+SPHINX-SEARCH-DEBUG (format t " start: ~a~% nfields: ~a~% p: ~a~%" start nfields p)
+ (dotimes (i nfields)
+ (let ((len (unpack "N" (subseq response p (+ p 4)))))
+ #+SPHINX-SEARCH-DEBUG (format t "i: ~a~% len: ~a~%" i len)
+ (adv-p 4)
+ (push (subseq response p (+ p len)) fields)
+ (adv-p len)
+ (when (> p *response-length*)
+ (return))))
+ #+SPHINX-SEARCH-DEBUG (format t " fields: ~a~%" fields)
+ (values (nreverse fields) p)))
+
+
+(defmacro adv-p (n)
+ `(setf p (+ p ,n)))
+
+
+(defun %get-response-status (response start)
+ (let ((status (unpack "N" (subseq response start (+ start 4))))
+ (p (+ start 4)))
+ (cond ((not (eql status +searchd-ok+))
+ (let ((len (unpack "N" (subseq response p (+ p 4)))))
+ (setf p (+ p 4))
+ (let ((message (subseq response p (+ p len))))
+ (values status (+ p len) message))))
+ (t
+ (values status p "ok")))))
(defmethod %send ((client sphinx-client) &key fp data)
- (format t "writing to socket ~a~%" fp)
- (format t "data to be sent: ~a~%" data)
- (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
+ #+SPHINX-SEARCH-DEBUG (format t "writing to socket ~a~%" fp)
+ #+SPHINX-SEARCH-DEBUG (format t "data to be sent: ~a~%" data)
+ #+SPHINX-SEARCH-DEBUG (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to fp (string-to-octets data :encoding :latin-1))
;;(finish-output fp)
)
(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
       (concatenate 'string
                    (pack "N" 1)
                    (pack "N/a*" (first (anchor client)))
                    (pack "N/a*" (third (anchor client)))
                    (%pack-float (second (anchor client)))
                    (%pack-float (fourth (anchor client)))))
      (t
       (pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
- (format t "req is: ~a~%" (string-to-octets req))
+ #+SPHINX-SEARCH-DEBUG (format t "req is: ~a~%" (string-to-octets req))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defun %pack-overrides (overrides)
  (when (hash-table-p overrides)
    (with-output-to-string (s)
      (maphash #'(lambda (k entry)
                   (declare (ignore k))
                   (write-string (pack "N/a*" (gethash 'attr entry)) s)
                   (write-string (pack "NN" (gethash 'type entry)
                                       (hash-table-count (gethash 'values entry))) s)
                   (maphash #'(lambda (id v)
                                (assert (and (numberp id) (numberp v)))
                                (write-string (pack "Q>" id) s)
                                (write-string
                                 (cond ((eql (gethash 'type entry) +sph-attr-float+)
                                        (%pack-float v))
                                       ((eql (gethash 'type entry) +sph-attr-bigint+)
                                        (pack "q>" v))
                                       (t
                                        (pack "N" v)))
                                 s))
                            (gethash 'values entry)))
               overrides))))
(defun %pack-filters (filters)
  (apply #'concatenate 'string
         (mapcar #'(lambda (filter)
                     (assert (hash-table-p filter))
                     (concatenate 'string
                                  (pack "N/a*" (gethash 'attr filter))
                                  (let ((type (gethash 'type filter)))
                                    (concatenate 'string
                                                 (pack "N" type)
                                                 (cond ((eql type +sph-filter-values+)
                                                        (%pack-array-signed-quads (gethash 'values filter)))
                                                       ((eql type +sph-filter-range+)
                                                        (concatenate 'string
                                                                     (pack "q>" (gethash 'min filter))
                                                                     (pack "q>" (gethash 'max filter))))
                                                       ((eql type +sph-filter-floatrange+)
                                                        (concatenate 'string
                                                                     (%pack-float (gethash 'min filter))
                                                                     (%pack-float (gethash 'max filter))))
                                                       (t
                                                        (error "Unhandled filter type ~S" type)))
                                                 (pack "N" (gethash 'exclude filter))))))
                 filters)))
(defun %pack-hash (hash-table)
  (concatenate 'string
               (pack "N" (hash-table-count hash-table))
               (with-output-to-string (s)
                 (maphash #'(lambda (k v)
                              (write-string (pack "N/a*N" k v) s))
                          hash-table))))
(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
-    (map #'(lambda (value)
-           (pack "q>" value)) values-list)))
+    (apply #'concatenate 'string
+           (mapcar #'(lambda (value)
+                       (pack "q>" value))
+                   values-list))))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
diff --git a/test-perlapi.pl b/test-perlapi.pl
index 2759dee..5aed2fd 100644
--- a/test-perlapi.pl
+++ b/test-perlapi.pl
@@ -1,23 +1,23 @@
#!/bin/env perl
use strict;
use warnings;
use lib ".";
use Data::Dumper;
use Sphinx::Search;
my $sphinx = Sphinx::Search->new();
warn 'sphinx: ' . Dumper $sphinx;
-$sphinx->AddQuery("first");
+$sphinx->AddQuery("test");
my $results = $sphinx->RunQueries;
warn 'results: ' . Dumper $results;
1;
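For orientation, a minimal usage sketch of the Lisp client as of this
commit (a sketch only: it assumes a running searchd on localhost:3312,
and the index name and query string are illustrative):
;; Hypothetical example -- "test" is whatever index searchd serves.
(let ((client (make-instance 'sphinx-client)))
  (set-server client :host "localhost" :port 3312)
  (add-query client :query "my search terms" :index "test")
  ;; run-queries sends the batched requests and returns a list of
  ;; result hash-tables, one per added query.
  (run-queries client))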
|
thijs/cl-sphinx-search
|
a5eb71d07c42626cd96b6b7e1d13397d22960a7a
|
started parse-response
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index b919dcf..a3ce0b3 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,392 +1,398 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
(format t "set-server -> ~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (%path client) host)
(setf (%host client) ())
(setf (%port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (%path client) (subseq host 6 (length host)))
(setf (%host client) ())
(setf (%port client) ()))
(t
(format t "set-server -> ~s : ~s" host port)
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :latin-1))
;;(finish-output (%socket client))
(format t "recieved version number: ~a~%" v)
(%socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
(format t "recieved bytes: ~a~%" rec)
(let ((res
(octets-to-string (coerce rec '(vector (unsigned-byte 8)))
:encoding :latin-1)))
(format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key fp client-version)
(multiple-value-bind (status version len) (unpack "n2N" (read-from fp 8))
(format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (<= left 0)
(return))
(format t "left: ~a~%" left)
(let ((chunk (read-from fp left)))
(format t "chunk: ~a~%" chunk)
(format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
(setf left (- left (length chunk))))
(return))))
(close fp)
(let ((done (length response)))
(format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
(let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
(format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
(let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
(setf (reqs client) ())
(let ((fp (%connect client)))
(when fp
(%send client :fp fp :data data)
(let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
- (format t "run-queries response: ~a~%" response)))))))
+ (format t "run-queries response: ~a~%" response)
+ (when response
+ (%parse-response response))))))))
+
+(defmethod %parse-response ((client sphinx-client) response n-requests)
+
+ )
(defmethod %send ((client sphinx-client) &key fp data)
(format t "writing to socket ~a~%" fp)
(format t "data to be sent: ~a~%" data)
(format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
(sockets:send-to fp (string-to-octets data :encoding :latin-1))
;;(finish-output fp)
)
(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (last (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
(format t "req is: ~a~%" (string-to-octets req))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(concatenate 'string
(pack "N/a*" (get-hash 'attr entry))
(pack "NN" (get-hash 'type entry) (hash-table-count (get-hash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (get-hash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (get-hash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(get-hash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(map 'string #'(lambda (filter)
(when (hash-table-p filter)
(concatenate 'string
(pack "N/a*" (gethash 'attr filter))
(let ((type (gethash 'type filter)))
(concatenate 'string
(pack "N" type)
(cond ((eql type +sph-filter-values+)
(%pack-array-signed-quads (get-hash 'values filter)))
((eql type +sph-filter-range+)
(concatenate 'string (pack "q>" (get-hash 'min filter))
(pack "q>" (get-hash 'max filter))))
((eql type +sph-filter-floatrange+)
(concatenate 'string (%pack-float (get-hash 'min filter))
(%pack-float (get-hash 'max filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (get-hash 'exclude filter)))))))
filters))
(defun %pack-hash (hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
(when (hash-table-count hash-table)
(maphash #'(lambda (k v)
(pack "N/a*N" k v))
hash-table))))
(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map #'(lambda (value)
(pack "q>" value)) values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
e281abfbb104701fd148aa3403d2d8d0e44bd7d9
|
A WHOLE lot of debugging output; but working add-query and run-queries; YAY
|
diff --git a/PerlAPI.pm b/PerlAPI.pm
index 2b3f5d4..20702fd 100644
--- a/PerlAPI.pm
+++ b/PerlAPI.pm
@@ -1,2136 +1,2189 @@
package Sphinx::Search;
use warnings;
use strict;
use base 'Exporter';
use Carp;
use Socket;
use Config;
use Math::BigInt;
use IO::Socket::INET;
use IO::Socket::UNIX;
use Encode qw/encode_utf8 decode_utf8/;
my $is_native64 = $Config{longsize} == 8 || defined $Config{use64bitint} || defined $Config{use64bitall};
+use Devel::Peek;
+
+use utf8;
+binmode(STDERR, ":utf8");
+
+
=head1 NAME
Sphinx::Search - Sphinx search engine API Perl client
=head1 VERSION
Please note that you *MUST* install a version which is compatible with your version of Sphinx.
Use version 0.22 for Sphinx 0.9.9-rc2 and later (Please read the Compatibility Note under L<SetEncoders> regarding encoding changes)
Use version 0.15 for Sphinx 0.9.9-svn-r1674
Use version 0.12 for Sphinx 0.9.8
Use version 0.11 for Sphinx 0.9.8-rc1
Use version 0.10 for Sphinx 0.9.8-svn-r1112
Use version 0.09 for Sphinx 0.9.8-svn-r985
Use version 0.08 for Sphinx 0.9.8-svn-r871
Use version 0.06 for Sphinx 0.9.8-svn-r820
Use version 0.05 for Sphinx 0.9.8-cvs-20070907
Use version 0.02 for Sphinx 0.9.8-cvs-20070818
=cut
our $VERSION = '0.22';
=head1 SYNOPSIS
use Sphinx::Search;
$sphinx = Sphinx::Search->new();
$results = $sphinx->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms");
=head1 DESCRIPTION
This is the Perl API client for the Sphinx open-source SQL full-text indexing
search engine, L<http://www.sphinxsearch.com>.
=cut
# Constants to export.
our @EXPORT = qw(
SPH_MATCH_ALL SPH_MATCH_ANY SPH_MATCH_PHRASE SPH_MATCH_BOOLEAN SPH_MATCH_EXTENDED
SPH_MATCH_FULLSCAN SPH_MATCH_EXTENDED2
SPH_RANK_PROXIMITY_BM25 SPH_RANK_BM25 SPH_RANK_NONE SPH_RANK_WORDCOUNT
SPH_SORT_RELEVANCE SPH_SORT_ATTR_DESC SPH_SORT_ATTR_ASC SPH_SORT_TIME_SEGMENTS
SPH_SORT_EXTENDED SPH_SORT_EXPR
SPH_GROUPBY_DAY SPH_GROUPBY_WEEK SPH_GROUPBY_MONTH SPH_GROUPBY_YEAR SPH_GROUPBY_ATTR
SPH_GROUPBY_ATTRPAIR
);
# known searchd commands
use constant SEARCHD_COMMAND_SEARCH => 0;
use constant SEARCHD_COMMAND_EXCERPT => 1;
use constant SEARCHD_COMMAND_UPDATE => 2;
use constant SEARCHD_COMMAND_KEYWORDS => 3;
use constant SEARCHD_COMMAND_PERSIST => 4;
use constant SEARCHD_COMMAND_STATUS => 5;
# current client-side command implementation versions
use constant VER_COMMAND_SEARCH => 0x116;
use constant VER_COMMAND_EXCERPT => 0x100;
use constant VER_COMMAND_UPDATE => 0x102;
use constant VER_COMMAND_KEYWORDS => 0x100;
use constant VER_COMMAND_STATUS => 0x100;
# known searchd status codes
use constant SEARCHD_OK => 0;
use constant SEARCHD_ERROR => 1;
use constant SEARCHD_RETRY => 2;
use constant SEARCHD_WARNING => 3;
# known match modes
use constant SPH_MATCH_ALL => 0;
use constant SPH_MATCH_ANY => 1;
use constant SPH_MATCH_PHRASE => 2;
use constant SPH_MATCH_BOOLEAN => 3;
use constant SPH_MATCH_EXTENDED => 4;
use constant SPH_MATCH_FULLSCAN => 5;
use constant SPH_MATCH_EXTENDED2 => 6; # extended engine V2 (TEMPORARY, WILL BE REMOVED)
# known ranking modes (ext2 only)
use constant SPH_RANK_PROXIMITY_BM25 => 0; # default mode, phrase proximity major factor and BM25 minor one
use constant SPH_RANK_BM25 => 1; # statistical mode, BM25 ranking only (faster but worse quality)
use constant SPH_RANK_NONE => 2; # no ranking, all matches get a weight of 1
use constant SPH_RANK_WORDCOUNT => 3; # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
use constant SPH_RANK_PROXIMITY => 4;
use constant SPH_RANK_MATCHANY => 5;
# known sort modes
use constant SPH_SORT_RELEVANCE => 0;
use constant SPH_SORT_ATTR_DESC => 1;
use constant SPH_SORT_ATTR_ASC => 2;
use constant SPH_SORT_TIME_SEGMENTS => 3;
use constant SPH_SORT_EXTENDED => 4;
use constant SPH_SORT_EXPR => 5;
# known filter types
use constant SPH_FILTER_VALUES => 0;
use constant SPH_FILTER_RANGE => 1;
use constant SPH_FILTER_FLOATRANGE => 2;
# known attribute types
use constant SPH_ATTR_INTEGER => 1;
use constant SPH_ATTR_TIMESTAMP => 2;
use constant SPH_ATTR_ORDINAL => 3;
use constant SPH_ATTR_BOOL => 4;
use constant SPH_ATTR_FLOAT => 5;
use constant SPH_ATTR_BIGINT => 6;
use constant SPH_ATTR_MULTI => 0x40000000;
# known grouping functions
use constant SPH_GROUPBY_DAY => 0;
use constant SPH_GROUPBY_WEEK => 1;
use constant SPH_GROUPBY_MONTH => 2;
use constant SPH_GROUPBY_YEAR => 3;
use constant SPH_GROUPBY_ATTR => 4;
use constant SPH_GROUPBY_ATTRPAIR => 5;
# Floating point number matching expression
my $num_re = qr/^-?\d*\.?\d*(?:[eE][+-]?\d+)?$/;
# portably pack numeric to 64 signed bits, network order
sub _sphPackI64 {
my $self = shift;
my $v = shift;
# x64 route
my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
return pack ( "NN", $i>>32, $i & 4294967295 );
}
# portably pack numeric to 64 unsigned bits, network order
sub _sphPackU64 {
my $self = shift;
my $v = shift;
my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
return pack ( "NN", $i>>32, $i & 4294967295 );
}
sub _sphPackI64array {
my $self = shift;
my $values = shift || [];
my $s = pack("N", scalar @$values);
$s .= $self->_sphPackI64($_) for @$values;
return $s;
}
# portably unpack 64 unsigned bits, network order to numeric
sub _sphUnpackU64
{
my $self = shift;
my $v = shift;
my ($h,$l) = unpack ( "N*N*", $v );
# x64 route
return ($h<<32) + $l if $is_native64;
# x32 route, BigInt
$h = Math::BigInt->new($h);
$h->blsft(32)->badd($l);
return $h->bstr;
}
# portably unpack 64 signed bits, network order to numeric
sub _sphUnpackI64
{
my $self = shift;
my $v = shift;
my ($h,$l) = unpack ( "N*N*", $v );
my $neg = ($h & 0x80000000) ? 1 : 0;
# x64 route
if ( $is_native64 ) {
return -(~(($h<<32) + $l) + 1) if $neg;
return ($h<<32) + $l;
}
# x32 route, BigInt
if ($neg) {
$h = ~$h;
$l = ~$l;
}
my $x = Math::BigInt->new($h);
$x->blsft(32)->badd($l);
$x->binc()->bneg() if $neg;
return $x->bstr;
}
=head1 CONSTRUCTOR
=head2 new
$sph = Sphinx::Search->new;
$sph = Sphinx::Search->new(\%options);
Create a new Sphinx::Search instance.
OPTIONS
=over 4
=item log
Specify an optional logger instance. This can be any class that provides error,
warn, info, and debug methods (e.g. see L<Log::Log4perl>). Logging is disabled
if no logger instance is provided.
=item debug
Debug flag. If set (and a logger instance is specified), debugging messages
will be generated.
=back
=cut
# create a new client object and fill defaults
sub new {
my ($class, $options) = @_;
my $self = {
# per-client-object settings
_host => 'localhost',
_port => 3312,
_path => undef,
_socket => undef,
# per-query settings
_offset => 0,
_limit => 20,
_mode => SPH_MATCH_ALL,
_weights => [],
_sort => SPH_SORT_RELEVANCE,
_sortby => "",
_min_id => 0,
_max_id => 0,
_filters => [],
_groupby => "",
_groupdistinct => "",
_groupfunc => SPH_GROUPBY_DAY,
_groupsort => '@group desc',
_maxmatches => 1000,
_cutoff => 0,
_retrycount => 0,
_retrydelay => 0,
_anchor => undef,
_indexweights => undef,
_ranker => SPH_RANK_PROXIMITY_BM25,
_maxquerytime => 0,
_fieldweights => {},
_overrides => {},
_select => q{*},
# per-reply fields (for single-query case)
_error => '',
_warning => '',
_connerror => '',
# request storage (for multi-query case)
_reqs => [],
_timeout => 0,
_string_encoder => \&encode_utf8,
_string_decoder => \&decode_utf8,
};
bless $self, ref($class) || $class;
# These options are supported in the constructor, but not recommended
# since there is no validation. Use the Set* methods instead.
my %legal_opts = map { $_ => 1 } qw/host port offset limit mode weights sort sortby groupby groupbyfunc maxmatches cutoff retrycount retrydelay log debug string_encoder string_decoder/;
for my $opt (keys %$options) {
$self->{'_' . $opt} = $options->{$opt} if $legal_opts{$opt};
}
# Disable debug unless we have something to log to
$self->{_debug} = 0 unless $self->{_log};
return $self;
}
=head1 METHODS
=cut
sub _Error {
my ($self, $msg) = @_;
$self->{_error} = $msg;
$self->{_log}->error($msg) if $self->{_log};
}
=head2 GetLastError
$error = $sph->GetLastError;
Get last error message (string)
=cut
sub GetLastError {
my $self = shift;
return $self->{_error};
}
sub _Warning {
my ($self, $msg) = @_;
$self->{_warning} = $msg;
$self->{_log}->warn($msg) if $self->{_log};
}
=head2 GetLastWarning
$warning = $sph->GetLastWarning;
Get last warning message (string)
=cut
sub GetLastWarning {
my $self = shift;
return $self->{_warning};
}
=head2 IsConnectError
Check connection error flag (to differentiate between network connection errors
and bad responses). Returns true value on connection error.
=cut
sub IsConnectError {
return shift->{_connerror};
}
=head2 SetEncoders
$sph->SetEncoders(\&encode_function, \&decode_function)
COMPATIBILITY NOTE: SetEncoders() was introduced in version 0.17.
Prior to that, all strings were considered to be sequences of bytes
which may have led to issues with multi-byte characters. If you were
previously encoding/decoding strings external to Sphinx::Search, you
will need to disable encoding/decoding by setting Sphinx::Search to
use raw values as explained below (or modify your code and let
Sphinx::Search do the recoding).
Set the string encoder/decoder functions for transferring strings
between perl and Sphinx. The encoder should take the perl internal
representation and convert to the bytestream that searchd expects, and
the decoder should take the bytestream returned by searchd and convert to
perl format.
The searchd format will depend on the 'charset_type' index setting in
the Sphinx configuration file.
The coders default to encode_utf8 and decode_utf8 respectively, which
are compatible with the 'utf8' charset_type.
If either the encoder or decoder functions are left undefined in the
call to SetEncoders, they return to their default values.
If you wish to send raw values (no encoding/decoding), supply a
function that simply returns its argument, e.g.
$sph->SetEncoders( sub { shift }, sub { shift });
Returns $sph.
=cut
sub SetEncoders {
my $self = shift;
my $encoder = shift;
my $decoder = shift;
$self->{_string_encoder} = $encoder ? $encoder : \&encode_utf8;
$self->{_string_decoder} = $decoder ? $decoder : \&decode_utf8;
return $self;
}
=head2 SetServer
$sph->SetServer($host, $port);
$sph->SetServer($path, $port);
In the first form, sets the host (string) and port (integer) details for the
searchd server using a network (INET) socket.
In the second form, where $path is a local filesystem path (optionally prefixed
by 'unix://'), sets the client to access the searchd server via a local (UNIX
domain) socket at the specified path.
Returns $sph.
=cut
sub SetServer {
my $self = shift;
my $host = shift;
my $port = shift;
croak("host is not defined") unless defined($host);
$self->{_path} = $host, return if substr($host, 0, 1) eq '/';
$self->{_path} = substr($host, 7), return if substr($host, 0, 7) eq 'unix://';
croak("port is not an integer") unless defined($port) && $port =~ m/^\d+$/o;
$self->{_host} = $host;
$self->{_port} = $port;
$self->{_path} = undef;
return $self;
}
=head2 SetConnectTimeout
$sph->SetConnectTimeout($timeout)
Set server connection timeout (in seconds).
Returns $sph.
=cut
sub SetConnectTimeout {
my $self = shift;
my $timeout = shift;
croak("timeout is not numeric") unless ($timeout =~ m/$num_re/);
$self->{_timeout} = $timeout;
return $self;
}
sub _Send {
my $self = shift;
my $fp = shift;
my $data = shift;
$self->{_log}->debug("Writing to socket") if $self->{_debug};
+
+ print STDERR "writing data:\n";
+ print STDERR Dump($data) . "\n";
+
$fp->write($data); return 1;
if ($fp->eof || ! $fp->write($data)) {
$self->_Error("connection unexpectedly closed (timed out?): $!");
$self->{_connerror} = 1;
return 0;
}
return 1;
}
# connect to searchd server
sub _Connect {
my $self = shift;
return $self->{_socket} if $self->{_socket};
my $debug = $self->{_debug};
my $str_dest = $self->{_path} ? 'unix://' . $self->{_path} : "$self->{_host}:$self->{_port}";
$self->{_log}->debug("Connecting to $str_dest") if $debug;
# connect socket
$self->{_connerror} = q{};
my $fp;
my %params = (); # ( Blocking => 0 );
$params{Timeout} = $self->{_timeout} if $self->{_timeout};
if ($self->{_path}) {
$fp = IO::Socket::UNIX->new( Peer => $self->{_path},
%params,
);
} else {
$fp = IO::Socket::INET->new( PeerPort => $self->{_port},
PeerAddr => $self->{_host},
Proto => 'tcp',
%params,
);
}
if (! $fp) {
$self->_Error("Failed to open connection to $str_dest: $!");
$self->{_connerror} = 1;
return 0;
}
binmode($fp, ':bytes');
# check version
my $buf = '';
$fp->read($buf, 4) or do {
$self->_Error("Failed on initial read from $str_dest: $!");
$self->{_connerror} = 1;
return 0;
};
my $v = unpack("N*", $buf);
$v = int($v);
$self->{_log}->debug("Got version $v from searchd") if $debug;
if ($v < 1) {
close($fp);
$self->_Error("expected searchd protocol version 1+, got version '$v'");
return 0;
}
$self->{_log}->debug("Sending version") if $debug;
# All ok, send my version
$self->_Send($fp, pack("N", 1)) or return 0;
$self->{_log}->debug("Connection complete") if $debug;
return $fp;
}
#-------------------------------------------------------------
# get and check response packet from searchd server
sub _GetResponse {
my $self = shift;
my $fp = shift;
my $client_ver = shift;
my $header;
defined($fp->read($header, 8, 0)) or do {
$self->_Error("read failed: $!");
return 0;
};
my ($status, $ver, $len ) = unpack("n2N", $header);
my $response = q{};
my $lasterror = q{};
my $lentotal = 0;
while (my $rlen = $fp->read(my $chunk, $len)) {
$lasterror = $!, last if $rlen < 0;
$response .= $chunk;
$lentotal += $rlen;
last if $lentotal >= $len;
}
close($fp) unless $self->{_socket};
# check response
if ( length($response) != $len ) {
$self->_Error( $len
? "failed to read searchd response (status=$status, ver=$ver, len=$len, read=". length($response) . ", last error=$lasterror)"
: "received zero-sized searchd response");
return 0;
}
# check status
if ( $status==SEARCHD_WARNING ) {
my ($wlen) = unpack ( "N*", substr ( $response, 0, 4 ) );
$self->_Warning(substr ( $response, 4, $wlen ));
return substr ( $response, 4+$wlen );
}
if ( $status==SEARCHD_ERROR ) {
$self->_Error("searchd error: " . substr ( $response, 4 ));
return 0;
}
if ( $status==SEARCHD_RETRY ) {
$self->_Error("temporary searchd error: " . substr ( $response, 4 ));
return 0;
}
if ( $status!=SEARCHD_OK ) {
$self->_Error("unknown status code '$status'");
return 0;
}
# check version
if ( $ver<$client_ver ) {
$self->_Warning(sprintf ( "searchd command v.%d.%d older than client's v.%d.%d, some options might not work",
$ver>>8, $ver&0xff, $client_ver>>8, $client_ver&0xff ));
}
return $response;
}
=head2 SetLimits
$sph->SetLimits($offset, $limit);
$sph->SetLimits($offset, $limit, $max);
Set match offset/limits, and optionally the max number of matches to return.
Returns $sph.
=cut
sub SetLimits {
my $self = shift;
my $offset = shift;
my $limit = shift;
my $max = shift || 0;
croak("offset should be an integer >= 0") unless ($offset =~ /^\d+$/ && $offset >= 0) ;
croak("limit should be an integer >= 0") unless ($limit =~ /^\d+$/ && $limit >= 0);
$self->{_offset} = $offset;
$self->{_limit} = $limit;
if ($max > 0) {
$self->{_maxmatches} = $max;
}
return $self;
}
=head2 SetMaxQueryTime
$sph->SetMaxQueryTime($millisec);
Set maximum query time, in milliseconds, per index.
The value may not be negative; 0 means "do not limit".
Returns $sph.
=cut
sub SetMaxQueryTime {
my $self = shift;
my $max = shift;
croak("max value should be an integer >= 0") unless ($max =~ /^\d+$/ && $max >= 0) ;
$self->{_maxquerytime} = $max;
return $self;
}
=head2 SetMatchMode
$sph->SetMatchMode($mode);
Set match mode, which may be one of:
=over 4
=item * SPH_MATCH_ALL
Match all words
=item * SPH_MATCH_ANY
Match any words
=item * SPH_MATCH_PHRASE
Exact phrase match
=item * SPH_MATCH_BOOLEAN
Boolean match, using AND (&), OR (|), NOT (!,-) and parenthetic grouping.
=item * SPH_MATCH_EXTENDED
Extended match, which includes the Boolean syntax plus field, phrase and
proximity operators.
=back
Returns $sph.
=cut
sub SetMatchMode {
my $self = shift;
my $mode = shift;
croak("Match mode not defined") unless defined($mode);
croak("Unknown matchmode: $mode") unless ( $mode==SPH_MATCH_ALL
|| $mode==SPH_MATCH_ANY
|| $mode==SPH_MATCH_PHRASE
|| $mode==SPH_MATCH_BOOLEAN
|| $mode==SPH_MATCH_EXTENDED
|| $mode==SPH_MATCH_FULLSCAN
|| $mode==SPH_MATCH_EXTENDED2 );
$self->{_mode} = $mode;
return $self;
}
=head2 SetRankingMode
$sph->SetRankingMode(SPH_RANK_BM25);
Set ranking mode, which may be one of:
=over 4
=item * SPH_RANK_PROXIMITY_BM25
Default mode, phrase proximity major factor and BM25 minor one
=item * SPH_RANK_BM25
Statistical mode, BM25 ranking only (faster but worse quality)
=item * SPH_RANK_NONE
No ranking, all matches get a weight of 1
=item * SPH_RANK_WORDCOUNT
Simple word-count weighting, rank is a weighted sum of per-field keyword
occurrence counts
=back
Returns $sph.
=cut
sub SetRankingMode {
my $self = shift;
my $ranker = shift;
croak("Unknown ranking mode: $ranker") unless ( $ranker==SPH_RANK_PROXIMITY_BM25
|| $ranker==SPH_RANK_BM25
|| $ranker==SPH_RANK_NONE
|| $ranker==SPH_RANK_WORDCOUNT
|| $ranker==SPH_RANK_PROXIMITY );
$self->{_ranker} = $ranker;
return $self;
}
=head2 SetSortMode
$sph->SetSortMode(SPH_SORT_RELEVANCE);
$sph->SetSortMode($mode, $sortby);
Set sort mode, which may be any of:
=over 4
=item SPH_SORT_RELEVANCE - sort by relevance
=item SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC
Sort by attribute descending/ascending. $sortby specifies the sorting attribute.
=item SPH_SORT_TIME_SEGMENTS
Sort by time segments (last hour/day/week/month) in descending order, and then
by relevance in descending order. $sortby specifies the time attribute.
=item SPH_SORT_EXTENDED
Sort by SQL-like syntax. $sortby is the sorting specification.
=item SPH_SORT_EXPR
Sort by an arithmetic expression. $sortby is the expression string.
=back
Returns $sph.
=cut
sub SetSortMode {
my $self = shift;
my $mode = shift;
my $sortby = shift || "";
croak("Sort mode not defined") unless defined($mode);
croak("Unknown sort mode: $mode") unless ( $mode == SPH_SORT_RELEVANCE
|| $mode == SPH_SORT_ATTR_DESC
|| $mode == SPH_SORT_ATTR_ASC
|| $mode == SPH_SORT_TIME_SEGMENTS
|| $mode == SPH_SORT_EXTENDED
|| $mode == SPH_SORT_EXPR
);
croak("Sortby must be defined") unless ($mode==SPH_SORT_RELEVANCE || length($sortby));
$self->{_sort} = $mode;
$self->{_sortby} = $sortby;
return $self;
}
=head2 SetWeights
$sph->SetWeights([ 1, 2, 3, 4]);
This method is deprecated. Use L<SetFieldWeights> instead.
Set per-field (integer) weights. The ordering of the weights correspond to the
ordering of fields as indexed.
Returns $sph.
=cut
sub SetWeights {
my $self = shift;
my $weights = shift;
croak("Weights is not an array reference") unless (ref($weights) eq 'ARRAY');
foreach my $weight (@$weights) {
croak("Weight: $weight is not an integer") unless ($weight =~ /^\d+$/);
}
$self->{_weights} = $weights;
return $self;
}
=head2 SetFieldWeights
$sph->SetFieldWeights(\%weights);
Set per-field (integer) weights by field name. The weights hash provides field
name to weight mappings.
Takes precedence over L<SetWeights>.
Unknown names will be silently ignored. Missing fields will be given a weight of 1.
Returns $sph.
=cut
sub SetFieldWeights {
my $self = shift;
my $weights = shift;
croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
foreach my $field (keys %$weights) {
croak("Weight: $weights->{$field} is not an integer >= 0") unless ($weights->{$field} =~ /^\d+$/);
}
$self->{_fieldweights} = $weights;
return $self;
}
=head2 SetIndexWeights
$sph->SetIndexWeights(\%weights);
Set per-index (integer) weights. The weights hash is a mapping of index name to integer weight.
Returns $sph.
=cut
sub SetIndexWeights {
my $self = shift;
my $weights = shift;
croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
foreach (keys %$weights) {
croak("IndexWeight $_: $weights->{$_} is not an integer") unless ($weights->{$_} =~ /^\d+$/);
}
$self->{_indexweights} = $weights;
return $self;
}
=head2 SetIDRange
$sph->SetIDRange($min, $max);
Set ID range: only match those records where the document ID
is between $min and $max (including $min and $max)
Returns $sph.
=cut
sub SetIDRange {
my $self = shift;
my $min = shift;
my $max = shift;
croak("min_id is not numeric") unless ($min =~ m/$num_re/);
croak("max_id is not numeric") unless ($max =~ m/$num_re/);
croak("min_id is larger than or equal to max_id") unless ($min < $max);
$self->{_min_id} = $min;
$self->{_max_id} = $max;
return $self;
}
=head2 SetFilter
$sph->SetFilter($attr, \@values);
$sph->SetFilter($attr, \@values, $exclude);
Sets the results to be filtered on the given attribute. Only results which have
attributes matching the given (numeric) values will be returned.
This may be called multiple times with different attributes to select on
multiple attributes.
If 'exclude' is set, excludes results that match the filter.
Returns $sph.
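For example, to keep only documents whose group_id attribute is 1, 5 or
7 (the attribute name and values here are illustrative):
$sph->SetFilter('group_id', [ 1, 5, 7 ]);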
=cut
sub SetFilter {
my ($self, $attribute, $values, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("values is not an array reference") unless (ref($values) eq 'ARRAY');
croak("values reference is empty") unless (scalar(@$values));
foreach my $value (@$values) {
croak("value $value is not numeric") unless ($value =~ m/$num_re/);
}
push(@{$self->{_filters}}, {
type => SPH_FILTER_VALUES,
attr => $attribute,
values => $values,
exclude => $exclude ? 1 : 0,
});
return $self;
}
=head2 SetFilterRange
$sph->SetFilterRange($attr, $min, $max);
$sph->SetFilterRange($attr, $min, $max, $exclude);
Sets the results to be filtered on a range of values for the given
attribute. Only those records where $attr column value is between $min and $max
(including $min and $max) will be returned.
If 'exclude' is set, excludes results that fall within the given range.
Returns $sph.
=cut
sub SetFilterRange {
my ($self, $attribute, $min, $max, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("min: $min is not an integer") unless ($min =~ m/$num_re/);
croak("max: $max is not an integer") unless ($max =~ m/$num_re/);
croak("min value should be <= max") unless ($min <= $max);
push(@{$self->{_filters}}, {
type => SPH_FILTER_RANGE,
attr => $attribute,
min => $min,
max => $max,
exclude => $exclude ? 1 : 0,
});
return $self;
}
=head2 SetFilterFloatRange
$sph->SetFilterFloatRange($attr, $min, $max, $exclude);
Same as L<SetFilterRange>, but allows floating point values.
Returns $sph.
=cut
sub SetFilterFloatRange {
my ($self, $attribute, $min, $max, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("min: $min is not numeric") unless ($min =~ m/$num_re/);
croak("max: $max is not numeric") unless ($max =~ m/$num_re/);
croak("min value should be <= max") unless ($min <= $max);
push(@{$self->{_filters}}, {
type => SPH_FILTER_FLOATRANGE,
attr => $attribute,
min => $min,
max => $max,
exclude => $exclude ? 1 : 0,
});
return $self;
}
=head2 SetGeoAnchor
$sph->SetGeoAnchor($attrlat, $attrlong, $lat, $long);
Setup anchor point for using geosphere distance calculations in filters and sorting.
Distance will be computed with respect to this point
=over 4
=item $attrlat is the name of latitude attribute
=item $attrlong is the name of longitude attribute
=item $lat is anchor point latitude, in radians
=item $long is anchor point longitude, in radians
=back
Returns $sph.
=cut
sub SetGeoAnchor {
my ($self, $attrlat, $attrlong, $lat, $long) = @_;
croak("attrlat is not defined") unless defined $attrlat;
croak("attrlong is not defined") unless defined $attrlong;
croak("lat: $lat is not numeric") unless ($lat =~ m/$num_re/);
croak("long: $long is not numeric") unless ($long =~ m/$num_re/);
$self->{_anchor} = {
attrlat => $attrlat,
attrlong => $attrlong,
lat => $lat,
long => $long,
};
return $self;
}
=head2 SetGroupBy
$sph->SetGroupBy($attr, $func);
$sph->SetGroupBy($attr, $func, $groupsort);
Sets attribute and function of results grouping.
In grouping mode, all matches are assigned to different groups based on grouping
function value. Each group keeps track of the total match count, and the best
match (in this group) according to current sorting function. The final result
set contains one best match per group, with grouping function value and matches
count attached.
$attr is any valid attribute. Use L<ResetGroupBy> to disable grouping.
$func is one of:
=over 4
=item * SPH_GROUPBY_DAY
Group by day (assumes timestamp type attribute of form YYYYMMDD)
=item * SPH_GROUPBY_WEEK
Group by week (assumes timestamp type attribute of form YYYYNNN)
=item * SPH_GROUPBY_MONTH
Group by month (assumes timestamp type attribute of form YYYYMM)
=item * SPH_GROUPBY_YEAR
Group by year (assumes timestamp type attribute of form YYYY)
=item * SPH_GROUPBY_ATTR
Group by attribute value
=item * SPH_GROUPBY_ATTRPAIR
Group by two attributes, being the given attribute and the attribute that
immediately follows it in the sequence of indexed attributes. The specified
attribute may therefore not be the last of the indexed attributes.
=back
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal Sphinx
attributes:
=over 4
=item @id - document ID;
=item @weight, @rank, @relevance - match weight;
=item @group - group by function value;
=item @count - number of matches in group.
=back
The default mode is to sort by groupby value in descending order,
ie. by "@group desc".
In the results set, "total_found" contains the total amount of matching groups
over the whole index.
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported
in total_found than actually present. @count might also
be underestimated.
For example, if sorting by relevance and grouping by a "published"
attribute with SPH_GROUPBY_DAY function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (ie. recent days first).
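That example corresponds to (the attribute name is illustrative):
$sph->SetSortMode(SPH_SORT_RELEVANCE);
$sph->SetGroupBy('published', SPH_GROUPBY_DAY);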
=cut
sub SetGroupBy {
my $self = shift;
my $attribute = shift;
my $func = shift;
my $groupsort = shift || '@group desc';
croak("attribute is not defined") unless (defined $attribute);
croak("Unknown grouping function: $func") unless ($func==SPH_GROUPBY_DAY
|| $func==SPH_GROUPBY_WEEK
|| $func==SPH_GROUPBY_MONTH
|| $func==SPH_GROUPBY_YEAR
|| $func==SPH_GROUPBY_ATTR
|| $func==SPH_GROUPBY_ATTRPAIR
);
$self->{_groupby} = $attribute;
$self->{_groupfunc} = $func;
$self->{_groupsort} = $groupsort;
return $self;
}
=head2 SetGroupDistinct
$sph->SetGroupDistinct($attr);
Set count-distinct attribute for group-by queries
=cut
sub SetGroupDistinct {
my $self = shift;
my $attribute = shift;
croak("attribute is not defined") unless (defined $attribute);
$self->{_groupdistinct} = $attribute;
return $self;
}
=head2 SetRetries
$sph->SetRetries($count, $delay);
Set distributed retries count and delay
=cut
sub SetRetries {
my $self = shift;
my $count = shift;
my $delay = shift || 0;
croak("count: $count is not an integer >= 0") unless ($count =~ /^\d+$/o && $count >= 0);
croak("delay: $delay is not an integer >= 0") unless ($delay =~ /^\d+$/o && $delay >= 0);
$self->{_retrycount} = $count;
$self->{_retrydelay} = $delay;
return $self;
}
=head2 SetOverride
$sph->SetOverride($attrname, $attrtype, $values);
Set attribute values override. There can be only one override per attribute.
$values must be a hash that maps document IDs to attribute values
=cut
sub SetOverride {
my $self = shift;
my $attrname = shift;
my $attrtype = shift;
my $values = shift;
croak("attribute name is not defined") unless defined $attrname;
croak("Uknown attribute type: $attrtype") unless ($attrtype == SPH_ATTR_INTEGER
|| $attrtype == SPH_ATTR_TIMESTAMP
|| $attrtype == SPH_ATTR_BOOL
|| $attrtype == SPH_ATTR_FLOAT
|| $attrtype == SPH_ATTR_BIGINT);
$self->{_overrides}->{$attrname} = { attr => $attrname,
type => $attrtype,
values => $values,
};
return $self;
}
=head2 SetSelect
$sph->SetSelect($select)
Set select list (attributes or expressions). SQL-like syntax.
=cut
sub SetSelect {
my $self = shift;
$self->{_select} = shift;
return $self;
}
=head2 ResetFilters
$sph->ResetFilters;
Clear all filters.
=cut
sub ResetFilters {
my $self = shift;
$self->{_filters} = [];
$self->{_anchor} = undef;
return $self;
}
=head2 ResetGroupBy
$sph->ResetGroupBy;
Clear all group-by settings (for multi-queries)
=cut
sub ResetGroupBy {
my $self = shift;
$self->{_groupby} = "";
$self->{_groupfunc} = SPH_GROUPBY_DAY;
$self->{_groupsort} = '@group desc';
$self->{_groupdistinct} = "";
return $self;
}
=head2 ResetOverrides
Clear all attribute value overrides (for multi-queries)
=cut
sub ResetOverrides {
my $self = shift;
$self->{_select} = undef;
return $self;
}
=head2 Query
$results = $sph->Query($query, $index);
Connect to searchd server and run given search query.
=over 4
=item query is query string
=item index is index name to query, default is "*" which means to query all indexes. Use a space or comma separated list to search multiple indexes.
=back
Returns undef on failure
Returns hash which has the following keys on success:
=over 4
=item matches
Array containing hashes with found documents ( "doc", "weight", "group", "stamp" )
=item total
Total amount of matches retrieved (up to SPH_MAX_MATCHES, see sphinx.h)
=item total_found
Total amount of matching documents in index
=item time
Search time
=item words
Hash which maps query terms (stemmed!) to ( "docs", "hits" ) hash
=back
Returns the results array on success, undef on error.
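A minimal sketch of reading the keys described above (the query string
and index name are illustrative):
my $r = $sph->Query("search terms", "myindex");
printf("%d of %d matches\n", $r->{total}, $r->{total_found}) if $r;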
=cut
sub Query {
my $self = shift;
my $query = shift;
my $index = shift || '*';
my $comment = shift || '';
croak("_reqs is not empty") unless @{$self->{_reqs}} == 0;
$self->AddQuery($query, $index, $comment);
my $results = $self->RunQueries or return;
$self->_Error($results->[0]->{error}) if $results->[0]->{error};
$self->_Warning($results->[0]->{warning}) if $results->[0]->{warning};
return if $results->[0]->{status} && $results->[0]->{status} == SEARCHD_ERROR;
return $results->[0];
}
# helper to pack floats in network byte order
sub _PackFloat {
my $f = shift;
my $t1 = pack ( "f", $f ); # machine order
my $t2 = unpack ( "L*", $t1 ); # int in machine order
return pack ( "N", $t2 );
}
=head2 AddQuery
$sph->AddQuery($query, $index);
Add a query to a batch request.
Batch queries enable searchd to perform internal optimizations
where possible, and reduce network connection overheads in all cases.
For instance, running exactly the same query with different
groupby settings will enable searchd to perform the expensive
full-text search and ranking operation only once, but compute
multiple groupby results from its output.
Parameters are exactly the same as in Query() call.
Returns the index of this query in the results array returned by a subsequent RunQueries() call.
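For example (a sketch; the index names are illustrative):

    my $i1 = $sph->AddQuery('term', 'index1');
    my $i2 = $sph->AddQuery('term', 'index2');
    my $results = $sph->RunQueries;
    print $results->[$i1]{total_found}, "\n" if $results;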
=cut
sub AddQuery {
my $self = shift;
my $query = shift;
my $index = shift || '*';
my $comment = shift || '';
##################
# build request
##################
my $req;
$req = pack ( "NNNNN", $self->{_offset}, $self->{_limit}, $self->{_mode}, $self->{_ranker}, $self->{_sort} ); # mode and limits
+
+ print STDERR Dump( pack( "N", 20 ) );
+ print STDERR "req a: $self->{_offset} $self->{_limit} \n";
+ print STDERR Dump($req);
+
$req .= pack ( "N/a*", $self->{_sortby});
$req .= pack ( "N/a*", $self->{_string_encoder}->($query) ); # query itself
$req .= pack ( "N*", scalar(@{$self->{_weights}}), @{$self->{_weights}});
$req .= pack ( "N/a*", $index); # indexes
$req .= pack ( "N", 1)
. $self->_sphPackU64($self->{_min_id})
. $self->_sphPackU64($self->{_max_id}); # id64 range
# filters
$req .= pack ( "N", scalar @{$self->{_filters}} );
foreach my $filter (@{$self->{_filters}}) {
$req .= pack ( "N/a*", $filter->{attr});
$req .= pack ( "N", $filter->{type});
my $t = $filter->{type};
if ($t == SPH_FILTER_VALUES) {
$req .= $self->_sphPackI64array($filter->{values});
} elsif ($t == SPH_FILTER_RANGE) {
$req .= $self->_sphPackI64($filter->{min}) . $self->_sphPackI64($filter->{max});
} elsif ($t == SPH_FILTER_FLOATRANGE) {
$req .= _PackFloat ( $filter->{"min"} ) . _PackFloat ( $filter->{"max"} );
} else {
croak("Unhandled filter type $t");
}
$req .= pack ( "N", $filter->{exclude});
}
# group-by clause, max-matches count, group-sort clause, cutoff count
$req .= pack ( "NN/a*", $self->{_groupfunc}, $self->{_groupby} );
$req .= pack ( "N", $self->{_maxmatches} );
$req .= pack ( "N/a*", $self->{_groupsort});
$req .= pack ( "NNN", $self->{_cutoff}, $self->{_retrycount}, $self->{_retrydelay} );
$req .= pack ( "N/a*", $self->{_groupdistinct});
if (!defined $self->{_anchor}) {
$req .= pack ( "N", 0);
} else {
my $a = $self->{_anchor};
$req .= pack ( "N", 1);
$req .= pack ( "N/a*", $a->{attrlat});
$req .= pack ( "N/a*", $a->{attrlong});
$req .= _PackFloat($a->{lat}) . _PackFloat($a->{long});
}
+ print STDERR "before per-indexe weights\n";
+ print STDERR Dump($req);
+
# per-index weights
$req .= pack( "N", scalar keys %{$self->{_indexweights}});
$req .= pack ( "N/a*N", $_, $self->{_indexweights}->{$_} ) for keys %{$self->{_indexweights}};
+ print STDERR "AFTER per-indexe weights\n";
+ print STDERR Dump($req);
+
# max query time
$req .= pack ( "N", $self->{_maxquerytime} );
+ print STDERR "before per-field weights\n";
+ print STDERR Dump($req);
+
# per-field weights
$req .= pack ( "N", scalar keys %{$self->{_fieldweights}} );
$req .= pack ( "N/a*N", $_, $self->{_fieldweights}->{$_}) for keys %{$self->{_fieldweights}};
+
+ print STDERR "AFTER per-field weights\n";
+ print STDERR Dump($req);
+
+
# comment
$req .= pack ( "N/a*", $comment);
# attribute overrides
$req .= pack ( "N", scalar keys %{$self->{_overrides}} );
for my $entry (values %{$self->{_overrides}}) {
$req .= pack ("N/a*", $entry->{attr})
. pack ("NN", $entry->{type}, scalar keys %{$entry->{values}});
for my $id (keys %{$entry->{values}}) {
croak "Attribute value key is not numeric" unless $id =~ m/$num_re/;
my $v = $entry->{values}->{$id};
croak "Attribute value key is not numeric" unless $v =~ m/$num_re/;
$req .= $self->_sphPackU64($id);
if ($entry->{type} == SPH_ATTR_FLOAT) {
$req .= $self->_packfloat($v);
} elsif ($entry->{type} == SPH_ATTR_BIGINT) {
$req .= $self->_sphPackI64($v);
} else {
$req .= pack("N", $v);
}
}
}
# select list
$req .= pack("N/a*", $self->{_select} || '');
+ print STDERR "added req:\n";
+ print STDERR Dump($req) . "\n";
+
push(@{$self->{_reqs}}, $req);
return scalar $#{$self->{_reqs}};
}
=head2 RunQueries
$sph->RunQueries
Run batch of queries, as added by AddQuery.
Returns undef on network IO failure.
Returns an array of result sets on success.
Each result set in the returned array is a hash which contains
the same keys as the hash returned by L<Query>, plus:
=over 4
=item * error
Errors, if any, for this query.
=item * warnings
Any warnings associated with the query.
=back
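Because a failing query does not abort the rest of the batch, check each result set individually; a sketch:

    my $results = $sph->RunQueries or die $sph->GetLastError;
    for my $r (@$results) {
        warn "query failed: $r->{error}" if $r->{error};
        print "found $r->{total_found} matches\n";
    }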
=cut
sub RunQueries {
my $self = shift;
unless (@{$self->{_reqs}}) {
$self->_Error("no queries defined, issue AddQuery() first");
return;
}
my $fp = $self->_Connect() or do { $self->{_reqs} = []; return };
##################
# send query, get response
##################
my $nreqs = @{$self->{_reqs}};
my $req = pack("Na*", $nreqs, join("", @{$self->{_reqs}}));
- $req = pack ( "nnN/a*", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, $req); # add header
+
+ #$req = pack ( "nnN/a*", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, $req); # add header
+
+ my $reqa = pack ( "nn", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH );
+
+ print STDERR "runqueries req header:\n";
+ print STDERR Dump($reqa) . "\n";
+
+ print STDERR 'len req: ' . length( $req ) . "\n";
+
+ $req = $reqa . pack ( "N/a*", $req); # add header
+
+ print STDERR "runqueries sending command:\n";
+ print STDERR Dump($req) . "\n";
+
$self->_Send($fp, $req);
$self->{_reqs} = [];
my $response = $self->_GetResponse ( $fp, VER_COMMAND_SEARCH );
return unless $response;
+ print STDERR "runqueries: got response:\n";
+ print STDERR Dump($response) . "\n";
+
##################
# parse response
##################
my $p = 0;
my $max = length($response); # Protection from broken response
my @results;
for (my $ires = 0; $ires < $nreqs; $ires++) {
my $result = {}; # Empty hash ref
push(@results, $result);
$result->{matches} = []; # Empty array ref
$result->{error} = "";
$result->{warnings} = "";
# extract status
my $status = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
if ($status != SEARCHD_OK) {
my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $message = substr ( $response, $p, $len ); $p += $len;
if ($status == SEARCHD_WARNING) {
$result->{warning} = $message;
} else {
$result->{error} = $message;
next;
}
}
# read schema
my @fields;
my (%attrs, @attr_list);
my $nfields = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
while ( $nfields-->0 && $p<$max ) {
my $len = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
push(@fields, substr ( $response, $p, $len )); $p += $len;
}
$result->{"fields"} = \@fields;
my $nattrs = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
while ( $nattrs-->0 && $p<$max ) {
my $len = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
my $attr = substr ( $response, $p, $len ); $p += $len;
my $type = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
$attrs{$attr} = $type;
push(@attr_list, $attr);
}
$result->{"attrs"} = \%attrs;
# read match count
my $count = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
my $id64 = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
# read matches
while ( $count-->0 && $p<$max ) {
my $data = {};
if ($id64) {
$data->{doc} = $self->_sphUnpackU64(substr($response, $p, 8)); $p += 8;
$data->{weight} = unpack("N*", substr($response, $p, 4)); $p += 4;
} else {
( $data->{doc}, $data->{weight} ) = unpack("N*N*", substr($response,$p,8));
$p += 8;
}
foreach my $attr (@attr_list) {
if ($attrs{$attr} == SPH_ATTR_BIGINT) {
$data->{$attr} = $self->_sphUnpackI64(substr($response, $p, 8)); $p += 8;
next;
}
if ($attrs{$attr} == SPH_ATTR_FLOAT) {
my $uval = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
$data->{$attr} = [ unpack("f*", pack("L", $uval)) ];
next;
}
my $val = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
if ($attrs{$attr} & SPH_ATTR_MULTI) {
my $nvalues = $val;
$data->{$attr} = [];
while ($nvalues-->0 && $p < $max) {
$val = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
push(@{$data->{$attr}}, $val);
}
} else {
$data->{$attr} = $val;
}
}
push(@{$result->{matches}}, $data);
}
my $words;
($result->{total}, $result->{total_found}, $result->{time}, $words) = unpack("N*N*N*N*", substr($response, $p, 16));
$result->{time} = sprintf ( "%.3f", $result->{"time"}/1000 );
$p += 16;
while ( $words-->0 && $p < $max) {
my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
$p += 4;
my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
$p += $len;
my ($docs, $hits) = unpack ("N*N*", substr($response, $p, 8));
$p += 8;
$result->{words}{$word} = {
"docs" => $docs,
"hits" => $hits
};
}
}
return \@results;
}
=head2 BuildExcerpts
$excerpts = $sph->BuildExcerpts($docs, $index, $words, $opts)
Generate document excerpts for the specified documents.
=over 4
=item docs
An array reference of strings which represent the document
contents
=item index
A string specifying the index whose settings will be used
for stemming, lexing and case folding
=item words
A string which contains the words to highlight
=item opts
A hash which contains additional optional highlighting parameters:
=over 4
=item before_match - a string to insert before a set of matching words, default is "<b>"
=item after_match - a string to insert after a set of matching words, default is "</b>"
=item chunk_separator - a string to insert between excerpts chunks, default is " ... "
=item limit - max excerpt size in symbols (codepoints), default is 256
=item around - how many words to highlight around each match, default is 5
=item exact_phrase - whether to highlight exact phrase matches only, default is false
=item single_passage - whether to extract single best passage only, default is false
=item use_boundaries
=item weight_order
=back
=back
Returns undef on failure.
Returns an array ref of string excerpts on success.
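A usage sketch (the document text, index name, and markup are illustrative):

    my $excerpts = $sph->BuildExcerpts(
        [ 'this is my test text to be highlighted' ],
        'myindex',
        'test text',
        { before_match => '<em>', after_match => '</em>' },
    );
    print "$_\n" for @{ $excerpts || [] };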
=cut
sub BuildExcerpts {
my ($self, $docs, $index, $words, $opts) = @_;
$opts ||= {};
croak("BuildExcepts() called with incorrect parameters")
unless (ref($docs) eq 'ARRAY'
&& defined($index)
&& defined($words)
&& ref($opts) eq 'HASH');
my $fp = $self->_Connect() or return;
##################
# fixup options
##################
$opts->{"before_match"} ||= "<b>";
$opts->{"after_match"} ||= "</b>";
$opts->{"chunk_separator"} ||= " ... ";
$opts->{"limit"} ||= 256;
$opts->{"around"} ||= 5;
$opts->{"exact_phrase"} ||= 0;
$opts->{"single_passage"} ||= 0;
$opts->{"use_boundaries"} ||= 0;
$opts->{"weight_order"} ||= 0;
##################
# build request
##################
# v.1.0 req
my $req;
my $flags = 1; # remove spaces
$flags |= 2 if ( $opts->{"exact_phrase"} );
$flags |= 4 if ( $opts->{"single_passage"} );
$flags |= 8 if ( $opts->{"use_boundaries"} );
$flags |= 16 if ( $opts->{"weight_order"} );
$req = pack ( "NN", 0, $flags ); # mode=0, flags=$flags
$req .= pack ( "N/a*", $index ); # req index
$req .= pack ( "N/a*", $self->{_string_encoder}->($words)); # req words
# options
$req .= pack ( "N/a*", $opts->{"before_match"});
$req .= pack ( "N/a*", $opts->{"after_match"});
$req .= pack ( "N/a*", $opts->{"chunk_separator"});
$req .= pack ( "N", int($opts->{"limit"}) );
$req .= pack ( "N", int($opts->{"around"}) );
# documents
$req .= pack ( "N", scalar(@$docs) );
foreach my $doc (@$docs) {
croak('BuildExcerpts: Found empty document in $docs') unless ($doc);
$req .= pack("N/a*", $self->{_string_encoder}->($doc));
}
##########################
# send query, get response
##########################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, $req); # add header
+
+ print STDERR "sending:\n";
+ print STDERR Dump($req) . "\n";
+
$self->_Send($fp, $req);
my $response = $self->_GetResponse($fp, VER_COMMAND_EXCERPT);
return unless $response;
my ($pos, $i) = (0, 0);
my $res = []; # Empty array ref
my $rlen = length($response);
for ( $i=0; $i< scalar(@$docs); $i++ ) {
my $len = unpack ( "N*", substr ( $response, $pos, 4 ) );
$pos += 4;
if ( $pos+$len > $rlen ) {
$self->_Error("incomplete reply");
return;
}
push(@$res, $self->{_string_decoder}->( substr ( $response, $pos, $len ) ));
$pos += $len;
}
return $res;
}
=head2 BuildKeywords
$results = $sph->BuildKeywords($query, $index, $hits)
Generate keyword list for a given query
Returns undef on failure.
Returns an array of hashes, where each hash describes a word in the query with the following keys:
=over 4
=item * tokenized
Tokenized term from query
=item * normalized
Normalized term from query
=item * docs
Number of docs in which word was found (if $hits is true)
=item * hits
Number of occurrences of word (if $hits is true)
=back
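A usage sketch (the query and index name are illustrative; pass a true $hits to request per-keyword statistics):

    my $keywords = $sph->BuildKeywords('running tests', 'myindex', 1);
    for my $kw (@{ $keywords || [] }) {
        print "$kw->{tokenized} => $kw->{normalized} ($kw->{docs} docs, $kw->{hits} hits)\n";
    }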
=cut
sub BuildKeywords {
my ( $self, $query, $index, $hits ) = @_;
my $fp = $self->_Connect() or return;
# v.1.0 req
my $req = pack("N/a*", $self->{_string_encoder}->($query) );
$req .= pack("N/a*", $index);
$req .= pack("N", $self->{_string_encoder}->($hits) );
##################
# send query, get response
##################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, $req);
$self->_Send($fp, $req);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_KEYWORDS );
return unless $response;
##################
# parse response
##################
my $p = 0;
my @res;
my $rlen = length($response);
my $nwords = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
for (my $i=0; $i < $nwords; $i++ ) {
my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $tokenized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
$len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $normalized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
my %data = ( tokenized => $tokenized, normalized => $normalized );
if ($hits) {
( $data{docs}, $data{hits} ) = unpack("N*N*", substr($response,$p,8));
$p += 8;
}
push(@res, \%data);
}
if ( $p > $rlen ) {
$self->_Error("incomplete reply");
return;
}
return \@res;
}
=head2 EscapeString
$escaped = $sph->EscapeString('abcde!@#$%')
Inserts backslash before all non-word characters in the given string.
=cut
sub EscapeString {
my $self = shift;
return quotemeta(shift);
}
=head2 UpdateAttributes
$sph->UpdateAttributes($index, \@attrs, \%values);
$sph->UpdateAttributes($index, \@attrs, \%values, $mva);
Update specified attributes on specified documents
=over 4
=item index
Name of the index to be updated
=item attrs
Array of attribute name strings
=item values
A hash with key as document id, value as an array of new attribute values
=back
Returns number of actually updated documents (0 or more) on success
Returns undef on failure
Usage example:
$sph->UpdateAttributes("test1", [ qw/group_id/ ], { 1 => [ 456] }) );
=cut
sub UpdateAttributes {
my ($self, $index, $attrs, $values, $mva ) = @_;
croak("index is not defined") unless (defined $index);
croak("attrs must be an array") unless ref($attrs) eq "ARRAY";
for my $attr (@$attrs) {
croak("attribute is not defined") unless (defined $attr);
}
croak("values must be a hashref") unless ref($values) eq "HASH";
for my $id (keys %$values) {
my $entry = $values->{$id};
croak("value id $id is not numeric") unless ($id =~ /$num_re/);
croak("value entry must be an array") unless ref($entry) eq "ARRAY";
croak("size of values must match size of attrs") unless @$entry == @$attrs;
for my $v (@$entry) {
if ($mva) {
croak("multi-valued entry $v is not an array") unless ref($v) eq 'ARRAY';
for my $vv (@$v) {
croak("array entry value $vv is not an integer") unless ($vv =~ /^(\d+)$/o);
}
} else {
croak("entry value $v is not an integer") unless ($v =~ /^(\d+)$/o);
}
}
}
## build request
my $req = pack ( "N/a*", $index);
$req .= pack ( "N", scalar @$attrs );
for my $attr (@$attrs) {
$req .= pack ( "N/a*", $attr)
. pack("N", $mva ? 1 : 0);
}
$req .= pack ( "N", scalar keys %$values );
foreach my $id (keys %$values) {
my $entry = $values->{$id};
$req .= $self->_sphPackU64($id);
if ($mva) {
for my $v ( @$entry ) {
$req .= pack ( "N", @$v );
for my $vv (@$v) {
$req .= pack ("N", $vv);
}
}
} else {
for my $v ( @$entry ) {
$req .= pack ( "N", $v );
}
}
}
## connect, send query, get response
my $fp = $self->_Connect() or return;
$req = pack ( "nnN/a*", SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, $req); ## add header
send ( $fp, $req, 0);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_UPDATE );
return unless $response;
## parse response
my ($updated) = unpack ( "N*", substr ( $response, 0, 4 ) );
return $updated;
}
=head2 Open
$sph->Open()
Opens a persistent connection for subsequent queries.
To reduce the network connection overhead of making Sphinx queries, you can call
$sph->Open(), then run any number of queries, and call $sph->Close() when
finished.
Returns 1 on success, 0 on failure.
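A typical pattern (a sketch):

    $sph->Open or warn $sph->GetLastError;
    my $r1 = $sph->Query('first query');
    my $r2 = $sph->Query('second query');
    $sph->Close;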
=cut
sub Open {
my $self = shift;
if ($self->{_socket}) {
$self->_Error("already connected");
return 0;
}
my $fp = $self->_Connect() or return 0;
my $req = pack("nnNN", SEARCHD_COMMAND_PERSIST, 0, 4, 1);
$self->_Send($fp, $req) or return 0;
$self->{_socket} = $fp;
return 1;
}
=head2 Close
$sph->Close()
Closes a persistent connection.
Returns 1 on success, 0 on failure.
=cut
sub Close {
my $self = shift;
if (! $self->{_socket}) {
$self->_Error("not connected");
return 0;
}
close($self->{_socket});
$self->{_socket} = undef;
return 1;
}
=head2 Status
$status = $sph->Status()
Queries searchd status, and returns a hash of status variable name and value pairs.
Returns undef on failure.
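For example (a sketch, assuming two-column status output where each value is a plain string):

    my $status = $sph->Status or die $sph->GetLastError;
    printf "%s: %s\n", $_, $status->{$_} for sort keys %$status;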
=cut
sub Status {
my $self = shift;
my $fp = $self->_Connect() or return;
my $req = pack("nnNN", SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, 1 ); # len=4, body=1
$self->_Send($fp, $req) or return;
my $response = $self->_GetResponse ( $fp, VER_COMMAND_STATUS );
return unless $response;
my $p = 0;
my ($rows, $cols) = unpack("N*N*", substr ( $response, $p, 8 ) ); $p += 8;
return {} unless $rows && $cols;
my %res;
for (1 .. $rows ) {
my @entry;
for ( 1 .. $cols) {
my $len = unpack("N*", substr ( $response, $p, 4 ) ); $p += 4;
push(@entry, $len ? substr ( $response, $p, $len ) : ""); $p += $len;
}
if ($cols <= 2) {
$res{$entry[0]} = $entry[1];
} else {
my $name = shift @entry;
$res{$name} = \@entry;
}
}
return \%res;
}
=head1 SEE ALSO
L<http://www.sphinxsearch.com>
=head1 NOTES
There is (or was) a bundled Sphinx.pm in the contrib area of the Sphinx source
distribution, which was used as the starting point of Sphinx::Search.
Maintenance of that version appears to have lapsed at sphinx-0.9.7, so many of
the newer API calls are not available there. Sphinx::Search is mostly
compatible with the old Sphinx.pm except:
=over 4
=item On failure, Sphinx::Search returns undef rather than 0 or -1.
=item Sphinx::Search 'Set' functions are cascadable, e.g. you can do
Sphinx::Search->new
->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms")
=back
Sphinx::Search also provides documentation and unit tests, which were the main
motivations for branching from the earlier work.
=head1 AUTHOR
Jon Schutz
=head1 BUGS
Please report any bugs or feature requests to
C<bug-sphinx-search at rt.cpan.org>, or through the web interface at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Sphinx-Search>.
I will be notified, and then you'll automatically be notified of progress on
your bug as I make changes.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Sphinx::Search
You can also look for information at:
=over 4
=item * AnnoCPAN: Annotated CPAN documentation
L<http://annocpan.org/dist/Sphinx-Search>
=item * CPAN Ratings
L<http://cpanratings.perl.org/d/Sphinx-Search>
=item * RT: CPAN's request tracker
L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Sphinx-Search>
=item * Search CPAN
L<http://search.cpan.org/dist/Sphinx-Search>
=back
=head1 ACKNOWLEDGEMENTS
This module is based on Sphinx.pm (not deployed to CPAN) for Sphinx version
0.9.7-rc1, by Len Kranendonk, which was in turn based on the Sphinx PHP API.
=head1 COPYRIGHT & LICENSE
Copyright 2007 Jon Schutz, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License.
=cut
1;
diff --git a/sphinx-search-api-test.asd b/sphinx-search-api-test.asd
index 538250e..0023406 100644
--- a/sphinx-search-api-test.asd
+++ b/sphinx-search-api-test.asd
@@ -1,26 +1,34 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package :cl-user)
(defpackage #:com.oppermannen.sphinx-search-api-test-asd
(:use :cl :asdf))
(asdf:operate 'asdf:load-op :fiveam)
+(asdf:operate 'asdf:load-op :alexandria)
+(asdf:operate 'asdf:load-op :iolib.sockets)
(in-package :com.oppermannen.sphinx-search-api-test-asd)
(defsystem #:sphinx-search-api-test
:name "SPHINX-SEARCH-API-TEST"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description "Test suite for SPHINX-SEARCH-API"
:long-description "this is the test suite system for SPHINX-SEARCH-API"
+ :serial t
:components ((:module "test"
+ :serial t
:components ((:file "package")
- (:file "test" :depends-on ("package")))))
+ (:file "echo-server")
+ (:file "test"))))
:depends-on (:iolib.sockets
+ :cl-pack
+ :alexandria
+ :babel
:sphinx-search-api))
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 4edc4b8..b919dcf 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,383 +1,392 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
- :initform ()
+ :initform 0
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
:initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
:initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
- (format t "~s : ~s" host port)
+ (format t "set-server -> ~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (%path client) host)
(setf (%host client) ())
(setf (%port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (%path client) (subseq host 6 (length host)))
(setf (%host client) ())
(setf (%port client) ()))
(t
- (format t "~s : ~s" host port)
+ (format t "set-server -> ~s : ~s" host port)
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
- (string-to-octets (pack "N" 1) :encoding :utf-8))
- (format t "~a~%" v)
+ (string-to-octets (pack "N" 1) :encoding :latin-1))
+ ;;(finish-output (%socket client))
+ (format t "recieved version number: ~a~%" v)
(%socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
- (format t "~a~%" rec)
+ (format t "recieved bytes: ~a~%" rec)
(let ((res
- (octets-to-string
- (coerce rec
- '(vector (unsigned-byte 8)))
- :encoding :utf-8)))
- (format t "res: ~a~%" res)
+ (octets-to-string (coerce rec '(vector (unsigned-byte 8)))
+ :encoding :latin-1)))
+ (format t "octets-to-string gives: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key fp client-version)
(multiple-value-bind (status version len) (unpack "n2N" (read-from fp 8))
- (format t "~a : ~a : ~a~%" status version len)
+ (format t "status: ~a~%version: ~a~%length: ~a~%" status version len)
(let ((response ())
(left len))
(loop
- (when (< left 0)
+ (when (<= left 0)
(return))
+ (format t "left: ~a~%" left)
(let ((chunk (read-from fp left)))
+ (format t "chunk: ~a~%" chunk)
+ (format t "chunk length: ~a~%" (length chunk))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
- (- left (length chunk)))
+ (setf left (- left (length chunk))))
(return))))
+ (close fp)
(let ((done (length response)))
+ (format t "got response of length: ~a~%raw response: ~a~%" done response)
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
(defmethod run-queries ((client sphinx-client))
(assert (> (length (reqs client)) 0))
- (let* ((requests (pack "Na*" (length (reqs client)) (reqs client)))
- (data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
- (setf (reqs client) ())
- (let ((fp (%connect client)))
- (when fp
- (%send client :fp fp :data data)
- (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
- (format t "~a~%" response))))))
+ (let ((requests (pack "Na*" (length (reqs client)) (reqs client))))
+ (format t "requests:~%~A~%length requests: ~a~%" requests (length requests))
+ (let ((data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
+ (setf (reqs client) ())
+ (let ((fp (%connect client)))
+ (when fp
+ (%send client :fp fp :data data)
+ (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
+ (format t "run-queries response: ~a~%" response)))))))
(defmethod %send ((client sphinx-client) &key fp data)
- (format t "Writing to socket ~a~%" fp)
- (sockets:send-to fp (string-to-octets data :encoding :utf-8)))
+ (format t "writing to socket ~a~%" fp)
+ (format t "data to be sent: ~a~%" data)
+ (format t "data as octets: ~a~%" (string-to-octets data :encoding :latin-1))
+ (sockets:send-to fp (string-to-octets data :encoding :latin-1))
+ ;;(finish-output fp)
+)
(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
(%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
(%pack-float (second (anchor client)))
(%pack-float (last (anchor client)))))
(t
(pack "N" 0)))
(%pack-hash (index-weights client))
(pack "N" (max-query-time client))
(%pack-hash (field-weights client))
(pack "N/a*" comment)
(pack "N" (hash-table-count (overrides client)))
(%pack-overrides (overrides client))
(pack "N/a*" (if (select client)
(select client)
"")))))
- (format t "req is: ~a~%" req)
+ (format t "req is: ~a~%" (string-to-octets req))
(setf (reqs client) (append (reqs client) (list req))))
(length (reqs client)))
(defun %pack-overrides (overrides)
(when (hash-table-p overrides)
(maphash #'(lambda (k entry)
(concatenate 'string
(pack "N/a*" (get-hash 'attr entry))
(pack "NN" (get-hash 'type entry) (hash-table-count (get-hash 'values entry)))
(maphash #'(lambda (id v)
(concatenate 'string
(assert (and (numberp id) (numberp v)))
(pack "Q>" id)
(cond ((eql (get-hash 'type entry) +sph-attr-float+)
(%pack-float v))
((eql (get-hash 'type entry) +sph-attr-bigint+)
(pack "q>" v))
(t
(pack "N" v)))))
(get-hash 'values entry))))
overrides)))
(defun %pack-filters (filters)
(map 'string #'(lambda (filter)
(when (hash-table-p filter)
(concatenate 'string
(pack "N/a*" (gethash 'attr filter))
(let ((type (gethash 'type filter)))
(concatenate 'string
(pack "N" type)
(cond ((eql type +sph-filter-values+)
(%pack-array-signed-quads (get-hash 'values filter)))
((eql type +sph-filter-range+)
(concatenate 'string (pack "q>" (get-hash 'min filter))
(pack "q>" (get-hash 'max filter))))
((eql type +sph-filter-floatrange+)
(concatenate 'string (%pack-float (get-hash 'min filter))
(%pack-float (get-hash 'max filter))))
(t
(error "Unhandled filter type ~S" type)))
(pack "N" (get-hash 'exclude filter)))))))
filters))
(defun %pack-hash (hash-table)
- (when (hash-table-count hash-table)
(concatenate 'string
(pack "N" (hash-table-count hash-table))
- (maphash #'(lambda (k v)
- (pack "N/a*N" k v))
- hash-table))))
+ (when (hash-table-count hash-table)
+ (maphash #'(lambda (k v)
+ (pack "N/a*N" k v))
+ hash-table))))
(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map 'string #'(lambda (value)
(pack "q>" value)) values-list)))
(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
diff --git a/test-perlapi.pl b/test-perlapi.pl
new file mode 100644
index 0000000..2759dee
--- /dev/null
+++ b/test-perlapi.pl
@@ -0,0 +1,23 @@
+#!/bin/env perl
+
+use strict;
+use warnings;
+
+use lib ".";
+
+use Data::Dumper;
+use Sphinx::Search;
+
+
+my $sphinx = Sphinx::Search->new();
+
+warn 'sphinx: ' . Dumper $sphinx;
+
+$sphinx->AddQuery("first");
+
+my $results = $sphinx->RunQueries;
+
+
+warn 'results: ' . Dumper $results;
+
+1;
diff --git a/test/echo-server.lisp b/test/echo-server.lisp
new file mode 100644
index 0000000..ceb604e
--- /dev/null
+++ b/test/echo-server.lisp
@@ -0,0 +1,113 @@
+;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; indent-tabs-mode: nil -*-
+;;;
+;;; --- Multiplexer example, adapted from Juho Snellman's version for SBCL
+;;; which is available at http://jsnell.iki.fi/tmp/echo-server.lisp.
+;;;
+
+(in-package #:com.oppermannen.sphinx-search-api-test)
+
+
+(defparameter *port* 9999)
+(defvar *event-base* nil)
+(defvar *sockets* (make-hash-table))
+(defvar *counter* 0)
+
+(defun add-socket (socket)
+ (push socket *sockets*))
+
+(defun remove-socket (socket)
+ (removef *sockets* socket))
+
+(defun close-socket (socket)
+ (let ((fd (iolib.sockets:socket-os-fd socket)))
+ (ignore-errors (iomux:remove-fd-handlers *event-base* fd))
+ (remove-socket socket)
+ (close socket)))
+
+(defun make-echoer (stream id disconnector)
+ (lambda (fd event exception)
+ (declare (ignore fd event exception))
+ (handler-case
+ (let ((line (read-line stream)))
+ (cond ((string= line "quit")
+ (funcall disconnector))
+ (t
+ (format t "~A: ~A~%" id line)
+
+ (sockets:send-to stream #(1 2 3 4 5 6 7 8))
+ (finish-output stream)
+ (format stream "~A: ~A~%" id line)
+ (with-open-file (stream "/tmp/echo-server.log" :direction :output :if-exists :append
+ :element-type '(unsigned-byte 8))
+ (write-sequence (string-to-octets line) stream))
+ (ignore-some-conditions (iolib.streams:hangup)
+ (finish-output stream)))))
+ (end-of-file ()
+ (funcall disconnector)))))
+
+(defun make-disconnector (socket id)
+ (lambda ()
+ (format t "~A: closing~%" id)
+ (close-socket socket)))
+
+(defun serve (socket id)
+ (iomux:set-io-handler *event-base*
+ (iolib.sockets:socket-os-fd socket)
+ :read
+ (make-echoer socket id
+ (make-disconnector socket id))))
+
+(defun make-listener-handler (socket)
+ (lambda (fd event exception)
+ (declare (ignore fd exception))
+ (block nil
+ (when (eql :timeout event)
+ (warn "Got a server timeout!")
+ (return))
+ (let ((client (iolib.sockets:accept-connection socket)))
+ (when client
+ (setf (iolib.streams:fd-non-blocking client) t)
+ (add-socket client)
+ (sockets:send-to client #(80 70 60 50))
+ (finish-output client)
+ (incf *counter*)
+ (format t "Accepted client ~A~%" *counter*)
+ (serve client *counter*))))))
+
+(defun start-echo-server (host port)
+ (let ((socket
+ (iolib.sockets:make-socket :connect :passive :address-family :internet :type :stream
+ :local-host host :local-port port
+ :backlog 5 :reuse-address t
+ :external-format '(:latin-1 :eol-style :crlf) :ipv6 nil)))
+ (setf *counter* 0
+ *sockets* nil)
+ (unwind-protect-case ()
+ (progn
+ (setf (iolib.streams:fd-non-blocking socket) t)
+ (add-socket socket)
+ (iomux:set-io-handler *event-base*
+ (iolib.sockets:socket-os-fd socket)
+ :read
+ (make-listener-handler socket)
+ :timeout 15))
+ (:abort (close socket)))
+ socket))
+
+(defun close-all-sockets ()
+ (map 'nil #'close-socket *sockets*))
+
+(defun run-server (&key (host iolib.sockets:+ipv4-unspecified+)
+ (port *port*) (new-process t) (timeout 10))
+ (flet ((%run-server ()
+ (unwind-protect
+ (progn
+ (setf *event-base* (make-instance 'iomux:event-base))
+ (with-open-stream (sock (start-echo-server host port))
+ (iomux:event-dispatch *event-base* :timeout timeout)))
+ (close-all-sockets)
+ (close *event-base*))))
+ (let ((iolib.sockets:*ipv6* nil))
+ (if new-process
+ (bt:make-thread #'%run-server)
+ (%run-server)))))
diff --git a/test/package.lisp b/test/package.lisp
index 1b14f64..1cde0ed 100644
--- a/test/package.lisp
+++ b/test/package.lisp
@@ -1,10 +1,13 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api-test
(:nicknames :sphinx-search-api-test)
(:use :cl
:fiveam
+ :cl-pack
+ :babel
:iolib.sockets
+ :alexandria
:com.oppermannen.sphinx-search-api))
|
thijs/cl-sphinx-search
|
3a7a7f8df9ad67af9fade4821af3af3af605e782
|
Renamed test stuff
|
diff --git a/sphinx-search-api-config.asd b/sphinx-search-api-test.asd
similarity index 87%
rename from sphinx-search-api-config.asd
rename to sphinx-search-api-test.asd
index c4b70de..538250e 100644
--- a/sphinx-search-api-config.asd
+++ b/sphinx-search-api-test.asd
@@ -1,23 +1,26 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package :cl-user)
(defpackage #:com.oppermannen.sphinx-search-api-test-asd
(:use :cl :asdf))
+(asdf:operate 'asdf:load-op :fiveam)
+
(in-package :com.oppermannen.sphinx-search-api-test-asd)
(defsystem #:sphinx-search-api-test
:name "SPHINX-SEARCH-API-TEST"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description "Test suite for SPHINX-SEARCH-API"
:long-description "this is the test suite system for SPHINX-SEARCH-API"
:components ((:module "test"
:components ((:file "package")
(:file "test" :depends-on ("package")))))
- :depends-on (:sphinx-search-api))
+ :depends-on (:iolib.sockets
+ :sphinx-search-api))
diff --git a/test/package.lisp b/test/package.lisp
index 4ce28b0..1b14f64 100644
--- a/test/package.lisp
+++ b/test/package.lisp
@@ -1,9 +1,10 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
-(defpackage #:com.oppermannen.bayes-classifier-test
- (:nicknames :bayes-classifier-test)
+(defpackage #:com.oppermannen.sphinx-search-api-test
+ (:nicknames :sphinx-search-api-test)
(:use :cl
:fiveam
- :com.oppermannen.bayes-classifier))
+ :iolib.sockets
+ :com.oppermannen.sphinx-search-api))
diff --git a/test/test.lisp b/test/test.lisp
index 101b279..d335b09 100644
--- a/test/test.lisp
+++ b/test/test.lisp
@@ -1,9 +1,11 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
-(in-package #:com.oppermannen.bayes-classifier-test)
+(in-package #:com.oppermannen.sphinx-search-api-test)
-(5am:def-suite bayes-classifier-tests
- :description "Top-level test suite for bayes-classifier")
+(5am:def-suite sphinx-search-api-tests
+ :description "Top-level test suite for sphinx-search-api")
+
+(5am:in-suite sphinx-search-api-tests)
+
-(5am:in-suite bayes-classifier-tests)
|
thijs/cl-sphinx-search
|
d03b6ed37506dc869687bd3ea417d77ad6630af5
|
Reformat
|
diff --git a/PerlAPI.pm b/PerlAPI.pm
index acdb805..2b3f5d4 100644
--- a/PerlAPI.pm
+++ b/PerlAPI.pm
@@ -1,2143 +1,2136 @@
package Sphinx::Search;
use warnings;
use strict;
use base 'Exporter';
use Carp;
use Socket;
use Config;
use Math::BigInt;
use IO::Socket::INET;
use IO::Socket::UNIX;
use Encode qw/encode_utf8 decode_utf8/;
my $is_native64 = $Config{longsize} == 8 || defined $Config{use64bitint} || defined $Config{use64bitall};
=head1 NAME
Sphinx::Search - Sphinx search engine API Perl client
=head1 VERSION
Please note that you *MUST* install a version which is compatible with your version of Sphinx.
Use version 0.22 for Sphinx 0.9.9-rc2 and later (Please read the Compatibility Note under L<SetEncoders> regarding encoding changes)
Use version 0.15 for Sphinx 0.9.9-svn-r1674
Use version 0.12 for Sphinx 0.9.8
Use version 0.11 for Sphinx 0.9.8-rc1
Use version 0.10 for Sphinx 0.9.8-svn-r1112
Use version 0.09 for Sphinx 0.9.8-svn-r985
Use version 0.08 for Sphinx 0.9.8-svn-r871
Use version 0.06 for Sphinx 0.9.8-svn-r820
Use version 0.05 for Sphinx 0.9.8-cvs-20070907
Use version 0.02 for Sphinx 0.9.8-cvs-20070818
=cut
our $VERSION = '0.22';
=head1 SYNOPSIS
use Sphinx::Search;
$sphinx = Sphinx::Search->new();
$results = $sphinx->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms");
=head1 DESCRIPTION
This is the Perl API client for the Sphinx open-source SQL full-text indexing
search engine, L<http://www.sphinxsearch.com>.
=cut
# Constants to export.
our @EXPORT = qw(
- SPH_MATCH_ALL SPH_MATCH_ANY SPH_MATCH_PHRASE SPH_MATCH_BOOLEAN SPH_MATCH_EXTENDED
- SPH_MATCH_FULLSCAN SPH_MATCH_EXTENDED2
- SPH_RANK_PROXIMITY_BM25 SPH_RANK_BM25 SPH_RANK_NONE SPH_RANK_WORDCOUNT
- SPH_SORT_RELEVANCE SPH_SORT_ATTR_DESC SPH_SORT_ATTR_ASC SPH_SORT_TIME_SEGMENTS
- SPH_SORT_EXTENDED SPH_SORT_EXPR
- SPH_GROUPBY_DAY SPH_GROUPBY_WEEK SPH_GROUPBY_MONTH SPH_GROUPBY_YEAR SPH_GROUPBY_ATTR
- SPH_GROUPBY_ATTRPAIR
- );
+ SPH_MATCH_ALL SPH_MATCH_ANY SPH_MATCH_PHRASE SPH_MATCH_BOOLEAN SPH_MATCH_EXTENDED
+ SPH_MATCH_FULLSCAN SPH_MATCH_EXTENDED2
+ SPH_RANK_PROXIMITY_BM25 SPH_RANK_BM25 SPH_RANK_NONE SPH_RANK_WORDCOUNT
+ SPH_SORT_RELEVANCE SPH_SORT_ATTR_DESC SPH_SORT_ATTR_ASC SPH_SORT_TIME_SEGMENTS
+ SPH_SORT_EXTENDED SPH_SORT_EXPR
+ SPH_GROUPBY_DAY SPH_GROUPBY_WEEK SPH_GROUPBY_MONTH SPH_GROUPBY_YEAR SPH_GROUPBY_ATTR
+ SPH_GROUPBY_ATTRPAIR
+ );
# known searchd commands
use constant SEARCHD_COMMAND_SEARCH => 0;
use constant SEARCHD_COMMAND_EXCERPT => 1;
use constant SEARCHD_COMMAND_UPDATE => 2;
use constant SEARCHD_COMMAND_KEYWORDS => 3;
use constant SEARCHD_COMMAND_PERSIST => 4;
use constant SEARCHD_COMMAND_STATUS => 5;
# current client-side command implementation versions
use constant VER_COMMAND_SEARCH => 0x116;
use constant VER_COMMAND_EXCERPT => 0x100;
use constant VER_COMMAND_UPDATE => 0x102;
use constant VER_COMMAND_KEYWORDS => 0x100;
use constant VER_COMMAND_STATUS => 0x100;
# known searchd status codes
use constant SEARCHD_OK => 0;
use constant SEARCHD_ERROR => 1;
use constant SEARCHD_RETRY => 2;
use constant SEARCHD_WARNING => 3;
# known match modes
use constant SPH_MATCH_ALL => 0;
use constant SPH_MATCH_ANY => 1;
use constant SPH_MATCH_PHRASE => 2;
use constant SPH_MATCH_BOOLEAN => 3;
use constant SPH_MATCH_EXTENDED => 4;
use constant SPH_MATCH_FULLSCAN => 5;
use constant SPH_MATCH_EXTENDED2 => 6; # extended engine V2 (TEMPORARY, WILL BE REMOVED)
# known ranking modes (ext2 only)
use constant SPH_RANK_PROXIMITY_BM25 => 0; # default mode, phrase proximity major factor and BM25 minor one
use constant SPH_RANK_BM25 => 1; # statistical mode, BM25 ranking only (faster but worse quality)
use constant SPH_RANK_NONE => 2; # no ranking, all matches get a weight of 1
use constant SPH_RANK_WORDCOUNT => 3; # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
use constant SPH_RANK_PROXIMITY => 4;
use constant SPH_RANK_MATCHANY => 5;
# known sort modes
use constant SPH_SORT_RELEVANCE => 0;
use constant SPH_SORT_ATTR_DESC => 1;
use constant SPH_SORT_ATTR_ASC => 2;
use constant SPH_SORT_TIME_SEGMENTS => 3;
use constant SPH_SORT_EXTENDED => 4;
use constant SPH_SORT_EXPR => 5;
# known filter types
use constant SPH_FILTER_VALUES => 0;
use constant SPH_FILTER_RANGE => 1;
use constant SPH_FILTER_FLOATRANGE => 2;
# known attribute types
use constant SPH_ATTR_INTEGER => 1;
use constant SPH_ATTR_TIMESTAMP => 2;
use constant SPH_ATTR_ORDINAL => 3;
use constant SPH_ATTR_BOOL => 4;
use constant SPH_ATTR_FLOAT => 5;
use constant SPH_ATTR_BIGINT => 6;
use constant SPH_ATTR_MULTI => 0x40000000;
# known grouping functions
use constant SPH_GROUPBY_DAY => 0;
use constant SPH_GROUPBY_WEEK => 1;
use constant SPH_GROUPBY_MONTH => 2;
use constant SPH_GROUPBY_YEAR => 3;
use constant SPH_GROUPBY_ATTR => 4;
use constant SPH_GROUPBY_ATTRPAIR => 5;
# Floating point number matching expression
my $num_re = qr/^-?\d*\.?\d*(?:[eE][+-]?\d+)?$/;
# portably pack numeric to 64 signed bits, network order
sub _sphPackI64 {
my $self = shift;
my $v = shift;
# x64 route
my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
return pack ( "NN", $i>>32, $i & 4294967295 );
}
# portably pack numeric to 64 unsigned bits, network order
sub _sphPackU64 {
my $self = shift;
my $v = shift;
my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
return pack ( "NN", $i>>32, $i & 4294967295 );
}
sub _sphPackI64array {
my $self = shift;
my $values = shift || [];
my $s = pack("N", scalar @$values);
$s .= $self->_sphPackI64($_) for @$values;
return $s;
}
# portably unpack 64 unsigned bits, network order to numeric
sub _sphUnpackU64
-{
- my $self = shift;
- my $v = shift;
+ {
+ my $self = shift;
+ my $v = shift;
- my ($h,$l) = unpack ( "N*N*", $v );
+ my ($h,$l) = unpack ( "N*N*", $v );
- # x64 route
- return ($h<<32) + $l if $is_native64;
+ # x64 route
+ return ($h<<32) + $l if $is_native64;
- # x32 route, BigInt
- $h = Math::BigInt->new($h);
- $h->blsft(32)->badd($l);
+ # x32 route, BigInt
+ $h = Math::BigInt->new($h);
+ $h->blsft(32)->badd($l);
- return $h->bstr;
-}
+ return $h->bstr;
+ }
# portably unpack 64 signed bits, network order to numeric
sub _sphUnpackI64
-{
- my $self = shift;
- my $v = shift;
+ {
+ my $self = shift;
+ my $v = shift;
- my ($h,$l) = unpack ( "N*N*", $v );
+ my ($h,$l) = unpack ( "N*N*", $v );
- my $neg = ($h & 0x80000000) ? 1 : 0;
+ my $neg = ($h & 0x80000000) ? 1 : 0;
- # x64 route
- if ( $is_native64 ) {
- return -(~(($h<<32) + $l) + 1) if $neg;
- return ($h<<32) + $l;
- }
+ # x64 route
+ if ( $is_native64 ) {
+ return -(~(($h<<32) + $l) + 1) if $neg;
+ return ($h<<32) + $l;
+ }
- # x32 route, BigInt
- if ($neg) {
- $h = ~$h;
- $l = ~$l;
- }
+ # x32 route, BigInt
+ if ($neg) {
+ $h = ~$h;
+ $l = ~$l;
+ }
- my $x = Math::BigInt->new($h);
- $x->blsft(32)->badd($l);
- $x->binc()->bneg() if $neg;
+ my $x = Math::BigInt->new($h);
+ $x->blsft(32)->badd($l);
+ $x->binc()->bneg() if $neg;
- return $x->bstr;
-}
+ return $x->bstr;
+ }
=head1 CONSTRUCTOR
=head2 new
$sph = Sphinx::Search->new;
$sph = Sphinx::Search->new(\%options);
Create a new Sphinx::Search instance.
OPTIONS
=over 4
=item log
Specify an optional logger instance. This can be any class that provides error,
warn, info, and debug methods (e.g. see L<Log::Log4perl>). Logging is disabled
if no logger instance is provided.
=item debug
Debug flag. If set (and a logger instance is specified), debugging messages
will be generated.
=back
=cut
# create a new client object and fill defaults
sub new {
my ($class, $options) = @_;
my $self = {
- # per=client-object settings
- _host => 'localhost',
- _port => 3312,
- _path => undef,
- _socket => undef,
-
- # per-query settings
- _offset => 0,
- _limit => 20,
- _mode => SPH_MATCH_ALL,
- _weights => [],
- _sort => SPH_SORT_RELEVANCE,
- _sortby => "",
- _min_id => 0,
- _max_id => 0,
- _filters => [],
- _groupby => "",
- _groupdistinct => "",
- _groupfunc => SPH_GROUPBY_DAY,
- _groupsort => '@group desc',
- _maxmatches => 1000,
- _cutoff => 0,
- _retrycount => 0,
- _retrydelay => 0,
- _anchor => undef,
- _indexweights => undef,
- _ranker => SPH_RANK_PROXIMITY_BM25,
- _maxquerytime => 0,
- _fieldweights => {},
- _overrides => {},
- _select => q{*},
-
- # per-reply fields (for single-query case)
- _error => '',
- _warning => '',
- _connerror => '',
-
- # request storage (for multi-query case)
- _reqs => [],
- _timeout => 0,
-
- _string_encoder => \&encode_utf8,
- _string_decoder => \&decode_utf8,
- };
+ # per-client-object settings
+ _host => 'localhost',
+ _port => 3312,
+ _path => undef,
+ _socket => undef,
+
+ # per-query settings
+ _offset => 0,
+ _limit => 20,
+ _mode => SPH_MATCH_ALL,
+ _weights => [],
+ _sort => SPH_SORT_RELEVANCE,
+ _sortby => "",
+ _min_id => 0,
+ _max_id => 0,
+ _filters => [],
+ _groupby => "",
+ _groupdistinct => "",
+ _groupfunc => SPH_GROUPBY_DAY,
+ _groupsort => '@group desc',
+ _maxmatches => 1000,
+ _cutoff => 0,
+ _retrycount => 0,
+ _retrydelay => 0,
+ _anchor => undef,
+ _indexweights => undef,
+ _ranker => SPH_RANK_PROXIMITY_BM25,
+ _maxquerytime => 0,
+ _fieldweights => {},
+ _overrides => {},
+ _select => q{*},
+
+ # per-reply fields (for single-query case)
+ _error => '',
+ _warning => '',
+ _connerror => '',
+
+ # request storage (for multi-query case)
+ _reqs => [],
+ _timeout => 0,
+
+ _string_encoder => \&encode_utf8,
+ _string_decoder => \&decode_utf8,
+ };
bless $self, ref($class) || $class;
# These options are supported in the constructor, but not recommended
# since there is no validation. Use the Set* methods instead.
my %legal_opts = map { $_ => 1 } qw/host port offset limit mode weights sort sortby groupby groupbyfunc maxmatches cutoff retrycount retrydelay log debug string_encoder string_decoder/;
for my $opt (keys %$options) {
- $self->{'_' . $opt} = $options->{$opt} if $legal_opts{$opt};
+ $self->{'_' . $opt} = $options->{$opt} if $legal_opts{$opt};
}
# Disable debug unless we have something to log to
$self->{_debug} = 0 unless $self->{_log};
return $self;
}
=head1 METHODS
=cut
sub _Error {
my ($self, $msg) = @_;
$self->{_error} = $msg;
$self->{_log}->error($msg) if $self->{_log};
}
=head2 GetLastError
$error = $sph->GetLastError;
Get last error message (string)
=cut
sub GetLastError {
my $self = shift;
return $self->{_error};
}
sub _Warning {
my ($self, $msg) = @_;
$self->{_warning} = $msg;
$self->{_log}->warn($msg) if $self->{_log};
}
=head2 GetLastWarning
$warning = $sph->GetLastWarning;
Get last warning message (string)
=cut
sub GetLastWarning {
my $self = shift;
return $self->{_warning};
}
=head2 IsConnectError
Check connection error flag (to differentiate between network connection errors
and bad responses). Returns true value on connection error.
=cut
sub IsConnectError {
return shift->{_connerror};
}
=head2 SetEncoders
$sph->SetEncoders(\&encode_function, \&decode_function)
COMPATIBILITY NOTE: SetEncoders() was introduced in version 0.17.
Prior to that, all strings were considered to be sequences of bytes
which may have led to issues with multi-byte characters. If you were
previously encoding/decoding strings external to Sphinx::Search, you
will need to disable encoding/decoding by setting Sphinx::Search to
use raw values as explained below (or modify your code and let
Sphinx::Search do the recoding).
Set the string encoder/decoder functions for transferring strings
between perl and Sphinx. The encoder should take the perl internal
representation and convert to the bytestream that searchd expects, and
the decoder should take the bytestream returned by searchd and convert to
perl format.
The searchd format will depend on the 'charset_type' index setting in
the Sphinx configuration file.
The coders default to encode_utf8 and decode_utf8 respectively, which
are compatible with the 'utf8' charset_type.
If either the encoder or decoder functions are left undefined in the
call to SetEncoders, they return to their default values.
If you wish to send raw values (no encoding/decoding), supply a
function that simply returns its argument, e.g.
$sph->SetEncoders( sub { shift }, sub { shift });
Returns $sph.
=cut
sub SetEncoders {
my $self = shift;
my $encoder = shift;
my $decoder = shift;
$self->{_string_encoder} = $encoder ? $encoder : \&encode_utf8;
$self->{_string_decoder} = $decoder ? $decoder : \&decode_utf8;
return $self;
}
=head2 SetServer
$sph->SetServer($host, $port);
$sph->SetServer($path, $port);
In the first form, sets the host (string) and port (integer) details for the
searchd server using a network (INET) socket.
In the second form, where $path is a local filesystem path (optionally prefixed
by 'unix://'), sets the client to access the searchd server via a local (UNIX
domain) socket at the specified path.
Returns $sph.
=cut
sub SetServer {
my $self = shift;
my $host = shift;
my $port = shift;
croak("host is not defined") unless defined($host);
$self->{_path} = $host, return if substr($host, 0, 1) eq '/';
$self->{_path} = substr($host, 7), return if substr($host, 0, 7) eq 'unix://';
croak("port is not an integer") unless defined($port) && $port =~ m/^\d+$/o;
$self->{_host} = $host;
$self->{_port} = $port;
$self->{_path} = undef;
return $self;
}
=head2 SetConnectTimeout
$sph->SetConnectTimeout($timeout)
Set server connection timeout (in seconds).
Returns $sph.
=cut
sub SetConnectTimeout {
my $self = shift;
my $timeout = shift;
croak("timeout is not numeric") unless ($timeout =~ m/$num_re/);
$self->{_timeout} = $timeout;
}
sub _Send {
my $self = shift;
my $fp = shift;
my $data = shift;
$self->{_log}->debug("Writing to socket") if $self->{_debug};
$fp->write($data); return 1;
if ($fp->eof || ! $fp->write($data)) {
- $self->_Error("connection unexpectedly closed (timed out?): $!");
- $self->{_connerror} = 1;
- return 0;
+ $self->_Error("connection unexpectedly closed (timed out?): $!");
+ $self->{_connerror} = 1;
+ return 0;
}
return 1;
}
# connect to searchd server
sub _Connect {
my $self = shift;
return $self->{_socket} if $self->{_socket};
my $debug = $self->{_debug};
my $str_dest = $self->{_path} ? 'unix://' . $self->{_path} : "$self->{_host}:$self->{_port}";
$self->{_log}->debug("Connecting to $str_dest") if $debug;
# connect socket
$self->{_connerror} = q{};
my $fp;
- my %params = (); # ( Blocking => 0 );
+ my %params = (); # ( Blocking => 0 );
$params{Timeout} = $self->{_timeout} if $self->{_timeout};
if ($self->{_path}) {
$fp = IO::Socket::UNIX->new( Peer => $self->{_path},
- %params,
- );
- }
- else {
+ %params,
+ );
+ } else {
$fp = IO::Socket::INET->new( PeerPort => $self->{_port},
- PeerAddr => $self->{_host},
- Proto => 'tcp',
- %params,
- );
+ PeerAddr => $self->{_host},
+ Proto => 'tcp',
+ %params,
+ );
}
if (! $fp) {
$self->_Error("Failed to open connection to $str_dest: $!");
$self->{_connerror} = 1;
return 0;
}
binmode($fp, ':bytes');
# check version
my $buf = '';
$fp->read($buf, 4) or do {
$self->_Error("Failed on initial read from $str_dest: $!");
$self->{_connerror} = 1;
return 0;
};
my $v = unpack("N*", $buf);
$v = int($v);
$self->{_log}->debug("Got version $v from searchd") if $debug;
if ($v < 1) {
close($fp);
$self->_Error("expected searchd protocol version 1+, got version '$v'");
return 0;
}
$self->{_log}->debug("Sending version") if $debug;
# All ok, send my version
$self->_Send($fp, pack("N", 1)) or return 0;
$self->{_log}->debug("Connection complete") if $debug;
return $fp;
}
#-------------------------------------------------------------
# get and check response packet from searchd server
sub _GetResponse {
my $self = shift;
my $fp = shift;
my $client_ver = shift;
my $header;
defined($fp->read($header, 8, 0)) or do {
$self->_Error("read failed: $!");
return 0;
};
my ($status, $ver, $len ) = unpack("n2N", $header);
- my $response = q{};
+ my $response = q{};
my $lasterror = q{};
my $lentotal = 0;
while (my $rlen = $fp->read(my $chunk, $len)) {
$lasterror = $!, last if $rlen < 0;
$response .= $chunk;
$lentotal += $rlen;
last if $lentotal >= $len;
}
- close($fp) unless $self->{_socket};
+ close($fp) unless $self->{_socket};
# check response
- if ( length($response) != $len ) {
+ if ( length($response) != $len ) {
$self->_Error( $len
- ? "failed to read searchd response (status=$status, ver=$ver, len=$len, read=". length($response) . ", last error=$lasterror)"
- : "received zero-sized searchd response");
+ ? "failed to read searchd response (status=$status, ver=$ver, len=$len, read=". length($response) . ", last error=$lasterror)"
+ : "received zero-sized searchd response");
return 0;
}
# check status
- if ( $status==SEARCHD_WARNING ) {
+ if ( $status==SEARCHD_WARNING ) {
my ($wlen) = unpack ( "N*", substr ( $response, 0, 4 ) );
$self->_Warning(substr ( $response, 4, $wlen ));
return substr ( $response, 4+$wlen );
}
- if ( $status==SEARCHD_ERROR ) {
+ if ( $status==SEARCHD_ERROR ) {
$self->_Error("searchd error: " . substr ( $response, 4 ));
return 0;
}
if ( $status==SEARCHD_RETRY ) {
$self->_Error("temporary searchd error: " . substr ( $response, 4 ));
- return 0;
- }
- if ( $status!=SEARCHD_OK ) {
- $self->_Error("unknown status code '$status'");
- return 0;
+ return 0;
+ }
+ if ( $status!=SEARCHD_OK ) {
+ $self->_Error("unknown status code '$status'");
+ return 0;
}
# check version
- if ( $ver<$client_ver ) {
+ if ( $ver<$client_ver ) {
$self->_Warning(sprintf ( "searchd command v.%d.%d older than client's v.%d.%d, some options might not work",
- $ver>>8, $ver&0xff, $client_ver>>8, $client_ver&0xff ));
+ $ver>>8, $ver&0xff, $client_ver>>8, $client_ver&0xff ));
}
- return $response;
+ return $response;
}
=head2 SetLimits
$sph->SetLimits($offset, $limit);
$sph->SetLimits($offset, $limit, $max);
Set match offset/limits, and optionally the max number of matches to return.
Returns $sph.
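For example, to fetch the third page of ten results while capping the
server-side match count at 1000 (all values are illustrative):
    $sph->SetLimits(20, 10, 1000);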
=cut
sub SetLimits {
my $self = shift;
my $offset = shift;
my $limit = shift;
my $max = shift || 0;
croak("offset should be an integer >= 0") unless ($offset =~ /^\d+$/ && $offset >= 0) ;
croak("limit should be an integer >= 0") unless ($limit =~ /^\d+$/ && $limit >= 0);
$self->{_offset} = $offset;
$self->{_limit} = $limit;
- if($max > 0) {
- $self->{_maxmatches} = $max;
+ if ($max > 0) {
+ $self->{_maxmatches} = $max;
}
return $self;
}
=head2 SetMaxQueryTime
$sph->SetMaxQueryTime($millisec);
Set maximum query time, in milliseconds, per index.
The value may not be negative; 0 means "do not limit".
Returns $sph.
=cut
sub SetMaxQueryTime {
my $self = shift;
my $max = shift;
croak("max value should be an integer >= 0") unless ($max =~ /^\d+$/ && $max >= 0) ;
$self->{_maxquerytime} = $max;
return $self;
}
=head2 SetMatchMode
$sph->SetMatchMode($mode);
Set match mode, which may be one of:
=over 4
=item * SPH_MATCH_ALL
Match all words
=item * SPH_MATCH_ANY
Match any words
=item * SPH_MATCH_PHRASE
Exact phrase match
=item * SPH_MATCH_BOOLEAN
Boolean match, using AND (&), OR (|), NOT (!,-) and parenthetic grouping.
=item * SPH_MATCH_EXTENDED
Extended match, which includes the Boolean syntax plus field, phrase and
proximity operators.
=back
Returns $sph.
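For example, a minimal sketch using the extended syntax (the 'title'
field name is hypothetical):
    $sph->SetMatchMode(SPH_MATCH_EXTENDED);
    my $results = $sph->Query('@title "exact phrase" & fast');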
=cut
sub SetMatchMode {
- my $self = shift;
- my $mode = shift;
- croak("Match mode not defined") unless defined($mode);
- croak("Unknown matchmode: $mode") unless ( $mode==SPH_MATCH_ALL
- || $mode==SPH_MATCH_ANY
- || $mode==SPH_MATCH_PHRASE
- || $mode==SPH_MATCH_BOOLEAN
- || $mode==SPH_MATCH_EXTENDED
- || $mode==SPH_MATCH_FULLSCAN
- || $mode==SPH_MATCH_EXTENDED2 );
- $self->{_mode} = $mode;
+ my $self = shift;
+ my $mode = shift;
+ croak("Match mode not defined") unless defined($mode);
+ croak("Unknown matchmode: $mode") unless ( $mode==SPH_MATCH_ALL
+ || $mode==SPH_MATCH_ANY
+ || $mode==SPH_MATCH_PHRASE
+ || $mode==SPH_MATCH_BOOLEAN
+ || $mode==SPH_MATCH_EXTENDED
+ || $mode==SPH_MATCH_FULLSCAN
+ || $mode==SPH_MATCH_EXTENDED2 );
+ $self->{_mode} = $mode;
return $self;
}
=head2 SetRankingMode
$sph->SetRankingMode(SPH_RANK_BM25);
Set ranking mode, which may be one of:
=over 4
=item * SPH_RANK_PROXIMITY_BM25
Default mode, phrase proximity major factor and BM25 minor one
=item * SPH_RANK_BM25
Statistical mode, BM25 ranking only (faster but worse quality)
=item * SPH_RANK_NONE
No ranking, all matches get a weight of 1
=item * SPH_RANK_WORDCOUNT
Simple word-count weighting, rank is a weighted sum of per-field keyword
occurrence counts
=back
Returns $sph.
=cut
sub SetRankingMode {
my $self = shift;
my $ranker = shift;
croak("Unknown ranking mode: $ranker") unless ( $ranker==SPH_RANK_PROXIMITY_BM25
- || $ranker==SPH_RANK_BM25
- || $ranker==SPH_RANK_NONE
- || $ranker==SPH_RANK_WORDCOUNT
- || $ranker==SPH_RANK_PROXIMITY );
+ || $ranker==SPH_RANK_BM25
+ || $ranker==SPH_RANK_NONE
+ || $ranker==SPH_RANK_WORDCOUNT
+ || $ranker==SPH_RANK_PROXIMITY );
$self->{_ranker} = $ranker;
return $self;
}
=head2 SetSortMode
$sph->SetSortMode(SPH_SORT_RELEVANCE);
$sph->SetSortMode($mode, $sortby);
Set sort mode, which may be any of:
=over 4
=item SPH_SORT_RELEVANCE - sort by relevance
=item SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC
Sort by attribute descending/ascending. $sortby specifies the sorting attribute.
=item SPH_SORT_TIME_SEGMENTS
Sort by time segments (last hour/day/week/month) in descending order, and then
by relevance in descending order. $sortby specifies the time attribute.
=item SPH_SORT_EXTENDED
Sort by SQL-like syntax. $sortby is the sorting specification.
=item SPH_SORT_EXPR
=back
Returns $sph.
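For example, to return the newest matches first by a timestamp
attribute (the 'created_at' attribute name is hypothetical):
    $sph->SetSortMode(SPH_SORT_ATTR_DESC, 'created_at');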
=cut
sub SetSortMode {
- my $self = shift;
- my $mode = shift;
+ my $self = shift;
+ my $mode = shift;
my $sortby = shift || "";
- croak("Sort mode not defined") unless defined($mode);
- croak("Unknown sort mode: $mode") unless ( $mode == SPH_SORT_RELEVANCE
- || $mode == SPH_SORT_ATTR_DESC
- || $mode == SPH_SORT_ATTR_ASC
- || $mode == SPH_SORT_TIME_SEGMENTS
- || $mode == SPH_SORT_EXTENDED
- || $mode == SPH_SORT_EXPR
- );
+ croak("Sort mode not defined") unless defined($mode);
+ croak("Unknown sort mode: $mode") unless ( $mode == SPH_SORT_RELEVANCE
+ || $mode == SPH_SORT_ATTR_DESC
+ || $mode == SPH_SORT_ATTR_ASC
+ || $mode == SPH_SORT_TIME_SEGMENTS
+ || $mode == SPH_SORT_EXTENDED
+ || $mode == SPH_SORT_EXPR
+ );
croak("Sortby must be defined") unless ($mode==SPH_SORT_RELEVANCE || length($sortby));
- $self->{_sort} = $mode;
+ $self->{_sort} = $mode;
$self->{_sortby} = $sortby;
return $self;
}
=head2 SetWeights
$sph->SetWeights([ 1, 2, 3, 4]);
This method is deprecated. Use L<SetFieldWeights> instead.
Set per-field (integer) weights. The ordering of the weights correspond to the
ordering of fields as indexed.
Returns $sph.
=cut
sub SetWeights {
- my $self = shift;
- my $weights = shift;
- croak("Weights is not an array reference") unless (ref($weights) eq 'ARRAY');
- foreach my $weight (@$weights) {
- croak("Weight: $weight is not an integer") unless ($weight =~ /^\d+$/);
- }
- $self->{_weights} = $weights;
+ my $self = shift;
+ my $weights = shift;
+ croak("Weights is not an array reference") unless (ref($weights) eq 'ARRAY');
+ foreach my $weight (@$weights) {
+ croak("Weight: $weight is not an integer") unless ($weight =~ /^\d+$/);
+ }
+ $self->{_weights} = $weights;
return $self;
}
=head2 SetFieldWeights
$sph->SetFieldWeights(\%weights);
Set per-field (integer) weights by field name. The weights hash provides field
name to weight mappings.
Takes precedence over L<SetWeights>.
Unknown names will be silently ignored. Missing fields will be given a weight of 1.
Returns $sph.
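For example, to make title matches count ten times as much as body
matches (the field names are hypothetical and must exist in the index):
    $sph->SetFieldWeights({ title => 10, body => 1 });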
=cut
sub SetFieldWeights {
- my $self = shift;
- my $weights = shift;
- croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
- foreach my $field (keys %$weights) {
+ my $self = shift;
+ my $weights = shift;
+ croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
+ foreach my $field (keys %$weights) {
croak("Weight: $weights->{$field} is not an integer >= 0") unless ($weights->{$field} =~ /^\d+$/);
- }
- $self->{_fieldweights} = $weights;
+ }
+ $self->{_fieldweights} = $weights;
return $self;
}
=head2 SetIndexWeights
$sph->SetIndexWeights(\%weights);
Set per-index (integer) weights. The weights hash is a mapping of index name to integer weight.
Returns $sph.
=cut
sub SetIndexWeights {
- my $self = shift;
- my $weights = shift;
- croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
- foreach (keys %$weights) {
- croak("IndexWeight $_: $weights->{$_} is not an integer") unless ($weights->{$_} =~ /^\d+$/);
- }
- $self->{_indexweights} = $weights;
+ my $self = shift;
+ my $weights = shift;
+ croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
+ foreach (keys %$weights) {
+ croak("IndexWeight $_: $weights->{$_} is not an integer") unless ($weights->{$_} =~ /^\d+$/);
+ }
+ $self->{_indexweights} = $weights;
return $self;
}
=head2 SetIDRange
$sph->SetIDRange($min, $max);
Set an ID range: only match those records where the document ID
is between $min and $max (inclusive).
Returns $sph.
=cut
sub SetIDRange {
my $self = shift;
my $min = shift;
my $max = shift;
croak("min_id is not numeric") unless ($min =~ m/$num_re/);
croak("max_id is not numeric") unless ($max =~ m/$num_re/);
croak("min_id is larger than or equal to max_id") unless ($min < $max);
$self->{_min_id} = $min;
$self->{_max_id} = $max;
return $self;
}
=head2 SetFilter
$sph->SetFilter($attr, \@values);
$sph->SetFilter($attr, \@values, $exclude);
Sets the results to be filtered on the given attribute. Only results which have
attributes matching the given (numeric) values will be returned.
This may be called multiple times with different attributes to select on
multiple attributes.
If 'exclude' is set, excludes results that match the filter.
Returns $sph.
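For example, to keep documents whose 'group_id' is 1 or 5 while
excluding those from author 123 (both attribute names are hypothetical):
    $sph->SetFilter('group_id', [ 1, 5 ]);
    $sph->SetFilter('author_id', [ 123 ], 1);  # exclude matches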
=cut
sub SetFilter {
my ($self, $attribute, $values, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("values is not an array reference") unless (ref($values) eq 'ARRAY');
croak("values reference is empty") unless (scalar(@$values));
foreach my $value (@$values) {
- croak("value $value is not numeric") unless ($value =~ m/$num_re/);
+ croak("value $value is not numeric") unless ($value =~ m/$num_re/);
}
push(@{$self->{_filters}}, {
- type => SPH_FILTER_VALUES,
- attr => $attribute,
- values => $values,
- exclude => $exclude ? 1 : 0,
- });
+ type => SPH_FILTER_VALUES,
+ attr => $attribute,
+ values => $values,
+ exclude => $exclude ? 1 : 0,
+ });
return $self;
}
=head2 SetFilterRange
$sph->SetFilterRange($attr, $min, $max);
$sph->SetFilterRange($attr, $min, $max, $exclude);
Sets the results to be filtered on a range of values for the given
attribute. Only those records where $attr column value is between $min and $max
(including $min and $max) will be returned.
If 'exclude' is set, excludes results that fall within the given range.
Returns $sph.
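For example, to match only documents published between 2000 and 2009
inclusive (the 'published_year' attribute is hypothetical):
    $sph->SetFilterRange('published_year', 2000, 2009);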
=cut
sub SetFilterRange {
my ($self, $attribute, $min, $max, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("min: $min is not an integer") unless ($min =~ m/$num_re/);
croak("max: $max is not an integer") unless ($max =~ m/$num_re/);
croak("min value should be <= max") unless ($min <= $max);
push(@{$self->{_filters}}, {
- type => SPH_FILTER_RANGE,
- attr => $attribute,
- min => $min,
- max => $max,
- exclude => $exclude ? 1 : 0,
- });
+ type => SPH_FILTER_RANGE,
+ attr => $attribute,
+ min => $min,
+ max => $max,
+ exclude => $exclude ? 1 : 0,
+ });
return $self;
}
=head2 SetFilterFloatRange
$sph->SetFilterFloatRange($attr, $min, $max, $exclude);
Same as L<SetFilterRange>, but allows floating point values.
Returns $sph.
=cut
sub SetFilterFloatRange {
my ($self, $attribute, $min, $max, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("min: $min is not numeric") unless ($min =~ m/$num_re/);
croak("max: $max is not numeric") unless ($max =~ m/$num_re/);
croak("min value should be <= max") unless ($min <= $max);
push(@{$self->{_filters}}, {
- type => SPH_FILTER_FLOATRANGE,
- attr => $attribute,
- min => $min,
- max => $max,
- exclude => $exclude ? 1 : 0,
- });
+ type => SPH_FILTER_FLOATRANGE,
+ attr => $attribute,
+ min => $min,
+ max => $max,
+ exclude => $exclude ? 1 : 0,
+ });
return $self;
}
=head2 SetGeoAnchor
$sph->SetGeoAnchor($attrlat, $attrlong, $lat, $long);
Setup anchor point for using geosphere distance calculations in filters and sorting.
Distance will be computed with respect to this point.
=over 4
=item $attrlat is the name of latitude attribute
=item $attrlong is the name of longitude attribute
=item $lat is anchor point latitude, in radians
=item $long is anchor point longitude, in radians
=back
Returns $sph.
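Since the anchor coordinates must be given in radians, here is a sketch
converting from degrees (the 'lat' and 'long' attribute names are
hypothetical; Math::Trig is a core module):
    use Math::Trig qw/deg2rad/;
    $sph->SetGeoAnchor('lat', 'long', deg2rad(-33.87), deg2rad(151.21));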
=cut
sub SetGeoAnchor {
my ($self, $attrlat, $attrlong, $lat, $long) = @_;
croak("attrlat is not defined") unless defined $attrlat;
croak("attrlong is not defined") unless defined $attrlong;
croak("lat: $lat is not numeric") unless ($lat =~ m/$num_re/);
croak("long: $long is not numeric") unless ($long =~ m/$num_re/);
$self->{_anchor} = {
- attrlat => $attrlat,
- attrlong => $attrlong,
- lat => $lat,
- long => $long,
- };
+ attrlat => $attrlat,
+ attrlong => $attrlong,
+ lat => $lat,
+ long => $long,
+ };
return $self;
}
=head2 SetGroupBy
$sph->SetGroupBy($attr, $func);
$sph->SetGroupBy($attr, $func, $groupsort);
Sets attribute and function of results grouping.
In grouping mode, all matches are assigned to different groups based on grouping
function value. Each group keeps track of the total match count, and the best
match (in this group) according to current sorting function. The final result
set contains one best match per group, with grouping function value and matches
count attached.
$attr is any valid attribute. Use L<ResetGroupBy> to disable grouping.
$func is one of:
=over 4
=item * SPH_GROUPBY_DAY
Group by day (assumes timestamp type attribute of form YYYYMMDD)
=item * SPH_GROUPBY_WEEK
Group by week (assumes timestamp type attribute of form YYYYNNN)
=item * SPH_GROUPBY_MONTH
Group by month (assumes timestamp type attribute of form YYYYMM)
=item * SPH_GROUPBY_YEAR
Group by year (assumes timestamp type attribute of form YYYY)
=item * SPH_GROUPBY_ATTR
Group by attribute value
=item * SPH_GROUPBY_ATTRPAIR
Group by two attributes, being the given attribute and the attribute that
immediately follows it in the sequence of indexed attributes. The specified
attribute may therefore not be the last of the indexed attributes.
=back
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal Sphinx
attributes:
=over 4
=item @id - document ID;
=item @weight, @rank, @relevance - match weight;
=item @group - group by function value;
=item @count - number of matches in group.
=back
The default mode is to sort by groupby value in descending order,
ie. by "@group desc".
In the results set, "total_found" contains the total amount of matching groups
over the whole index.
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported
in total_found than actually present. @count might also
be underestimated.
For example, if sorting by relevance and grouping by a "published"
attribute with SPH_GROUPBY_DAY function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (ie. recent days first).
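A minimal sketch of that example (the 'published' attribute is
hypothetical):
    $sph->SetGroupBy('published', SPH_GROUPBY_DAY);
    my $results = $sph->Query('search terms');
    # one best match per day, with group value and per-group count attached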
=cut
sub SetGroupBy {
my $self = shift;
my $attribute = shift;
my $func = shift;
my $groupsort = shift || '@group desc';
croak("attribute is not defined") unless (defined $attribute);
croak("Unknown grouping function: $func") unless ($func==SPH_GROUPBY_DAY
- || $func==SPH_GROUPBY_WEEK
- || $func==SPH_GROUPBY_MONTH
- || $func==SPH_GROUPBY_YEAR
- || $func==SPH_GROUPBY_ATTR
- || $func==SPH_GROUPBY_ATTRPAIR
- );
+ || $func==SPH_GROUPBY_WEEK
+ || $func==SPH_GROUPBY_MONTH
+ || $func==SPH_GROUPBY_YEAR
+ || $func==SPH_GROUPBY_ATTR
+ || $func==SPH_GROUPBY_ATTRPAIR
+ );
$self->{_groupby} = $attribute;
$self->{_groupfunc} = $func;
$self->{_groupsort} = $groupsort;
return $self;
}
=head2 SetGroupDistinct
$sph->SetGroupDistinct($attr);
Set count-distinct attribute for group-by queries
=cut
sub SetGroupDistinct {
my $self = shift;
my $attribute = shift;
croak("attribute is not defined") unless (defined $attribute);
$self->{_groupdistinct} = $attribute;
return $self;
}
=head2 SetRetries
$sph->SetRetries($count, $delay);
Set distributed retries count and delay
=cut
sub SetRetries {
my $self = shift;
my $count = shift;
my $delay = shift || 0;
croak("count: $count is not an integer >= 0") unless ($count =~ /^\d+$/o && $count >= 0);
croak("delay: $delay is not an integer >= 0") unless ($delay =~ /^\d+$/o && $delay >= 0);
$self->{_retrycount} = $count;
$self->{_retrydelay} = $delay;
return $self;
}
=head2 SetOverride
$sph->SetOverride($attrname, $attrtype, $values);
Set attribute values override. There can be only one override per attribute.
$values must be a hash that maps document IDs to attribute values
=cut
sub SetOverride {
my $self = shift;
my $attrname = shift;
my $attrtype = shift;
my $values = shift;
croak("attribute name is not defined") unless defined $attrname;
croak("Uknown attribute type: $attrtype") unless ($attrtype == SPH_ATTR_INTEGER
- || $attrtype == SPH_ATTR_TIMESTAMP
- || $attrtype == SPH_ATTR_BOOL
- || $attrtype == SPH_ATTR_FLOAT
- || $attrtype == SPH_ATTR_BIGINT);
+ || $attrtype == SPH_ATTR_TIMESTAMP
+ || $attrtype == SPH_ATTR_BOOL
+ || $attrtype == SPH_ATTR_FLOAT
+ || $attrtype == SPH_ATTR_BIGINT);
$self->{_overrides}->{$attrname} = { attr => $attrname,
- type => $attrtype,
- values => $values,
- };
+ type => $attrtype,
+ values => $values,
+ };
return $self;
}
=head2 SetSelect
$sph->SetSelect($select)
Set select list (attributes or expressions). SQL-like syntax.
=cut
sub SetSelect {
my $self = shift;
$self->{_select} = shift;
return $self;
}
=head2 ResetFilters
$sph->ResetFilters;
Clear all filters.
=cut
sub ResetFilters {
my $self = shift;
$self->{_filters} = [];
$self->{_anchor} = undef;
return $self;
}
=head2 ResetGroupBy
$sph->ResetGroupBy;
Clear all group-by settings (for multi-queries)
=cut
sub ResetGroupBy {
my $self = shift;
$self->{_groupby} = "";
$self->{_groupfunc} = SPH_GROUPBY_DAY;
$self->{_groupsort} = '@group desc';
$self->{_groupdistinct} = "";
return $self;
}
=head2 ResetOverrides
Clear all attribute value overrides (for multi-queries)
=cut
sub ResetOverrides {
my $self = shift;
$self->{_overrides} = {};
return $self;
}
=head2 Query
$results = $sph->Query($query, $index);
Connect to searchd server and run given search query.
=over 4
=item query is query string
=item index is index name to query, default is "*" which means to query all indexes. Use a space or comma separated list to search multiple indexes.
=back
Returns undef on failure
Returns hash which has the following keys on success:
=over 4
=item matches
Array containing hashes with found documents ( "doc", "weight", "group", "stamp" )
=item total
Total amount of matches retrieved (up to SPH_MAX_MATCHES, see sphinx.h)
=item total_found
Total amount of matching documents in index
=item time
Search time
=item words
Hash which maps query terms (stemmed!) to ( "docs", "hits" ) hash
=back
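For example, a minimal usage sketch (the index name 'myindex' is
hypothetical):
    my $results = $sph->Query('search terms', 'myindex');
    if ($results) {
        for my $match (@{$results->{matches}}) {
            print "doc=$match->{doc} weight=$match->{weight}\n";
        }
    } else {
        warn $sph->GetLastError;
    }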
=cut
sub Query {
my $self = shift;
my $query = shift;
my $index = shift || '*';
my $comment = shift || '';
croak("_reqs is not empty") unless @{$self->{_reqs}} == 0;
$self->AddQuery($query, $index, $comment);
my $results = $self->RunQueries or return;
$self->_Error($results->[0]->{error}) if $results->[0]->{error};
$self->_Warning($results->[0]->{warning}) if $results->[0]->{warning};
return if $results->[0]->{status} && $results->[0]->{status} == SEARCHD_ERROR;
return $results->[0];
}
# helper to pack floats in network byte order
sub _PackFloat {
my $f = shift;
- my $t1 = pack ( "f", $f ); # machine order
+ my $t1 = pack ( "f", $f ); # machine order
my $t2 = unpack ( "L*", $t1 ); # int in machine order
return pack ( "N", $t2 );
}
=head2 AddQuery
$sph->AddQuery($query, $index);
Add a query to a batch request.
Batch queries enable searchd to perform internal optimizations where
possible, and they reduce network connection overhead in all cases.
For instance, running exactly the same query with different group-by
settings lets searchd perform the expensive full-text search and
ranking operation only once, then compute multiple group-by results
from its output.
Parameters are exactly the same as in a Query() call.
Returns the index of this query in the results array returned by a later
RunQueries() call.
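For example, a sketch that reuses one full-text pass for an ungrouped
and a grouped result set (the 'author_id' attribute is hypothetical,
and no group-by is assumed to be set beforehand):
    my $i1 = $sph->AddQuery('search terms');
    $sph->SetGroupBy('author_id', SPH_GROUPBY_ATTR);
    my $i2 = $sph->AddQuery('search terms');
    my $results = $sph->RunQueries;
    # $results->[$i1] is ungrouped; $results->[$i2] is grouped by author_id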
=cut
sub AddQuery {
my $self = shift;
my $query = shift;
my $index = shift || '*';
my $comment = shift || '';
##################
# build request
##################
my $req;
$req = pack ( "NNNNN", $self->{_offset}, $self->{_limit}, $self->{_mode}, $self->{_ranker}, $self->{_sort} ); # mode and limits
$req .= pack ( "N/a*", $self->{_sortby});
$req .= pack ( "N/a*", $self->{_string_encoder}->($query) ); # query itself
$req .= pack ( "N*", scalar(@{$self->{_weights}}), @{$self->{_weights}});
$req .= pack ( "N/a*", $index); # indexes
$req .= pack ( "N", 1)
- . $self->_sphPackU64($self->{_min_id})
- . $self->_sphPackU64($self->{_max_id}); # id64 range
+ . $self->_sphPackU64($self->{_min_id})
+ . $self->_sphPackU64($self->{_max_id}); # id64 range
# filters
$req .= pack ( "N", scalar @{$self->{_filters}} );
foreach my $filter (@{$self->{_filters}}) {
- $req .= pack ( "N/a*", $filter->{attr});
- $req .= pack ( "N", $filter->{type});
-
- my $t = $filter->{type};
- if ($t == SPH_FILTER_VALUES) {
- $req .= $self->_sphPackI64array($filter->{values});
- } elsif ($t == SPH_FILTER_RANGE) {
- $req .= $self->_sphPackI64($filter->{min}) . $self->_sphPackI64($filter->{max});
- } elsif ($t == SPH_FILTER_FLOATRANGE) {
- $req .= _PackFloat ( $filter->{"min"} ) . _PackFloat ( $filter->{"max"} );
- } else {
- croak("Unhandled filter type $t");
- }
- $req .= pack ( "N", $filter->{exclude});
-}
+ $req .= pack ( "N/a*", $filter->{attr});
+ $req .= pack ( "N", $filter->{type});
+
+ my $t = $filter->{type};
+ if ($t == SPH_FILTER_VALUES) {
+ $req .= $self->_sphPackI64array($filter->{values});
+ } elsif ($t == SPH_FILTER_RANGE) {
+ $req .= $self->_sphPackI64($filter->{min}) . $self->_sphPackI64($filter->{max});
+ } elsif ($t == SPH_FILTER_FLOATRANGE) {
+ $req .= _PackFloat ( $filter->{"min"} ) . _PackFloat ( $filter->{"max"} );
+ } else {
+ croak("Unhandled filter type $t");
+ }
+ $req .= pack ( "N", $filter->{exclude});
+ }
# group-by clause, max-matches count, group-sort clause, cutoff count
$req .= pack ( "NN/a*", $self->{_groupfunc}, $self->{_groupby} );
$req .= pack ( "N", $self->{_maxmatches} );
$req .= pack ( "N/a*", $self->{_groupsort});
$req .= pack ( "NNN", $self->{_cutoff}, $self->{_retrycount}, $self->{_retrydelay} );
$req .= pack ( "N/a*", $self->{_groupdistinct});
if (!defined $self->{_anchor}) {
$req .= pack ( "N", 0);
} else {
my $a = $self->{_anchor};
$req .= pack ( "N", 1);
$req .= pack ( "N/a*", $a->{attrlat});
$req .= pack ( "N/a*", $a->{attrlong});
$req .= _PackFloat($a->{lat}) . _PackFloat($a->{long});
}
# per-index weights
$req .= pack( "N", scalar keys %{$self->{_indexweights}});
$req .= pack ( "N/a*N", $_, $self->{_indexweights}->{$_} ) for keys %{$self->{_indexweights}};
# max query time
$req .= pack ( "N", $self->{_maxquerytime} );
# per-field weights
$req .= pack ( "N", scalar keys %{$self->{_fieldweights}} );
$req .= pack ( "N/a*N", $_, $self->{_fieldweights}->{$_}) for keys %{$self->{_fieldweights}};
# comment
$req .= pack ( "N/a*", $comment);
# attribute overrides
$req .= pack ( "N", scalar keys %{$self->{_overrides}} );
for my $entry (values %{$self->{_overrides}}) {
$req .= pack ("N/a*", $entry->{attr})
. pack ("NN", $entry->{type}, scalar keys %{$entry->{values}});
for my $id (keys %{$entry->{values}}) {
croak "Attribute value key is not numeric" unless $id =~ m/$num_re/;
my $v = $entry->{values}->{$id};
croak "Attribute value key is not numeric" unless $v =~ m/$num_re/;
$req .= $self->_sphPackU64($id);
if ($entry->{type} == SPH_ATTR_FLOAT) {
$req .= _PackFloat($v);
} elsif ($entry->{type} == SPH_ATTR_BIGINT) {
$req .= $self->_sphPackI64($v);
} else {
$req .= pack("N", $v);
}
}
}
# select list
$req .= pack("N/a*", $self->{_select} || '');
push(@{$self->{_reqs}}, $req);
return scalar $#{$self->{_reqs}};
}
=head2 RunQueries
$sph->RunQueries
Run batch of queries, as added by AddQuery.
Returns undef on network IO failure.
Returns an array of result sets on success.
Each result set in the returned array is a hash which contains
the same keys as the hash returned by L<Query>, plus:
=over 4
=item * error
Errors, if any, for this query.
=item * warning
Any warning associated with the query.
=back
=cut
sub RunQueries {
my $self = shift;
unless (@{$self->{_reqs}}) {
- $self->_Error("no queries defined, issue AddQuery() first");
- return;
+ $self->_Error("no queries defined, issue AddQuery() first");
+ return;
}
my $fp = $self->_Connect() or do { $self->{_reqs} = []; return };
##################
# send query, get response
##################
my $nreqs = @{$self->{_reqs}};
my $req = pack("Na*", $nreqs, join("", @{$self->{_reqs}}));
$req = pack ( "nnN/a*", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, $req); # add header
$self->_Send($fp, $req);
$self->{_reqs} = [];
my $response = $self->_GetResponse ( $fp, VER_COMMAND_SEARCH );
return unless $response;
##################
# parse response
##################
my $p = 0;
my $max = length($response); # Protection from broken response
my @results;
for (my $ires = 0; $ires < $nreqs; $ires++) {
- my $result = {}; # Empty hash ref
- push(@results, $result);
- $result->{matches} = []; # Empty array ref
- $result->{error} = "";
- $result->{warnings} = "";
-
- # extract status
- my $status = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
- if ($status != SEARCHD_OK) {
- my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
- my $message = substr ( $response, $p, $len ); $p += $len;
- if ($status == SEARCHD_WARNING) {
- $result->{warning} = $message;
- }
- else {
- $result->{error} = $message;
- next;
- }
- }
+ my $result = {}; # Empty hash ref
+ push(@results, $result);
+ $result->{matches} = []; # Empty array ref
+ $result->{error} = "";
+        $result->{warning} = "";
+
+ # extract status
+ my $status = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+ if ($status != SEARCHD_OK) {
+ my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+ my $message = substr ( $response, $p, $len ); $p += $len;
+ if ($status == SEARCHD_WARNING) {
+ $result->{warning} = $message;
+ } else {
+ $result->{error} = $message;
+ next;
+ }
+ }
- # read schema
- my @fields;
- my (%attrs, @attr_list);
+ # read schema
+ my @fields;
+ my (%attrs, @attr_list);
- my $nfields = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
- while ( $nfields-->0 && $p<$max ) {
- my $len = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
- push(@fields, substr ( $response, $p, $len )); $p += $len;
- }
- $result->{"fields"} = \@fields;
-
- my $nattrs = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- while ( $nattrs-->0 && $p<$max ) {
- my $len = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- my $attr = substr ( $response, $p, $len ); $p += $len;
- my $type = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- $attrs{$attr} = $type;
- push(@attr_list, $attr);
- }
- $result->{"attrs"} = \%attrs;
-
- # read match count
- my $count = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- my $id64 = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
-
- # read matches
- while ( $count-->0 && $p<$max ) {
- my $data = {};
- if ($id64) {
- $data->{doc} = $self->_sphUnpackU64(substr($response, $p, 8)); $p += 8;
- $data->{weight} = unpack("N*", substr($response, $p, 4)); $p += 4;
- }
- else {
- ( $data->{doc}, $data->{weight} ) = unpack("N*N*", substr($response,$p,8));
- $p += 8;
- }
- foreach my $attr (@attr_list) {
- if ($attrs{$attr} == SPH_ATTR_BIGINT) {
- $data->{$attr} = $self->_sphUnpackI64(substr($response, $p, 8)); $p += 8;
- next;
- }
- if ($attrs{$attr} == SPH_ATTR_FLOAT) {
- my $uval = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- $data->{$attr} = [ unpack("f*", pack("L", $uval)) ];
- next;
- }
- my $val = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- if ($attrs{$attr} & SPH_ATTR_MULTI) {
- my $nvalues = $val;
- $data->{$attr} = [];
- while ($nvalues-->0 && $p < $max) {
- $val = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
- push(@{$data->{$attr}}, $val);
- }
- }
- else {
- $data->{$attr} = $val;
- }
- }
- push(@{$result->{matches}}, $data);
- }
- my $words;
- ($result->{total}, $result->{total_found}, $result->{time}, $words) = unpack("N*N*N*N*", substr($response, $p, 16));
- $result->{time} = sprintf ( "%.3f", $result->{"time"}/1000 );
- $p += 16;
-
- while ( $words-->0 && $p < $max) {
- my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
- $p += 4;
- my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
- $p += $len;
- my ($docs, $hits) = unpack ("N*N*", substr($response, $p, 8));
- $p += 8;
- $result->{words}{$word} = {
- "docs" => $docs,
- "hits" => $hits
- };
- }
+ my $nfields = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
+ while ( $nfields-->0 && $p<$max ) {
+ my $len = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
+ push(@fields, substr ( $response, $p, $len )); $p += $len;
+ }
+ $result->{"fields"} = \@fields;
+
+ my $nattrs = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ while ( $nattrs-->0 && $p<$max ) {
+ my $len = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ my $attr = substr ( $response, $p, $len ); $p += $len;
+ my $type = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ $attrs{$attr} = $type;
+ push(@attr_list, $attr);
+ }
+ $result->{"attrs"} = \%attrs;
+
+ # read match count
+ my $count = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ my $id64 = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+
+ # read matches
+ while ( $count-->0 && $p<$max ) {
+ my $data = {};
+ if ($id64) {
+ $data->{doc} = $self->_sphUnpackU64(substr($response, $p, 8)); $p += 8;
+ $data->{weight} = unpack("N*", substr($response, $p, 4)); $p += 4;
+ } else {
+ ( $data->{doc}, $data->{weight} ) = unpack("N*N*", substr($response,$p,8));
+ $p += 8;
+ }
+ foreach my $attr (@attr_list) {
+ if ($attrs{$attr} == SPH_ATTR_BIGINT) {
+ $data->{$attr} = $self->_sphUnpackI64(substr($response, $p, 8)); $p += 8;
+ next;
+ }
+ if ($attrs{$attr} == SPH_ATTR_FLOAT) {
+ my $uval = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ $data->{$attr} = [ unpack("f*", pack("L", $uval)) ];
+ next;
+ }
+ my $val = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ if ($attrs{$attr} & SPH_ATTR_MULTI) {
+ my $nvalues = $val;
+ $data->{$attr} = [];
+ while ($nvalues-->0 && $p < $max) {
+ $val = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ push(@{$data->{$attr}}, $val);
+ }
+ } else {
+ $data->{$attr} = $val;
+ }
+ }
+ push(@{$result->{matches}}, $data);
+ }
+ my $words;
+ ($result->{total}, $result->{total_found}, $result->{time}, $words) = unpack("N*N*N*N*", substr($response, $p, 16));
+ $result->{time} = sprintf ( "%.3f", $result->{"time"}/1000 );
+ $p += 16;
+
+ while ( $words-->0 && $p < $max) {
+ my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
+ $p += 4;
+ my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
+ $p += $len;
+ my ($docs, $hits) = unpack ("N*N*", substr($response, $p, 8));
+ $p += 8;
+ $result->{words}{$word} = {
+ "docs" => $docs,
+ "hits" => $hits
+ };
+ }
}
return \@results;
}
=head2 BuildExcerpts
$excerpts = $sph->BuildExcerpts($docs, $index, $words, $opts)
Generate document excerpts for the specified documents.
=over 4
=item docs
An array reference of strings which represent the document
contents
=item index
A string specifying the index whose settings will be used
for stemming, lexing and case folding
=item words
A string which contains the words to highlight
=item opts
A hash which contains additional optional highlighting parameters:
=over 4
=item before_match - a string to insert before a set of matching words, default is "<b>"
=item after_match - a string to insert after a set of matching words, default is "</b>"
=item chunk_separator - a string to insert between excerpts chunks, default is " ... "
=item limit - max excerpt size in symbols (codepoints), default is 256
=item around - how many words to highlight around each match, default is 5
=item exact_phrase - whether to highlight exact phrase matches only, default is false
=item single_passage - whether to extract single best passage only, default is false
=item use_boundaries
=item weight_order
=back
=back
Returns undef on failure.
Returns an array ref of string excerpts on success.
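For example, a usage sketch (the index name and document texts are
hypothetical):
    my $excerpts = $sph->BuildExcerpts(
        [ 'first document text', 'second document text' ],
        'myindex',
        'document',
        { around => 3, limit => 120 },
    );
    print "$_\n" for @{ $excerpts || [] };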
=cut
sub BuildExcerpts {
my ($self, $docs, $index, $words, $opts) = @_;
$opts ||= {};
croak("BuildExcepts() called with incorrect parameters")
- unless (ref($docs) eq 'ARRAY'
- && defined($index)
- && defined($words)
- && ref($opts) eq 'HASH');
- my $fp = $self->_Connect() or return;
+ unless (ref($docs) eq 'ARRAY'
+ && defined($index)
+ && defined($words)
+ && ref($opts) eq 'HASH');
+ my $fp = $self->_Connect() or return;
##################
# fixup options
##################
$opts->{"before_match"} ||= "<b>";
$opts->{"after_match"} ||= "</b>";
$opts->{"chunk_separator"} ||= " ... ";
$opts->{"limit"} ||= 256;
$opts->{"around"} ||= 5;
$opts->{"exact_phrase"} ||= 0;
$opts->{"single_passage"} ||= 0;
$opts->{"use_boundaries"} ||= 0;
$opts->{"weight_order"} ||= 0;
##################
# build request
##################
# v.1.0 req
my $req;
- my $flags = 1; # remove spaces
+ my $flags = 1; # remove spaces
$flags |= 2 if ( $opts->{"exact_phrase"} );
$flags |= 4 if ( $opts->{"single_passage"} );
$flags |= 8 if ( $opts->{"use_boundaries"} );
$flags |= 16 if ( $opts->{"weight_order"} );
$req = pack ( "NN", 0, $flags ); # mode=0, flags=$flags
$req .= pack ( "N/a*", $index ); # req index
$req .= pack ( "N/a*", $self->{_string_encoder}->($words)); # req words
# options
$req .= pack ( "N/a*", $opts->{"before_match"});
$req .= pack ( "N/a*", $opts->{"after_match"});
$req .= pack ( "N/a*", $opts->{"chunk_separator"});
$req .= pack ( "N", int($opts->{"limit"}) );
$req .= pack ( "N", int($opts->{"around"}) );
# documents
$req .= pack ( "N", scalar(@$docs) );
foreach my $doc (@$docs) {
croak('BuildExcerpts: Found empty document in $docs') unless ($doc);
$req .= pack("N/a*", $self->{_string_encoder}->($doc));
}
##########################
- # send query, get response
+ # send query, get response
##########################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, $req); # add header
$self->_Send($fp, $req);
my $response = $self->_GetResponse($fp, VER_COMMAND_EXCERPT);
return unless $response;
my ($pos, $i) = (0, 0);
- my $res = []; # Empty hash ref
- my $rlen = length($response);
- for ( $i=0; $i< scalar(@$docs); $i++ ) {
+    my $res = []; # Empty array ref
+ my $rlen = length($response);
+ for ( $i=0; $i< scalar(@$docs); $i++ ) {
my $len = unpack ( "N*", substr ( $response, $pos, 4 ) );
$pos += 4;
- if ( $pos+$len > $rlen ) {
+ if ( $pos+$len > $rlen ) {
$self->_Error("incomplete reply");
return;
}
push(@$res, $self->{_string_decoder}->( substr ( $response, $pos, $len ) ));
$pos += $len;
- }
- return $res;
+ }
+ return $res;
}
=head2 BuildKeywords
$results = $sph->BuildKeywords($query, $index, $hits)
Generate keyword list for a given query
Returns undef on failure.
Returns an array of hashes, where each hash describes a word in the query with the following keys:
=over 4
=item * tokenized
Tokenised term from query
=item * normalized
Normalised term from query
=item * docs
Number of docs in which word was found (if $hits is true)
=item * hits
Number of occurrences of word (if $hits is true)
=back
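For example, a sketch requesting per-keyword statistics (the index name
is hypothetical):
    my $words = $sph->BuildKeywords('running shoes', 'myindex', 1);
    printf("%s => %s (%d docs, %d hits)\n",
           $_->{tokenized}, $_->{normalized}, $_->{docs}, $_->{hits})
        for @{ $words || [] };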
=cut
sub BuildKeywords {
my ( $self, $query, $index, $hits ) = @_;
my $fp = $self->_Connect() or return;
# v.1.0 req
my $req = pack("N/a*", $self->{_string_encoder}->($query) );
$req .= pack("N/a*", $index);
$req .= pack("N", $self->{_string_encoder}->($hits) );
##################
# send query, get response
##################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, $req);
$self->_Send($fp, $req);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_KEYWORDS );
return unless $response;
##################
# parse response
##################
my $p = 0;
my @res;
my $rlen = length($response);
my $nwords = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
for (my $i=0; $i < $nwords; $i++ ) {
- my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+ my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
- my $tokenized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
- $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+ my $tokenized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
+ $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
- my $normalized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
- my %data = ( tokenized => $tokenized, normalized => $normalized );
+ my $normalized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
+ my %data = ( tokenized => $tokenized, normalized => $normalized );
- if ($hits) {
- ( $data{docs}, $data{hits} ) = unpack("N*N*", substr($response,$p,8));
- $p += 8;
+ if ($hits) {
+ ( $data{docs}, $data{hits} ) = unpack("N*N*", substr($response,$p,8));
+ $p += 8;
- }
- push(@res, \%data);
+ }
+ push(@res, \%data);
}
if ( $p > $rlen ) {
- $self->_Error("incomplete reply");
- return;
+ $self->_Error("incomplete reply");
+ return;
}
return \@res;
}
=head2 EscapeString
$escaped = $sph->EscapeString('abcde!@#$%')
Inserts backslash before all non-word characters in the given string.
=cut
sub EscapeString {
my $self = shift;
return quotemeta(shift);
}
=head2 UpdateAttributes
$sph->UpdateAttributes($index, \@attrs, \%values);
$sph->UpdateAttributes($index, \@attrs, \%values, $mva);
Update specified attributes on specified documents
=over 4
=item index
Name of the index to be updated
=item attrs
Array of attribute name strings
=item values
A hash with key as document id, value as an array of new attribute values
=back
Returns number of actually updated documents (0 or more) on success
Returns undef on failure
Usage example:
$sph->UpdateAttributes("test1", [ qw/group_id/ ], { 1 => [ 456] }) );
=cut
sub UpdateAttributes {
my ($self, $index, $attrs, $values, $mva ) = @_;
croak("index is not defined") unless (defined $index);
croak("attrs must be an array") unless ref($attrs) eq "ARRAY";
for my $attr (@$attrs) {
- croak("attribute is not defined") unless (defined $attr);
+ croak("attribute is not defined") unless (defined $attr);
}
croak("values must be a hashref") unless ref($values) eq "HASH";
for my $id (keys %$values) {
- my $entry = $values->{$id};
- croak("value id $id is not numeric") unless ($id =~ /$num_re/);
- croak("value entry must be an array") unless ref($entry) eq "ARRAY";
- croak("size of values must match size of attrs") unless @$entry == @$attrs;
- for my $v (@$entry) {
- if ($mva) {
- croak("multi-valued entry $v is not an array") unless ref($v) eq 'ARRAY';
- for my $vv (@$v) {
- croak("array entry value $vv is not an integer") unless ($vv =~ /^(\d+)$/o);
- }
- }
- else {
- croak("entry value $v is not an integer") unless ($v =~ /^(\d+)$/o);
- }
- }
+ my $entry = $values->{$id};
+ croak("value id $id is not numeric") unless ($id =~ /$num_re/);
+ croak("value entry must be an array") unless ref($entry) eq "ARRAY";
+ croak("size of values must match size of attrs") unless @$entry == @$attrs;
+ for my $v (@$entry) {
+ if ($mva) {
+ croak("multi-valued entry $v is not an array") unless ref($v) eq 'ARRAY';
+ for my $vv (@$v) {
+ croak("array entry value $vv is not an integer") unless ($vv =~ /^(\d+)$/o);
+ }
+ } else {
+ croak("entry value $v is not an integer") unless ($v =~ /^(\d+)$/o);
+ }
+ }
}
## build request
my $req = pack ( "N/a*", $index);
$req .= pack ( "N", scalar @$attrs );
for my $attr (@$attrs) {
- $req .= pack ( "N/a*", $attr)
- . pack("N", $mva ? 1 : 0);
+ $req .= pack ( "N/a*", $attr)
+ . pack("N", $mva ? 1 : 0);
}
$req .= pack ( "N", scalar keys %$values );
foreach my $id (keys %$values) {
- my $entry = $values->{$id};
- $req .= $self->_sphPackU64($id);
- if ($mva) {
- for my $v ( @$entry ) {
- $req .= pack ( "N", @$v );
- for my $vv (@$v) {
- $req .= pack ("N", $vv);
- }
- }
- }
- else {
- for my $v ( @$entry ) {
- $req .= pack ( "N", $v );
- }
- }
+ my $entry = $values->{$id};
+ $req .= $self->_sphPackU64($id);
+ if ($mva) {
+ for my $v ( @$entry ) {
+                $req .= pack ( "N", scalar(@$v) );
+ for my $vv (@$v) {
+ $req .= pack ("N", $vv);
+ }
+ }
+ } else {
+ for my $v ( @$entry ) {
+ $req .= pack ( "N", $v );
+ }
+ }
}
## connect, send query, get response
my $fp = $self->_Connect() or return;
$req = pack ( "nnN/a*", SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, $req); ## add header
send ( $fp, $req, 0);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_UPDATE );
return unless $response;
## parse response
my ($updated) = unpack ( "N*", substr ( $response, 0, 4 ) );
return $updated;
}
=head2 Open
    $sph->Open()
Opens a persistent connection for subsequent queries.
To reduce the network connection overhead of making Sphinx queries, you can call
$sph->Open(), then run any number of queries, and call $sph->Close() when
finished.
Returns 1 on success, 0 on failure.
=cut
sub Open {
    my $self = shift;
    if ($self->{_socket}) {
        $self->_Error("already connected");
        return 0;
    }
    my $fp = $self->_Connect() or return 0;
    my $req = pack("nnNN", SEARCHD_COMMAND_PERSIST, 0, 4, 1);
    $self->_Send($fp, $req) or return 0;
    $self->{_socket} = $fp;
    return 1;
}
=head2 Close
$sph->Close()
Closes a persistent connection.
Returns 1 on success, 0 on failure.
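For example, a sketch that amortises connection setup over several
queries:
    if ($sph->Open) {
        my $r1 = $sph->Query('first query');
        my $r2 = $sph->Query('second query');
        $sph->Close;
    }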
=cut
sub Close {
my $self = shift;
if (! $self->{_socket}) {
- $self->_Error("not connected");
- return 0;
+ $self->_Error("not connected");
+ return 0;
}
close($self->{_socket});
$self->{_socket} = undef;
return 1;
}
=head2 Status
$status = $sph->Status()
Queries searchd status, and returns a hash of status variable name and value pairs.
Returns undef on failure.
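For example, a sketch printing all status counters (multi-column rows
come back as array references):
    my $status = $sph->Status or die $sph->GetLastError;
    for my $key (sort keys %$status) {
        my $val = $status->{$key};
        print "$key: ", ref($val) ? join(', ', @$val) : $val, "\n";
    }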
=cut
sub Status {
my $self = shift;
my $fp = $self->_Connect() or return;
my $req = pack("nnNN", SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, 1 ); # len=4, body=1
$self->_Send($fp, $req) or return;
my $response = $self->_GetResponse ( $fp, VER_COMMAND_STATUS );
return unless $response;
my $p = 0;
my ($rows, $cols) = unpack("N*N*", substr ( $response, $p, 8 ) ); $p += 8;
return {} unless $rows && $cols;
my %res;
for (1 .. $rows ) {
- my @entry;
- for ( 1 .. $cols) {
- my $len = unpack("N*", substr ( $response, $p, 4 ) ); $p += 4;
- push(@entry, $len ? substr ( $response, $p, $len ) : ""); $p += $len;
- }
- if ($cols <= 2) {
- $res{$entry[0]} = $entry[1];
- }
- else {
- my $name = shift @entry;
- $res{$name} = \@entry;
- }
+ my @entry;
+ for ( 1 .. $cols) {
+ my $len = unpack("N*", substr ( $response, $p, 4 ) ); $p += 4;
+ push(@entry, $len ? substr ( $response, $p, $len ) : ""); $p += $len;
+ }
+ if ($cols <= 2) {
+ $res{$entry[0]} = $entry[1];
+ } else {
+ my $name = shift @entry;
+ $res{$name} = \@entry;
+ }
}
return \%res;
}
=head1 SEE ALSO
L<http://www.sphinxsearch.com>
=head1 NOTES
There is (or was) a bundled Sphinx.pm in the contrib area of the Sphinx source
distribution, which was used as the starting point of Sphinx::Search.
Maintenance of that version appears to have lapsed at sphinx-0.9.7, so many of
the newer API calls are not available there. Sphinx::Search is mostly
compatible with the old Sphinx.pm except:
=over 4
=item On failure, Sphinx::Search returns undef rather than 0 or -1.
-=item Sphinx::Search 'Set' functions are cascadable, e.g. you can do
+=item Sphinx::Search 'Set' functions are cascadable, e.g. you can do
Sphinx::Search->new
->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms")
=back
Sphinx::Search also provides documentation and unit tests, which were the main
motivations for branching from the earlier work.
=head1 AUTHOR
Jon Schutz
=head1 BUGS
Please report any bugs or feature requests to
C<bug-sphinx-search at rt.cpan.org>, or through the web interface at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Sphinx-Search>.
I will be notified, and then you'll automatically be notified of progress on
your bug as I make changes.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Sphinx::Search
You can also look for information at:
=over 4
=item * AnnoCPAN: Annotated CPAN documentation
L<http://annocpan.org/dist/Sphinx-Search>
=item * CPAN Ratings
L<http://cpanratings.perl.org/d/Sphinx-Search>
=item * RT: CPAN's request tracker
L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Sphinx-Search>
=item * Search CPAN
L<http://search.cpan.org/dist/Sphinx-Search>
=back
=head1 ACKNOWLEDGEMENTS
This module is based on Sphinx.pm (not deployed to CPAN) for Sphinx version
0.9.7-rc1, by Len Kranendonk, which was in turn based on the Sphinx PHP API.
=head1 COPYRIGHT & LICENSE
Copyright 2007 Jon Schutz, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License.
=cut
1;
|
thijs/cl-sphinx-search
|
2cc86855531ac95e6daee04f7b4fb0f52950e380
|
Working on run-queries
|
diff --git a/PerlAPI.pm b/PerlAPI.pm
index 10d4ec5..acdb805 100644
--- a/PerlAPI.pm
+++ b/PerlAPI.pm
@@ -1,2149 +1,2143 @@
package Sphinx::Search;
use warnings;
use strict;
use base 'Exporter';
use Carp;
use Socket;
use Config;
use Math::BigInt;
use IO::Socket::INET;
use IO::Socket::UNIX;
use Encode qw/encode_utf8 decode_utf8/;
my $is_native64 = $Config{longsize} == 8 || defined $Config{use64bitint} || defined $Config{use64bitall};
-
+
=head1 NAME
Sphinx::Search - Sphinx search engine API Perl client
=head1 VERSION
Please note that you *MUST* install a version which is compatible with your version of Sphinx.
Use version 0.22 for Sphinx 0.9.9-rc2 and later (Please read the Compatibility Note under L<SetEncoders> regarding encoding changes)
Use version 0.15 for Sphinx 0.9.9-svn-r1674
Use version 0.12 for Sphinx 0.9.8
Use version 0.11 for Sphinx 0.9.8-rc1
Use version 0.10 for Sphinx 0.9.8-svn-r1112
Use version 0.09 for Sphinx 0.9.8-svn-r985
Use version 0.08 for Sphinx 0.9.8-svn-r871
Use version 0.06 for Sphinx 0.9.8-svn-r820
Use version 0.05 for Sphinx 0.9.8-cvs-20070907
Use version 0.02 for Sphinx 0.9.8-cvs-20070818
=cut
our $VERSION = '0.22';
=head1 SYNOPSIS
use Sphinx::Search;
$sphinx = Sphinx::Search->new();
$results = $sphinx->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms");
=head1 DESCRIPTION
This is the Perl API client for the Sphinx open-source SQL full-text indexing
search engine, L<http://www.sphinxsearch.com>.
=cut
# Constants to export.
-our @EXPORT = qw(
+our @EXPORT = qw(
SPH_MATCH_ALL SPH_MATCH_ANY SPH_MATCH_PHRASE SPH_MATCH_BOOLEAN SPH_MATCH_EXTENDED
SPH_MATCH_FULLSCAN SPH_MATCH_EXTENDED2
SPH_RANK_PROXIMITY_BM25 SPH_RANK_BM25 SPH_RANK_NONE SPH_RANK_WORDCOUNT
SPH_SORT_RELEVANCE SPH_SORT_ATTR_DESC SPH_SORT_ATTR_ASC SPH_SORT_TIME_SEGMENTS
SPH_SORT_EXTENDED SPH_SORT_EXPR
SPH_GROUPBY_DAY SPH_GROUPBY_WEEK SPH_GROUPBY_MONTH SPH_GROUPBY_YEAR SPH_GROUPBY_ATTR
SPH_GROUPBY_ATTRPAIR
);
# known searchd commands
use constant SEARCHD_COMMAND_SEARCH => 0;
use constant SEARCHD_COMMAND_EXCERPT => 1;
use constant SEARCHD_COMMAND_UPDATE => 2;
use constant SEARCHD_COMMAND_KEYWORDS => 3;
use constant SEARCHD_COMMAND_PERSIST => 4;
use constant SEARCHD_COMMAND_STATUS => 5;
# current client-side command implementation versions
use constant VER_COMMAND_SEARCH => 0x116;
use constant VER_COMMAND_EXCERPT => 0x100;
use constant VER_COMMAND_UPDATE => 0x102;
use constant VER_COMMAND_KEYWORDS => 0x100;
use constant VER_COMMAND_STATUS => 0x100;
# known searchd status codes
use constant SEARCHD_OK => 0;
use constant SEARCHD_ERROR => 1;
use constant SEARCHD_RETRY => 2;
use constant SEARCHD_WARNING => 3;
# known match modes
use constant SPH_MATCH_ALL => 0;
use constant SPH_MATCH_ANY => 1;
use constant SPH_MATCH_PHRASE => 2;
use constant SPH_MATCH_BOOLEAN => 3;
use constant SPH_MATCH_EXTENDED => 4;
use constant SPH_MATCH_FULLSCAN => 5;
use constant SPH_MATCH_EXTENDED2 => 6; # extended engine V2 (TEMPORARY, WILL BE REMOVED)
# known ranking modes (ext2 only)
use constant SPH_RANK_PROXIMITY_BM25 => 0; # default mode, phrase proximity major factor and BM25 minor one
use constant SPH_RANK_BM25 => 1; # statistical mode, BM25 ranking only (faster but worse quality)
use constant SPH_RANK_NONE => 2; # no ranking, all matches get a weight of 1
use constant SPH_RANK_WORDCOUNT => 3; # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
use constant SPH_RANK_PROXIMITY => 4;
use constant SPH_RANK_MATCHANY => 5;
# known sort modes
use constant SPH_SORT_RELEVANCE => 0;
use constant SPH_SORT_ATTR_DESC => 1;
use constant SPH_SORT_ATTR_ASC => 2;
use constant SPH_SORT_TIME_SEGMENTS => 3;
use constant SPH_SORT_EXTENDED => 4;
use constant SPH_SORT_EXPR => 5;
# known filter types
use constant SPH_FILTER_VALUES => 0;
use constant SPH_FILTER_RANGE => 1;
use constant SPH_FILTER_FLOATRANGE => 2;
# known attribute types
use constant SPH_ATTR_INTEGER => 1;
use constant SPH_ATTR_TIMESTAMP => 2;
use constant SPH_ATTR_ORDINAL => 3;
use constant SPH_ATTR_BOOL => 4;
use constant SPH_ATTR_FLOAT => 5;
use constant SPH_ATTR_BIGINT => 6;
use constant SPH_ATTR_MULTI => 0x40000000;
# known grouping functions
use constant SPH_GROUPBY_DAY => 0;
use constant SPH_GROUPBY_WEEK => 1;
use constant SPH_GROUPBY_MONTH => 2;
use constant SPH_GROUPBY_YEAR => 3;
use constant SPH_GROUPBY_ATTR => 4;
use constant SPH_GROUPBY_ATTRPAIR => 5;
# Floating point number matching expression
my $num_re = qr/^-?\d*\.?\d*(?:[eE][+-]?\d+)?$/;
# portably pack numeric to 64 signed bits, network order
sub _sphPackI64 {
my $self = shift;
my $v = shift;
# x64 route
my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
return pack ( "NN", $i>>32, $i & 4294967295 );
}
# portably pack numeric to 64 unsigned bits, network order
sub _sphPackU64 {
my $self = shift;
my $v = shift;
my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
return pack ( "NN", $i>>32, $i & 4294967295 );
}
sub _sphPackI64array {
my $self = shift;
my $values = shift || [];
my $s = pack("N", scalar @$values);
$s .= $self->_sphPackI64($_) for @$values;
return $s;
}
# portably unpack 64 unsigned bits, network order to numeric
-sub _sphUnpackU64
+sub _sphUnpackU64
{
my $self = shift;
my $v = shift;
my ($h,$l) = unpack ( "N*N*", $v );
# x64 route
return ($h<<32) + $l if $is_native64;
# x32 route, BigInt
$h = Math::BigInt->new($h);
$h->blsft(32)->badd($l);
-
+
return $h->bstr;
}
# portably unpack 64 signed bits, network order to numeric
-sub _sphUnpackI64
+sub _sphUnpackI64
{
my $self = shift;
my $v = shift;
my ($h,$l) = unpack ( "N*N*", $v );
my $neg = ($h & 0x80000000) ? 1 : 0;
# x64 route
if ( $is_native64 ) {
return -(~(($h<<32) + $l) + 1) if $neg;
return ($h<<32) + $l;
}
# x32 route, BigInt
if ($neg) {
$h = ~$h;
$l = ~$l;
}
my $x = Math::BigInt->new($h);
$x->blsft(32)->badd($l);
$x->binc()->bneg() if $neg;
return $x->bstr;
}
=head1 CONSTRUCTOR
=head2 new
$sph = Sphinx::Search->new;
$sph = Sphinx::Search->new(\%options);
Create a new Sphinx::Search instance.
OPTIONS
=over 4
=item log
Specify an optional logger instance. This can be any class that provides error,
warn, info, and debug methods (e.g. see L<Log::Log4perl>). Logging is disabled
if no logger instance is provided.
=item debug
Debug flag. If set (and a logger instance is specified), debugging messages
will be generated.
=back
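For example, a sketch enabling debug output via Log::Log4perl (assuming
that module is installed; any logger providing error, warn, info, and
debug methods works):
    use Log::Log4perl qw/:easy/;
    Log::Log4perl->easy_init($DEBUG);
    my $sph = Sphinx::Search->new({ log => Log::Log4perl->get_logger, debug => 1 });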
=cut
# create a new client object and fill defaults
sub new {
my ($class, $options) = @_;
my $self = {
# per-client-object settings
_host => 'localhost',
_port => 3312,
_path => undef,
_socket => undef,
# per-query settings
_offset => 0,
_limit => 20,
_mode => SPH_MATCH_ALL,
_weights => [],
_sort => SPH_SORT_RELEVANCE,
_sortby => "",
_min_id => 0,
_max_id => 0,
_filters => [],
_groupby => "",
_groupdistinct => "",
_groupfunc => SPH_GROUPBY_DAY,
_groupsort => '@group desc',
_maxmatches => 1000,
_cutoff => 0,
_retrycount => 0,
_retrydelay => 0,
_anchor => undef,
_indexweights => undef,
_ranker => SPH_RANK_PROXIMITY_BM25,
_maxquerytime => 0,
_fieldweights => {},
_overrides => {},
_select => q{*},
# per-reply fields (for single-query case)
_error => '',
_warning => '',
_connerror => '',
-
+
# request storage (for multi-query case)
_reqs => [],
_timeout => 0,
_string_encoder => \&encode_utf8,
_string_decoder => \&decode_utf8,
};
bless $self, ref($class) || $class;
- # These options are supported in the constructor, but not recommended
+ # These options are supported in the constructor, but not recommended
# since there is no validation. Use the Set* methods instead.
my %legal_opts = map { $_ => 1 } qw/host port offset limit mode weights sort sortby groupby groupbyfunc maxmatches cutoff retrycount retrydelay log debug string_encoder string_decoder/;
for my $opt (keys %$options) {
$self->{'_' . $opt} = $options->{$opt} if $legal_opts{$opt};
}
# Disable debug unless we have something to log to
$self->{_debug} = 0 unless $self->{_log};
return $self;
}
=head1 METHODS
=cut
sub _Error {
my ($self, $msg) = @_;
$self->{_error} = $msg;
$self->{_log}->error($msg) if $self->{_log};
}
=head2 GetLastError
$error = $sph->GetLastError;
Get last error message (string)
=cut
sub GetLastError {
my $self = shift;
return $self->{_error};
}
sub _Warning {
my ($self, $msg) = @_;
$self->{_warning} = $msg;
$self->{_log}->warn($msg) if $self->{_log};
}
=head2 GetLastWarning
$warning = $sph->GetLastWarning;
Get last warning message (string)
=cut
sub GetLastWarning {
my $self = shift;
return $self->{_warning};
}
-=head2 IsConnectError
+=head2 IsConnectError
Check connection error flag (to differentiate between network connection errors
and bad responses). Returns true value on connection error.
=cut
sub IsConnectError {
return shift->{_connerror};
}
=head2 SetEncoders
$sph->SetEncoders(\&encode_function, \&decode_function)
COMPATIBILITY NOTE: SetEncoders() was introduced in version 0.17.
Prior to that, all strings were considered to be sequences of bytes
which may have led to issues with multi-byte characters. If you were
previously encoding/decoding strings external to Sphinx::Search, you
will need to disable encoding/decoding by setting Sphinx::Search to
use raw values as explained below (or modify your code and let
Sphinx::Search do the recoding).
Set the string encoder/decoder functions for transferring strings
between perl and Sphinx. The encoder should take the perl internal
representation and convert to the bytestream that searchd expects, and
the decoder should take the bytestream returned by searchd and convert to
perl format.
The searchd format will depend on the 'charset_type' index setting in
the Sphinx configuration file.
The coders default to encode_utf8 and decode_utf8 respectively, which
are compatible with the 'utf8' charset_type.
If either the encoder or decoder functions are left undefined in the
-call to SetEncoders, they return to their default values.
+call to SetEncoders, they return to their default values.
If you wish to send raw values (no encoding/decoding), supply a
-function that simply returns its argument, e.g.
+function that simply returns its argument, e.g.
$sph->SetEncoders( sub { shift }, sub { shift });
Returns $sph.
=cut
sub SetEncoders {
my $self = shift;
my $encoder = shift;
my $decoder = shift;
$self->{_string_encoder} = $encoder ? $encoder : \&encode_utf8;
$self->{_string_decoder} = $decoder ? $decoder : \&decode_utf8;
-
+
return $self;
}
=head2 SetServer
$sph->SetServer($host, $port);
$sph->SetServer($path, $port);
In the first form, sets the host (string) and port (integer) details for the
searchd server using a network (INET) socket.
In the second form, where $path is a local filesystem path (optionally prefixed
by 'unix://'), sets the client to access the searchd server via a local (UNIX
domain) socket at the specified path.
Returns $sph.
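For example (the socket path is hypothetical):
    $sph->SetServer('localhost', 3312);              # TCP socket
    $sph->SetServer('unix:///var/run/searchd.sock'); # UNIX-domain socket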
=cut
sub SetServer {
my $self = shift;
my $host = shift;
my $port = shift;
croak("host is not defined") unless defined($host);
$self->{_path} = $host, return $self if substr($host, 0, 1) eq '/';
$self->{_path} = substr($host, 7), return $self if substr($host, 0, 7) eq 'unix://';
-
+
croak("port is not an integer") unless defined($port) && $port =~ m/^\d+$/o;
$self->{_host} = $host;
$self->{_port} = $port;
$self->{_path} = undef;
return $self;
}
=head2 SetConnectTimeout
$sph->SetConnectTimeout($timeout)
Set server connection timeout (in seconds).
Returns $sph.
=cut
sub SetConnectTimeout {
my $self = shift;
my $timeout = shift;
croak("timeout is not numeric") unless ($timeout =~ m/$num_re/);
$self->{_timeout} = $timeout;
return $self;
}
sub _Send {
my $self = shift;
my $fp = shift;
my $data = shift;
$self->{_log}->debug("Writing to socket") if $self->{_debug};
if ($fp->eof || ! $fp->write($data)) {
$self->_Error("connection unexpectedly closed (timed out?): $!");
$self->{_connerror} = 1;
return 0;
}
return 1;
}
# connect to searchd server
sub _Connect {
my $self = shift;
-
+
return $self->{_socket} if $self->{_socket};
my $debug = $self->{_debug};
my $str_dest = $self->{_path} ? 'unix://' . $self->{_path} : "$self->{_host}:$self->{_port}";
$self->{_log}->debug("Connecting to $str_dest") if $debug;
# connect socket
$self->{_connerror} = q{};
my $fp;
my %params = (); # ( Blocking => 0 );
$params{Timeout} = $self->{_timeout} if $self->{_timeout};
if ($self->{_path}) {
$fp = IO::Socket::UNIX->new( Peer => $self->{_path},
%params,
);
}
else {
$fp = IO::Socket::INET->new( PeerPort => $self->{_port},
PeerAddr => $self->{_host},
Proto => 'tcp',
%params,
);
}
if (! $fp) {
$self->_Error("Failed to open connection to $str_dest: $!");
$self->{_connerror} = 1;
return 0;
}
binmode($fp, ':bytes');
# check version
my $buf = '';
$fp->read($buf, 4) or do {
$self->_Error("Failed on initial read from $str_dest: $!");
$self->{_connerror} = 1;
return 0;
};
my $v = unpack("N*", $buf);
$v = int($v);
$self->{_log}->debug("Got version $v from searchd") if $debug;
if ($v < 1) {
close($fp);
$self->_Error("expected searchd protocol version 1+, got version '$v'");
return 0;
}
$self->{_log}->debug("Sending version") if $debug;
# All ok, send my version
$self->_Send($fp, pack("N", 1)) or return 0;
$self->{_log}->debug("Connection complete") if $debug;
return $fp;
}
#-------------------------------------------------------------
# get and check response packet from searchd server
sub _GetResponse {
my $self = shift;
my $fp = shift;
my $client_ver = shift;
my $header;
defined($fp->read($header, 8, 0)) or do {
$self->_Error("read failed: $!");
return 0;
};
my ($status, $ver, $len ) = unpack("n2N", $header);
my $response = q{};
my $lasterror = q{};
my $lentotal = 0;
while (my $rlen = $fp->read(my $chunk, $len)) {
$lasterror = $!, last if $rlen < 0;
$response .= $chunk;
$lentotal += $rlen;
last if $lentotal >= $len;
}
close($fp) unless $self->{_socket};
# check response
if ( length($response) != $len ) {
- $self->_Error( $len
+ $self->_Error( $len
? "failed to read searchd response (status=$status, ver=$ver, len=$len, read=". length($response) . ", last error=$lasterror)"
: "received zero-sized searchd response");
return 0;
}
# check status
if ( $status==SEARCHD_WARNING ) {
my ($wlen) = unpack ( "N*", substr ( $response, 0, 4 ) );
$self->_Warning(substr ( $response, 4, $wlen ));
return substr ( $response, 4+$wlen );
}
if ( $status==SEARCHD_ERROR ) {
$self->_Error("searchd error: " . substr ( $response, 4 ));
return 0;
}
if ( $status==SEARCHD_RETRY ) {
$self->_Error("temporary searchd error: " . substr ( $response, 4 ));
return 0;
}
if ( $status!=SEARCHD_OK ) {
$self->_Error("unknown status code '$status'");
return 0;
}
# check version
if ( $ver<$client_ver ) {
$self->_Warning(sprintf ( "searchd command v.%d.%d older than client's v.%d.%d, some options might not work",
$ver>>8, $ver&0xff, $client_ver>>8, $client_ver&0xff ));
}
return $response;
}
=head2 SetLimits
$sph->SetLimits($offset, $limit);
$sph->SetLimits($offset, $limit, $max);
Set match offset/limits, and optionally the max number of matches to return.
Returns $sph.
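For example, to fetch the third page of ten results while capping the server-side match pool:
    # skip 20 matches, return the next 10, collect at most 1000 server-side
    $sph->SetLimits(20, 10, 1000);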
=cut
sub SetLimits {
my $self = shift;
my $offset = shift;
my $limit = shift;
my $max = shift || 0;
croak("offset should be an integer >= 0") unless ($offset =~ /^\d+$/ && $offset >= 0) ;
croak("limit should be an integer >= 0") unless ($limit =~ /^\d+$/ && $limit >= 0);
$self->{_offset} = $offset;
$self->{_limit} = $limit;
if($max > 0) {
$self->{_maxmatches} = $max;
}
return $self;
}
=head2 SetMaxQueryTime
$sph->SetMaxQueryTime($millisec);
Set maximum query time, in milliseconds, per index.
The value may not be negative; 0 means "do not limit".
Returns $sph.
=cut
sub SetMaxQueryTime {
my $self = shift;
my $max = shift;
croak("max value should be an integer >= 0") unless ($max =~ /^\d+$/ && $max >= 0) ;
$self->{_maxquerytime} = $max;
return $self;
}
=head2 SetMatchMode
$sph->SetMatchMode($mode);
Set match mode, which may be one of:
=over 4
=item * SPH_MATCH_ALL
Match all words
-=item * SPH_MATCH_ANY
+=item * SPH_MATCH_ANY
Match any words
-=item * SPH_MATCH_PHRASE
+=item * SPH_MATCH_PHRASE
Exact phrase match
-=item * SPH_MATCH_BOOLEAN
+=item * SPH_MATCH_BOOLEAN
Boolean match, using AND (&), OR (|), NOT (!,-) and parenthetic grouping.
-=item * SPH_MATCH_EXTENDED
+=item * SPH_MATCH_EXTENDED
Extended match, which includes the Boolean syntax plus field, phrase and
proximity operators.
=back
Returns $sph.
=cut
sub SetMatchMode {
my $self = shift;
my $mode = shift;
croak("Match mode not defined") unless defined($mode);
- croak("Unknown matchmode: $mode") unless ( $mode==SPH_MATCH_ALL
- || $mode==SPH_MATCH_ANY
- || $mode==SPH_MATCH_PHRASE
- || $mode==SPH_MATCH_BOOLEAN
- || $mode==SPH_MATCH_EXTENDED
- || $mode==SPH_MATCH_FULLSCAN
+ croak("Unknown matchmode: $mode") unless ( $mode==SPH_MATCH_ALL
+ || $mode==SPH_MATCH_ANY
+ || $mode==SPH_MATCH_PHRASE
+ || $mode==SPH_MATCH_BOOLEAN
+ || $mode==SPH_MATCH_EXTENDED
+ || $mode==SPH_MATCH_FULLSCAN
|| $mode==SPH_MATCH_EXTENDED2 );
$self->{_mode} = $mode;
return $self;
}
=head2 SetRankingMode
$sph->SetRankingMode(SPH_RANK_BM25);
Set ranking mode, which may be one of:
=over 4
-=item * SPH_RANK_PROXIMITY_BM25
+=item * SPH_RANK_PROXIMITY_BM25
Default mode, phrase proximity major factor and BM25 minor one
-=item * SPH_RANK_BM25
+=item * SPH_RANK_BM25
Statistical mode, BM25 ranking only (faster but worse quality)
-=item * SPH_RANK_NONE
+=item * SPH_RANK_NONE
No ranking, all matches get a weight of 1
-=item * SPH_RANK_WORDCOUNT
+=item * SPH_RANK_WORDCOUNT
Simple word-count weighting, rank is a weighted sum of per-field keyword
occurence counts
=back
Returns $sph.
=cut
sub SetRankingMode {
my $self = shift;
my $ranker = shift;
croak("Unknown ranking mode: $ranker") unless ( $ranker==SPH_RANK_PROXIMITY_BM25
|| $ranker==SPH_RANK_BM25
|| $ranker==SPH_RANK_NONE
|| $ranker==SPH_RANK_WORDCOUNT
|| $ranker==SPH_RANK_PROXIMITY );
$self->{_ranker} = $ranker;
return $self;
}
-
+
=head2 SetSortMode
$sph->SetSortMode(SPH_SORT_RELEVANCE);
$sph->SetSortMode($mode, $sortby);
Set sort mode, which may be any of:
=over 4
=item SPH_SORT_RELEVANCE - sort by relevance
=item SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC
Sort by attribute descending/ascending. $sortby specifies the sorting attribute.
=item SPH_SORT_TIME_SEGMENTS
Sort by time segments (last hour/day/week/month) in descending order, and then
by relevance in descending order. $sortby specifies the time attribute.
=item SPH_SORT_EXTENDED
Sort by SQL-like syntax. $sortby is the sorting specification.
=item SPH_SORT_EXPR
Sort by an arithmetic expression; $sortby is the expression.
=back
Returns $sph.
=cut
sub SetSortMode {
my $self = shift;
my $mode = shift;
my $sortby = shift || "";
croak("Sort mode not defined") unless defined($mode);
croak("Unknown sort mode: $mode") unless ( $mode == SPH_SORT_RELEVANCE
|| $mode == SPH_SORT_ATTR_DESC
- || $mode == SPH_SORT_ATTR_ASC
+ || $mode == SPH_SORT_ATTR_ASC
|| $mode == SPH_SORT_TIME_SEGMENTS
|| $mode == SPH_SORT_EXTENDED
|| $mode == SPH_SORT_EXPR
);
croak("Sortby must be defined") unless ($mode==SPH_SORT_RELEVANCE || length($sortby));
$self->{_sort} = $mode;
$self->{_sortby} = $sortby;
return $self;
}
=head2 SetWeights
-
+
$sph->SetWeights([ 1, 2, 3, 4]);
This method is deprecated. Use L<SetFieldWeights> instead.
Set per-field (integer) weights. The ordering of the weights corresponds to the
ordering of fields as indexed.
Returns $sph.
=cut
sub SetWeights {
my $self = shift;
my $weights = shift;
croak("Weights is not an array reference") unless (ref($weights) eq 'ARRAY');
foreach my $weight (@$weights) {
croak("Weight: $weight is not an integer") unless ($weight =~ /^\d+$/);
}
$self->{_weights} = $weights;
return $self;
}
=head2 SetFieldWeights
-
+
$sph->SetFieldWeights(\%weights);
Set per-field (integer) weights by field name. The weights hash provides field
name to weight mappings.
Takes precedence over L<SetWeights>.
Unknown names will be silently ignored. Missing fields will be given a weight of 1.
Returns $sph.
=cut
sub SetFieldWeights {
my $self = shift;
my $weights = shift;
croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
foreach my $field (keys %$weights) {
croak("Weight: $weights->{$field} is not an integer >= 0") unless ($weights->{$field} =~ /^\d+$/);
}
$self->{_fieldweights} = $weights;
return $self;
}
=head2 SetIndexWeights
-
+
$sph->SetIndexWeights(\%weights);
Set per-index (integer) weights. The weights hash is a mapping of index name to integer weight.
Returns $sph.
=cut
sub SetIndexWeights {
my $self = shift;
my $weights = shift;
croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
foreach (keys %$weights) {
croak("IndexWeight $_: $weights->{$_} is not an integer") unless ($weights->{$_} =~ /^\d+$/);
}
$self->{_indexweights} = $weights;
return $self;
}
=head2 SetIDRange
$sph->SetIDRange($min, $max);
Set an ID range: only match those records whose document ID
is between $min and $max (including $min and $max)
Returns $sph.
=cut
sub SetIDRange {
my $self = shift;
my $min = shift;
my $max = shift;
croak("min_id is not numeric") unless ($min =~ m/$num_re/);
croak("max_id is not numeric") unless ($max =~ m/$num_re/);
croak("min_id is larger than or equal to max_id") unless ($min < $max);
$self->{_min_id} = $min;
$self->{_max_id} = $max;
return $self;
}
=head2 SetFilter
$sph->SetFilter($attr, \@values);
$sph->SetFilter($attr, \@values, $exclude);
Sets the results to be filtered on the given attribute. Only results which have
attributes matching the given (numeric) values will be returned.
This may be called multiple times with different attributes to select on
multiple attributes.
If 'exclude' is set, excludes results that match the filter.
Returns $sph.
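A short sketch, with illustrative attribute names:
    # only return documents whose group_id is 1, 2 or 3 ...
    $sph->SetFilter('group_id', [ 1, 2, 3 ]);
    # ... and exclude any document whose region_id is 42
    $sph->SetFilter('region_id', [ 42 ], 1);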
=cut
sub SetFilter {
my ($self, $attribute, $values, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("values is not an array reference") unless (ref($values) eq 'ARRAY');
croak("values reference is empty") unless (scalar(@$values));
foreach my $value (@$values) {
croak("value $value is not numeric") unless ($value =~ m/$num_re/);
}
push(@{$self->{_filters}}, {
type => SPH_FILTER_VALUES,
attr => $attribute,
values => $values,
exclude => $exclude ? 1 : 0,
});
return $self;
}
=head2 SetFilterRange
$sph->SetFilterRange($attr, $min, $max);
$sph->SetFilterRange($attr, $min, $max, $exclude);
Sets the results to be filtered on a range of values for the given
attribute. Only those records where $attr column value is between $min and $max
(including $min and $max) will be returned.
If 'exclude' is set, excludes results that fall within the given range.
Returns $sph.
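For instance, with a hypothetical integer timestamp attribute:
    # match documents published in 2008 (inclusive bounds)
    $sph->SetFilterRange('published', 1199145600, 1230767999);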
=cut
sub SetFilterRange {
my ($self, $attribute, $min, $max, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("min: $min is not an integer") unless ($min =~ m/$num_re/);
croak("max: $max is not an integer") unless ($max =~ m/$num_re/);
croak("min value should be <= max") unless ($min <= $max);
push(@{$self->{_filters}}, {
type => SPH_FILTER_RANGE,
attr => $attribute,
min => $min,
max => $max,
exclude => $exclude ? 1 : 0,
});
return $self;
}
-=head2 SetFilterFloatRange
+=head2 SetFilterFloatRange
$sph->SetFilterFloatRange($attr, $min, $max, $exclude);
Same as L<SetFilterRange>, but allows floating point values.
Returns $sph.
=cut
sub SetFilterFloatRange {
my ($self, $attribute, $min, $max, $exclude) = @_;
croak("attribute is not defined") unless (defined $attribute);
croak("min: $min is not numeric") unless ($min =~ m/$num_re/);
croak("max: $max is not numeric") unless ($max =~ m/$num_re/);
croak("min value should be <= max") unless ($min <= $max);
push(@{$self->{_filters}}, {
type => SPH_FILTER_FLOATRANGE,
attr => $attribute,
min => $min,
max => $max,
exclude => $exclude ? 1 : 0,
});
return $self;
}
=head2 SetGeoAnchor
$sph->SetGeoAnchor($attrlat, $attrlong, $lat, $long);
Set up the anchor point for geosphere distance calculations in filters and sorting.
Distances will be computed with respect to this point.
=over 4
=item $attrlat is the name of latitude attribute
=item $attrlong is the name of longitude attribute
=item $lat is anchor point latitude, in radians
=item $long is anchor point longitude, in radians
=back
Returns $sph.
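A sketch assuming the core Math::Trig module for degree-to-radian conversion (attribute names and coordinates are illustrative):
    use Math::Trig qw(deg2rad);
    $sph->SetGeoAnchor('lat_attr', 'long_attr',
                       deg2rad(37.7749), deg2rad(-122.4194));
    # the computed distance can then be used in range filters and sort clauses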
=cut
sub SetGeoAnchor {
my ($self, $attrlat, $attrlong, $lat, $long) = @_;
croak("attrlat is not defined") unless defined $attrlat;
croak("attrlong is not defined") unless defined $attrlong;
croak("lat: $lat is not numeric") unless ($lat =~ m/$num_re/);
croak("long: $long is not numeric") unless ($long =~ m/$num_re/);
- $self->{_anchor} = {
- attrlat => $attrlat,
- attrlong => $attrlong,
+ $self->{_anchor} = {
+ attrlat => $attrlat,
+ attrlong => $attrlong,
lat => $lat,
long => $long,
};
return $self;
}
=head2 SetGroupBy
$sph->SetGroupBy($attr, $func);
$sph->SetGroupBy($attr, $func, $groupsort);
Sets attribute and function of results grouping.
In grouping mode, all matches are assigned to different groups based on grouping
function value. Each group keeps track of the total match count, and the best
match (in this group) according to current sorting function. The final result
set contains one best match per group, with grouping function value and matches
count attached.
$attr is any valid attribute. Use L<ResetGroupBy> to disable grouping.
$func is one of:
=over 4
=item * SPH_GROUPBY_DAY
Group by day (assumes timestamp type attribute of form YYYYMMDD)
=item * SPH_GROUPBY_WEEK
Group by week (assumes timestamp type attribute of form YYYYNNN)
=item * SPH_GROUPBY_MONTH
Group by month (assumes timestamp type attribute of form YYYYMM)
=item * SPH_GROUPBY_YEAR
Group by year (assumes timestamp type attribute of form YYYY)
=item * SPH_GROUPBY_ATTR
Group by attribute value
=item * SPH_GROUPBY_ATTRPAIR
Group by two attributes, being the given attribute and the attribute that
immediately follows it in the sequence of indexed attributes. The specified
attribute may therefore not be the last of the indexed attributes.
=back
Groups in the set of results can be sorted by any SQL-like sorting clause,
including both document attributes and the following special internal Sphinx
attributes:
=over 4
=item @id - document ID;
=item @weight, @rank, @relevance - match weight;
=item @group - group by function value;
=item @count - number of matches in group.
=back
The default mode is to sort by groupby value in descending order,
ie. by "@group desc".
In the results set, "total_found" contains the total amount of matching groups
over the whole index.
WARNING: grouping is done in fixed memory and thus its results
are only approximate; so there might be more groups reported
in total_found than actually present. @count might also
-be underestimated.
+be underestimated.
For example, if sorting by relevance and grouping by a "published"
attribute with SPH_GROUPBY_DAY function, then the result set will
contain only the most relevant match for each day when there were any
matches published, with day number and per-day match count attached,
and sorted by day number in descending order (ie. recent days first).
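That example as a call, reusing the hypothetical 'published' attribute:
    # one best match per publication day, most recent days first
    $sph->SetGroupBy('published', SPH_GROUPBY_DAY, '@group desc');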
=cut
sub SetGroupBy {
my $self = shift;
my $attribute = shift;
my $func = shift;
my $groupsort = shift || '@group desc';
croak("attribute is not defined") unless (defined $attribute);
croak("Unknown grouping function: $func") unless ($func==SPH_GROUPBY_DAY
|| $func==SPH_GROUPBY_WEEK
|| $func==SPH_GROUPBY_MONTH
|| $func==SPH_GROUPBY_YEAR
|| $func==SPH_GROUPBY_ATTR
|| $func==SPH_GROUPBY_ATTRPAIR
);
$self->{_groupby} = $attribute;
$self->{_groupfunc} = $func;
$self->{_groupsort} = $groupsort;
return $self;
}
=head2 SetGroupDistinct
$sph->SetGroupDistinct($attr);
Set count-distinct attribute for group-by queries
=cut
sub SetGroupDistinct {
my $self = shift;
my $attribute = shift;
croak("attribute is not defined") unless (defined $attribute);
$self->{_groupdistinct} = $attribute;
return $self;
}
=head2 SetRetries
$sph->SetRetries($count, $delay);
Set distributed retry count and delay
=cut
sub SetRetries {
my $self = shift;
my $count = shift;
my $delay = shift || 0;
croak("count: $count is not an integer >= 0") unless ($count =~ /^\d+$/o && $count >= 0);
croak("delay: $delay is not an integer >= 0") unless ($delay =~ /^\d+$/o && $delay >= 0);
$self->{_retrycount} = $count;
$self->{_retrydelay} = $delay;
return $self;
}
=head2 SetOverride
$sph->SetOverride($attrname, $attrtype, $values);
Set attribute values override. There can be only one override per attribute.
$values must be a hash that maps document IDs to attribute values
=cut
sub SetOverride {
my $self = shift;
my $attrname = shift;
my $attrtype = shift;
my $values = shift;
croak("attribute name is not defined") unless defined $attrname;
croak("Uknown attribute type: $attrtype") unless ($attrtype == SPH_ATTR_INTEGER
|| $attrtype == SPH_ATTR_TIMESTAMP
|| $attrtype == SPH_ATTR_BOOL
|| $attrtype == SPH_ATTR_FLOAT
|| $attrtype == SPH_ATTR_BIGINT);
$self->{_overrides}->{$attrname} = { attr => $attrname,
type => $attrtype,
values => $values,
};
-
+
return $self;
}
-=head2 SetSelect
+=head2 SetSelect
$sph->SetSelect($select)
Set select list (attributes or expressions). SQL-like syntax.
=cut
sub SetSelect {
my $self = shift;
$self->{_select} = shift;
return $self;
}
=head2 ResetFilters
$sph->ResetFilters;
Clear all filters.
=cut
sub ResetFilters {
my $self = shift;
$self->{_filters} = [];
$self->{_anchor} = undef;
return $self;
}
=head2 ResetGroupBy
$sph->ResetGroupBy;
Clear all group-by settings (for multi-queries)
=cut
sub ResetGroupBy {
my $self = shift;
$self->{_groupby} = "";
$self->{_groupfunc} = SPH_GROUPBY_DAY;
$self->{_groupsort} = '@group desc';
$self->{_groupdistinct} = "";
return $self;
}
=head2 ResetOverrides
Clear all attribute value overrides (for multi-queries)
=cut
sub ResetOverrides {
my $self = shift;
$self->{_overrides} = {};
return $self;
}
=head2 Query
$results = $sph->Query($query, $index);
Connect to searchd server and run given search query.
=over 4
=item query is query string
=item index is index name to query, default is "*" which means to query all indexes. Use a space or comma separated list to search multiple indexes.
=back
Returns undef on failure
Returns a hash with the following keys on success:
=over 4
=item matches
-
+
Array containing hashes with found documents ( "doc", "weight", "group", "stamp" )
-
+
=item total
Total amount of matches retrieved (up to SPH_MAX_MATCHES, see sphinx.h)
=item total_found
-
+
Total amount of matching documents in index
-
+
=item time
-
+
Search time
=item words
-
+
Hash which maps query terms (stemmed!) to ( "docs", "hits" ) hash
=back
Returns the result set hash on success, undef on error.
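A typical call and result walk-through, using the keys documented above (index name is illustrative):
    my $results = $sph->Query('search terms', 'myindex')
        or die $sph->GetLastError;
    print "found $results->{total_found} document(s)\n";
    for my $match (@{ $results->{matches} }) {
        print "doc=$match->{doc} weight=$match->{weight}\n";
    }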
=cut
sub Query {
my $self = shift;
my $query = shift;
my $index = shift || '*';
my $comment = shift || '';
croak("_reqs is not empty") unless @{$self->{_reqs}} == 0;
$self->AddQuery($query, $index, $comment);
my $results = $self->RunQueries or return;
$self->_Error($results->[0]->{error}) if $results->[0]->{error};
$self->_Warning($results->[0]->{warning}) if $results->[0]->{warning};
return if $results->[0]->{status} && $results->[0]->{status} == SEARCHD_ERROR;
return $results->[0];
}
# helper to pack floats in network byte order
sub _PackFloat {
my $f = shift;
my $t1 = pack ( "f", $f ); # machine order
my $t2 = unpack ( "L*", $t1 ); # int in machine order
return pack ( "N", $t2 );
}
=head2 AddQuery
$sph->AddQuery($query, $index);
Add a query to a batch request.
Batch queries enable searchd to perform internal optimizations where
possible, and reduce network connection overheads in all cases.
For instance, running exactly the same query with different
group-by settings lets searchd perform the expensive
full-text search and ranking operation only once, while computing
multiple group-by results from its output.
Parameters are exactly the same as in Query() call.
Returns the index into the results array returned by a subsequent RunQueries() call.
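As a sketch, the same search grouped two different ways in one round trip (attribute names are illustrative):
    $sph->SetGroupBy('site_id', SPH_GROUPBY_ATTR);
    my $by_site = $sph->AddQuery('search terms');
    $sph->SetGroupBy('published', SPH_GROUPBY_DAY);
    my $by_day  = $sph->AddQuery('search terms');
    my $results = $sph->RunQueries or die $sph->GetLastError;
    # $results->[$by_site] and $results->[$by_day] hold the two result sets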
=cut
sub AddQuery {
my $self = shift;
my $query = shift;
my $index = shift || '*';
my $comment = shift || '';
##################
# build request
##################
my $req;
$req = pack ( "NNNNN", $self->{_offset}, $self->{_limit}, $self->{_mode}, $self->{_ranker}, $self->{_sort} ); # mode and limits
$req .= pack ( "N/a*", $self->{_sortby});
$req .= pack ( "N/a*", $self->{_string_encoder}->($query) ); # query itself
$req .= pack ( "N*", scalar(@{$self->{_weights}}), @{$self->{_weights}});
$req .= pack ( "N/a*", $index); # indexes
- $req .= pack ( "N", 1)
+ $req .= pack ( "N", 1)
. $self->_sphPackU64($self->{_min_id})
. $self->_sphPackU64($self->{_max_id}); # id64 range
# filters
$req .= pack ( "N", scalar @{$self->{_filters}} );
foreach my $filter (@{$self->{_filters}}) {
$req .= pack ( "N/a*", $filter->{attr});
$req .= pack ( "N", $filter->{type});
my $t = $filter->{type};
if ($t == SPH_FILTER_VALUES) {
$req .= $self->_sphPackI64array($filter->{values});
- }
- elsif ($t == SPH_FILTER_RANGE) {
+ } elsif ($t == SPH_FILTER_RANGE) {
$req .= $self->_sphPackI64($filter->{min}) . $self->_sphPackI64($filter->{max});
- }
- elsif ($t == SPH_FILTER_FLOATRANGE) {
+ } elsif ($t == SPH_FILTER_FLOATRANGE) {
$req .= _PackFloat ( $filter->{"min"} ) . _PackFloat ( $filter->{"max"} );
- }
- else {
+ } else {
croak("Unhandled filter type $t");
}
$req .= pack ( "N", $filter->{exclude});
- }
+}
# group-by clause, max-matches count, group-sort clause, cutoff count
$req .= pack ( "NN/a*", $self->{_groupfunc}, $self->{_groupby} );
$req .= pack ( "N", $self->{_maxmatches} );
$req .= pack ( "N/a*", $self->{_groupsort});
$req .= pack ( "NNN", $self->{_cutoff}, $self->{_retrycount}, $self->{_retrydelay} );
$req .= pack ( "N/a*", $self->{_groupdistinct});
if (!defined $self->{_anchor}) {
- $req .= pack ( "N", 0);
- }
- else {
- my $a = $self->{_anchor};
- $req .= pack ( "N", 1);
- $req .= pack ( "N/a*", $a->{attrlat});
- $req .= pack ( "N/a*", $a->{attrlong});
- $req .= _PackFloat($a->{lat}) . _PackFloat($a->{long});
+ $req .= pack ( "N", 0);
+ } else {
+ my $a = $self->{_anchor};
+ $req .= pack ( "N", 1);
+ $req .= pack ( "N/a*", $a->{attrlat});
+ $req .= pack ( "N/a*", $a->{attrlong});
+ $req .= _PackFloat($a->{lat}) . _PackFloat($a->{long});
}
# per-index weights
$req .= pack( "N", scalar keys %{$self->{_indexweights}});
$req .= pack ( "N/a*N", $_, $self->{_indexweights}->{$_} ) for keys %{$self->{_indexweights}};
# max query time
$req .= pack ( "N", $self->{_maxquerytime} );
# per-field weights
$req .= pack ( "N", scalar keys %{$self->{_fieldweights}} );
$req .= pack ( "N/a*N", $_, $self->{_fieldweights}->{$_}) for keys %{$self->{_fieldweights}};
# comment
$req .= pack ( "N/a*", $comment);
# attribute overrides
$req .= pack ( "N", scalar keys %{$self->{_overrides}} );
for my $entry (values %{$self->{_overrides}}) {
- $req .= pack ("N/a*", $entry->{attr})
- . pack ("NN", $entry->{type}, scalar keys %{$entry->{values}});
- for my $id (keys %{$entry->{values}}) {
- croak "Attribute value key is not numeric" unless $id =~ m/$num_re/;
- my $v = $entry->{values}->{$id};
- croak "Attribute value key is not numeric" unless $v =~ m/$num_re/;
- $req .= $self->_sphPackU64($id);
- if ($entry->{type} == SPH_ATTR_FLOAT) {
- $req .= $self->_packfloat($v);
- }
- elsif ($entry->{type} == SPH_ATTR_BIGINT) {
- $req .= $self->_sphPackI64($v);
- }
- else {
- $req .= pack("N", $v);
- }
- }
+ $req .= pack ("N/a*", $entry->{attr})
+ . pack ("NN", $entry->{type}, scalar keys %{$entry->{values}});
+ for my $id (keys %{$entry->{values}}) {
+ croak "Attribute value key is not numeric" unless $id =~ m/$num_re/;
+ my $v = $entry->{values}->{$id};
+ croak "Attribute value key is not numeric" unless $v =~ m/$num_re/;
+ $req .= $self->_sphPackU64($id);
+ if ($entry->{type} == SPH_ATTR_FLOAT) {
+ $req .= $self->_packfloat($v);
+ } elsif ($entry->{type} == SPH_ATTR_BIGINT) {
+ $req .= $self->_sphPackI64($v);
+ } else {
+ $req .= pack("N", $v);
+ }
+ }
}
-
+
# select list
$req .= pack("N/a*", $self->{_select} || '');
push(@{$self->{_reqs}}, $req);
return scalar $#{$self->{_reqs}};
}
=head2 RunQueries
$sph->RunQueries
Run batch of queries, as added by AddQuery.
Returns undef on network IO failure.
Returns a reference to an array of result sets on success.
Each result set in the returned array is a hash which contains
the same keys as the hash returned by L<Query>, plus:
-=over 4
+=over 4
-=item * error
+=item * error
Errors, if any, for this query.
=item * warnings
-
+
Any warnings associated with the query.
=back
=cut
sub RunQueries {
my $self = shift;
unless (@{$self->{_reqs}}) {
$self->_Error("no queries defined, issue AddQuery() first");
return;
}
my $fp = $self->_Connect() or do { $self->{_reqs} = []; return };
##################
# send query, get response
##################
my $nreqs = @{$self->{_reqs}};
my $req = pack("Na*", $nreqs, join("", @{$self->{_reqs}}));
$req = pack ( "nnN/a*", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, $req); # add header
$self->_Send($fp, $req);
$self->{_reqs} = [];
-
+
my $response = $self->_GetResponse ( $fp, VER_COMMAND_SEARCH );
return unless $response;
##################
# parse response
##################
my $p = 0;
my $max = length($response); # Protection from broken response
my @results;
for (my $ires = 0; $ires < $nreqs; $ires++) {
my $result = {}; # Empty hash ref
push(@results, $result);
$result->{matches} = []; # Empty array ref
$result->{error} = "";
$result->{warnings} = "";
# extract status
my $status = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
if ($status != SEARCHD_OK) {
my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $message = substr ( $response, $p, $len ); $p += $len;
if ($status == SEARCHD_WARNING) {
$result->{warning} = $message;
}
else {
$result->{error} = $message;
next;
- }
+ }
}
# read schema
my @fields;
my (%attrs, @attr_list);
my $nfields = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
while ( $nfields-->0 && $p<$max ) {
my $len = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
push(@fields, substr ( $response, $p, $len )); $p += $len;
}
$result->{"fields"} = \@fields;
my $nattrs = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
while ( $nattrs-->0 && $p<$max ) {
my $len = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
my $attr = substr ( $response, $p, $len ); $p += $len;
my $type = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
$attrs{$attr} = $type;
push(@attr_list, $attr);
}
$result->{"attrs"} = \%attrs;
# read match count
my $count = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
my $id64 = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
# read matches
while ( $count-->0 && $p<$max ) {
my $data = {};
if ($id64) {
$data->{doc} = $self->_sphUnpackU64(substr($response, $p, 8)); $p += 8;
$data->{weight} = unpack("N*", substr($response, $p, 4)); $p += 4;
}
else {
( $data->{doc}, $data->{weight} ) = unpack("N*N*", substr($response,$p,8));
$p += 8;
}
foreach my $attr (@attr_list) {
if ($attrs{$attr} == SPH_ATTR_BIGINT) {
$data->{$attr} = $self->_sphUnpackI64(substr($response, $p, 8)); $p += 8;
next;
}
if ($attrs{$attr} == SPH_ATTR_FLOAT) {
my $uval = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
$data->{$attr} = [ unpack("f*", pack("L", $uval)) ];
next;
}
my $val = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
if ($attrs{$attr} & SPH_ATTR_MULTI) {
my $nvalues = $val;
$data->{$attr} = [];
while ($nvalues-->0 && $p < $max) {
$val = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
push(@{$data->{$attr}}, $val);
}
}
else {
$data->{$attr} = $val;
}
}
push(@{$result->{matches}}, $data);
}
my $words;
($result->{total}, $result->{total_found}, $result->{time}, $words) = unpack("N*N*N*N*", substr($response, $p, 16));
$result->{time} = sprintf ( "%.3f", $result->{"time"}/1000 );
$p += 16;
while ( $words-->0 && $p < $max) {
- my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
+ my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
$p += 4;
- my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
+ my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
$p += $len;
my ($docs, $hits) = unpack ("N*N*", substr($response, $p, 8));
$p += 8;
$result->{words}{$word} = {
"docs" => $docs,
"hits" => $hits
};
}
}
return \@results;
}
=head2 BuildExcerpts
$excerpts = $sph->BuildExcerpts($docs, $index, $words, $opts)
Generate document excerpts for the specified documents.
=over 4
-=item docs
+=item docs
An array reference of strings which represent the document
contents
-=item index
+=item index
A string specifying the index whose settings will be used
for stemming, lexing and case folding
-=item words
+=item words
A string which contains the words to highlight
-=item opts
+=item opts
A hash which contains additional optional highlighting parameters:
=over 4
=item before_match - a string to insert before a set of matching words, default is "<b>"
=item after_match - a string to insert after a set of matching words, default is "</b>"
=item chunk_separator - a string to insert between excerpts chunks, default is " ... "
=item limit - max excerpt size in symbols (codepoints), default is 256
=item around - how many words to highlight around each match, default is 5
=item exact_phrase - whether to highlight exact phrase matches only, default is false
=item single_passage - whether to extract single best passage only, default is false
=item use_boundaries - whether to additionally break passages at phrase boundary characters, default is false
=item weight_order - whether to sort the extracted passages by relevance (decreasing weight) rather than in document order, default is false
=back
=back
Returns undef on failure.
Returns an array ref of string excerpts on success.
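For example (document bodies and index name are illustrative):
    my $excerpts = $sph->BuildExcerpts(
        [ 'this is my test text to be highlighted', 'another test document' ],
        'myindex',
        'test text',
        { before_match => '<em>', after_match => '</em>' },
    ) or die $sph->GetLastError;
    print "$_\n" for @$excerpts;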
=cut
sub BuildExcerpts {
my ($self, $docs, $index, $words, $opts) = @_;
$opts ||= {};
- croak("BuildExcepts() called with incorrect parameters")
- unless (ref($docs) eq 'ARRAY'
- && defined($index)
- && defined($words)
+ croak("BuildExcepts() called with incorrect parameters")
+ unless (ref($docs) eq 'ARRAY'
+ && defined($index)
+ && defined($words)
&& ref($opts) eq 'HASH');
my $fp = $self->_Connect() or return;
##################
# fixup options
##################
$opts->{"before_match"} ||= "<b>";
$opts->{"after_match"} ||= "</b>";
$opts->{"chunk_separator"} ||= " ... ";
$opts->{"limit"} ||= 256;
$opts->{"around"} ||= 5;
$opts->{"exact_phrase"} ||= 0;
$opts->{"single_passage"} ||= 0;
$opts->{"use_boundaries"} ||= 0;
$opts->{"weight_order"} ||= 0;
##################
# build request
##################
# v.1.0 req
my $req;
my $flags = 1; # remove spaces
$flags |= 2 if ( $opts->{"exact_phrase"} );
$flags |= 4 if ( $opts->{"single_passage"} );
$flags |= 8 if ( $opts->{"use_boundaries"} );
$flags |= 16 if ( $opts->{"weight_order"} );
$req = pack ( "NN", 0, $flags ); # mode=0, flags=$flags
$req .= pack ( "N/a*", $index ); # req index
$req .= pack ( "N/a*", $self->{_string_encoder}->($words)); # req words
# options
$req .= pack ( "N/a*", $opts->{"before_match"});
$req .= pack ( "N/a*", $opts->{"after_match"});
$req .= pack ( "N/a*", $opts->{"chunk_separator"});
$req .= pack ( "N", int($opts->{"limit"}) );
$req .= pack ( "N", int($opts->{"around"}) );
# documents
$req .= pack ( "N", scalar(@$docs) );
foreach my $doc (@$docs) {
croak('BuildExcerpts: Found empty document in $docs') unless ($doc);
$req .= pack("N/a*", $self->{_string_encoder}->($doc));
}
##########################
# send query, get response
##########################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, $req); # add header
$self->_Send($fp, $req);
-
+
my $response = $self->_GetResponse($fp, VER_COMMAND_EXCERPT);
return unless $response;
my ($pos, $i) = (0, 0);
my $res = []; # Empty array ref
my $rlen = length($response);
for ( $i=0; $i< scalar(@$docs); $i++ ) {
my $len = unpack ( "N*", substr ( $response, $pos, 4 ) );
$pos += 4;
if ( $pos+$len > $rlen ) {
$self->_Error("incomplete reply");
return;
}
push(@$res, $self->{_string_decoder}->( substr ( $response, $pos, $len ) ));
$pos += $len;
}
return $res;
}
=head2 BuildKeywords
$results = $sph->BuildKeywords($query, $index, $hits)
Generate keyword list for a given query
Returns undef on failure.
Returns an array of hashes, where each hash describes a word in the query with the following keys:
=over 4
-=item * tokenized
+=item * tokenized
Tokenised term from query
-=item * normalized
+=item * normalized
Normalised term from query
-=item * docs
+=item * docs
Number of docs in which word was found (if $hits is true)
-=item * hits
+=item * hits
Number of occurrences of word (if $hits is true)
=back
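For example, with per-keyword statistics enabled (query and index name are illustrative):
    my $keywords = $sph->BuildKeywords('running tests', 'myindex', 1)
        or die $sph->GetLastError;
    for my $kw (@$keywords) {
        printf "%s -> %s (%d docs, %d hits)\n",
            $kw->{tokenized}, $kw->{normalized}, $kw->{docs}, $kw->{hits};
    }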
=cut
sub BuildKeywords {
my ( $self, $query, $index, $hits ) = @_;
my $fp = $self->_Connect() or return;
# v.1.0 req
my $req = pack("N/a*", $self->{_string_encoder}->($query) );
$req .= pack("N/a*", $index);
$req .= pack("N", $self->{_string_encoder}->($hits) );
##################
# send query, get response
##################
$req = pack ( "nnN/a*", SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, $req);
$self->_Send($fp, $req);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_KEYWORDS );
return unless $response;
##################
# parse response
##################
my $p = 0;
my @res;
my $rlen = length($response);
my $nwords = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
for (my $i=0; $i < $nwords; $i++ ) {
my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $tokenized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
$len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
my $normalized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
my %data = ( tokenized => $tokenized, normalized => $normalized );
-
+
if ($hits) {
( $data{docs}, $data{hits} ) = unpack("N*N*", substr($response,$p,8));
$p += 8;
-
+
}
push(@res, \%data);
}
if ( $p > $rlen ) {
$self->_Error("incomplete reply");
return;
}
return \@res;
}
=head2 EscapeString
$escaped = $sph->EscapeString('abcde!@#$%')
Inserts a backslash before all non-word characters in the given string.
=cut
sub EscapeString {
my $self = shift;
return quotemeta(shift);
}
=head2 UpdateAttributes
$sph->UpdateAttributes($index, \@attrs, \%values);
$sph->UpdateAttributes($index, \@attrs, \%values, $mva);
Update specified attributes on specified documents
=over 4
-=item index
+=item index
Name of the index to be updated
-=item attrs
+=item attrs
Array of attribute name strings
-=item values
+=item values
A hash with key as document id, value as an array of new attribute values
=back
Returns number of actually updated documents (0 or more) on success
Returns undef on failure
Usage example:
$sph->UpdateAttributes("test1", [ qw/group_id/ ], { 1 => [ 456] }) );
=cut
sub UpdateAttributes {
my ($self, $index, $attrs, $values, $mva ) = @_;
croak("index is not defined") unless (defined $index);
croak("attrs must be an array") unless ref($attrs) eq "ARRAY";
for my $attr (@$attrs) {
croak("attribute is not defined") unless (defined $attr);
}
croak("values must be a hashref") unless ref($values) eq "HASH";
for my $id (keys %$values) {
my $entry = $values->{$id};
croak("value id $id is not numeric") unless ($id =~ /$num_re/);
croak("value entry must be an array") unless ref($entry) eq "ARRAY";
croak("size of values must match size of attrs") unless @$entry == @$attrs;
for my $v (@$entry) {
if ($mva) {
croak("multi-valued entry $v is not an array") unless ref($v) eq 'ARRAY';
for my $vv (@$v) {
croak("array entry value $vv is not an integer") unless ($vv =~ /^(\d+)$/o);
}
}
- else {
+ else {
croak("entry value $v is not an integer") unless ($v =~ /^(\d+)$/o);
}
}
}
## build request
my $req = pack ( "N/a*", $index);
$req .= pack ( "N", scalar @$attrs );
for my $attr (@$attrs) {
$req .= pack ( "N/a*", $attr)
. pack("N", $mva ? 1 : 0);
}
$req .= pack ( "N", scalar keys %$values );
foreach my $id (keys %$values) {
my $entry = $values->{$id};
$req .= $self->_sphPackU64($id);
if ($mva) {
for my $v ( @$entry ) {
$req .= pack ( "N", @$v );
for my $vv (@$v) {
$req .= pack ("N", $vv);
}
}
}
else {
for my $v ( @$entry ) {
$req .= pack ( "N", $v );
}
}
}
## connect, send query, get response
my $fp = $self->_Connect() or return;
$req = pack ( "nnN/a*", SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, $req); ## add header
send ( $fp, $req, 0);
my $response = $self->_GetResponse ( $fp, VER_COMMAND_UPDATE );
return unless $response;
## parse response
my ($updated) = unpack ( "N*", substr ( $response, 0, 4 ) );
return $updated;
}
=head2 Open
$sph->Open()
-Opens a persistent connection for subsequent queries.
+Opens a persistent connection for subsequent queries.
To reduce the network connection overhead of making Sphinx queries, you can call
$sph->Open(), then run any number of queries, and call $sph->Close() when
finished.
Returns 1 on success, 0 on failure.
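For example:
    $sph->Open or die $sph->GetLastError;
    my $first  = $sph->Query('first search');
    my $second = $sph->Query('second search');
    $sph->Close;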
-=cut
+=cut
sub Open {
my $self = shift;
if ($self->{_socket}) {
$self->_Error("already connected");
return 0;
}
my $fp = $self->_Connect() or return 0;
my $req = pack("nnNN", SEARCHD_COMMAND_PERSIST, 0, 4, 1);
$self->_Send($fp, $req) or return 0;
$self->{_socket} = $fp;
return 1;
}
=head2 Close
$sph->Close()
Closes a persistent connection.
Returns 1 on success, 0 on failure.
-=cut
+=cut
sub Close {
my $self = shift;
if (! $self->{_socket}) {
$self->_Error("not connected");
return 0;
}
-
+
close($self->{_socket});
$self->{_socket} = undef;
return 1;
}
=head2 Status
$status = $sph->Status()
-Queries searchd status, and returns a hash of status variable name and value pairs.
+Queries searchd status, and returns a hash of status variable name and value pairs.
Returns undef on failure.
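A sketch that prints the returned table:
    my $status = $sph->Status or die $sph->GetLastError;
    # values for rows with more than two columns are array refs
    print "$_: $status->{$_}\n" for sort keys %$status;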
=cut
sub Status {
-
+
my $self = shift;
my $fp = $self->_Connect() or return;
-
+
my $req = pack("nnNN", SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, 1 ); # len=4, body=1
$self->_Send($fp, $req) or return;
my $response = $self->_GetResponse ( $fp, VER_COMMAND_STATUS );
return unless $response;
my $p = 0;
my ($rows, $cols) = unpack("N*N*", substr ( $response, $p, 8 ) ); $p += 8;
return {} unless $rows && $cols;
my %res;
for (1 .. $rows ) {
my @entry;
for ( 1 .. $cols) {
my $len = unpack("N*", substr ( $response, $p, 4 ) ); $p += 4;
push(@entry, $len ? substr ( $response, $p, $len ) : ""); $p += $len;
}
if ($cols <= 2) {
$res{$entry[0]} = $entry[1];
}
else {
my $name = shift @entry;
$res{$name} = \@entry;
}
}
return \%res;
}
-
+
=head1 SEE ALSO
L<http://www.sphinxsearch.com>
=head1 NOTES
There is (or was) a bundled Sphinx.pm in the contrib area of the Sphinx source
distribution, which was used as the starting point of Sphinx::Search.
Maintenance of that version appears to have lapsed at sphinx-0.9.7, so many of
the newer API calls are not available there. Sphinx::Search is mostly
compatible with the old Sphinx.pm except:
=over 4
=item On failure, Sphinx::Search returns undef rather than 0 or -1.
=item Sphinx::Search 'Set' functions are cascadable, e.g. you can do
Sphinx::Search->new
->SetMatchMode(SPH_MATCH_ALL)
->SetSortMode(SPH_SORT_RELEVANCE)
->Query("search terms")
=back
Sphinx::Search also provides documentation and unit tests, which were the main
motivations for branching from the earlier work.
=head1 AUTHOR
Jon Schutz
=head1 BUGS
Please report any bugs or feature requests to
C<bug-sphinx-search at rt.cpan.org>, or through the web interface at
L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Sphinx-Search>.
I will be notified, and then you'll automatically be notified of progress on
your bug as I make changes.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Sphinx::Search
You can also look for information at:
=over 4
=item * AnnoCPAN: Annotated CPAN documentation
L<http://annocpan.org/dist/Sphinx-Search>
=item * CPAN Ratings
L<http://cpanratings.perl.org/d/Sphinx-Search>
=item * RT: CPAN's request tracker
L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Sphinx-Search>
=item * Search CPAN
L<http://search.cpan.org/dist/Sphinx-Search>
=back
=head1 ACKNOWLEDGEMENTS
This module is based on Sphinx.pm (not deployed to CPAN) for Sphinx version
0.9.7-rc1, by Len Kranendonk, which was in turn based on the Sphinx PHP API.
=head1 COPYRIGHT & LICENSE
Copyright 2007 Jon Schutz, all rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License.
=cut
1;
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index f61d13e..4edc4b8 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,321 +1,383 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((%host
:accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(%port
:accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(%path
:accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(%socket
:accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters; a list of hashes")
(group-by
:accessor group-by
:initarg :group-by
:initform ""
:documentation "group-by attribute name")
(group-function
:accessor group-function
:initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(group-sort
:accessor group-sort
:initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(group-distinct
:accessor group-distinct
:initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
(max-matches
:accessor max-matches
:initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
- :initform ()
+ :initform 0
:documentation "cutoff to stop searching at")
(retry-count
:accessor retry-count
:initarg :retry-count
:initform 0
:documentation "distributed retry count")
(retry-delay
:accessor retry-delay
:initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
(index-weights
:accessor index-weights
:initarg :index-weights
- :initform ()
+ :initform (make-hash-table)
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(max-query-time
:accessor max-query-time
:initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(field-weights
:accessor field-weights
:initarg :field-weights
- :initform ()
+ :initform (make-hash-table)
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
- :initform ()
+ :initform (make-hash-table)
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
(format t "~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (%path client) host)
(setf (%host client) ())
(setf (%port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (%path client) (subseq host 6 (length host)))
(setf (%host client) ())
(setf (%port client) ()))
(t
(format t "~s : ~s" host port)
(assert (numberp port))
(setf (%host client) host)
(setf (%port client) port)
(setf (%path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((%socket client))
((%path client)
(setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (%path client)))))
(t
(setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (%host client)
:remote-port (%port client)))))
(let ((v (unpack "N*" (read-from (%socket client) 4))))
(if (< v 1)
(progn
(close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (%socket client)
(string-to-octets (pack "N" 1) :encoding :utf-8))
(format t "~a~%" v)
(%socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
(format t "~a~%" rec)
(let ((res
(octets-to-string
(coerce rec
'(vector (unsigned-byte 8)))
:encoding :utf-8)))
(format t "res: ~a~%" res)
res)))
-(defmethod %get-response ((client sphinx-client) &key client-version)
- (multiple-value-bind (status version len) (unpack "n2N" (read-from (%socket client) 8))
+(defmethod %get-response ((client sphinx-client) &key fp client-version)
+ (multiple-value-bind (status version len) (unpack "n2N" (read-from fp 8))
(format t "~a : ~a : ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (< left 0)
(return))
- (let ((chunk (read-from (%socket client) left)))
+ (let ((chunk (read-from fp left)))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
(- left (length chunk)))
(return))))
(let ((done (length response)))
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
+(defmethod run-queries ((client sphinx-client))
+ (assert (> (length (reqs client)) 0))
+ (let* ((requests (pack "Na*" (length (reqs client)) (reqs client)))
+ (data (pack "nnN/a*" +searchd-command-search+ +ver-command-search+ requests)))
+ (setf (reqs client) ())
+ (let ((fp (%connect client)))
+ (when fp
+ (%send client :fp fp :data data)
+ (let ((response (%get-response client :fp fp :client-version +ver-command-search+)))
+ (format t "~a~%" response))))))
+
+
+(defmethod %send ((client sphinx-client) &key fp data)
+ (format t "Writing to socket ~a~%" fp)
+ (sockets:send-to fp (string-to-octets data :encoding :utf-8)))
+
+
(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
(let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
(pack "N/a*" query)
(pack "N*" (length (weights client)) (weights client))
(pack "N/a*" index)
(pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
(pack "N" (length (filters client)))
- (map #'(lambda (filter)
- (concatenate 'string
- (pack "N/a*" (gethash 'attr filter))
- (let ((type (gethash 'type filter)))
- (concatenate 'string
- (pack "N" type)
- (cond ((eql type +sph-filter-values+)
- (pack-array-signed-quads (get-hash 'values filter)))
- ((eql type +sph-filter-range+)
- (concatenate 'string (pack "q>" (get-hash 'min filter))
- (pack "q>" (get-hash 'max filter))))
- ((eql type +sph-filter-floatrange+)
- (concatenate 'string (pack-float (get-hash 'min filter))
- (pack-float (get-hash 'max filter))))
- (t
- (error "Unhandled filter type ~S" type)))
- (pack "N" (get-hash 'exclude filter))))))
- (filters client))
+ (%pack-filters (filters client))
(pack "NN/a*" (group-function client) (group-by client))
(pack "N" (max-matches client))
(pack "N/a*" (group-sort client))
(pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
(pack "N/a*" (group-distinct client))
(cond ((anchor client)
(concatenate 'string
(pack "N/a*" (first (anchor client)))
(pack "N/a*" (third (anchor client)))
- (pack-float (second (anchor client)))
- (pack-float (last (anchor client)))))
+ (%pack-float (second (anchor client)))
+ (%pack-float (last (anchor client)))))
(t
(pack "N" 0)))
+ (%pack-hash (index-weights client))
+ (pack "N" (max-query-time client))
+ (%pack-hash (field-weights client))
+ (pack "N/a*" comment)
+ (pack "N" (hash-table-count (overrides client)))
+ (%pack-overrides (overrides client))
+ (pack "N/a*" (if (select client)
+ (select client)
+ "")))))
+ (format t "req is: ~a~%" req)
+ (setf (reqs client) (append (reqs client) (list req))))
+ (length (reqs client)))
+
+
+(defun %pack-overrides (overrides)
+ (when (hash-table-p overrides)
+ (maphash #'(lambda (k entry)
+ (concatenate 'string
+ (pack "N/a*" (get-hash 'attr entry))
+ (pack "NN" (get-hash 'type entry) (hash-table-count (get-hash 'values entry)))
+ (maphash #'(lambda (id v)
+ (concatenate 'string
+ (assert (and (numberp id) (numberp v)))
+ (pack "Q>" id)
+ (cond ((eql (get-hash 'type entry) +sph-attr-float+)
+ (%pack-float v))
+ ((eql (get-hash 'type entry) +sph-attr-bigint+)
+ (pack "q>" v))
+ (t
+ (pack "N" v)))))
+ (get-hash 'values entry))))
+ overrides)))
+
+(defun %pack-filters (filters)
+ (map 'string #'(lambda (filter)
+ (when (hash-table-p filter)
+ (concatenate 'string
+ (pack "N/a*" (gethash 'attr filter))
+ (let ((type (gethash 'type filter)))
+ (concatenate 'string
+ (pack "N" type)
+ (cond ((eql type +sph-filter-values+)
+ (%pack-array-signed-quads (get-hash 'values filter)))
+ ((eql type +sph-filter-range+)
+ (concatenate 'string (pack "q>" (get-hash 'min filter))
+ (pack "q>" (get-hash 'max filter))))
+ ((eql type +sph-filter-floatrange+)
+ (concatenate 'string (%pack-float (get-hash 'min filter))
+ (%pack-float (get-hash 'max filter))))
+ (t
+ (error "Unhandled filter type ~S" type)))
+ (pack "N" (get-hash 'exclude filter)))))))
+ filters))
-(defun pack-array-signed-quads (values-list)
+(defun %pack-hash (hash-table)
+ (when (hash-table-count hash-table)
+ (concatenate 'string
+ (pack "N" (hash-table-count hash-table))
+ (maphash #'(lambda (k v)
+ (pack "N/a*N" k v))
+ hash-table))))
+
+
+(defun %pack-array-signed-quads (values-list)
(concatenate 'string
(pack "N" (length values-list))
(map #'(lambda (value)
(pack "q>" value)) values-list)))
-(defun pack-float (float-value)
+(defun %pack-float (float-value)
(pack "N" (unpack "L*" (pack "f" float-value))))
|
thijs/cl-sphinx-search
|
728659a770ae90427b0f3a7a501272b27261d298
|
Working on add-query
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 1465acc..f61d13e 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,275 +1,321 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
- ((sphinx-host
- :accessor sphinx-host
+ ((%host
+ :accessor %host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
- (sphinx-port
- :accessor sphinx-port
+ (%port
+ :accessor %port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
- (sphinx-path
- :accessor sphinx-path
+ (%path
+ :accessor %path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
- (sphinx-socket
- :accessor sphinx-socket
+ (%socket
+ :accessor %socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
- :documentation "search filters")
- (groupby
- :accessor groupby
- :initarg :groupby
+ :documentation "search filters; a list of hashes")
+ (group-by
+ :accessor group-by
+ :initarg :group-by
:initform ""
:documentation "group-by attribute name")
- (groupfunc
- :accessor groupfunc
- :initarg :groupfunc
+ (group-function
+ :accessor group-function
+ :initarg :group-function
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
- (groupsort
- :accessor groupsort
- :initarg :groupsort
+ (group-sort
+ :accessor group-sort
+ :initarg :group-sort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
- (groupdistinct
- :accessor groupdistinct
- :initarg :groupdistinct
+ (group-distinct
+ :accessor group-distinct
+ :initarg :group-distinct
:initform ""
:documentation "group-by count-distinct attribute")
- (maxmatches
- :accessor maxmatches
- :initarg :maxmatches
+ (max-matches
+ :accessor max-matches
+ :initarg :max-matches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform ()
:documentation "cutoff to stop searching at")
- (retrycount
- :accessor retrycount
- :initarg :retrycount
+ (retry-count
+ :accessor retry-count
+ :initarg :retry-count
:initform 0
:documentation "distributed retry count")
- (retrydelay
- :accessor retrydelay
- :initarg :retrydelay
+ (retry-delay
+ :accessor retry-delay
+ :initarg :retry-delay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
- :documentation "geographical anchor point")
- (indexweights
- :accessor indexweights
- :initarg :indexweights
+ :documentation "geographical anchor point; fixed length list with '(attrlat lat attrlon lon)")
+ (index-weights
+ :accessor index-weights
+ :initarg :index-weights
:initform ()
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
- (maxquerytime
- :accessor maxquerytime
- :initarg :maxquerytime
+ (max-query-time
+ :accessor max-query-time
+ :initarg :max-query-time
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
- (fieldweights
- :accessor fieldweights
- :initarg :fieldweights
+ (field-weights
+ :accessor field-weights
+ :initarg :field-weights
:initform ()
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
(format t "~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
- (setf (sphinx-path client) host)
- (setf (sphinx-host client) ())
- (setf (sphinx-port client) ()))
+ (setf (%path client) host)
+ (setf (%host client) ())
+ (setf (%port client) ()))
((string= host "unix://" :start1 0 :end1 7)
- (setf (sphinx-path client) (subseq host 6 (length host)))
- (setf (sphinx-host client) ())
- (setf (sphinx-port client) ()))
+         (setf (%path client) (subseq host 7 (length host)))
+ (setf (%host client) ())
+ (setf (%port client) ()))
(t
(format t "~s : ~s" host port)
(assert (numberp port))
- (setf (sphinx-host client) host)
- (setf (sphinx-port client) port)
- (setf (sphinx-path client) ()))))
+ (setf (%host client) host)
+ (setf (%port client) port)
+ (setf (%path client) ()))))
(defmethod %connect ((client sphinx-client))
- (cond ((sphinx-socket client))
- ((sphinx-path client)
- (setf (sphinx-socket client)
+ (cond ((%socket client))
+ ((%path client)
+ (setf (%socket client)
(sockets:make-socket :address-family :local :type :stream
- :local-filename (namestring (sphinx-path client)))))
+ :local-filename (namestring (%path client)))))
(t
- (setf (sphinx-socket client)
+ (setf (%socket client)
(sockets:make-socket :address-family :internet :type :stream
- :remote-host (sphinx-host client)
- :remote-port (sphinx-port client)))))
- (let ((v (unpack "N*" (read-from (sphinx-socket client) 4))))
+ :remote-host (%host client)
+ :remote-port (%port client)))))
+ (let ((v (unpack "N*" (read-from (%socket client) 4))))
(if (< v 1)
(progn
- (close (sphinx-socket client))
+ (close (%socket client))
(setf (last-error client) "connection to socket failed"))
(progn
- (sockets:send-to (sphinx-socket client)
- (string-to-octets (pack "N" 1) :external-format :utf-8))
+ (sockets:send-to (%socket client)
+ (string-to-octets (pack "N" 1) :encoding :utf-8))
(format t "~a~%" v)
- (sphinx-socket client)))))
+ (%socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
(format t "~a~%" rec)
(let ((res
(octets-to-string
(coerce rec
'(vector (unsigned-byte 8)))
- :external-format :utf-8)))
+ :encoding :utf-8)))
(format t "res: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
- (multiple-value-bind (status version len) (unpack "n2N" (read-from (sphinx-socket client) 8))
+ (multiple-value-bind (status version len) (unpack "n2N" (read-from (%socket client) 8))
(format t "~a : ~a : ~a~%" status version len)
(let ((response ())
(left len))
(loop
         (when (<= left 0)
           (return))
- (let ((chunk (read-from (sphinx-socket client) left)))
+ (let ((chunk (read-from (%socket client) left)))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
(- left (length chunk)))
(return))))
(let ((done (length response)))
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
- (setf (maxmatches client) max))
+ (setf (max-matches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
- (let ((req (concatenate 'string
+ (let ((req (concatenate 'string
(pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
(pack "N/a*" (sort-by client))
- ;;(pack "N/a*" (string-to-octets query))
- (pack "N*" (length (weights client)) (weights client)))))
- req))
+ (pack "N/a*" query)
+ (pack "N*" (length (weights client)) (weights client))
+ (pack "N/a*" index)
+ (pack "N" 1) (pack "Q>" (min-id client)) (pack "Q>" (max-id client))
+ (pack "N" (length (filters client)))
+                          ;; each filter is a hash table with keys attr, type,
+                          ;; values/min/max and exclude
+                          (apply #'concatenate 'string
+                                 (mapcar
+                                  #'(lambda (filter)
+                                      (concatenate 'string
+                                                   (pack "N/a*" (gethash 'attr filter))
+                                                   (let ((type (gethash 'type filter)))
+                                                     (concatenate 'string
+                                                                  (pack "N" type)
+                                                                  (cond ((eql type +sph-filter-values+)
+                                                                         (pack-array-signed-quads (gethash 'values filter)))
+                                                                        ((eql type +sph-filter-range+)
+                                                                         (concatenate 'string
+                                                                                      (pack "q>" (gethash 'min filter))
+                                                                                      (pack "q>" (gethash 'max filter))))
+                                                                        ((eql type +sph-filter-floatrange+)
+                                                                         (concatenate 'string
+                                                                                      (pack-float (gethash 'min filter))
+                                                                                      (pack-float (gethash 'max filter))))
+                                                                        (t
+                                                                         (error "Unhandled filter type ~S" type)))
+                                                                  (pack "N" (gethash 'exclude filter))))))
+                                  (filters client)))
+ (pack "NN/a*" (group-function client) (group-by client))
+ (pack "N" (max-matches client))
+ (pack "N/a*" (group-sort client))
+ (pack "NNN" (cutoff client) (retry-count client) (retry-delay client))
+ (pack "N/a*" (group-distinct client))
+                          ;; anchor is a fixed-length list (attrlat lat attrlon lon);
+                          ;; pack a presence flag first, as the Perl API does
+                          (cond ((anchor client)
+                                 (concatenate 'string
+                                              (pack "N" 1)
+                                              (pack "N/a*" (first (anchor client)))
+                                              (pack "N/a*" (third (anchor client)))
+                                              (pack-float (second (anchor client)))
+                                              (pack-float (fourth (anchor client)))))
+                                (t
+                                 (pack "N" 0))))))
+    req))
+
+
+
+
+;; pack a length-prefixed array of 64-bit signed big-endian integers
+(defun pack-array-signed-quads (values-list)
+  (apply #'concatenate 'string
+         (pack "N" (length values-list))
+         (mapcar #'(lambda (value)
+                     (pack "q>" value))
+                 values-list)))
+
+;; pack a single-float in network byte order (cf. the Perl API's _PackFloat)
+(defun pack-float (float-value)
+  (pack "N" (unpack "L*" (pack "f" float-value))))
+
|
thijs/cl-sphinx-search
|
47968647919011e57caa7e85051312aad2a8fc5e
|
Use babel
|
diff --git a/PerlAPI.pm b/PerlAPI.pm
new file mode 100644
index 0000000..10d4ec5
--- /dev/null
+++ b/PerlAPI.pm
@@ -0,0 +1,2149 @@
+package Sphinx::Search;
+
+use warnings;
+use strict;
+
+use base 'Exporter';
+
+use Carp;
+use Socket;
+use Config;
+use Math::BigInt;
+use IO::Socket::INET;
+use IO::Socket::UNIX;
+use Encode qw/encode_utf8 decode_utf8/;
+
+my $is_native64 = $Config{longsize} == 8 || defined $Config{use64bitint} || defined $Config{use64bitall};
+
+
+=head1 NAME
+
+Sphinx::Search - Sphinx search engine API Perl client
+
+=head1 VERSION
+
+Please note that you *MUST* install a version which is compatible with your version of Sphinx.
+
+Use version 0.22 for Sphinx 0.9.9-rc2 and later (Please read the Compatibility Note under L<SetEncoders> regarding encoding changes)
+
+Use version 0.15 for Sphinx 0.9.9-svn-r1674
+
+Use version 0.12 for Sphinx 0.9.8
+
+Use version 0.11 for Sphinx 0.9.8-rc1
+
+Use version 0.10 for Sphinx 0.9.8-svn-r1112
+
+Use version 0.09 for Sphinx 0.9.8-svn-r985
+
+Use version 0.08 for Sphinx 0.9.8-svn-r871
+
+Use version 0.06 for Sphinx 0.9.8-svn-r820
+
+Use version 0.05 for Sphinx 0.9.8-cvs-20070907
+
+Use version 0.02 for Sphinx 0.9.8-cvs-20070818
+
+=cut
+
+our $VERSION = '0.22';
+
+=head1 SYNOPSIS
+
+ use Sphinx::Search;
+
+ $sphinx = Sphinx::Search->new();
+
+ $results = $sphinx->SetMatchMode(SPH_MATCH_ALL)
+ ->SetSortMode(SPH_SORT_RELEVANCE)
+ ->Query("search terms");
+
+=head1 DESCRIPTION
+
+This is the Perl API client for the Sphinx open-source SQL full-text indexing
+search engine, L<http://www.sphinxsearch.com>.
+
+=cut
+
+# Constants to export.
+our @EXPORT = qw(
+ SPH_MATCH_ALL SPH_MATCH_ANY SPH_MATCH_PHRASE SPH_MATCH_BOOLEAN SPH_MATCH_EXTENDED
+ SPH_MATCH_FULLSCAN SPH_MATCH_EXTENDED2
+ SPH_RANK_PROXIMITY_BM25 SPH_RANK_BM25 SPH_RANK_NONE SPH_RANK_WORDCOUNT
+ SPH_SORT_RELEVANCE SPH_SORT_ATTR_DESC SPH_SORT_ATTR_ASC SPH_SORT_TIME_SEGMENTS
+ SPH_SORT_EXTENDED SPH_SORT_EXPR
+ SPH_GROUPBY_DAY SPH_GROUPBY_WEEK SPH_GROUPBY_MONTH SPH_GROUPBY_YEAR SPH_GROUPBY_ATTR
+ SPH_GROUPBY_ATTRPAIR
+ );
+
+# known searchd commands
+use constant SEARCHD_COMMAND_SEARCH => 0;
+use constant SEARCHD_COMMAND_EXCERPT => 1;
+use constant SEARCHD_COMMAND_UPDATE => 2;
+use constant SEARCHD_COMMAND_KEYWORDS => 3;
+use constant SEARCHD_COMMAND_PERSIST => 4;
+use constant SEARCHD_COMMAND_STATUS => 5;
+
+# current client-side command implementation versions
+use constant VER_COMMAND_SEARCH => 0x116;
+use constant VER_COMMAND_EXCERPT => 0x100;
+use constant VER_COMMAND_UPDATE => 0x102;
+use constant VER_COMMAND_KEYWORDS => 0x100;
+use constant VER_COMMAND_STATUS => 0x100;
+
+# known searchd status codes
+use constant SEARCHD_OK => 0;
+use constant SEARCHD_ERROR => 1;
+use constant SEARCHD_RETRY => 2;
+use constant SEARCHD_WARNING => 3;
+
+# known match modes
+use constant SPH_MATCH_ALL => 0;
+use constant SPH_MATCH_ANY => 1;
+use constant SPH_MATCH_PHRASE => 2;
+use constant SPH_MATCH_BOOLEAN => 3;
+use constant SPH_MATCH_EXTENDED => 4;
+use constant SPH_MATCH_FULLSCAN => 5;
+use constant SPH_MATCH_EXTENDED2 => 6; # extended engine V2 (TEMPORARY, WILL BE REMOVED)
+
+# known ranking modes (ext2 only)
+use constant SPH_RANK_PROXIMITY_BM25 => 0; # default mode, phrase proximity major factor and BM25 minor one
+use constant SPH_RANK_BM25 => 1; # statistical mode, BM25 ranking only (faster but worse quality)
+use constant SPH_RANK_NONE => 2; # no ranking, all matches get a weight of 1
+use constant SPH_RANK_WORDCOUNT => 3; # simple word-count weighting, rank is a weighted sum of per-field keyword occurrence counts
+use constant SPH_RANK_PROXIMITY => 4;
+use constant SPH_RANK_MATCHANY => 5;
+
+# known sort modes
+use constant SPH_SORT_RELEVANCE => 0;
+use constant SPH_SORT_ATTR_DESC => 1;
+use constant SPH_SORT_ATTR_ASC => 2;
+use constant SPH_SORT_TIME_SEGMENTS => 3;
+use constant SPH_SORT_EXTENDED => 4;
+use constant SPH_SORT_EXPR => 5;
+
+# known filter types
+use constant SPH_FILTER_VALUES => 0;
+use constant SPH_FILTER_RANGE => 1;
+use constant SPH_FILTER_FLOATRANGE => 2;
+
+# known attribute types
+use constant SPH_ATTR_INTEGER => 1;
+use constant SPH_ATTR_TIMESTAMP => 2;
+use constant SPH_ATTR_ORDINAL => 3;
+use constant SPH_ATTR_BOOL => 4;
+use constant SPH_ATTR_FLOAT => 5;
+use constant SPH_ATTR_BIGINT => 6;
+use constant SPH_ATTR_MULTI => 0x40000000;
+
+# known grouping functions
+use constant SPH_GROUPBY_DAY => 0;
+use constant SPH_GROUPBY_WEEK => 1;
+use constant SPH_GROUPBY_MONTH => 2;
+use constant SPH_GROUPBY_YEAR => 3;
+use constant SPH_GROUPBY_ATTR => 4;
+use constant SPH_GROUPBY_ATTRPAIR => 5;
+
+# Floating point number matching expression
+my $num_re = qr/^-?\d*\.?\d*(?:[eE][+-]?\d+)?$/;
+
+# portably pack numeric to 64 signed bits, network order
+sub _sphPackI64 {
+ my $self = shift;
+ my $v = shift;
+
+ # x64 route
+ my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
+ return pack ( "NN", $i>>32, $i & 4294967295 );
+}
+
+# portably pack numeric to 64 unsigned bits, network order
+sub _sphPackU64 {
+ my $self = shift;
+ my $v = shift;
+
+ my $i = $is_native64 ? int($v) : Math::BigInt->new("$v");
+ return pack ( "NN", $i>>32, $i & 4294967295 );
+}
+
+sub _sphPackI64array {
+ my $self = shift;
+ my $values = shift || [];
+
+ my $s = pack("N", scalar @$values);
+ $s .= $self->_sphPackI64($_) for @$values;
+ return $s;
+}
+
+# portably unpack 64 unsigned bits, network order to numeric
+sub _sphUnpackU64
+{
+ my $self = shift;
+ my $v = shift;
+
+ my ($h,$l) = unpack ( "N*N*", $v );
+
+ # x64 route
+ return ($h<<32) + $l if $is_native64;
+
+ # x32 route, BigInt
+ $h = Math::BigInt->new($h);
+ $h->blsft(32)->badd($l);
+
+ return $h->bstr;
+}
+
+# portably unpack 64 signed bits, network order to numeric
+sub _sphUnpackI64
+{
+ my $self = shift;
+ my $v = shift;
+
+ my ($h,$l) = unpack ( "N*N*", $v );
+
+ my $neg = ($h & 0x80000000) ? 1 : 0;
+
+ # x64 route
+ if ( $is_native64 ) {
+ return -(~(($h<<32) + $l) + 1) if $neg;
+ return ($h<<32) + $l;
+ }
+
+ # x32 route, BigInt
+ if ($neg) {
+ $h = ~$h;
+ $l = ~$l;
+ }
+
+ my $x = Math::BigInt->new($h);
+ $x->blsft(32)->badd($l);
+ $x->binc()->bneg() if $neg;
+
+ return $x->bstr;
+}
+
+
+
+
+
+
+=head1 CONSTRUCTOR
+
+=head2 new
+
+ $sph = Sphinx::Search->new;
+ $sph = Sphinx::Search->new(\%options);
+
+Create a new Sphinx::Search instance.
+
+OPTIONS
+
+=over 4
+
+=item log
+
+Specify an optional logger instance. This can be any class that provides error,
+warn, info, and debug methods (e.g. see L<Log::Log4perl>). Logging is disabled
+if no logger instance is provided.
+
+=item debug
+
+Debug flag. If set (and a logger instance is specified), debugging messages
+will be generated.
+
+=back
+
+=cut
+
+# create a new client object and fill defaults
+sub new {
+ my ($class, $options) = @_;
+ my $self = {
+        # per-client-object settings
+ _host => 'localhost',
+ _port => 3312,
+ _path => undef,
+ _socket => undef,
+
+ # per-query settings
+ _offset => 0,
+ _limit => 20,
+ _mode => SPH_MATCH_ALL,
+ _weights => [],
+ _sort => SPH_SORT_RELEVANCE,
+ _sortby => "",
+ _min_id => 0,
+ _max_id => 0,
+ _filters => [],
+ _groupby => "",
+ _groupdistinct => "",
+ _groupfunc => SPH_GROUPBY_DAY,
+ _groupsort => '@group desc',
+ _maxmatches => 1000,
+ _cutoff => 0,
+ _retrycount => 0,
+ _retrydelay => 0,
+ _anchor => undef,
+ _indexweights => undef,
+ _ranker => SPH_RANK_PROXIMITY_BM25,
+ _maxquerytime => 0,
+ _fieldweights => {},
+ _overrides => {},
+ _select => q{*},
+
+ # per-reply fields (for single-query case)
+ _error => '',
+ _warning => '',
+ _connerror => '',
+
+ # request storage (for multi-query case)
+ _reqs => [],
+ _timeout => 0,
+
+ _string_encoder => \&encode_utf8,
+ _string_decoder => \&decode_utf8,
+ };
+ bless $self, ref($class) || $class;
+
+ # These options are supported in the constructor, but not recommended
+ # since there is no validation. Use the Set* methods instead.
+ my %legal_opts = map { $_ => 1 } qw/host port offset limit mode weights sort sortby groupby groupbyfunc maxmatches cutoff retrycount retrydelay log debug string_encoder string_decoder/;
+ for my $opt (keys %$options) {
+ $self->{'_' . $opt} = $options->{$opt} if $legal_opts{$opt};
+ }
+ # Disable debug unless we have something to log to
+ $self->{_debug} = 0 unless $self->{_log};
+
+ return $self;
+}
+
+
+=head1 METHODS
+
+=cut
+
+sub _Error {
+ my ($self, $msg) = @_;
+
+ $self->{_error} = $msg;
+ $self->{_log}->error($msg) if $self->{_log};
+}
+
+=head2 GetLastError
+
+ $error = $sph->GetLastError;
+
+Get last error message (string)
+
+=cut
+
+sub GetLastError {
+ my $self = shift;
+ return $self->{_error};
+}
+
+sub _Warning {
+ my ($self, $msg) = @_;
+
+ $self->{_warning} = $msg;
+ $self->{_log}->warn($msg) if $self->{_log};
+}
+
+=head2 GetLastWarning
+
+ $warning = $sph->GetLastWarning;
+
+Get last warning message (string)
+
+=cut
+
+sub GetLastWarning {
+ my $self = shift;
+ return $self->{_warning};
+}
+
+
+=head2 IsConnectError
+
+Check connection error flag (to differentiate between network connection errors
+and bad responses). Returns true value on connection error.
+
+=cut
+
+sub IsConnectError {
+ return shift->{_connerror};
+}
+
+=head2 SetEncoders
+
+ $sph->SetEncoders(\&encode_function, \&decode_function)
+
+COMPATIBILITY NOTE: SetEncoders() was introduced in version 0.17.
+Prior to that, all strings were considered to be sequences of bytes
+which may have led to issues with multi-byte characters. If you were
+previously encoding/decoding strings external to Sphinx::Search, you
+will need to disable encoding/decoding by setting Sphinx::Search to
+use raw values as explained below (or modify your code and let
+Sphinx::Search do the recoding).
+
+Set the string encoder/decoder functions for transferring strings
+between perl and Sphinx. The encoder should take the perl internal
+representation and convert to the bytestream that searchd expects, and
+the decoder should take the bytestream returned by searchd and convert to
+perl format.
+
+The searchd format will depend on the 'charset_type' index setting in
+the Sphinx configuration file.
+
+The coders default to encode_utf8 and decode_utf8 respectively, which
+are compatible with the 'utf8' charset_type.
+
+If either the encoder or decoder functions are left undefined in the
+call to SetEncoders, they return to their default values.
+
+If you wish to send raw values (no encoding/decoding), supply a
+function that simply returns its argument, e.g.
+
+ $sph->SetEncoders( sub { shift }, sub { shift });
+
+Returns $sph.
+
+=cut
+
+sub SetEncoders {
+ my $self = shift;
+ my $encoder = shift;
+ my $decoder = shift;
+
+ $self->{_string_encoder} = $encoder ? $encoder : \&encode_utf8;
+ $self->{_string_decoder} = $decoder ? $decoder : \&decode_utf8;
+
+ return $self;
+}
+
+=head2 SetServer
+
+ $sph->SetServer($host, $port);
+ $sph->SetServer($path, $port);
+
+In the first form, sets the host (string) and port (integer) details for the
+searchd server using a network (INET) socket.
+
+In the second form, where $path is a local filesystem path (optionally prefixed
+by 'unix://'), sets the client to access the searchd server via a local (UNIX
+domain) socket at the specified path.
+
+Returns $sph.
+
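+For example (host, port and socket paths here are illustrative, not defaults
+you must use):
+
+    $sph->SetServer('localhost', 3312);              # TCP (INET) socket
+    $sph->SetServer('/var/run/searchd.sock');        # UNIX-domain socket
+    $sph->SetServer('unix:///var/run/searchd.sock'); # same, with unix:// prefix
+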
+=cut
+
+sub SetServer {
+ my $self = shift;
+ my $host = shift;
+ my $port = shift;
+
+ croak("host is not defined") unless defined($host);
+ $self->{_path} = $host, return if substr($host, 0, 1) eq '/';
+ $self->{_path} = substr($host, 7), return if substr($host, 0, 7) eq 'unix://';
+
+ croak("port is not an integer") unless defined($port) && $port =~ m/^\d+$/o;
+
+ $self->{_host} = $host;
+ $self->{_port} = $port;
+ $self->{_path} = undef;
+
+ return $self;
+}
+
+=head2 SetConnectTimeout
+
+ $sph->SetConnectTimeout($timeout)
+
+Set server connection timeout (in seconds).
+
+Returns $sph.
+
+=cut
+
+sub SetConnectTimeout {
+ my $self = shift;
+ my $timeout = shift;
+
+ croak("timeout is not numeric") unless ($timeout =~ m/$num_re/);
+ $self->{_timeout} = $timeout;
+}
+
+sub _Send {
+ my $self = shift;
+ my $fp = shift;
+ my $data = shift;
+
+ $self->{_log}->debug("Writing to socket") if $self->{_debug};
+    if ($fp->eof || ! $fp->write($data)) {
+ $self->_Error("connection unexpectedly closed (timed out?): $!");
+ $self->{_connerror} = 1;
+ return 0;
+ }
+ return 1;
+}
+
+# connect to searchd server
+
+sub _Connect {
+ my $self = shift;
+
+ return $self->{_socket} if $self->{_socket};
+
+ my $debug = $self->{_debug};
+ my $str_dest = $self->{_path} ? 'unix://' . $self->{_path} : "$self->{_host}:$self->{_port}";
+ $self->{_log}->debug("Connecting to $str_dest") if $debug;
+
+ # connect socket
+ $self->{_connerror} = q{};
+
+ my $fp;
+ my %params = (); # ( Blocking => 0 );
+ $params{Timeout} = $self->{_timeout} if $self->{_timeout};
+ if ($self->{_path}) {
+ $fp = IO::Socket::UNIX->new( Peer => $self->{_path},
+ %params,
+ );
+ }
+ else {
+ $fp = IO::Socket::INET->new( PeerPort => $self->{_port},
+ PeerAddr => $self->{_host},
+ Proto => 'tcp',
+ %params,
+ );
+ }
+ if (! $fp) {
+ $self->_Error("Failed to open connection to $str_dest: $!");
+ $self->{_connerror} = 1;
+ return 0;
+ }
+ binmode($fp, ':bytes');
+
+ # check version
+ my $buf = '';
+ $fp->read($buf, 4) or do {
+ $self->_Error("Failed on initial read from $str_dest: $!");
+ $self->{_connerror} = 1;
+ return 0;
+ };
+ my $v = unpack("N*", $buf);
+ $v = int($v);
+ $self->{_log}->debug("Got version $v from searchd") if $debug;
+ if ($v < 1) {
+ close($fp);
+ $self->_Error("expected searchd protocol version 1+, got version '$v'");
+ return 0;
+ }
+
+ $self->{_log}->debug("Sending version") if $debug;
+
+ # All ok, send my version
+ $self->_Send($fp, pack("N", 1)) or return 0;
+
+ $self->{_log}->debug("Connection complete") if $debug;
+
+ return $fp;
+}
+
+#-------------------------------------------------------------
+
+# get and check response packet from searchd server
+sub _GetResponse {
+ my $self = shift;
+ my $fp = shift;
+ my $client_ver = shift;
+
+ my $header;
+ defined($fp->read($header, 8, 0)) or do {
+ $self->_Error("read failed: $!");
+ return 0;
+ };
+
+ my ($status, $ver, $len ) = unpack("n2N", $header);
+ my $response = q{};
+ my $lasterror = q{};
+ my $lentotal = 0;
+ while (my $rlen = $fp->read(my $chunk, $len)) {
+ $lasterror = $!, last if $rlen < 0;
+ $response .= $chunk;
+ $lentotal += $rlen;
+ last if $lentotal >= $len;
+ }
+ close($fp) unless $self->{_socket};
+
+ # check response
+ if ( length($response) != $len ) {
+ $self->_Error( $len
+ ? "failed to read searchd response (status=$status, ver=$ver, len=$len, read=". length($response) . ", last error=$lasterror)"
+ : "received zero-sized searchd response");
+ return 0;
+ }
+
+ # check status
+ if ( $status==SEARCHD_WARNING ) {
+ my ($wlen) = unpack ( "N*", substr ( $response, 0, 4 ) );
+ $self->_Warning(substr ( $response, 4, $wlen ));
+ return substr ( $response, 4+$wlen );
+ }
+ if ( $status==SEARCHD_ERROR ) {
+ $self->_Error("searchd error: " . substr ( $response, 4 ));
+ return 0;
+ }
+ if ( $status==SEARCHD_RETRY ) {
+ $self->_Error("temporary searchd error: " . substr ( $response, 4 ));
+ return 0;
+ }
+ if ( $status!=SEARCHD_OK ) {
+ $self->_Error("unknown status code '$status'");
+ return 0;
+ }
+
+ # check version
+ if ( $ver<$client_ver ) {
+ $self->_Warning(sprintf ( "searchd command v.%d.%d older than client's v.%d.%d, some options might not work",
+ $ver>>8, $ver&0xff, $client_ver>>8, $client_ver&0xff ));
+ }
+ return $response;
+}
+
+=head2 SetLimits
+
+ $sph->SetLimits($offset, $limit);
+ $sph->SetLimits($offset, $limit, $max);
+
+Set match offset/limits, and optionally the max number of matches to return.
+
+Returns $sph.
+
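+For example, to fetch the second page of 20 results (a sketch; the page size
+is arbitrary):
+
+    $sph->SetLimits(20, 20);        # skip 20 matches, return the next 20
+    $sph->SetLimits(0, 20, 1000);   # first page, retrieve at most 1000 matches
+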
+=cut
+
+sub SetLimits {
+ my $self = shift;
+ my $offset = shift;
+ my $limit = shift;
+ my $max = shift || 0;
+ croak("offset should be an integer >= 0") unless ($offset =~ /^\d+$/ && $offset >= 0) ;
+ croak("limit should be an integer >= 0") unless ($limit =~ /^\d+$/ && $limit >= 0);
+ $self->{_offset} = $offset;
+ $self->{_limit} = $limit;
+ if($max > 0) {
+ $self->{_maxmatches} = $max;
+ }
+ return $self;
+}
+
+=head2 SetMaxQueryTime
+
+ $sph->SetMaxQueryTime($millisec);
+
+Set maximum query time, in milliseconds, per index.
+
+The value may not be negative; 0 means "do not limit".
+
+Returns $sph.
+
+=cut
+
+sub SetMaxQueryTime {
+ my $self = shift;
+ my $max = shift;
+
+ croak("max value should be an integer >= 0") unless ($max =~ /^\d+$/ && $max >= 0) ;
+ $self->{_maxquerytime} = $max;
+ return $self;
+}
+
+
+=head2 SetMatchMode
+
+ $sph->SetMatchMode($mode);
+
+Set match mode, which may be one of:
+
+=over 4
+
+=item * SPH_MATCH_ALL
+
+Match all words
+
+=item * SPH_MATCH_ANY
+
+Match any words
+
+=item * SPH_MATCH_PHRASE
+
+Exact phrase match
+
+=item * SPH_MATCH_BOOLEAN
+
+Boolean match, using AND (&), OR (|), NOT (!,-) and parenthetic grouping.
+
+=item * SPH_MATCH_EXTENDED
+
+Extended match, which includes the Boolean syntax plus field, phrase and
+proximity operators.
+
+=back
+
+Returns $sph.
+
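+A sketch using the boolean syntax described above (the query string is
+illustrative):
+
+    $results = $sph->SetMatchMode(SPH_MATCH_BOOLEAN)
+                   ->Query('(hello | hi) !bye');
+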
+=cut
+
+sub SetMatchMode {
+ my $self = shift;
+ my $mode = shift;
+ croak("Match mode not defined") unless defined($mode);
+ croak("Unknown matchmode: $mode") unless ( $mode==SPH_MATCH_ALL
+ || $mode==SPH_MATCH_ANY
+ || $mode==SPH_MATCH_PHRASE
+ || $mode==SPH_MATCH_BOOLEAN
+ || $mode==SPH_MATCH_EXTENDED
+ || $mode==SPH_MATCH_FULLSCAN
+ || $mode==SPH_MATCH_EXTENDED2 );
+ $self->{_mode} = $mode;
+ return $self;
+}
+
+
+=head2 SetRankingMode
+
+ $sph->SetRankingMode(SPH_RANK_BM25);
+
+Set ranking mode, which may be one of:
+
+=over 4
+
+=item * SPH_RANK_PROXIMITY_BM25
+
+Default mode, phrase proximity major factor and BM25 minor one
+
+=item * SPH_RANK_BM25
+
+Statistical mode, BM25 ranking only (faster but worse quality)
+
+=item * SPH_RANK_NONE
+
+No ranking, all matches get a weight of 1
+
+=item * SPH_RANK_WORDCOUNT
+
+Simple word-count weighting, rank is a weighted sum of per-field keyword
+occurrence counts
+
+=back
+
+Returns $sph.
+
+=cut
+
+sub SetRankingMode {
+ my $self = shift;
+ my $ranker = shift;
+
+ croak("Unknown ranking mode: $ranker") unless ( $ranker==SPH_RANK_PROXIMITY_BM25
+ || $ranker==SPH_RANK_BM25
+ || $ranker==SPH_RANK_NONE
+ || $ranker==SPH_RANK_WORDCOUNT
+ || $ranker==SPH_RANK_PROXIMITY );
+
+ $self->{_ranker} = $ranker;
+ return $self;
+}
+
+
+=head2 SetSortMode
+
+ $sph->SetSortMode(SPH_SORT_RELEVANCE);
+ $sph->SetSortMode($mode, $sortby);
+
+Set sort mode, which may be any of:
+
+=over 4
+
+=item SPH_SORT_RELEVANCE - sort by relevance
+
+=item SPH_SORT_ATTR_DESC, SPH_SORT_ATTR_ASC
+
+Sort by attribute descending/ascending. $sortby specifies the sorting attribute.
+
+=item SPH_SORT_TIME_SEGMENTS
+
+Sort by time segments (last hour/day/week/month) in descending order, and then
+by relevance in descending order. $sortby specifies the time attribute.
+
+=item SPH_SORT_EXTENDED
+
+Sort by SQL-like syntax. $sortby is the sorting specification.
+
+=item SPH_SORT_EXPR
+
+Sort by an arithmetic expression; $sortby gives the expression.
+=back
+
+Returns $sph.
+
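+For example (the attribute name date_added is hypothetical):
+
+    $sph->SetSortMode(SPH_SORT_EXTENDED, '@weight DESC, date_added DESC');
+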
+=cut
+
+sub SetSortMode {
+ my $self = shift;
+ my $mode = shift;
+ my $sortby = shift || "";
+ croak("Sort mode not defined") unless defined($mode);
+ croak("Unknown sort mode: $mode") unless ( $mode == SPH_SORT_RELEVANCE
+ || $mode == SPH_SORT_ATTR_DESC
+ || $mode == SPH_SORT_ATTR_ASC
+ || $mode == SPH_SORT_TIME_SEGMENTS
+ || $mode == SPH_SORT_EXTENDED
+ || $mode == SPH_SORT_EXPR
+ );
+ croak("Sortby must be defined") unless ($mode==SPH_SORT_RELEVANCE || length($sortby));
+ $self->{_sort} = $mode;
+ $self->{_sortby} = $sortby;
+ return $self;
+}
+
+=head2 SetWeights
+
+ $sph->SetWeights([ 1, 2, 3, 4]);
+
+This method is deprecated. Use L<SetFieldWeights> instead.
+
+Set per-field (integer) weights. The ordering of the weights corresponds to the
+ordering of fields as indexed.
+
+Returns $sph.
+
+=cut
+
+sub SetWeights {
+ my $self = shift;
+ my $weights = shift;
+ croak("Weights is not an array reference") unless (ref($weights) eq 'ARRAY');
+ foreach my $weight (@$weights) {
+ croak("Weight: $weight is not an integer") unless ($weight =~ /^\d+$/);
+ }
+ $self->{_weights} = $weights;
+ return $self;
+}
+
+=head2 SetFieldWeights
+
+ $sph->SetFieldWeights(\%weights);
+
+Set per-field (integer) weights by field name. The weights hash provides field
+name to weight mappings.
+
+Takes precedence over L<SetWeights>.
+
+Unknown names will be silently ignored. Missing fields will be given a weight of 1.
+
+Returns $sph.
+
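+For example (the field names title and body are assumptions about your index):
+
+    $sph->SetFieldWeights({ title => 10, body => 1 });
+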
+=cut
+
+sub SetFieldWeights {
+ my $self = shift;
+ my $weights = shift;
+ croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
+ foreach my $field (keys %$weights) {
+ croak("Weight: $weights->{$field} is not an integer >= 0") unless ($weights->{$field} =~ /^\d+$/);
+ }
+ $self->{_fieldweights} = $weights;
+ return $self;
+}
+
+=head2 SetIndexWeights
+
+ $sph->SetIndexWeights(\%weights);
+
+Set per-index (integer) weights. The weights hash is a mapping of index name to integer weight.
+
+Returns $sph.
+
+=cut
+
+sub SetIndexWeights {
+ my $self = shift;
+ my $weights = shift;
+ croak("Weights is not a hash reference") unless (ref($weights) eq 'HASH');
+ foreach (keys %$weights) {
+ croak("IndexWeight $_: $weights->{$_} is not an integer") unless ($weights->{$_} =~ /^\d+$/);
+ }
+ $self->{_indexweights} = $weights;
+ return $self;
+}
+
+
+
+=head2 SetIDRange
+
+ $sph->SetIDRange($min, $max);
+
+Set an ID range: only match those records whose document ID
+is between $min and $max (including $min and $max).
+
+Returns $sph.
+
+=cut
+
+sub SetIDRange {
+ my $self = shift;
+ my $min = shift;
+ my $max = shift;
+ croak("min_id is not numeric") unless ($min =~ m/$num_re/);
+ croak("max_id is not numeric") unless ($max =~ m/$num_re/);
+ croak("min_id is larger than or equal to max_id") unless ($min < $max);
+ $self->{_min_id} = $min;
+ $self->{_max_id} = $max;
+ return $self;
+}
+
+=head2 SetFilter
+
+ $sph->SetFilter($attr, \@values);
+ $sph->SetFilter($attr, \@values, $exclude);
+
+Sets the results to be filtered on the given attribute. Only results which have
+attributes matching the given (numeric) values will be returned.
+
+This may be called multiple times with different attributes to select on
+multiple attributes.
+
+If 'exclude' is set, excludes results that match the filter.
+
+Returns $sph.
+
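+For example (the attribute name group_id is hypothetical):
+
+    $sph->SetFilter('group_id', [ 1, 5, 19 ]);   # include these groups
+    $sph->SetFilter('group_id', [ 3 ], 1);       # exclude group 3
+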
+=cut
+
+sub SetFilter {
+ my ($self, $attribute, $values, $exclude) = @_;
+
+ croak("attribute is not defined") unless (defined $attribute);
+ croak("values is not an array reference") unless (ref($values) eq 'ARRAY');
+ croak("values reference is empty") unless (scalar(@$values));
+
+ foreach my $value (@$values) {
+ croak("value $value is not numeric") unless ($value =~ m/$num_re/);
+ }
+ push(@{$self->{_filters}}, {
+ type => SPH_FILTER_VALUES,
+ attr => $attribute,
+ values => $values,
+ exclude => $exclude ? 1 : 0,
+ });
+
+ return $self;
+}
+
+=head2 SetFilterRange
+
+ $sph->SetFilterRange($attr, $min, $max);
+ $sph->SetFilterRange($attr, $min, $max, $exclude);
+
+Sets the results to be filtered on a range of values for the given
+attribute. Only those records where $attr column value is between $min and $max
+(including $min and $max) will be returned.
+
+If 'exclude' is set, excludes results that fall within the given range.
+
+Returns $sph.
+
+=cut
+
+sub SetFilterRange {
+ my ($self, $attribute, $min, $max, $exclude) = @_;
+ croak("attribute is not defined") unless (defined $attribute);
+ croak("min: $min is not an integer") unless ($min =~ m/$num_re/);
+ croak("max: $max is not an integer") unless ($max =~ m/$num_re/);
+ croak("min value should be <= max") unless ($min <= $max);
+
+ push(@{$self->{_filters}}, {
+ type => SPH_FILTER_RANGE,
+ attr => $attribute,
+ min => $min,
+ max => $max,
+ exclude => $exclude ? 1 : 0,
+ });
+
+ return $self;
+}
+
+=head2 SetFilterFloatRange
+
+ $sph->SetFilterFloatRange($attr, $min, $max, $exclude);
+
+Same as L<SetFilterRange>, but allows floating point values.
+
+Returns $sph.
+
+=cut
+
+sub SetFilterFloatRange {
+ my ($self, $attribute, $min, $max, $exclude) = @_;
+ croak("attribute is not defined") unless (defined $attribute);
+ croak("min: $min is not numeric") unless ($min =~ m/$num_re/);
+ croak("max: $max is not numeric") unless ($max =~ m/$num_re/);
+ croak("min value should be <= max") unless ($min <= $max);
+
+ push(@{$self->{_filters}}, {
+ type => SPH_FILTER_FLOATRANGE,
+ attr => $attribute,
+ min => $min,
+ max => $max,
+ exclude => $exclude ? 1 : 0,
+ });
+
+ return $self;
+
+}
+
+=head2 SetGeoAnchor
+
+ $sph->SetGeoAnchor($attrlat, $attrlong, $lat, $long);
+
+Set up the anchor point for geosphere distance calculations in filters and
+sorting. Distances will be computed with respect to this point.
+
+=over 4
+
+=item $attrlat is the name of latitude attribute
+
+=item $attrlong is the name of longitude attribute
+
+=item $lat is anchor point latitude, in radians
+
+=item $long is anchor point longitude, in radians
+
+=back
+
+Returns $sph.
+
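+For example, anchoring on a point given in degrees (the attribute names are
+hypothetical; note the conversion to radians):
+
+    my $deg2rad = 3.14159265358979 / 180;
+    $sph->SetGeoAnchor('lat', 'long', 52.3676 * $deg2rad, 4.9041 * $deg2rad);
+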
+=cut
+
+sub SetGeoAnchor {
+ my ($self, $attrlat, $attrlong, $lat, $long) = @_;
+
+ croak("attrlat is not defined") unless defined $attrlat;
+ croak("attrlong is not defined") unless defined $attrlong;
+ croak("lat: $lat is not numeric") unless ($lat =~ m/$num_re/);
+ croak("long: $long is not numeric") unless ($long =~ m/$num_re/);
+
+ $self->{_anchor} = {
+ attrlat => $attrlat,
+ attrlong => $attrlong,
+ lat => $lat,
+ long => $long,
+ };
+ return $self;
+}
+
+=head2 SetGroupBy
+
+ $sph->SetGroupBy($attr, $func);
+ $sph->SetGroupBy($attr, $func, $groupsort);
+
+Sets attribute and function of results grouping.
+
+In grouping mode, all matches are assigned to different groups based on grouping
+function value. Each group keeps track of the total match count, and the best
+match (in this group) according to current sorting function. The final result
+set contains one best match per group, with grouping function value and matches
+count attached.
+
+$attr is any valid attribute. Use L<ResetGroupBy> to disable grouping.
+
+$func is one of:
+
+=over 4
+
+=item * SPH_GROUPBY_DAY
+
+Group by day (assumes timestamp type attribute of form YYYYMMDD)
+
+=item * SPH_GROUPBY_WEEK
+
+Group by week (assumes timestamp type attribute of form YYYYNNN)
+
+=item * SPH_GROUPBY_MONTH
+
+Group by month (assumes timestamp type attribute of form YYYYMM)
+
+=item * SPH_GROUPBY_YEAR
+
+Group by year (assumes timestamp type attribute of form YYYY)
+
+=item * SPH_GROUPBY_ATTR
+
+Group by attribute value
+
+=item * SPH_GROUPBY_ATTRPAIR
+
+Group by two attributes, being the given attribute and the attribute that
+immediately follows it in the sequence of indexed attributes. The specified
+attribute may therefore not be the last of the indexed attributes.
+
+=back
+
+Groups in the set of results can be sorted by any SQL-like sorting clause,
+including both document attributes and the following special internal Sphinx
+attributes:
+
+=over 4
+
+=item @id - document ID;
+
+=item @weight, @rank, @relevance - match weight;
+
+=item @group - group by function value;
+
+=item @count - number of matches in group.
+
+=back
+
+The default mode is to sort by groupby value in descending order,
+ie. by "@group desc".
+
+In the results set, "total_found" contains the total amount of matching groups
+over the whole index.
+
+WARNING: grouping is done in fixed memory and thus its results
+are only approximate; so there might be more groups reported
+in total_found than actually present. @count might also
+be underestimated.
+
+For example, if sorting by relevance and grouping by a "published"
+attribute with SPH_GROUPBY_DAY function, then the result set will
+contain only the most relevant match for each day when there were any
+matches published, with day number and per-day match count attached,
+and sorted by day number in descending order (ie. recent days first).
+
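+A sketch of that scenario (the attribute name published is hypothetical):
+
+    $sph->SetSortMode(SPH_SORT_RELEVANCE)
+        ->SetGroupBy('published', SPH_GROUPBY_DAY);
+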
+=cut
+
+sub SetGroupBy {
+ my $self = shift;
+ my $attribute = shift;
+ my $func = shift;
+ my $groupsort = shift || '@group desc';
+ croak("attribute is not defined") unless (defined $attribute);
+ croak("Unknown grouping function: $func") unless ($func==SPH_GROUPBY_DAY
+ || $func==SPH_GROUPBY_WEEK
+ || $func==SPH_GROUPBY_MONTH
+ || $func==SPH_GROUPBY_YEAR
+ || $func==SPH_GROUPBY_ATTR
+ || $func==SPH_GROUPBY_ATTRPAIR
+ );
+
+ $self->{_groupby} = $attribute;
+ $self->{_groupfunc} = $func;
+ $self->{_groupsort} = $groupsort;
+ return $self;
+}
+
+=head2 SetGroupDistinct
+
+ $sph->SetGroupDistinct($attr);
+
+Set count-distinct attribute for group-by queries
+
+=cut
+
+sub SetGroupDistinct {
+ my $self = shift;
+ my $attribute = shift;
+ croak("attribute is not defined") unless (defined $attribute);
+ $self->{_groupdistinct} = $attribute;
+ return $self;
+}
+
+=head2 SetRetries
+
+ $sph->SetRetries($count, $delay);
+
+Set distributed retries count and delay
+
+=cut
+
+sub SetRetries {
+ my $self = shift;
+ my $count = shift;
+ my $delay = shift || 0;
+
+ croak("count: $count is not an integer >= 0") unless ($count =~ /^\d+$/o && $count >= 0);
+ croak("delay: $delay is not an integer >= 0") unless ($delay =~ /^\d+$/o && $delay >= 0);
+ $self->{_retrycount} = $count;
+ $self->{_retrydelay} = $delay;
+ return $self;
+}
+
+=head2 SetOverride
+
+ $sph->SetOverride($attrname, $attrtype, $values);
+
+ Set attribute values override. There can be only one override per attribute.
+ $values must be a hash that maps document IDs to attribute values
+
+=cut
+
+sub SetOverride {
+ my $self = shift;
+ my $attrname = shift;
+ my $attrtype = shift;
+ my $values = shift;
+
+ croak("attribute name is not defined") unless defined $attrname;
+    croak("Unknown attribute type: $attrtype") unless ($attrtype == SPH_ATTR_INTEGER
+ || $attrtype == SPH_ATTR_TIMESTAMP
+ || $attrtype == SPH_ATTR_BOOL
+ || $attrtype == SPH_ATTR_FLOAT
+ || $attrtype == SPH_ATTR_BIGINT);
+ $self->{_overrides}->{$attrname} = { attr => $attrname,
+ type => $attrtype,
+ values => $values,
+ };
+
+ return $self;
+}
+
+
+=head2 SetSelect
+
+ $sph->SetSelect($select)
+
+Set select list (attributes or expressions). SQL-like syntax.
+
+=cut
+
+sub SetSelect {
+ my $self = shift;
+ $self->{_select} = shift;
+ return $self;
+}
+
+=head2 ResetFilters
+
+ $sph->ResetFilters;
+
+Clear all filters.
+
+=cut
+
+sub ResetFilters {
+ my $self = shift;
+
+ $self->{_filters} = [];
+ $self->{_anchor} = undef;
+
+ return $self;
+}
+
+=head2 ResetGroupBy
+
+ $sph->ResetGroupBy;
+
+Clear all group-by settings (for multi-queries)
+
+=cut
+
+sub ResetGroupBy {
+ my $self = shift;
+
+ $self->{_groupby} = "";
+ $self->{_groupfunc} = SPH_GROUPBY_DAY;
+ $self->{_groupsort} = '@group desc';
+ $self->{_groupdistinct} = "";
+
+ return $self;
+}
+
+=head2 ResetOverrides
+
+Clear all attribute value overrides (for multi-queries)
+
+=cut
+
+sub ResetOverrides {
+ my $self = shift;
+
+ $self->{_select} = undef;
+ return $self;
+}
+
+=head2 Query
+
+ $results = $sph->Query($query, $index);
+
+Connect to searchd server and run given search query.
+
+=over 4
+
+=item query is query string
+
+=item index is index name to query, default is "*" which means to query all indexes. Use a space or comma separated list to search multiple indexes.
+
+=back
+
+Returns undef on failure
+
+Returns a hash which has the following keys on success:
+
+=over 4
+
+=item matches
+
+Array containing hashes with found documents ( "doc", "weight", "group", "stamp" )
+
+=item total
+
+Total amount of matches retrieved (up to SPH_MAX_MATCHES, see sphinx.h)
+
+=item total_found
+
+Total amount of matching documents in index
+
+=item time
+
+Search time
+
+=item words
+
+Hash which maps query terms (stemmed!) to ( "docs", "hits" ) hash
+
+=back
+
+The returned hash is the first result set from the underlying L<RunQueries> call.
+
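+A typical call and result traversal (the index name is illustrative):
+
+    my $results = $sph->Query('search terms', 'myindex');
+    die $sph->GetLastError unless $results;
+    foreach my $match (@{$results->{matches}}) {
+        printf "doc=%d weight=%d\n", $match->{doc}, $match->{weight};
+    }
+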
+=cut
+
+sub Query {
+ my $self = shift;
+ my $query = shift;
+ my $index = shift || '*';
+ my $comment = shift || '';
+
+ croak("_reqs is not empty") unless @{$self->{_reqs}} == 0;
+
+ $self->AddQuery($query, $index, $comment);
+ my $results = $self->RunQueries or return;
+ $self->_Error($results->[0]->{error}) if $results->[0]->{error};
+ $self->_Warning($results->[0]->{warning}) if $results->[0]->{warning};
+ return if $results->[0]->{status} && $results->[0]->{status} == SEARCHD_ERROR;
+
+ return $results->[0];
+}
+
+# helper to pack floats in network byte order
+sub _PackFloat {
+ my $f = shift;
+ my $t1 = pack ( "f", $f ); # machine order
+ my $t2 = unpack ( "L*", $t1 ); # int in machine order
+ return pack ( "N", $t2 );
+}
+
+
+=head2 AddQuery
+
+ $sph->AddQuery($query, $index);
+
+Add a query to a batch request.
+
+Batch queries enable searchd to perform internal optimizations,
+if possible; and reduce network connection overheads in all cases.
+
+For instance, running exactly the same query with different
+groupby settings will enable searched to perform expensive
+full-text search and ranking operation only once, but compute
+multiple groupby results from its output.
+
+Parameters are exactly the same as in Query() call.
+
+Returns the index of this query in the results array returned by a subsequent RunQueries() call.
+
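+A sketch of a batch with two groupby variants of the same query (the attribute
+names price and vendor are hypothetical):
+
+    my $i1 = $sph->SetGroupBy('price',  SPH_GROUPBY_ATTR)->AddQuery('terms');
+    my $i2 = $sph->SetGroupBy('vendor', SPH_GROUPBY_ATTR)->AddQuery('terms');
+    my $results = $sph->RunQueries;
+    # $results->[$i1] and $results->[$i2] hold the respective result sets
+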
+=cut
+
+sub AddQuery {
+ my $self = shift;
+ my $query = shift;
+ my $index = shift || '*';
+ my $comment = shift || '';
+
+ ##################
+ # build request
+ ##################
+
+ my $req;
+ $req = pack ( "NNNNN", $self->{_offset}, $self->{_limit}, $self->{_mode}, $self->{_ranker}, $self->{_sort} ); # mode and limits
+ $req .= pack ( "N/a*", $self->{_sortby});
+ $req .= pack ( "N/a*", $self->{_string_encoder}->($query) ); # query itself
+ $req .= pack ( "N*", scalar(@{$self->{_weights}}), @{$self->{_weights}});
+ $req .= pack ( "N/a*", $index); # indexes
+ $req .= pack ( "N", 1)
+ . $self->_sphPackU64($self->{_min_id})
+ . $self->_sphPackU64($self->{_max_id}); # id64 range
+
+ # filters
+ $req .= pack ( "N", scalar @{$self->{_filters}} );
+ foreach my $filter (@{$self->{_filters}}) {
+ $req .= pack ( "N/a*", $filter->{attr});
+ $req .= pack ( "N", $filter->{type});
+
+ my $t = $filter->{type};
+ if ($t == SPH_FILTER_VALUES) {
+ $req .= $self->_sphPackI64array($filter->{values});
+ }
+ elsif ($t == SPH_FILTER_RANGE) {
+ $req .= $self->_sphPackI64($filter->{min}) . $self->_sphPackI64($filter->{max});
+ }
+ elsif ($t == SPH_FILTER_FLOATRANGE) {
+ $req .= _PackFloat ( $filter->{"min"} ) . _PackFloat ( $filter->{"max"} );
+ }
+ else {
+ croak("Unhandled filter type $t");
+ }
+ $req .= pack ( "N", $filter->{exclude});
+ }
+
+ # group-by clause, max-matches count, group-sort clause, cutoff count
+ $req .= pack ( "NN/a*", $self->{_groupfunc}, $self->{_groupby} );
+ $req .= pack ( "N", $self->{_maxmatches} );
+ $req .= pack ( "N/a*", $self->{_groupsort});
+ $req .= pack ( "NNN", $self->{_cutoff}, $self->{_retrycount}, $self->{_retrydelay} );
+ $req .= pack ( "N/a*", $self->{_groupdistinct});
+
+ if (!defined $self->{_anchor}) {
+ $req .= pack ( "N", 0);
+ }
+ else {
+ my $a = $self->{_anchor};
+ $req .= pack ( "N", 1);
+ $req .= pack ( "N/a*", $a->{attrlat});
+ $req .= pack ( "N/a*", $a->{attrlong});
+ $req .= _PackFloat($a->{lat}) . _PackFloat($a->{long});
+ }
+
+ # per-index weights
+ $req .= pack( "N", scalar keys %{$self->{_indexweights}});
+ $req .= pack ( "N/a*N", $_, $self->{_indexweights}->{$_} ) for keys %{$self->{_indexweights}};
+
+ # max query time
+ $req .= pack ( "N", $self->{_maxquerytime} );
+
+ # per-field weights
+ $req .= pack ( "N", scalar keys %{$self->{_fieldweights}} );
+ $req .= pack ( "N/a*N", $_, $self->{_fieldweights}->{$_}) for keys %{$self->{_fieldweights}};
+ # comment
+ $req .= pack ( "N/a*", $comment);
+
+ # attribute overrides
+ $req .= pack ( "N", scalar keys %{$self->{_overrides}} );
+ for my $entry (values %{$self->{_overrides}}) {
+ $req .= pack ("N/a*", $entry->{attr})
+ . pack ("NN", $entry->{type}, scalar keys %{$entry->{values}});
+ for my $id (keys %{$entry->{values}}) {
+ croak "Attribute value key is not numeric" unless $id =~ m/$num_re/;
+ my $v = $entry->{values}->{$id};
+            croak "Attribute value is not numeric" unless $v =~ m/$num_re/;
+ $req .= $self->_sphPackU64($id);
+ if ($entry->{type} == SPH_ATTR_FLOAT) {
+                $req .= _PackFloat($v);
+ }
+ elsif ($entry->{type} == SPH_ATTR_BIGINT) {
+ $req .= $self->_sphPackI64($v);
+ }
+ else {
+ $req .= pack("N", $v);
+ }
+ }
+ }
+
+ # select list
+ $req .= pack("N/a*", $self->{_select} || '');
+
+ push(@{$self->{_reqs}}, $req);
+
+ return scalar $#{$self->{_reqs}};
+}
+
+=head2 RunQueries
+
+ $sph->RunQueries
+
+Run batch of queries, as added by AddQuery.
+
+Returns undef on network IO failure.
+
+Returns an array of result sets on success.
+
+Each result set in the returned array is a hash which contains
+the same keys as the hash returned by L<Query>, plus:
+
+=over 4
+
+=item * error
+
+Errors, if any, for this query.
+
+=item * warning
+
+Any warnings associated with the query.
+
+=back
+
+=cut
+
+sub RunQueries {
+ my $self = shift;
+
+ unless (@{$self->{_reqs}}) {
+ $self->_Error("no queries defined, issue AddQuery() first");
+ return;
+ }
+
+ my $fp = $self->_Connect() or do { $self->{_reqs} = []; return };
+
+ ##################
+ # send query, get response
+ ##################
+ my $nreqs = @{$self->{_reqs}};
+ my $req = pack("Na*", $nreqs, join("", @{$self->{_reqs}}));
+ $req = pack ( "nnN/a*", SEARCHD_COMMAND_SEARCH, VER_COMMAND_SEARCH, $req); # add header
+ $self->_Send($fp, $req);
+
+ $self->{_reqs} = [];
+
+ my $response = $self->_GetResponse ( $fp, VER_COMMAND_SEARCH );
+ return unless $response;
+
+ ##################
+ # parse response
+ ##################
+
+ my $p = 0;
+ my $max = length($response); # Protection from broken response
+
+ my @results;
+ for (my $ires = 0; $ires < $nreqs; $ires++) {
+ my $result = {}; # Empty hash ref
+ push(@results, $result);
+ $result->{matches} = []; # Empty array ref
+ $result->{error} = "";
+        $result->{warning} = "";
+
+ # extract status
+ my $status = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+ if ($status != SEARCHD_OK) {
+ my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+ my $message = substr ( $response, $p, $len ); $p += $len;
+ if ($status == SEARCHD_WARNING) {
+ $result->{warning} = $message;
+ }
+ else {
+ $result->{error} = $message;
+ next;
+ }
+ }
+
+ # read schema
+ my @fields;
+ my (%attrs, @attr_list);
+
+ my $nfields = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
+ while ( $nfields-->0 && $p<$max ) {
+ my $len = unpack ( "N", substr ( $response, $p, 4 ) ); $p += 4;
+ push(@fields, substr ( $response, $p, $len )); $p += $len;
+ }
+ $result->{"fields"} = \@fields;
+
+ my $nattrs = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ while ( $nattrs-->0 && $p<$max ) {
+ my $len = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ my $attr = substr ( $response, $p, $len ); $p += $len;
+ my $type = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ $attrs{$attr} = $type;
+ push(@attr_list, $attr);
+ }
+ $result->{"attrs"} = \%attrs;
+
+ # read match count
+ my $count = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ my $id64 = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+
+ # read matches
+ while ( $count-->0 && $p<$max ) {
+ my $data = {};
+ if ($id64) {
+ $data->{doc} = $self->_sphUnpackU64(substr($response, $p, 8)); $p += 8;
+ $data->{weight} = unpack("N*", substr($response, $p, 4)); $p += 4;
+ }
+ else {
+ ( $data->{doc}, $data->{weight} ) = unpack("N*N*", substr($response,$p,8));
+ $p += 8;
+ }
+ foreach my $attr (@attr_list) {
+ if ($attrs{$attr} == SPH_ATTR_BIGINT) {
+ $data->{$attr} = $self->_sphUnpackI64(substr($response, $p, 8)); $p += 8;
+ next;
+ }
+ if ($attrs{$attr} == SPH_ATTR_FLOAT) {
+ my $uval = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ $data->{$attr} = [ unpack("f*", pack("L", $uval)) ];
+ next;
+ }
+ my $val = unpack ( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ if ($attrs{$attr} & SPH_ATTR_MULTI) {
+ my $nvalues = $val;
+ $data->{$attr} = [];
+ while ($nvalues-->0 && $p < $max) {
+ $val = unpack( "N*", substr ( $response, $p, 4 ) ); $p += 4;
+ push(@{$data->{$attr}}, $val);
+ }
+ }
+ else {
+ $data->{$attr} = $val;
+ }
+ }
+ push(@{$result->{matches}}, $data);
+ }
+ my $words;
+ ($result->{total}, $result->{total_found}, $result->{time}, $words) = unpack("N*N*N*N*", substr($response, $p, 16));
+ $result->{time} = sprintf ( "%.3f", $result->{"time"}/1000 );
+ $p += 16;
+
+ while ( $words-->0 && $p < $max) {
+ my $len = unpack ( "N*", substr ( $response, $p, 4 ) );
+ $p += 4;
+ my $word = $self->{_string_decoder}->( substr ( $response, $p, $len ) );
+ $p += $len;
+ my ($docs, $hits) = unpack ("N*N*", substr($response, $p, 8));
+ $p += 8;
+ $result->{words}{$word} = {
+ "docs" => $docs,
+ "hits" => $hits
+ };
+ }
+ }
+
+ return \@results;
+}
+
+=head2 BuildExcerpts
+
+ $excerpts = $sph->BuildExcerpts($docs, $index, $words, $opts)
+
+Generate document excerpts for the specified documents.
+
+=over 4
+
+=item docs
+
+An array reference of strings which represent the document
+contents
+
+=item index
+
+A string specifying the index whose settings will be used
+for stemming, lexing and case folding
+
+=item words
+
+A string which contains the words to highlight
+
+=item opts
+
+A hash which contains additional optional highlighting parameters:
+
+=over 4
+
+=item before_match - a string to insert before a set of matching words, default is "<b>"
+
+=item after_match - a string to insert after a set of matching words, default is "</b>"
+
+=item chunk_separator - a string to insert between excerpts chunks, default is " ... "
+
+=item limit - max excerpt size in symbols (codepoints), default is 256
+
+=item around - how many words to highlight around each match, default is 5
+
+=item exact_phrase - whether to highlight exact phrase matches only, default is false
+
+=item single_passage - whether to extract single best passage only, default is false
+
+=item use_boundaries
+
+=item weight_order
+
+=back
+
+=back
+
+Returns undef on failure.
+
+Returns an array ref of string excerpts on success.
+
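+For example (document strings and index name are illustrative):
+
+    my $excerpts = $sph->BuildExcerpts(
+        [ 'first document text', 'second document text' ],
+        'myindex', 'document', { around => 3 });
+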
+=cut
+
+sub BuildExcerpts {
+ my ($self, $docs, $index, $words, $opts) = @_;
+ $opts ||= {};
+    croak("BuildExcerpts() called with incorrect parameters")
+ unless (ref($docs) eq 'ARRAY'
+ && defined($index)
+ && defined($words)
+ && ref($opts) eq 'HASH');
+ my $fp = $self->_Connect() or return;
+
+ ##################
+ # fixup options
+ ##################
+ $opts->{"before_match"} ||= "<b>";
+ $opts->{"after_match"} ||= "</b>";
+ $opts->{"chunk_separator"} ||= " ... ";
+ $opts->{"limit"} ||= 256;
+ $opts->{"around"} ||= 5;
+ $opts->{"exact_phrase"} ||= 0;
+ $opts->{"single_passage"} ||= 0;
+ $opts->{"use_boundaries"} ||= 0;
+ $opts->{"weight_order"} ||= 0;
+
+ ##################
+ # build request
+ ##################
+
+ # v.1.0 req
+ my $req;
+ my $flags = 1; # remove spaces
+ $flags |= 2 if ( $opts->{"exact_phrase"} );
+ $flags |= 4 if ( $opts->{"single_passage"} );
+ $flags |= 8 if ( $opts->{"use_boundaries"} );
+ $flags |= 16 if ( $opts->{"weight_order"} );
+ $req = pack ( "NN", 0, $flags ); # mode=0, flags=$flags
+
+ $req .= pack ( "N/a*", $index ); # req index
+ $req .= pack ( "N/a*", $self->{_string_encoder}->($words)); # req words
+
+ # options
+ $req .= pack ( "N/a*", $opts->{"before_match"});
+ $req .= pack ( "N/a*", $opts->{"after_match"});
+ $req .= pack ( "N/a*", $opts->{"chunk_separator"});
+ $req .= pack ( "N", int($opts->{"limit"}) );
+ $req .= pack ( "N", int($opts->{"around"}) );
+
+ # documents
+ $req .= pack ( "N", scalar(@$docs) );
+ foreach my $doc (@$docs) {
+ croak('BuildExcerpts: Found empty document in $docs') unless ($doc);
+ $req .= pack("N/a*", $self->{_string_encoder}->($doc));
+ }
+
+ ##########################
+ # send query, get response
+ ##########################
+
+ $req = pack ( "nnN/a*", SEARCHD_COMMAND_EXCERPT, VER_COMMAND_EXCERPT, $req); # add header
+ $self->_Send($fp, $req);
+
+ my $response = $self->_GetResponse($fp, VER_COMMAND_EXCERPT);
+ return unless $response;
+
+    my ($pos, $i) = (0, 0);
+ my $res = []; # Empty hash ref
+ my $rlen = length($response);
+ for ( $i=0; $i< scalar(@$docs); $i++ ) {
+ my $len = unpack ( "N*", substr ( $response, $pos, 4 ) );
+ $pos += 4;
+
+ if ( $pos+$len > $rlen ) {
+ $self->_Error("incomplete reply");
+ return;
+ }
+ push(@$res, $self->{_string_decoder}->( substr ( $response, $pos, $len ) ));
+ $pos += $len;
+ }
+ return $res;
+}
+
+
+=head2 BuildKeywords
+
+ $results = $sph->BuildKeywords($query, $index, $hits)
+
+Generate a keyword list for a given query.
+Returns undef on failure.
+Returns an array of hashes, where each hash describes a word in the query with the following keys:
+
+=over 4
+
+=item * tokenized
+
+Tokenised term from query
+
+=item * normalized
+
+Normalised term from query
+
+=item * docs
+
+Number of docs in which word was found (if $hits is true)
+
+=item * hits
+
+Number of occurrences of word (if $hits is true)
+
+=back
+
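+For example (the index name is illustrative):
+
+    my $keywords = $sph->BuildKeywords('hello world', 'myindex', 1);
+    printf "%s -> %s (%d docs)\n", $_->{tokenized}, $_->{normalized}, $_->{docs}
+        for @$keywords;
+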
+=cut
+
+sub BuildKeywords {
+ my ( $self, $query, $index, $hits ) = @_;
+
+ my $fp = $self->_Connect() or return;
+
+ # v.1.0 req
+ my $req = pack("N/a*", $self->{_string_encoder}->($query) );
+ $req .= pack("N/a*", $index);
+    $req .= pack("N", $hits ? 1 : 0);
+
+ ##################
+ # send query, get response
+ ##################
+
+ $req = pack ( "nnN/a*", SEARCHD_COMMAND_KEYWORDS, VER_COMMAND_KEYWORDS, $req);
+ $self->_Send($fp, $req);
+ my $response = $self->_GetResponse ( $fp, VER_COMMAND_KEYWORDS );
+ return unless $response;
+
+ ##################
+ # parse response
+ ##################
+
+ my $p = 0;
+ my @res;
+ my $rlen = length($response);
+
+ my $nwords = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+
+ for (my $i=0; $i < $nwords; $i++ ) {
+ my $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+
+ my $tokenized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
+ $len = unpack("N", substr ( $response, $p, 4 ) ); $p += 4;
+
+ my $normalized = $len ? $self->{_string_decoder}->( substr ( $response, $p, $len ) ) : ""; $p += $len;
+ my %data = ( tokenized => $tokenized, normalized => $normalized );
+
+ if ($hits) {
+ ( $data{docs}, $data{hits} ) = unpack("N*N*", substr($response,$p,8));
+ $p += 8;
+
+ }
+ push(@res, \%data);
+ }
+ if ( $p > $rlen ) {
+ $self->_Error("incomplete reply");
+ return;
+ }
+
+ return \@res;
+}
+
+=head2 EscapeString
+
+ $escaped = $sph->EscapeString('abcde!@#$%')
+
+Inserts backslash before all non-word characters in the given string.
+
+=cut
+
+sub EscapeString {
+ my $self = shift;
+ return quotemeta(shift);
+}
+
+
+=head2 UpdateAttributes
+
+ $sph->UpdateAttributes($index, \@attrs, \%values);
+ $sph->UpdateAttributes($index, \@attrs, \%values, $mva);
+
+Update specified attributes on specified documents
+
+=over 4
+
+=item index
+
+Name of the index to be updated
+
+=item attrs
+
+Array of attribute name strings
+
+=item values
+
+A hash with key as document id, value as an array of new attribute values
+
+=back
+
+Returns number of actually updated documents (0 or more) on success
+
+Returns undef on failure
+
+Usage example:
+
+    $sph->UpdateAttributes("test1", [ qw/group_id/ ], { 1 => [ 456 ] });
+
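+A multi-value (MVA) update looks like this (index and attribute names are
+hypothetical; note the extra level of array nesting and the $mva flag):
+
+    $sph->UpdateAttributes("test1", [ qw/tag_ids/ ], { 1 => [ [ 4, 5, 6 ] ] }, 1);
+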
+=cut
+
+sub UpdateAttributes {
+ my ($self, $index, $attrs, $values, $mva ) = @_;
+
+ croak("index is not defined") unless (defined $index);
+ croak("attrs must be an array") unless ref($attrs) eq "ARRAY";
+ for my $attr (@$attrs) {
+ croak("attribute is not defined") unless (defined $attr);
+ }
+ croak("values must be a hashref") unless ref($values) eq "HASH";
+
+ for my $id (keys %$values) {
+ my $entry = $values->{$id};
+ croak("value id $id is not numeric") unless ($id =~ /$num_re/);
+ croak("value entry must be an array") unless ref($entry) eq "ARRAY";
+ croak("size of values must match size of attrs") unless @$entry == @$attrs;
+ for my $v (@$entry) {
+ if ($mva) {
+ croak("multi-valued entry $v is not an array") unless ref($v) eq 'ARRAY';
+ for my $vv (@$v) {
+ croak("array entry value $vv is not an integer") unless ($vv =~ /^(\d+)$/o);
+ }
+ }
+ else {
+ croak("entry value $v is not an integer") unless ($v =~ /^(\d+)$/o);
+ }
+ }
+ }
+
+ ## build request
+ my $req = pack ( "N/a*", $index);
+
+ $req .= pack ( "N", scalar @$attrs );
+ for my $attr (@$attrs) {
+ $req .= pack ( "N/a*", $attr)
+ . pack("N", $mva ? 1 : 0);
+ }
+ $req .= pack ( "N", scalar keys %$values );
+ foreach my $id (keys %$values) {
+ my $entry = $values->{$id};
+ $req .= $self->_sphPackU64($id);
+ if ($mva) {
+ for my $v ( @$entry ) {
+                $req .= pack ( "N", scalar @$v );   # number of values
+ for my $vv (@$v) {
+ $req .= pack ("N", $vv);
+ }
+ }
+ }
+ else {
+ for my $v ( @$entry ) {
+ $req .= pack ( "N", $v );
+ }
+ }
+ }
+
+ ## connect, send query, get response
+ my $fp = $self->_Connect() or return;
+
+ $req = pack ( "nnN/a*", SEARCHD_COMMAND_UPDATE, VER_COMMAND_UPDATE, $req); ## add header
+ send ( $fp, $req, 0);
+
+ my $response = $self->_GetResponse ( $fp, VER_COMMAND_UPDATE );
+ return unless $response;
+
+ ## parse response
+ my ($updated) = unpack ( "N*", substr ( $response, 0, 4 ) );
+ return $updated;
+}
+
+=head2 Open
+
+ $sph->Open()
+
+Opens a persistent connection for subsequent queries.
+
+To reduce the network connection overhead of making Sphinx queries, you can call
+$sph->Open(), then run any number of queries, and call $sph->Close() when
+finished.
+
+Returns 1 on success, 0 on failure.
+
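+A sketch of the intended usage:
+
+    $sph->Open or die $sph->GetLastError;
+    my $r1 = $sph->Query('first query');
+    my $r2 = $sph->Query('second query');   # reuses the same connection
+    $sph->Close;
+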
+=cut
+
+sub Open {
+ my $self = shift;
+
+ if ($self->{_socket}) {
+ $self->_Error("already connected");
+ return 0;
+ }
+ my $fp = $self->_Connect() or return 0;
+
+ my $req = pack("nnNN", SEARCHD_COMMAND_PERSIST, 0, 4, 1);
+ $self->_Send($fp, $req) or return 0;
+
+ $self->{_socket} = $fp;
+ return 1;
+}
+
+=head2 Close
+
+ $sph->Close()
+
+Closes a persistent connection.
+
+Returns 1 on success, 0 on failure.
+
+=cut
+
+sub Close {
+ my $self = shift;
+
+ if (! $self->{_socket}) {
+ $self->_Error("not connected");
+ return 0;
+ }
+
+ close($self->{_socket});
+ $self->{_socket} = undef;
+
+ return 1;
+}
+
+=head2 Status
+
+ $status = $sph->Status()
+
+Queries searchd status, and returns a hash of status variable name and value pairs.
+
+Returns undef on failure.
+
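+For example, to print every reported status variable (a sketch; a value
+may itself be an arrayref when searchd reports more than two columns):
+
+    if (my $status = $sph->Status()) {
+        print "$_: $status->{$_}\n" for sort keys %$status;
+    }
+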
+=cut
+
+sub Status {
+
+ my $self = shift;
+
+ my $fp = $self->_Connect() or return;
+
+ my $req = pack("nnNN", SEARCHD_COMMAND_STATUS, VER_COMMAND_STATUS, 4, 1 ); # len=4, body=1
+ $self->_Send($fp, $req) or return;
+ my $response = $self->_GetResponse ( $fp, VER_COMMAND_STATUS );
+ return unless $response;
+
+ my $p = 0;
+    my ($rows, $cols) = unpack("NN", substr ( $response, $p, 8 ) ); $p += 8;
+
+ return {} unless $rows && $cols;
+ my %res;
+ for (1 .. $rows ) {
+ my @entry;
+ for ( 1 .. $cols) {
+ my $len = unpack("N*", substr ( $response, $p, 4 ) ); $p += 4;
+ push(@entry, $len ? substr ( $response, $p, $len ) : ""); $p += $len;
+ }
+ if ($cols <= 2) {
+ $res{$entry[0]} = $entry[1];
+ }
+ else {
+ my $name = shift @entry;
+ $res{$name} = \@entry;
+ }
+ }
+ return \%res;
+}
+
+
+=head1 SEE ALSO
+
+L<http://www.sphinxsearch.com>
+
+=head1 NOTES
+
+There is (or was) a bundled Sphinx.pm in the contrib area of the Sphinx source
+distribution, which was used as the starting point of Sphinx::Search.
+Maintenance of that version appears to have lapsed at sphinx-0.9.7, so many of
+the newer API calls are not available there. Sphinx::Search is mostly
+compatible with the old Sphinx.pm except:
+
+=over 4
+
+=item On failure, Sphinx::Search returns undef rather than 0 or -1.
+
+=item Sphinx::Search 'Set' functions are cascadable, e.g. you can do
+
+      Sphinx::Search->new
+        ->SetMatchMode(SPH_MATCH_ALL)
+        ->SetSortMode(SPH_SORT_RELEVANCE)
+        ->Query("search terms")
+
+=back
+
+Sphinx::Search also provides documentation and unit tests, which were the main
+motivations for branching from the earlier work.
+
+=head1 AUTHOR
+
+Jon Schutz
+
+=head1 BUGS
+
+Please report any bugs or feature requests to
+C<bug-sphinx-search at rt.cpan.org>, or through the web interface at
+L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Sphinx-Search>.
+I will be notified, and then you'll automatically be notified of progress on
+your bug as I make changes.
+
+=head1 SUPPORT
+
+You can find documentation for this module with the perldoc command.
+
+ perldoc Sphinx::Search
+
+You can also look for information at:
+
+=over 4
+
+=item * AnnoCPAN: Annotated CPAN documentation
+
+L<http://annocpan.org/dist/Sphinx-Search>
+
+=item * CPAN Ratings
+
+L<http://cpanratings.perl.org/d/Sphinx-Search>
+
+=item * RT: CPAN's request tracker
+
+L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Sphinx-Search>
+
+=item * Search CPAN
+
+L<http://search.cpan.org/dist/Sphinx-Search>
+
+=back
+
+=head1 ACKNOWLEDGEMENTS
+
+This module is based on Sphinx.pm (not deployed to CPAN) for Sphinx version
+0.9.7-rc1, by Len Kranendonk, which was in turn based on the Sphinx PHP API.
+
+=head1 COPYRIGHT & LICENSE
+
+Copyright 2007 Jon Schutz, all rights reserved.
+
+This program is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License.
+
+=cut
+
+
+1;
diff --git a/package.lisp b/package.lisp
index 761f191..00c684b 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,9 +1,9 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api
(:nicknames "sphinx-search-api")
- (:use :cl :iolib.sockets :cl-pack)
+ (:use :cl :iolib.sockets :babel :cl-pack)
(:export #:bla))
diff --git a/sphinx-search-api.asd b/sphinx-search-api.asd
index 95f24ec..f79094b 100644
--- a/sphinx-search-api.asd
+++ b/sphinx-search-api.asd
@@ -1,28 +1,30 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api-asd
(:use :cl :asdf))
(asdf:operate 'asdf:load-op :ieee-floats)
(asdf:operate 'asdf:load-op :cl-pack)
(in-package #:com.oppermannen.sphinx-search-api-asd)
(defsystem #:sphinx-search-api
:name "SPHINX-SEARCH-API"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description "Classifier based on bayes theorem"
:long-description "SPHINX-SEARCH-API is the Common Lisp connection layer to Sphinx Search <http://sphinxsearch.com/>"
:serial t
:components ((:file "package")
(:file "sphinx-search-api-config")
(:file "constants")
(:file "sphinx-search-api"))
- :depends-on (:iolib.sockets :cl-pack))
+ :depends-on (:iolib.sockets
+ :cl-pack
+ :babel))
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 862702b..1465acc 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,267 +1,275 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((sphinx-host
:accessor sphinx-host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(sphinx-port
:accessor sphinx-port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(sphinx-path
:accessor sphinx-path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(sphinx-socket
:accessor sphinx-socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters")
(groupby
:accessor groupby
:initarg :groupby
:initform ""
:documentation "group-by attribute name")
(groupfunc
:accessor groupfunc
:initarg :groupfunc
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(groupsort
:accessor groupsort
:initarg :groupsort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(groupdistinct
:accessor groupdistinct
:initarg :groupdistinct
:initform ""
:documentation "group-by count-distinct attribute")
(maxmatches
:accessor maxmatches
:initarg :maxmatches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform ()
:documentation "cutoff to stop searching at")
(retrycount
:accessor retrycount
:initarg :retrycount
:initform 0
:documentation "distributed retry count")
(retrydelay
:accessor retrydelay
:initarg :retrydelay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point")
(indexweights
:accessor indexweights
:initarg :indexweights
:initform ()
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(maxquerytime
:accessor maxquerytime
:initarg :maxquerytime
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(fieldweights
:accessor fieldweights
:initarg :fieldweights
:initform ()
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
(format t "~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (sphinx-path client) host)
(setf (sphinx-host client) ())
(setf (sphinx-port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (sphinx-path client) (subseq host 6 (length host)))
(setf (sphinx-host client) ())
(setf (sphinx-port client) ()))
(t
(format t "~s : ~s" host port)
(assert (numberp port))
(setf (sphinx-host client) host)
(setf (sphinx-port client) port)
(setf (sphinx-path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((sphinx-socket client))
((sphinx-path client)
(setf (sphinx-socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (sphinx-path client)))))
(t
(setf (sphinx-socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (sphinx-host client)
:remote-port (sphinx-port client)))))
(let ((v (unpack "N*" (read-from (sphinx-socket client) 4))))
(if (< v 1)
(progn
(close (sphinx-socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (sphinx-socket client)
- (sb-ext:string-to-octets (pack "N" 1) :external-format :latin-1))
+ (string-to-octets (pack "N" 1) :external-format :utf-8))
(format t "~a~%" v)
(sphinx-socket client)))))
(defun read-from (socket size)
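  ;; Caveat: this decodes raw octets to a string before unpacking, which
  ;; is only safe while every byte sequence received is valid in the
  ;; configured external format.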
(let ((rec (sockets:receive-from socket :size size)))
(format t "~a~%" rec)
(let ((res
- (sb-ext:octets-to-string
+ (octets-to-string
(coerce rec
'(vector (unsigned-byte 8)))
- :external-format :latin-1)))
+ :external-format :utf-8)))
(format t "res: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
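  ;; The searchd reply header is status (16 bits), version (16 bits) and
  ;; body length (32 bits), hence the "n2N" template below.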
(multiple-value-bind (status version len) (unpack "n2N" (read-from (sphinx-socket client) 8))
(format t "~a : ~a : ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (< left 0)
(return))
(let ((chunk (read-from (sphinx-socket client) left)))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
                    (decf left (length chunk))) ;; shrink the remaining byte count
(return))))
(let ((done (length response)))
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (maxmatches client) max))
(when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
+(defmethod add-query ((client sphinx-client) &key query (index "*") (comment ""))
+ (let ((req (concatenate 'string
+ (pack "NNNNN" (offset client) (limit client) (mode client) (ranker client) (sort-mode client))
+ (pack "N/a*" (sort-by client))
+ ;;(pack "N/a*" (string-to-octets query))
+ (pack "N*" (length (weights client)) (weights client)))))
+ req))
+
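+;; Intended usage sketch once the commented-out query packing above is
+;; restored; run-queries and accumulating requests in REQS are assumptions
+;; here, mirroring the Perl API's AddQuery/RunQueries pair:
+;;
+;;   (let ((client (make-instance 'sphinx-client)))
+;;     (set-limits client :offset 0 :limit 10 :max 1000)
+;;     (add-query client :query "test"))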
|
thijs/cl-sphinx-search
|
f37f033c1e8ab09484432396002efd82af1d3ad3
|
Small changes
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 5676096..862702b 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,271 +1,267 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((sphinx-host
:accessor sphinx-host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(sphinx-port
:accessor sphinx-port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(sphinx-path
:accessor sphinx-path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(sphinx-socket
:accessor sphinx-socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters")
(groupby
:accessor groupby
:initarg :groupby
:initform ""
:documentation "group-by attribute name")
(groupfunc
:accessor groupfunc
:initarg :groupfunc
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(groupsort
:accessor groupsort
:initarg :groupsort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(groupdistinct
:accessor groupdistinct
:initarg :groupdistinct
:initform ""
:documentation "group-by count-distinct attribute")
(maxmatches
:accessor maxmatches
:initarg :maxmatches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform ()
:documentation "cutoff to stop searching at")
(retrycount
:accessor retrycount
:initarg :retrycount
:initform 0
:documentation "distributed retry count")
(retrydelay
:accessor retrydelay
:initarg :retrydelay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point")
(indexweights
:accessor indexweights
:initarg :indexweights
:initform ()
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(maxquerytime
:accessor maxquerytime
:initarg :maxquerytime
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(fieldweights
:accessor fieldweights
:initarg :fieldweights
:initform ()
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
(format t "~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (sphinx-path client) host)
(setf (sphinx-host client) ())
(setf (sphinx-port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (sphinx-path client) (subseq host 6 (length host)))
(setf (sphinx-host client) ())
(setf (sphinx-port client) ()))
(t
(format t "~s : ~s" host port)
(assert (numberp port))
(setf (sphinx-host client) host)
(setf (sphinx-port client) port)
(setf (sphinx-path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((sphinx-socket client))
((sphinx-path client)
(setf (sphinx-socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (sphinx-path client)))))
(t
(setf (sphinx-socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (sphinx-host client)
:remote-port (sphinx-port client)))))
- (let ((v (unpack "N*" (sb-ext:octets-to-string
- (coerce
- (sockets:receive-from (sphinx-socket client) :size 4)
- '(vector (unsigned-byte 8)))
- :external-format :latin-1))))
+ (let ((v (unpack "N*" (read-from (sphinx-socket client) 4))))
(if (< v 1)
(progn
(close (sphinx-socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (sphinx-socket client)
(sb-ext:string-to-octets (pack "N" 1) :external-format :latin-1))
(format t "~a~%" v)
(sphinx-socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
(format t "~a~%" rec)
(let ((res
(sb-ext:octets-to-string
(coerce rec
'(vector (unsigned-byte 8)))
:external-format :latin-1)))
(format t "res: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (read-from (sphinx-socket client) 8))
(format t "~a : ~a : ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (< left 0)
(return))
(let ((chunk (read-from (sphinx-socket client) left)))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
                    (decf left (length chunk))) ;; shrink the remaining byte count
(return))))
(let ((done (length response)))
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
(assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
(assert (and (numberp max) (>= max 0)))
(setf (offset client) offset)
(setf (limit client) limit)
(when (> max 0)
(setf (maxmatches client) max))
- (when (>= cutoff 0)
+ (when (and cutoff (>= cutoff 0))
(setf (cutoff client) cutoff)))
|
thijs/cl-sphinx-search
|
9c2ea53be858e856fba59550b0cfdda444180d5a
|
Added set-limits
|
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index adf0d99..5676096 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,262 +1,271 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((sphinx-host
:accessor sphinx-host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(sphinx-port
:accessor sphinx-port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(sphinx-path
:accessor sphinx-path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(sphinx-socket
:accessor sphinx-socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters")
(groupby
:accessor groupby
:initarg :groupby
:initform ""
:documentation "group-by attribute name")
(groupfunc
:accessor groupfunc
:initarg :groupfunc
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(groupsort
:accessor groupsort
:initarg :groupsort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(groupdistinct
:accessor groupdistinct
:initarg :groupdistinct
:initform ""
:documentation "group-by count-distinct attribute")
(maxmatches
:accessor maxmatches
:initarg :maxmatches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform ()
:documentation "cutoff to stop searching at")
(retrycount
:accessor retrycount
:initarg :retrycount
:initform 0
:documentation "distributed retry count")
(retrydelay
:accessor retrydelay
:initarg :retrydelay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point")
(indexweights
:accessor indexweights
:initarg :indexweights
:initform ()
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(maxquerytime
:accessor maxquerytime
:initarg :maxquerytime
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(fieldweights
:accessor fieldweights
:initarg :fieldweights
:initform ()
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
(defmethod set-server ((client sphinx-client) &key host port)
(format t "~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
(setf (sphinx-path client) host)
(setf (sphinx-host client) ())
(setf (sphinx-port client) ()))
((string= host "unix://" :start1 0 :end1 7)
(setf (sphinx-path client) (subseq host 6 (length host)))
(setf (sphinx-host client) ())
(setf (sphinx-port client) ()))
(t
(format t "~s : ~s" host port)
(assert (numberp port))
(setf (sphinx-host client) host)
(setf (sphinx-port client) port)
(setf (sphinx-path client) ()))))
(defmethod %connect ((client sphinx-client))
(cond ((sphinx-socket client))
((sphinx-path client)
(setf (sphinx-socket client)
(sockets:make-socket :address-family :local :type :stream
:local-filename (namestring (sphinx-path client)))))
(t
(setf (sphinx-socket client)
(sockets:make-socket :address-family :internet :type :stream
:remote-host (sphinx-host client)
:remote-port (sphinx-port client)))))
(let ((v (unpack "N*" (sb-ext:octets-to-string
(coerce
(sockets:receive-from (sphinx-socket client) :size 4)
'(vector (unsigned-byte 8)))
:external-format :latin-1))))
(if (< v 1)
(progn
(close (sphinx-socket client))
(setf (last-error client) "connection to socket failed"))
(progn
(sockets:send-to (sphinx-socket client)
(sb-ext:string-to-octets (pack "N" 1) :external-format :latin-1))
(format t "~a~%" v)
(sphinx-socket client)))))
(defun read-from (socket size)
(let ((rec (sockets:receive-from socket :size size)))
(format t "~a~%" rec)
(let ((res
(sb-ext:octets-to-string
(coerce rec
'(vector (unsigned-byte 8)))
:external-format :latin-1)))
(format t "res: ~a~%" res)
res)))
(defmethod %get-response ((client sphinx-client) &key client-version)
(multiple-value-bind (status version len) (unpack "n2N" (read-from (sphinx-socket client) 8))
(format t "~a : ~a : ~a~%" status version len)
(let ((response ())
(left len))
(loop
(when (< left 0)
(return))
(let ((chunk (read-from (sphinx-socket client) left)))
(if (> (length chunk) 0)
(progn
(setf response (concatenate 'vector response chunk))
                    (decf left (length chunk))) ;; shrink the remaining byte count
(return))))
(let ((done (length response)))
(cond ((or (not response)
(not (eql done len)))
(if len
(setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
(setf (last-error client) "received zero-sized searchd response"))
'())
((eql status +searchd-warning+)
(let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
(setf (last-warning client) (subseq response 4 warn-length))
(subseq response warn-length)))
((eql status +searchd-error+)
(setf (last-error client) (subseq response 4))
'())
((eql status +searchd-retry+)
(setf (last-error client) (subseq response 4))
'())
((not (eql status +searchd-ok+))
(setf (last-error client) "unknown status code: x")
'())
(t
(when (< version client-version)
(setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
response))))))
+(defmethod set-limits ((client sphinx-client) &key offset limit max cutoff)
+ (assert (and (numberp offset) (numberp limit) (>= offset 0) (>= limit 0)))
+ (assert (and (numberp max) (>= max 0)))
+ (setf (offset client) offset)
+ (setf (limit client) limit)
+ (when (> max 0)
+ (setf (maxmatches client) max))
+ (when (>= cutoff 0)
+ (setf (cutoff client) cutoff)))
|
thijs/cl-sphinx-search
|
1b06e8c6819615a6d8f90c7381ff98ddec0d241c
|
Use iolib; use _perl_ pack structures; add %connect and %get-response
|
diff --git a/package.lisp b/package.lisp
index b9f5e33..761f191 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,9 +1,9 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api
(:nicknames "sphinx-search-api")
- (:use :cl :usocket :cl-pack)
+ (:use :cl :iolib.sockets :cl-pack)
(:export #:bla))
diff --git a/sphinx-search-api.asd b/sphinx-search-api.asd
index 04187d7..95f24ec 100644
--- a/sphinx-search-api.asd
+++ b/sphinx-search-api.asd
@@ -1,28 +1,28 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api-asd
(:use :cl :asdf))
(asdf:operate 'asdf:load-op :ieee-floats)
(asdf:operate 'asdf:load-op :cl-pack)
(in-package #:com.oppermannen.sphinx-search-api-asd)
(defsystem #:sphinx-search-api
:name "SPHINX-SEARCH-API"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description "Classifier based on bayes theorem"
:long-description "SPHINX-SEARCH-API is the Common Lisp connection layer to Sphinx Search <http://sphinxsearch.com/>"
:serial t
:components ((:file "package")
(:file "sphinx-search-api-config")
(:file "constants")
(:file "sphinx-search-api"))
- :depends-on (:usocket :cl-pack))
+ :depends-on (:iolib.sockets :cl-pack))
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 570ec2e..adf0d99 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,187 +1,262 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
((sphinx-host
:accessor sphinx-host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
(sphinx-port
:accessor sphinx-port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
(sphinx-path
:accessor sphinx-path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
(sphinx-socket
:accessor sphinx-socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
(sort-mode
:accessor sort-mode
:initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
(sort-by
:accessor sort-by
:initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
(min-id
:accessor min-id
:initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
(max-id
:accessor max-id
:initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters")
(groupby
:accessor groupby
:initarg :groupby
:initform ""
:documentation "group-by attribute name")
(groupfunc
:accessor groupfunc
:initarg :groupfunc
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(groupsort
:accessor groupsort
:initarg :groupsort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(groupdistinct
:accessor groupdistinct
:initarg :groupdistinct
:initform ""
:documentation "group-by count-distinct attribute")
(maxmatches
:accessor maxmatches
:initarg :maxmatches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform ()
:documentation "cutoff to stop searching at")
(retrycount
:accessor retrycount
:initarg :retrycount
:initform 0
:documentation "distributed retry count")
(retrydelay
:accessor retrydelay
:initarg :retrydelay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point")
(indexweights
:accessor indexweights
:initarg :indexweights
:initform ()
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(maxquerytime
:accessor maxquerytime
:initarg :maxquerytime
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(fieldweights
:accessor fieldweights
:initarg :fieldweights
:initform ()
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
:documentation "requests array for multi-query")))
-(defmethod set-server ((sph-obj sphinx-client) &key host port)
+(defmethod set-server ((client sphinx-client) &key host port)
(format t "~s : ~s" host port)
(assert (stringp host))
(cond ((string= host "/" :start1 0 :end1 1)
- (setf (sphinx-path sph-obj) host)
- (setf (sphinx-host sph-obj) ())
- (setf (sphinx-port sph-obj) ()))
+ (setf (sphinx-path client) host)
+ (setf (sphinx-host client) ())
+ (setf (sphinx-port client) ()))
((string= host "unix://" :start1 0 :end1 7)
- (setf (sphinx-path sph-obj) (subseq host 6 (length host)))
- (setf (sphinx-host sph-obj) ())
- (setf (sphinx-port sph-obj) ()))
+ (setf (sphinx-path client) (subseq host 6 (length host)))
+ (setf (sphinx-host client) ())
+ (setf (sphinx-port client) ()))
(t
(format t "~s : ~s" host port)
(assert (numberp port))
- (setf (sphinx-host sph-obj) host)
- (setf (sphinx-port sph-obj) port)
- (setf (sphinx-path sph-obj) ()))))
+ (setf (sphinx-host client) host)
+ (setf (sphinx-port client) port)
+ (setf (sphinx-path client) ()))))
+
+
+(defmethod %connect ((client sphinx-client))
+ (cond ((sphinx-socket client))
+ ((sphinx-path client)
+ (setf (sphinx-socket client)
+ (sockets:make-socket :address-family :local :type :stream
+ :local-filename (namestring (sphinx-path client)))))
+ (t
+ (setf (sphinx-socket client)
+ (sockets:make-socket :address-family :internet :type :stream
+ :remote-host (sphinx-host client)
+ :remote-port (sphinx-port client)))))
+ (let ((v (unpack "N*" (sb-ext:octets-to-string
+ (coerce
+ (sockets:receive-from (sphinx-socket client) :size 4)
+ '(vector (unsigned-byte 8)))
+ :external-format :latin-1))))
+ (if (< v 1)
+ (progn
+ (close (sphinx-socket client))
+ (setf (last-error client) "connection to socket failed"))
+ (progn
+ (sockets:send-to (sphinx-socket client)
+ (sb-ext:string-to-octets (pack "N" 1) :external-format :latin-1))
+ (format t "~a~%" v)
+ (sphinx-socket client)))))
+
+(defun read-from (socket size)
+ (let ((rec (sockets:receive-from socket :size size)))
+ (format t "~a~%" rec)
+ (let ((res
+ (sb-ext:octets-to-string
+ (coerce rec
+ '(vector (unsigned-byte 8)))
+ :external-format :latin-1)))
+ (format t "res: ~a~%" res)
+ res)))
+
+(defmethod %get-response ((client sphinx-client) &key client-version)
+ (multiple-value-bind (status version len) (unpack "n2N" (read-from (sphinx-socket client) 8))
+ (format t "~a : ~a : ~a~%" status version len)
+ (let ((response ())
+ (left len))
+ (loop
+ (when (< left 0)
+ (return))
+ (let ((chunk (read-from (sphinx-socket client) left)))
+ (if (> (length chunk) 0)
+ (progn
+ (setf response (concatenate 'vector response chunk))
+                    (decf left (length chunk))) ;; shrink the remaining byte count
+ (return))))
+ (let ((done (length response)))
+ (cond ((or (not response)
+ (not (eql done len)))
+ (if len
+ (setf (last-error client) "failed to read searchd response (status=x, ver=x, len=x, read=x)")
+ (setf (last-error client) "received zero-sized searchd response"))
+ '())
+ ((eql status +searchd-warning+)
+ (let ((warn-length (+ 4 (unpack "N" (subseq response 0 4)))))
+ (setf (last-warning client) (subseq response 4 warn-length))
+ (subseq response warn-length)))
+ ((eql status +searchd-error+)
+ (setf (last-error client) (subseq response 4))
+ '())
+ ((eql status +searchd-retry+)
+ (setf (last-error client) (subseq response 4))
+ '())
+ ((not (eql status +searchd-ok+))
+ (setf (last-error client) "unknown status code: x")
+ '())
+ (t
+ (when (< version client-version)
+ (setf (last-warning client) "searchd v.x.x is older than client's v.y.y, some options might not work"))
+ response))))))
+
-(defmethod connect ((sph-obj sphinx-client))
- (cond ((sphinx-socket sph-obj))
- ((sphinx-path sph-obj)
|
thijs/cl-sphinx-search
|
d1dc3fdcc5b4c3257305e0573c6fabec27a9c593
|
Started adding methods
|
diff --git a/package.lisp b/package.lisp
index 6ceb7a7..b9f5e33 100644
--- a/package.lisp
+++ b/package.lisp
@@ -1,10 +1,9 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
-
(defpackage #:com.oppermannen.sphinx-search-api
(:nicknames "sphinx-search-api")
- (:use :cl)
+ (:use :cl :usocket :cl-pack)
(:export #:bla))
diff --git a/sphinx-search-api.asd b/sphinx-search-api.asd
index 457133e..04187d7 100644
--- a/sphinx-search-api.asd
+++ b/sphinx-search-api.asd
@@ -1,24 +1,28 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:cl-user)
(defpackage #:com.oppermannen.sphinx-search-api-asd
(:use :cl :asdf))
+(asdf:operate 'asdf:load-op :ieee-floats)
+(asdf:operate 'asdf:load-op :cl-pack)
+
+
(in-package #:com.oppermannen.sphinx-search-api-asd)
(defsystem #:sphinx-search-api
:name "SPHINX-SEARCH-API"
:version "0.0.1"
:maintainer "M.L. Oppermann <[email protected]>"
:author "M.L. Oppermann <[email protected]>"
:licence "To be determined"
:description "Classifier based on bayes theorem"
:long-description "SPHINX-SEARCH-API is the Common Lisp connection layer to Sphinx Search <http://sphinxsearch.com/>"
:serial t
:components ((:file "package")
(:file "sphinx-search-api-config")
(:file "constants")
(:file "sphinx-search-api"))
- :depends-on (:cl-pack))
+ :depends-on (:usocket :cl-pack))
diff --git a/sphinx-search-api.lisp b/sphinx-search-api.lisp
index 9ca2995..570ec2e 100644
--- a/sphinx-search-api.lisp
+++ b/sphinx-search-api.lisp
@@ -1,162 +1,187 @@
;;;; -*- Mode: Lisp; Syntax: ANSI-Common-Lisp; Base: 10 -*-
(in-package #:com.oppermannen.sphinx-search-api)
(defclass sphinx-client ()
- ((host
- :accessor host
+ ((sphinx-host
+ :accessor sphinx-host
:initarg :host
:initform "localhost"
:documentation "searchd host (default is 'localhost')")
- (port
- :accessor port
+ (sphinx-port
+ :accessor sphinx-port
:initarg :port
:initform 3312
:documentation "searchd port (default is 3312)")
- (path
- :accessor path
+ (sphinx-path
+ :accessor sphinx-path
:initarg :path
:initform ()
:documentation "searchd unix-domain socket path")
- (socket
- :accessor socket
+ (sphinx-socket
+ :accessor sphinx-socket
:initarg :socket
:initform ()
:documentation "searchd unix-domain socket")
(offset
:accessor offset
:initarg :offset
:initform 0
:documentation "how much records to seek from result-set start (default is 0)")
(limit
:accessor limit
:initarg :limit
:initform 20
:documentation "how much records to return from result-set starting at offset (default is 20)")
(mode
:accessor mode
:initarg :mode
:initform +sph-match-all+
:documentation "query matching mode (default is +sph-match-all+)")
(weights
:accessor weights
:initarg :weights
:initform ()
:documentation "per-field weights (default is 1 for all fields)")
- (sort
- :accessor sort
- :initarg :sort
+ (sort-mode
+ :accessor sort-mode
+ :initarg :sort-mode
:initform +sph-sort-relevance+
:documentation "match sorting mode (default is +sph-sort-relevance+)")
- (sortby
- :accessor sortby
- :initarg :sortby
+ (sort-by
+ :accessor sort-by
+ :initarg :sort-by
:initform ""
:documentation "attribute to sort by (defualt is '')")
- (min_id
- :accessor min_id
- :initarg :min_id
+ (min-id
+ :accessor min-id
+ :initarg :min-id
:initform 0
:documentation "min ID to match (default is 0)")
- (max_id
- :accessor max_id
- :initarg :max_id
+ (max-id
+ :accessor max-id
+ :initarg :max-id
:initform ()
:documentation "max ID to match (default is max value for uint on system)")
(filters
:accessor filters
:initarg :filters
:initform ()
:documentation "search filters")
(groupby
:accessor groupby
:initarg :groupby
:initform ""
:documentation "group-by attribute name")
(groupfunc
:accessor groupfunc
:initarg :groupfunc
:initform +sph-groupby-day+
:documentation "group-by function (to pre-process group-by attribute value with; default +sph-groupby-day+)")
(groupsort
:accessor groupsort
:initarg :groupsort
:initform "@group desc"
:documentation "group-by sorting clause (to sort groups in result set with; default '@group desc')")
(groupdistinct
:accessor groupdistinct
:initarg :groupdistinct
:initform ""
:documentation "group-by count-distinct attribute")
(maxmatches
:accessor maxmatches
:initarg :maxmatches
:initform 1000
:documentation "max matches to retrieve (default is 1000)")
(cutoff
:accessor cutoff
:initarg :cutoff
:initform ()
:documentation "cutoff to stop searching at")
(retrycount
:accessor retrycount
:initarg :retrycount
:initform 0
:documentation "distributed retry count")
(retrydelay
:accessor retrydelay
:initarg :retrydelay
:initform 0
:documentation "distributed retry delay")
(anchor
:accessor anchor
:initarg :anchor
:initform ()
:documentation "geographical anchor point")
(indexweights
:accessor indexweights
:initarg :indexweights
:initform ()
:documentation "per-index weights")
(ranker
:accessor ranker
:initarg :ranker
:initform +sph-rank-proximity-bm25+
:documentation "ranking mode (default is +sph-rank-proximity-bm25+)")
(maxquerytime
:accessor maxquerytime
:initarg :maxquerytime
:initform 0
:documentation "max query time, milliseconds (default is 0, do not limit)")
(fieldweights
:accessor fieldweights
:initarg :fieldweights
:initform ()
:documentation "per-field-name weights")
(overrides
:accessor overrides
:initarg :overrides
:initform ()
:documentation "per-query attribute values overrides")
(select
:accessor select
:initarg :select
:initform "*"
:documentation "select-list (attributes or expressions, with optional aliases)")
(last-error
:accessor last-error
:initarg :last-error
:initform ""
:documentation "last error message")
(last-warning
:accessor last-warning
:initarg :last-warning
:initform ""
:documentation "last warning message")
(reqs
:accessor reqs
:initarg :reqs
:initform ()
- :documentation "requests array for multi-query"))
+ :documentation "requests array for multi-query")))
+
+
+
+(defmethod set-server ((sph-obj sphinx-client) &key host port)
+ (format t "~s : ~s" host port)
+ (assert (stringp host))
+ (cond ((string= host "/" :start1 0 :end1 1)
+ (setf (sphinx-path sph-obj) host)
+ (setf (sphinx-host sph-obj) ())
+ (setf (sphinx-port sph-obj) ()))
+ ((string= host "unix://" :start1 0 :end1 7)
+ (setf (sphinx-path sph-obj) (subseq host 6 (length host)))
+ (setf (sphinx-host sph-obj) ())
+ (setf (sphinx-port sph-obj) ()))
+ (t
+ (format t "~s : ~s" host port)
+ (assert (numberp port))
+ (setf (sphinx-host sph-obj) host)
+ (setf (sphinx-port sph-obj) port)
+ (setf (sphinx-path sph-obj) ()))))
+
+
+(defmethod connect ((sph-obj sphinx-client))
+ (cond ((sphinx-socket sph-obj))
+ ((sphinx-path sph-obj)
|
Almad/Mechanize
|
cb77d2289448790a9857e0c1128dac7e27a9d8cb
|
Fix __init__.py typo that hid mechanize.seek_wrapped_response and mechanize.str2time ([email protected])
|
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index 4bb20aa..0f89fcb 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,140 +1,140 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
- 'seek_wrapped_response' # XXX should probably use this internally in place of response_seek_wrapper()
+ 'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
import logging
import sys
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host_lc as request_host, \
effective_request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
# 2.4 raises SyntaxError due to generator / try/finally use
if sys.version_info[:2] > (2,4):
try:
import sqlite3
except ImportError:
pass
else:
from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
logger = logging.getLogger("mechanize")
if logger.level is logging.NOTSET:
logger.setLevel(logging.CRITICAL)
del logger
diff --git a/test.py b/test.py
index 9ce0ad8..62b4e89 100755
--- a/test.py
+++ b/test.py
@@ -1,152 +1,153 @@
#!/usr/bin/env python
"""Test runner.
For further help, enter this at a command prompt:
python test.py --help
"""
# Modules containing tests to run -- a test is anything named *Tests, which
# should be classes deriving from unittest.TestCase.
MODULE_NAMES = ["test_date", "test_browser", "test_response", "test_cookies",
"test_headers", "test_urllib2", "test_pullparser",
"test_useragent", "test_html", "test_opener",
+ "test_import",
# "test_performance", # too slow, run from release script
]
import sys, os, logging, glob
if __name__ == "__main__":
# XXX
# temporary stop-gap to run doctests &c.
# should switch to nose or something
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# XXXX coverage output seems incorrect ATM
run_coverage = "-c" in sys.argv
if run_coverage:
sys.argv.remove("-c")
use_cgitb = "-t" in sys.argv
if use_cgitb:
sys.argv.remove("-t")
run_doctests = "-d" not in sys.argv
if not run_doctests:
sys.argv.remove("-d")
run_unittests = "-u" not in sys.argv
if not run_unittests:
sys.argv.remove("-u")
log = "-l" in sys.argv
if log:
sys.argv.remove("-l")
level = logging.DEBUG
# level = logging.INFO
# level = logging.WARNING
# level = logging.NOTSET
logger = logging.getLogger("mechanize")
logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
logger.addHandler(handler)
# import local copy of Python 2.5 doctest
assert os.path.isdir("test")
sys.path.insert(0, "test")
# needed for recent doctest / linecache -- this is only for testing
# purposes, these don't get installed
# doctest.py revision 45701 and linecache.py revision 45940. Since
# linecache is used by Python itself, linecache.py is renamed
# linecache_copy.py, and this copy of doctest is modified (only) to use
# that renamed module.
sys.path.insert(0, "test-tools")
import doctest
import testprogram
if run_coverage:
import coverage
print 'running coverage'
coverage.erase()
coverage.start()
import mechanize
class DefaultResult:
def wasSuccessful(self):
return True
result = DefaultResult()
if run_doctests:
# run .doctest files needing special support
common_globs = {"mechanize": mechanize}
pm_doctest_filename = os.path.join(
"test", "test_password_manager.special_doctest")
for globs in [
{"mgr_class": mechanize.HTTPPasswordMgr},
{"mgr_class": mechanize.HTTPProxyPasswordMgr},
]:
globs.update(common_globs)
doctest.testfile(pm_doctest_filename, globs=globs)
try:
import robotparser
except ImportError:
pass
else:
doctest.testfile(os.path.join(
"test", "test_robotfileparser.special_doctest"))
# run .doctest files
doctest_files = glob.glob(os.path.join("test", "*.doctest"))
for df in doctest_files:
doctest.testfile(df)
# run doctests in docstrings
from mechanize import _headersutil, _auth, _clientcookie, _pullparser, \
_http, _rfc3986, _useragent
doctest.testmod(_headersutil)
doctest.testmod(_rfc3986)
doctest.testmod(_auth)
doctest.testmod(_clientcookie)
doctest.testmod(_pullparser)
doctest.testmod(_http)
doctest.testmod(_useragent)
if run_unittests:
# run vanilla unittest tests
import unittest
test_path = os.path.join(os.path.dirname(sys.argv[0]), "test")
sys.path.insert(0, test_path)
test_runner = None
if use_cgitb:
test_runner = testprogram.CgitbTextTestRunner()
prog = testprogram.TestProgram(
MODULE_NAMES,
testRunner=test_runner,
localServerProcess=testprogram.TwistedServerProcess(),
)
result = prog.runTests()
if run_coverage:
# HTML coverage report
import colorize
try:
os.mkdir("coverage")
except OSError:
pass
private_modules = glob.glob("mechanize/_*.py")
private_modules.remove("mechanize/__init__.py")
for module_filename in private_modules:
module_name = module_filename.replace("/", ".")[:-3]
print module_name
module = sys.modules[module_name]
f, s, m, mf = coverage.analysis(module)
fo = open(os.path.join('coverage', os.path.basename(f)+'.html'), 'wb')
colorize.colorize_file(f, outstream=fo, not_covered=mf)
fo.close()
coverage.report(module)
#print coverage.analysis(module)
# XXX exit status is wrong -- does not take account of doctests
sys.exit(not result.wasSuccessful())
diff --git a/test/test_import.py b/test/test_import.py
new file mode 100644
index 0000000..b1d6220
--- /dev/null
+++ b/test/test_import.py
@@ -0,0 +1,15 @@
+import unittest
+
+import mechanize
+from mechanize._testcase import TestCase
+
+
+class ImportTests(TestCase):
+
+ def test_import_all(self):
+ for name in mechanize.__all__:
+ exec "from mechanize import %s" % name
+
+
+if __name__ == "__main__":
+ unittest.main()
|
Almad/Mechanize
|
52bb52d8883f8e9849a823990cba9e688c7c2837
|
* Fix quadratic performance in number of response .read() calls * Add a performance test (not run from test.py, will run from release script)
|
diff --git a/mechanize/_response.py b/mechanize/_response.py
index 7635745..fad9b57 100644
--- a/mechanize/_response.py
+++ b/mechanize/_response.py
@@ -1,514 +1,527 @@
"""Response classes.
The seek_wrapper code is not used if you're using UserAgent with
.set_seekable_responses(False), or if you're using the urllib2-level interface
without SeekableProcessor or HTTPEquivProcessor. Class closeable_response is
instantiated by some handlers (AbstractHTTPHandler), but the closeable_response
interface is only depended upon by Browser-level code. Function
upgrade_response is only used if you're using Browser or
ResponseUpgradeProcessor.
Copyright 2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import copy, mimetools
from cStringIO import StringIO
import urllib2
+
+def len_of_seekable(file_):
+ # this function exists because evaluation of len(file_.getvalue()) on every
+ # .read() from seek_wrapper would be O(N**2) in number of .read()s
+ pos = file_.tell()
+ file_.seek(0, 2) # to end
+ try:
+ return file_.tell()
+ finally:
+ file_.seek(pos)
+
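+# Rationale: cStringIO.getvalue() copies the entire buffer, so calling
+# len(file_.getvalue()) on every read() made N reads cost O(N**2) bytes
+# copied overall; seek()/tell() answers the same question in O(1).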
+
# XXX Andrew Dalke kindly sent me a similar class in response to my request on
# comp.lang.python, which I then proceeded to lose. I wrote this class
# instead, but I think he's released his code publicly since, could pinch the
# tests from it, at least...
# For testing seek_wrapper invariant (note that
# test_urllib2.HandlerTest.test_seekable is expected to fail when this
# invariant checking is turned on). The invariant checking is done by module
# ipdc, which is available here:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/436834
## from ipdbc import ContractBase
## class seek_wrapper(ContractBase):
class seek_wrapper:
"""Adds a seek method to a file object.
This is only designed for seeking on readonly file-like objects.
Wrapped file-like object must have a read method. The readline method is
only supported if that method is present on the wrapped object. The
readlines method is always supported. xreadlines and iteration are
supported only for Python 2.2 and above.
Public attributes:
wrapped: the wrapped file object
is_closed: true iff .close() has been called
    WARNING: All other attributes of the wrapped object (i.e. those that are not
one of wrapped, read, readline, readlines, xreadlines, __iter__ and next)
are passed through unaltered, which may or may not make sense for your
particular file object.
"""
# General strategy is to check that cache is full enough, then delegate to
# the cache (self.__cache, which is a cStringIO.StringIO instance). A seek
# position (self.__pos) is maintained independently of the cache, in order
# that a single cache may be shared between multiple seek_wrapper objects.
# Copying using module copy shares the cache in this way.
def __init__(self, wrapped):
self.wrapped = wrapped
self.__read_complete_state = [False]
self.__is_closed_state = [False]
self.__have_readline = hasattr(self.wrapped, "readline")
self.__cache = StringIO()
self.__pos = 0 # seek position
def invariant(self):
# The end of the cache is always at the same place as the end of the
- # wrapped file.
+ # wrapped file (though the .tell() method is not required to be present
+ # on wrapped file).
return self.wrapped.tell() == len(self.__cache.getvalue())
def close(self):
self.wrapped.close()
self.is_closed = True
def __getattr__(self, name):
if name == "is_closed":
return self.__is_closed_state[0]
elif name == "read_complete":
return self.__read_complete_state[0]
wrapped = self.__dict__.get("wrapped")
if wrapped:
return getattr(wrapped, name)
return getattr(self.__class__, name)
def __setattr__(self, name, value):
if name == "is_closed":
self.__is_closed_state[0] = bool(value)
elif name == "read_complete":
if not self.is_closed:
self.__read_complete_state[0] = bool(value)
else:
self.__dict__[name] = value
def seek(self, offset, whence=0):
assert whence in [0,1,2]
# how much data, if any, do we need to read?
if whence == 2: # 2: relative to end of *wrapped* file
if offset < 0: raise ValueError("negative seek offset")
# since we don't know yet where the end of that file is, we must
# read everything
to_read = None
else:
if whence == 0: # 0: absolute
if offset < 0: raise ValueError("negative seek offset")
dest = offset
else: # 1: relative to current position
pos = self.__pos
if pos < offset:
raise ValueError("seek to before start of file")
dest = pos + offset
- end = len(self.__cache.getvalue())
+ end = len_of_seekable(self.__cache)
to_read = dest - end
if to_read < 0:
to_read = 0
if to_read != 0:
self.__cache.seek(0, 2)
if to_read is None:
assert whence == 2
self.__cache.write(self.wrapped.read())
self.read_complete = True
self.__pos = self.__cache.tell() - offset
else:
data = self.wrapped.read(to_read)
if not data:
self.read_complete = True
else:
self.__cache.write(data)
# Don't raise an exception even if we've seek()ed past the end
# of .wrapped, since fseek() doesn't complain in that case.
# Also like fseek(), pretend we have seek()ed past the end,
# i.e. not:
#self.__pos = self.__cache.tell()
# but rather:
self.__pos = dest
else:
self.__pos = dest
def tell(self):
return self.__pos
def __copy__(self):
cpy = self.__class__(self.wrapped)
cpy.__cache = self.__cache
cpy.__read_complete_state = self.__read_complete_state
cpy.__is_closed_state = self.__is_closed_state
return cpy
def get_data(self):
pos = self.__pos
try:
self.seek(0)
return self.read(-1)
finally:
self.__pos = pos
def read(self, size=-1):
pos = self.__pos
- end = len(self.__cache.getvalue())
+ end = len_of_seekable(self.__cache)
available = end - pos
# enough data already cached?
if size <= available and size != -1:
self.__cache.seek(pos)
self.__pos = pos+size
return self.__cache.read(size)
# no, so read sufficient data from wrapped file and cache it
self.__cache.seek(0, 2)
if size == -1:
self.__cache.write(self.wrapped.read())
self.read_complete = True
else:
to_read = size - available
assert to_read > 0
data = self.wrapped.read(to_read)
if not data:
self.read_complete = True
else:
self.__cache.write(data)
self.__cache.seek(pos)
data = self.__cache.read(size)
self.__pos = self.__cache.tell()
assert self.__pos == pos + len(data)
return data
def readline(self, size=-1):
if not self.__have_readline:
raise NotImplementedError("no readline method on wrapped object")
# line we're about to read might not be complete in the cache, so
# read another line first
pos = self.__pos
self.__cache.seek(0, 2)
data = self.wrapped.readline()
if not data:
self.read_complete = True
else:
self.__cache.write(data)
self.__cache.seek(pos)
data = self.__cache.readline()
        if size != -1:
            r = data[:size]
            # advance by what was actually returned (the line may be shorter
            # than size)
            self.__pos = pos + len(r)
else:
r = data
self.__pos = pos+len(data)
return r
def readlines(self, sizehint=-1):
pos = self.__pos
self.__cache.seek(0, 2)
self.__cache.write(self.wrapped.read())
self.read_complete = True
self.__cache.seek(pos)
data = self.__cache.readlines(sizehint)
self.__pos = self.__cache.tell()
return data
def __iter__(self): return self
def next(self):
line = self.readline()
if line == "": raise StopIteration
return line
xreadlines = __iter__
def __repr__(self):
return ("<%s at %s whose wrapped object = %r>" %
(self.__class__.__name__, hex(abs(id(self))), self.wrapped))
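# Editor's sketch (illustrative, not part of mechanize): exercising the
# wrapper on a readonly in-memory file.  The demo function is never called.
def _seek_wrapper_demo():
    fh = seek_wrapper(StringIO("hello world"))
    assert fh.read(5) == "hello"
    fh.seek(0)     # rewinding is served from the cache; wrapped file untouched
    assert fh.read() == "hello world"
    fh.seek(0, 2)  # whence=2 forces reading the wrapped file to EOF
    assert fh.read_complete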
class response_seek_wrapper(seek_wrapper):
"""
Supports copying response objects and setting response body data.
"""
def __init__(self, wrapped):
seek_wrapper.__init__(self, wrapped)
self._headers = self.wrapped.info()
def __copy__(self):
cpy = seek_wrapper.__copy__(self)
# copy headers from delegate
cpy._headers = copy.copy(self.info())
return cpy
# Note that .info() and .geturl() (the only two urllib2 response methods
# that are not implemented by seek_wrapper) must be here explicitly rather
# than by seek_wrapper's __getattr__ delegation) so that the nasty
# dynamically-created HTTPError classes in get_seek_wrapper_class() get the
# wrapped object's implementation, and not HTTPError's.
def info(self):
return self._headers
def geturl(self):
return self.wrapped.geturl()
def set_data(self, data):
self.seek(0)
self.read()
self.close()
cache = self._seek_wrapper__cache = StringIO()
cache.write(data)
self.seek(0)
class eoffile:
# file-like object that always claims to be at end-of-file...
def read(self, size=-1): return ""
def readline(self, size=-1): return ""
def __iter__(self): return self
def next(self): return ""
def close(self): pass
class eofresponse(eoffile):
def __init__(self, url, headers, code, msg):
self._url = url
self._headers = headers
self.code = code
self.msg = msg
def geturl(self): return self._url
def info(self): return self._headers
class closeable_response:
"""Avoids unnecessarily clobbering urllib.addinfourl methods on .close().
Only supports responses returned by mechanize.HTTPHandler.
After .close(), the following methods are supported:
.read()
.readline()
.info()
.geturl()
.__iter__()
.next()
.close()
and the following attributes are supported:
.code
.msg
Also supports pickling (but the stdlib currently does something to prevent
it: http://python.org/sf/1144636).
"""
    # presence of this attr indicates the response is usable after .close()
closeable_response = None
def __init__(self, fp, headers, url, code, msg):
self._set_fp(fp)
self._headers = headers
self._url = url
self.code = code
self.msg = msg
def _set_fp(self, fp):
self.fp = fp
self.read = self.fp.read
self.readline = self.fp.readline
if hasattr(self.fp, "readlines"): self.readlines = self.fp.readlines
if hasattr(self.fp, "fileno"):
self.fileno = self.fp.fileno
else:
self.fileno = lambda: None
self.__iter__ = self.fp.__iter__
self.next = self.fp.next
def __repr__(self):
return '<%s at %s whose fp = %r>' % (
self.__class__.__name__, hex(abs(id(self))), self.fp)
def info(self):
return self._headers
def geturl(self):
return self._url
def close(self):
wrapped = self.fp
wrapped.close()
new_wrapped = eofresponse(
self._url, self._headers, self.code, self.msg)
self._set_fp(new_wrapped)
def __getstate__(self):
# There are three obvious options here:
# 1. truncate
# 2. read to end
# 3. close socket, pickle state including read position, then open
# again on unpickle and use Range header
# XXXX um, 4. refuse to pickle unless .close()d. This is better,
# actually ("errors should never pass silently"). Pickling doesn't
# work anyway ATM, because of http://python.org/sf/1144636 so fix
# this later
# 2 breaks pickle protocol, because one expects the original object
# to be left unscathed by pickling. 3 is too complicated and
# surprising (and too much work ;-) to happen in a sane __getstate__.
# So we do 1.
state = self.__dict__.copy()
new_wrapped = eofresponse(
self._url, self._headers, self.code, self.msg)
state["wrapped"] = new_wrapped
return state
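# Editor's sketch (illustrative): after .close(), the fp is swapped for an
# eofresponse, so the methods listed in the docstring keep working.
def _closeable_response_demo():
    r = closeable_response(StringIO("body"), {}, "http://example.com/",
                           200, "OK")
    assert r.read() == "body"
    r.close()
    assert r.read() == "" and r.code == 200
    assert r.geturl() == "http://example.com/"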
def test_response(data='test data', headers=[],
url="http://example.com/", code=200, msg="OK"):
return make_response(data, headers, url, code, msg)
def test_html_response(data='test data', headers=[],
url="http://example.com/", code=200, msg="OK"):
headers += [("Content-type", "text/html")]
return make_response(data, headers, url, code, msg)
def make_response(data, headers, url, code, msg):
"""Convenient factory for objects implementing response interface.
data: string containing response body data
headers: sequence of (name, value) pairs
url: URL of response
code: integer response code (e.g. 200)
msg: string response code message (e.g. "OK")
"""
mime_headers = make_headers(headers)
r = closeable_response(StringIO(data), mime_headers, url, code, msg)
return response_seek_wrapper(r)
def make_headers(headers):
"""
headers: sequence of (name, value) pairs
"""
hdr_text = []
for name_value in headers:
hdr_text.append("%s: %s" % name_value)
return mimetools.Message(StringIO("\n".join(hdr_text)))
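# Editor's sketch (illustrative): the factories above produce seekable,
# closeable canned responses.
def _make_response_demo():
    r = make_response("hello", [("Content-type", "text/plain")],
                      "http://example.com/", 200, "OK")
    assert r.geturl() == "http://example.com/"
    assert r.info()["Content-type"] == "text/plain"
    assert r.read() == "hello"
    r.seek(0)  # rewinding works because of response_seek_wrapper
    assert r.read(2) == "he"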
# Rest of this module is especially horrible, but needed, at least until we
# fork urllib2.  Even then, we may want to preserve urllib2 compatibility.
def get_seek_wrapper_class(response):
# in order to wrap response objects that are also exceptions, we must
# dynamically subclass the exception :-(((
if (isinstance(response, urllib2.HTTPError) and
not hasattr(response, "seek")):
if response.__class__.__module__ == "__builtin__":
exc_class_name = response.__class__.__name__
else:
exc_class_name = "%s.%s" % (
response.__class__.__module__, response.__class__.__name__)
class httperror_seek_wrapper(response_seek_wrapper, response.__class__):
# this only derives from HTTPError in order to be a subclass --
# the HTTPError behaviour comes from delegation
_exc_class_name = exc_class_name
def __init__(self, wrapped):
response_seek_wrapper.__init__(self, wrapped)
# be compatible with undocumented HTTPError attributes :-(
self.hdrs = wrapped.info()
self.filename = wrapped.geturl()
def __repr__(self):
return (
"<%s (%s instance) at %s "
"whose wrapped object = %r>" % (
self.__class__.__name__, self._exc_class_name,
hex(abs(id(self))), self.wrapped)
)
wrapper_class = httperror_seek_wrapper
else:
wrapper_class = response_seek_wrapper
return wrapper_class
def seek_wrapped_response(response):
"""Return a copy of response that supports seekable response interface.
Accepts responses from both mechanize and urllib2 handlers.
    Copes with both ordinary response instances and HTTPError instances (which
can't be simply wrapped due to the requirement of preserving the exception
base class).
"""
if not hasattr(response, "seek"):
wrapper_class = get_seek_wrapper_class(response)
response = wrapper_class(response)
assert hasattr(response, "get_data")
return response
def upgrade_response(response):
"""Return a copy of response that supports Browser response interface.
Browser response interface is that of "seekable responses"
(response_seek_wrapper), plus the requirement that responses must be
useable after .close() (closeable_response).
Accepts responses from both mechanize and urllib2 handlers.
Copes with both ordinary response instances and HTTPError instances (which
can't be simply wrapped due to the requirement of preserving the exception
base class).
"""
wrapper_class = get_seek_wrapper_class(response)
if hasattr(response, "closeable_response"):
if not hasattr(response, "seek"):
response = wrapper_class(response)
assert hasattr(response, "get_data")
return copy.copy(response)
# a urllib2 handler constructed the response, i.e. the response is an
# urllib.addinfourl or a urllib2.HTTPError, instead of a
# _Util.closeable_response as returned by e.g. mechanize.HTTPHandler
try:
code = response.code
except AttributeError:
code = None
try:
msg = response.msg
except AttributeError:
msg = None
# may have already-.read() data from .seek() cache
data = None
get_data = getattr(response, "get_data", None)
if get_data:
data = get_data()
response = closeable_response(
response.fp, response.info(), response.geturl(), code, msg)
response = wrapper_class(response)
if data:
response.set_data(data)
return response
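# Editor's sketch (illustrative): upgrade_response yields a response that
# still works after .close(), as its docstring promises.
def _upgrade_response_demo():
    r = upgrade_response(test_response("abc"))
    assert r.read() == "abc"
    r.close()
    r.seek(0)
    assert r.read() == "abc"  # body survives .close() via the seek cache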
diff --git a/mechanize/_testcase.py b/mechanize/_testcase.py
index c711432..a13cca3 100644
--- a/mechanize/_testcase.py
+++ b/mechanize/_testcase.py
@@ -1,32 +1,73 @@
import shutil
import tempfile
import unittest
-class TestCase(unittest.TestCase):
+class SetupStack(object):
- def setUp(self):
- super(TestCase, self).setUp()
+ def __init__(self):
self._on_teardown = []
+ def add_teardown(self, teardown):
+ self._on_teardown.append(teardown)
+
+ def tear_down(self):
+ for func in reversed(self._on_teardown):
+ func()
+
+
+class TearDownConvenience(object):
+
+ def __init__(self, setup_stack=None):
+ self._own_setup_stack = setup_stack is None
+ if setup_stack is None:
+ setup_stack = SetupStack()
+ self._setup_stack = setup_stack
+
+ # only call this convenience method if no setup_stack was supplied to c'tor
+ def tear_down(self):
+ assert self._own_setup_stack
+ self._setup_stack.tear_down()
+
+
+class TempDirMaker(TearDownConvenience):
+
def make_temp_dir(self):
temp_dir = tempfile.mkdtemp(prefix="tmp-%s-" % self.__class__.__name__)
def tear_down():
shutil.rmtree(temp_dir)
- self._on_teardown.append(tear_down)
+ self._setup_stack.add_teardown(tear_down)
return temp_dir
+
+class MonkeyPatcher(TearDownConvenience):
+
def monkey_patch(self, obj, name, value):
orig_value = getattr(obj, name)
setattr(obj, name, value)
def reverse_patch():
setattr(obj, name, orig_value)
- self._on_teardown.append(reverse_patch)
+ self._setup_stack.add_teardown(reverse_patch)
+
+
+class TestCase(unittest.TestCase):
+
+ def setUp(self):
+ self._setup_stack = SetupStack()
+
+ def tearDown(self):
+ self._setup_stack.tear_down()
+
+ def make_temp_dir(self, *args, **kwds):
+ return TempDirMaker(self._setup_stack).make_temp_dir(*args, **kwds)
+
+ def monkey_patch(self, *args, **kwds):
+ return MonkeyPatcher(self._setup_stack).monkey_patch(*args, **kwds)
def assert_contains(self, container, containee):
self.assertTrue(containee in container, "%r not in %r" %
(containee, container))
- def tearDown(self):
- for func in reversed(self._on_teardown):
- func()
+ def assert_less_than(self, got, expected):
+ self.assertTrue(got < expected, "%r >= %r" %
+ (got, expected))
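A minimal sketch (editor's illustration, assuming the refactored
mechanize._testcase API in the diff above) of how SetupStack lets several
helpers share one LIFO teardown list; Config is a hypothetical patch target:
import os
from mechanize._testcase import SetupStack, TempDirMaker, MonkeyPatcher
class Config(object):
    retries = 3
stack = SetupStack()
temp_dir = TempDirMaker(stack).make_temp_dir()  # cleanup deferred to stack
MonkeyPatcher(stack).monkey_patch(Config, "retries", 0)
assert Config.retries == 0 and os.path.isdir(temp_dir)
stack.tear_down()  # un-patches first, then removes temp_dir (LIFO order)
assert Config.retries == 3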
diff --git a/test.py b/test.py
index 8e497ee..9ce0ad8 100755
--- a/test.py
+++ b/test.py
@@ -1,151 +1,152 @@
#!/usr/bin/env python
"""Test runner.
For further help, enter this at a command prompt:
python test.py --help
"""
# Modules containing tests to run -- a test is anything named *Tests, which
# should be classes deriving from unittest.TestCase.
MODULE_NAMES = ["test_date", "test_browser", "test_response", "test_cookies",
"test_headers", "test_urllib2", "test_pullparser",
"test_useragent", "test_html", "test_opener",
+# "test_performance", # too slow, run from release script
]
import sys, os, logging, glob
if __name__ == "__main__":
# XXX
# temporary stop-gap to run doctests &c.
# should switch to nose or something
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# XXXX coverage output seems incorrect ATM
run_coverage = "-c" in sys.argv
if run_coverage:
sys.argv.remove("-c")
use_cgitb = "-t" in sys.argv
if use_cgitb:
sys.argv.remove("-t")
run_doctests = "-d" not in sys.argv
if not run_doctests:
sys.argv.remove("-d")
run_unittests = "-u" not in sys.argv
if not run_unittests:
sys.argv.remove("-u")
log = "-l" in sys.argv
if log:
sys.argv.remove("-l")
level = logging.DEBUG
# level = logging.INFO
# level = logging.WARNING
# level = logging.NOTSET
logger = logging.getLogger("mechanize")
logger.setLevel(level)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(level)
logger.addHandler(handler)
# import local copy of Python 2.5 doctest
assert os.path.isdir("test")
sys.path.insert(0, "test")
# needed for recent doctest / linecache -- this is only for testing
# purposes, these don't get installed
# doctest.py revision 45701 and linecache.py revision 45940. Since
# linecache is used by Python itself, linecache.py is renamed
# linecache_copy.py, and this copy of doctest is modified (only) to use
# that renamed module.
sys.path.insert(0, "test-tools")
import doctest
import testprogram
if run_coverage:
import coverage
print 'running coverage'
coverage.erase()
coverage.start()
import mechanize
class DefaultResult:
def wasSuccessful(self):
return True
result = DefaultResult()
if run_doctests:
# run .doctest files needing special support
common_globs = {"mechanize": mechanize}
pm_doctest_filename = os.path.join(
"test", "test_password_manager.special_doctest")
for globs in [
{"mgr_class": mechanize.HTTPPasswordMgr},
{"mgr_class": mechanize.HTTPProxyPasswordMgr},
]:
globs.update(common_globs)
doctest.testfile(pm_doctest_filename, globs=globs)
try:
import robotparser
except ImportError:
pass
else:
doctest.testfile(os.path.join(
"test", "test_robotfileparser.special_doctest"))
# run .doctest files
doctest_files = glob.glob(os.path.join("test", "*.doctest"))
for df in doctest_files:
doctest.testfile(df)
# run doctests in docstrings
from mechanize import _headersutil, _auth, _clientcookie, _pullparser, \
_http, _rfc3986, _useragent
doctest.testmod(_headersutil)
doctest.testmod(_rfc3986)
doctest.testmod(_auth)
doctest.testmod(_clientcookie)
doctest.testmod(_pullparser)
doctest.testmod(_http)
doctest.testmod(_useragent)
if run_unittests:
# run vanilla unittest tests
import unittest
test_path = os.path.join(os.path.dirname(sys.argv[0]), "test")
sys.path.insert(0, test_path)
test_runner = None
if use_cgitb:
test_runner = testprogram.CgitbTextTestRunner()
prog = testprogram.TestProgram(
MODULE_NAMES,
testRunner=test_runner,
localServerProcess=testprogram.TwistedServerProcess(),
)
result = prog.runTests()
if run_coverage:
# HTML coverage report
import colorize
try:
os.mkdir("coverage")
except OSError:
pass
private_modules = glob.glob("mechanize/_*.py")
private_modules.remove("mechanize/__init__.py")
for module_filename in private_modules:
module_name = module_filename.replace("/", ".")[:-3]
print module_name
module = sys.modules[module_name]
f, s, m, mf = coverage.analysis(module)
fo = open(os.path.join('coverage', os.path.basename(f)+'.html'), 'wb')
colorize.colorize_file(f, outstream=fo, not_covered=mf)
fo.close()
coverage.report(module)
#print coverage.analysis(module)
# XXX exit status is wrong -- does not take account of doctests
sys.exit(not result.wasSuccessful())
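# Typical invocations (editor's note, derived from the flag handling above):
#   python test.py          # run doctests and unittests
#   python test.py -d       # skip doctests
#   python test.py -u -l    # skip unittests, enable DEBUG logging
#   python test.py -c       # also write an HTML coverage report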
diff --git a/test/test_performance.py b/test/test_performance.py
new file mode 100644
index 0000000..e9030c8
--- /dev/null
+++ b/test/test_performance.py
@@ -0,0 +1,104 @@
+import os
+import time
+import sys
+import unittest
+
+import mechanize
+from mechanize._testcase import TestCase, TempDirMaker
+from mechanize._rfc3986 import urljoin
+
+
+KB = 1024
+MB = 1024**2
+GB = 1024**3
+
+
+def time_it(operation):
+ t = time.time()
+ operation()
+ return time.time() - t
+
+def write_data(filename, nr_bytes):
+ block_size = 4096
+ block = "01234567" * (block_size // 8)
+ fh = open(filename, "w")
+ try:
+ for i in range(nr_bytes // block_size):
+ fh.write(block)
+ finally:
+ fh.close()
+
+def time_retrieve_local_file(temp_maker, size, retrieve_fn):
+ temp_dir = temp_maker.make_temp_dir()
+ filename = os.path.join(temp_dir, "data")
+ write_data(filename, size)
+ def operation():
+ retrieve_fn(urljoin("file://", filename),
+ os.path.join(temp_dir, "retrieved"))
+ return time_it(operation)
+
+
+class PerformanceTests(TestCase):
+
+ def test_retrieve_local_file(self):
+ def retrieve(url, filename):
+ br = mechanize.Browser()
+ br.retrieve(url, filename)
+ size = 100 * MB
+# size = 1 * KB
+ desired_rate = 2*MB # per second
+ desired_time = size / float(desired_rate)
+ fudge_factor = 2.
+ self.assert_less_than(
+ time_retrieve_local_file(self, size, retrieve),
+ desired_time * fudge_factor)
+
+
+def show_plot(rows):
+ import matplotlib.pyplot
+ figure = matplotlib.pyplot.figure()
+ axes = figure.add_subplot(111)
+ axes.plot([row[0] for row in rows], [row[1] for row in rows])
+ matplotlib.pyplot.show()
+
+
+def power_2_range(start, stop):
+ n = start
+ while n <= stop:
+ yield n
+ n *= 2
+
+
+def performance_plot():
+ def retrieve(url, filename):
+ br = mechanize.Browser()
+ br.retrieve(url, filename)
+
+# import urllib2
+# def retrieve(url, filename):
+# urllib2.urlopen(url).read()
+
+# from mechanize import _useragent
+# ua = _useragent.UserAgent()
+# ua.set_seekable_responses(True)
+# ua.set_handle_equiv(False)
+# def retrieve(url, filename):
+# ua.retrieve(url, filename)
+
+ rows = []
+ for size in power_2_range(256 * KB, 256 * MB):
+ temp_maker = TempDirMaker()
+ try:
+ elapsed = time_retrieve_local_file(temp_maker, size, retrieve)
+ finally:
+ temp_maker.tear_down()
+ rows.append((size//float(MB), elapsed))
+ show_plot(rows)
+
+
+if __name__ == "__main__":
+ args = sys.argv[1:]
+ if "--plot" in args:
+ performance_plot()
+ else:
+ unittest.main()
|
Almad/Mechanize
|
4a2eef42f09d35ae78aecc7c67c5d9e53b79c9d5
|
Oops, add missing modules
|
diff --git a/mechanize/_sockettimeout.py b/mechanize/_sockettimeout.py
new file mode 100644
index 0000000..c22b734
--- /dev/null
+++ b/mechanize/_sockettimeout.py
@@ -0,0 +1,6 @@
+import socket
+
+try:
+ _GLOBAL_DEFAULT_TIMEOUT = socket._GLOBAL_DEFAULT_TIMEOUT
+except AttributeError:
+ _GLOBAL_DEFAULT_TIMEOUT = object()
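A short sketch (editor's illustration; open_connection is hypothetical, and
socket.create_connection assumes Python >= 2.6) of the sentinel convention
this module mirrors: passing the default object means "leave the timeout
alone", while any other value is an explicit per-call timeout:
import socket
from mechanize import _sockettimeout
def open_connection(host, port,
                    timeout=_sockettimeout._GLOBAL_DEFAULT_TIMEOUT):
    if timeout is _sockettimeout._GLOBAL_DEFAULT_TIMEOUT:
        return socket.create_connection((host, port))
    return socket.create_connection((host, port), timeout)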
diff --git a/mechanize/_testcase.py b/mechanize/_testcase.py
new file mode 100644
index 0000000..c711432
--- /dev/null
+++ b/mechanize/_testcase.py
@@ -0,0 +1,32 @@
+import shutil
+import tempfile
+import unittest
+
+
+class TestCase(unittest.TestCase):
+
+ def setUp(self):
+ super(TestCase, self).setUp()
+ self._on_teardown = []
+
+ def make_temp_dir(self):
+ temp_dir = tempfile.mkdtemp(prefix="tmp-%s-" % self.__class__.__name__)
+ def tear_down():
+ shutil.rmtree(temp_dir)
+ self._on_teardown.append(tear_down)
+ return temp_dir
+
+ def monkey_patch(self, obj, name, value):
+ orig_value = getattr(obj, name)
+ setattr(obj, name, value)
+ def reverse_patch():
+ setattr(obj, name, orig_value)
+ self._on_teardown.append(reverse_patch)
+
+ def assert_contains(self, container, containee):
+ self.assertTrue(containee in container, "%r not in %r" %
+ (containee, container))
+
+ def tearDown(self):
+ for func in reversed(self._on_teardown):
+ func()
|
Almad/Mechanize
|
aad4c59db83ff7d52d7bcde0b269ad47e946a060
|
Revert upstream urllib2 fix for issue1401 (r60648). Not a bug (see http://bugs.python.org/msg76797).
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index 8306240..caeb82b 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -550,1025 +550,1024 @@ class DefaultCookiePolicy(CookiePolicy):
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
rules for Netscape cookies:
DomainStrictNoDots: when setting cookies, host prefix must not contain a
dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
www.foo contains a dot)
DomainStrictNonDomain: cookies that did not explicitly specify a Domain
cookie-attribute can only be returned to a domain that string-compares
equal to the domain that set the cookie (eg. rockets.acme.com won't
be returned cookies from acme.com that had no Domain cookie-attribute)
DomainRFC2965Match: when setting cookies, require a full RFC 2965
domain-match
DomainLiberal and DomainStrict are the most useful combinations of the
above flags, for convenience
strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
have names starting with '$'
strict_ns_set_path: don't allow setting cookies whose path doesn't
path-match request URI
"""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
# WARNING: this argument will change or go away if is not
# accepted into the Python standard library in this form!
# default, ie. treat 2109 as netscape iff not rfc2965
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""
Constructor arguments should be used as keyword arguments only.
blocked_domains: sequence of domain names that we never accept cookies
from, nor return cookies to
allowed_domains: if not None, this is a sequence of the only domains
for which we accept and return cookies
For other arguments, see CookiePolicy.__doc__ and
        DefaultCookiePolicy.__doc__.
"""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override set_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
debug(" - checking cookie %s", cookie)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
debug(" Set-Cookie2 without version attribute (%s)", cookie)
return False
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host_lc(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host_lc(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host_lc(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
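# Editor's sketch (illustrative, not part of the module): combining the
# strictness switches documented above with a user block-list.
def _policy_demo():
    policy = DefaultCookiePolicy(
        rfc2965=True,
        strict_ns_domain=DefaultCookiePolicy.DomainStrict)
    policy.set_blocked_domains([".doubleclick.net"])
    assert policy.is_blocked("ad.doubleclick.net")
    assert not policy.is_blocked("example.com")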
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
get_policy()
set_policy(policy)
cookies_for_request(request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def get_policy(self):
return self._policy
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
-
def cookies_for_request(self, request):
"""Return a list of cookies to be returned to server.
The returned list of cookie instances is sorted in the order they
should appear in the Cookie: header for return to the server.
See add_cookie_header.__doc__ for the interface required of the
request argument.
New in version 0.1.10
"""
self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
# add cookies in order of most specific (i.e. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
# this method still exists (alongside cookies_for_request) because it
# is part of an implied protected interface for subclasses of cookiejar
# XXX document that implied interface, or provide another way of
# implementing cookiejars than subclassing
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
The $Version attribute is also added when appropriate (currently only
once per request).
>>> jar = CookieJar()
>>> ns_cookie = Cookie(0, "foo", '"bar"', None, False,
... "example.com", False, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([ns_cookie])
['foo="bar"']
>>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False,
... ".example.com", True, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([rfc2965_cookie])
['$Version=1', 'foo=bar', '$Domain="example.com"']
"""
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, is_unverifiable, get_type,
has_header, get_header, header_items and add_unredirected_header, as
documented by urllib2, and the port attribute (the port number).
Actually, RequestUpgradeProcessor will automatically upgrade your
Request object to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
cookies = self.cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
if v is None:
debug(" missing value for max-age attribute")
bad_cookie = True
break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
                        # age-calculation rules.  Remember that zero Max-Age
                        # is a request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host_lc(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def _make_cookies(self, response, request):
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object.
See extract_cookies.__doc__ for the interface required of the
response and request arguments.
"""
self._policy._now = self._now = int(time.time())
return [cookie for cookie in self._make_cookies(response, request)
if cookie.expires is None or not cookie.expires <= self._now]
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
        The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
object' may be any object that provides a getheaders method).
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_type, get_host, and is_unverifiable, as
documented by urllib2, and the port attribute (the port number). The
request is used to set default values for cookie-attributes as well as
for checking that the cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
for cookie in self._make_cookies(response, request):
if cookie.expires is not None and cookie.expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(cookie.domain, cookie.path, cookie.name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
cookie.domain, cookie.path, cookie.name)
elif self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Discards all cookies held by object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
closing the browser window.
Note that the save method won't save session cookies anyway, unless you
ask otherwise by passing a true ignore_discard argument.
"""
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the save
method won't save expired cookies anyway (unless you ask otherwise by
passing a true ignore_expires argument).
diff --git a/mechanize/_http.py b/mechanize/_http.py
index 3b3ad42..1b80e2b 100644
--- a/mechanize/_http.py
+++ b/mechanize/_http.py
@@ -1,615 +1,612 @@
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import time, htmlentitydefs, logging, socket, \
urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _clientcookie import CookieJar
from _headersutil import is_html
from _html import unescape, unescape_charref
from _request import Request
from _response import closeable_response, response_seek_wrapper
import _rfc3986
import _sockettimeout
debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
try:
socket._fileobject("fake socket", close=True)
except TypeError:
# python <= 2.4
create_readline_wrapper = socket._fileobject
else:
def create_readline_wrapper(fh):
return socket._fileobject(fh, close=True)
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
# Implementation notes:
# To avoid the server sending us into an infinite loop, the request
# object needs to track what URLs we have already seen. Do this by
# adding a handler-specific attribute to the Request object. The value
# of the dict is used to count the number of times the same URL has
# been visited. This is needed because visiting the same URL twice
# does not necessarily imply a loop, thanks to state introduced by
# cookies.
# Always unhandled redirection codes:
# 300 Multiple Choices: should not handle this here.
# 304 Not Modified: no need to handle here: only of interest to caches
# that do conditional GETs
# 305 Use Proxy: probably not worth dealing with here
# 306 Unused: what was this for in the previous versions of protocol??
def redirect_request(self, newurl, req, fp, code, msg, headers):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a redirection
response is received. If a redirection should take place, return a
new Request to allow http_error_30x to perform the redirect;
otherwise, return None to indicate that an HTTPError should be
raised.
"""
if code in (301, 302, 303, "refresh") or \
(code == 307 and not req.has_data()):
- new_headers = dict((k, v) for k, v in req.headers.items()
- if k.lower() not in
- ["content-length", "content-type"])
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# XXX really refresh redirections should be visiting; tricky to
# fix, so this will wait until post-stable release
new = Request(newurl,
- headers=new_headers,
+ headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True,
visit=False,
)
new._origin_req = getattr(req, "_origin_req", req)
return new
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if headers.has_key('location'):
newurl = headers.getheaders('location')[0]
elif headers.has_key('uri'):
newurl = headers.getheaders('uri')[0]
else:
return
newurl = _rfc3986.clean_url(newurl, "latin-1")
newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(newurl, req, fp, code, msg, headers)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new)
http_error_301 = http_error_303 = http_error_307 = http_error_302
http_error_refresh = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
        unescaped_attrs = {}
        for key, val in attrs.items():
            unescaped_attrs[key] = self.unescape_attr(val)
        return unescaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
try:
import HTMLParser
except ImportError:
pass
else:
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
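# --- Illustrative sketch, not part of mechanize: exercising parse_head()
# with a HeadParser on an in-memory document.  StringIO stands in for a
# real response object; the function below exists for demonstration only.
def _demo_parse_head():
    from StringIO import StringIO
    html = ('<html><head>'
            '<meta http-equiv="refresh" content="0; url=/next">'
            '</head><body></body></html>')
    # parsing stops at </head>, so only head-level pairs are returned
    pairs = parse_head(StringIO(html), HeadParser())
    assert pairs == [("refresh", "0; url=/next")]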
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response,
self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
class HTTPCookieProcessor(BaseHandler):
"""Handle HTTP cookies.
Public attributes:
cookiejar: CookieJar instance
"""
def __init__(self, cookiejar=None):
if cookiejar is None:
cookiejar = CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
try:
import robotparser
except ImportError:
pass
else:
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT
def set_opener(self, opener=None):
import _opener
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def set_timeout(self, timeout):
self._timeout = timeout
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False,
timeout=self._timeout)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
debug_robots("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
debug_robots("disallow all")
elif status >= 400:
self.allow_all = True
debug_robots("allow all")
elif status == 200 and lines:
debug_robots("parse lines")
self.parse(lines)
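    # --- Illustrative sketch, not part of mechanize: the status-code
    # policy read() applies above, written out as a pure function.  The
    # string labels are invented for this demonstration.
    def _robots_policy(status, has_lines):
        if status in (401, 403):
            return "disallow all"  # auth problems: be conservative
        elif status >= 400:
            return "allow all"     # no usable robots.txt: assume no rules
        elif status == 200 and has_lines:
            return "parse lines"   # honour whatever the file says
        else:
            return "no change"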
class RobotExclusionError(urllib2.HTTPError):
def __init__(self, request, *args):
            urllib2.HTTPError.__init__(self, *args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
        except ImportError:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.set_timeout(request.timeout)
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
    This only makes sense if you use each RefererProcessor for a single
    chain of requests (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
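# --- Illustrative sketch, not part of mechanize: how the processor
# threads a Referer header through one chain of requests.  resp1 is
# assumed to be a response whose geturl() returns the first page's URL.
#   p = HTTPRefererProcessor()
#   req1 = Request("http://example.com/a")  # first request: no Referer yet
#   p.http_request(req1)
#   p.http_response(req1, resp1)            # remembers resp1.geturl()
#   req2 = Request("http://example.com/b")
#   p.http_request(req2)  # adds Referer: http://example.com/a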
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
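# --- Illustrative sketch, not part of mechanize's documented API surface:
# wiring the processor into an opener.  The build_opener call shown is an
# assumption for demonstration; opening a URL performs real network I/O.
#   import mechanize
#   opener = mechanize.build_opener(
#       mechanize.HTTPRefreshProcessor(max_time=30, honor_time=False))
#   opener.open("http://example.com/")  # follows Refresh pauses <= 30s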
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses.
    The purpose of this handler is to allow other response processors a
look-in by removing the call to parent.error() from
AbstractHTTPHandler.
For non-200 error codes, this just passes the job on to the
Handler.<proto>_error_<code> methods, via the OpenerDirector.error
method. Eventually, urllib2.HTTPDefaultErrorHandler will raise an
HTTPError if no other handler handles the error.
"""
handler_order = 1000 # after all other processors
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
        if code != 200:
            # hardcoded http is NOT a bug
            response = self.parent.error(
                "http", request, response, code, msg, hdrs)
        return response

    https_response = http_response
diff --git a/test/test_urllib2.py b/test/test_urllib2.py
index dfe808c..86101f9 100644
--- a/test/test_urllib2.py
+++ b/test/test_urllib2.py
@@ -472,867 +472,859 @@ class HandlerTests(unittest.TestCase):
]:
request = Request(url)
r = h.ftp_open(request)
# ftp authentication not yet implemented by FTPHandler
self.assert_(h.user == h.passwd == "")
self.assert_(h.host == socket.gethostbyname(host))
self.assert_(h.port == port)
self.assert_(h.dirs == dirs)
if sys.version_info >= (2, 6):
self.assertEquals(h.timeout, timeout)
self.assert_(h.ftpwrapper.filename == filename)
self.assert_(h.ftpwrapper.filetype == type_)
headers = r.info()
self.assert_(headers["Content-type"] == mimetype)
self.assert_(int(headers["Content-length"]) == len(data))
def test_file(self):
import time, rfc822, socket
h = mechanize.FileHandler()
o = h.parent = MockOpener()
#TESTFN = test_support.TESTFN
TESTFN = "test.txt"
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
try:
fqdn = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
fqdn = "localhost"
for url in [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
"file://%s%s" % (fqdn, urlpath)
]:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
newurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = rfc822.formatdate(stats.st_mtime)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
for url in [
"file://localhost:80%s" % urlpath,
# XXXX bug: these fail with socket.gaierror, should be URLError
## "file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
## os.getcwd(), TESTFN),
## "file://somerandomhost.ontheinternet.com%s/%s" %
## (os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(mechanize.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = mechanize.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", True),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (mechanize.URLError, OSError):
self.assert_(not ftp)
else:
self.assert_(o.req is req)
self.assertEqual(req.type, "ftp")
def test_http(self):
h = AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", "blah")]:
req = Request(url, data, {"Foo": "bar"})
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
r.code, r.msg == 200, "OK" # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.has_key # r.info() gives dict from .getreply()
self.assert_(r.geturl() == url)
self.assert_(http.host == "example.com")
self.assert_(http.level == 0)
self.assert_(http.method == method)
self.assert_(http.selector == "/")
http.req_headers.sort()
self.assert_(http.req_headers == [
("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assert_(http.data == data)
# check socket.error converted to URLError
http.raise_on_endheaders = True
self.assertRaises(mechanize.URLError, h.do_open, http, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in "", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assert_("Content-length" not in req.unredirected_hdrs)
self.assert_("Content-type" not in req.unredirected_hdrs)
else: # POST
self.assert_(req.unredirected_hdrs["Content-length"] == "0")
self.assert_(req.unredirected_hdrs["Content-type"] ==
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assert_(req.unredirected_hdrs["Host"] == "example.com")
self.assert_(req.unredirected_hdrs["Spam"] == "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assert_(req.unredirected_hdrs["Content-length"] == "foo")
self.assert_(req.unredirected_hdrs["Content-type"] == "bar")
self.assert_(req.unredirected_hdrs["Host"] == "baz")
self.assert_(req.unredirected_hdrs["Spam"] == "foo")
def test_request_upgrade(self):
import urllib2
new_req_class = hasattr(urllib2.Request, "has_header")
h = HTTPRequestUpgradeProcessor()
o = h.parent = MockOpener()
# urllib2.Request gets upgraded, unless it's the new Request
# class from 2.4
req = urllib2.Request("http://example.com/")
newreq = h.http_request(req)
if new_req_class:
self.assert_(newreq is req)
else:
self.assert_(newreq is not req)
if new_req_class:
self.assert_(newreq.__class__ is not Request)
else:
self.assert_(newreq.__class__ is Request)
# ClientCookie._urllib2_support.Request doesn't get upgraded
req = Request("http://example.com/")
newreq = h.http_request(req)
self.assert_(newreq is req)
self.assert_(newreq.__class__ is Request)
def test_referer(self):
h = HTTPRefererProcessor()
o = h.parent = MockOpener()
# normal case
url = "http://example.com/"
req = Request(url)
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(h.referer == url)
newreq = h.http_request(req)
self.assert_(req is newreq)
self.assert_(req.unredirected_hdrs["Referer"] == url)
# don't clobber existing Referer
ref = "http://set.by.user.com/"
req.add_unredirected_header("Referer", ref)
newreq = h.http_request(req)
self.assert_(req is newreq)
self.assert_(req.unredirected_hdrs["Referer"] == ref)
def test_errors(self):
from mechanize import _response
h = HTTPErrorProcessor()
o = h.parent = MockOpener()
req = Request("http://example.com")
# 200 OK is passed through
r = _response.test_response()
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(not hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = _response.test_response(code=201, msg="Created")
self.assert_(h.http_response(req, r) is None)
self.assert_(o.proto == "http") # o.error called
self.assert_(o.args == (req, r, 201, "Created", AlwaysEqual()))
def test_raise_http_errors(self):
# HTTPDefaultErrorHandler should raise HTTPError if no error handler
# handled the error response
from mechanize import _response
h = mechanize.HTTPDefaultErrorHandler()
url = "http://example.com"; code = 500; msg = "Error"
request = mechanize.Request(url)
response = _response.test_response(url=url, code=code, msg=msg)
# case 1. it's not an HTTPError
try:
h.http_error_default(
request, response, code, msg, response.info())
except mechanize.HTTPError, exc:
self.assert_(exc is not response)
self.assert_(exc.fp is response)
else:
self.assert_(False)
# case 2. response object is already an HTTPError, so just re-raise it
error = mechanize.HTTPError(
url, code, msg, "fake headers", response)
try:
h.http_error_default(
request, error, code, msg, error.info())
except mechanize.HTTPError, exc:
self.assert_(exc is error)
else:
self.assert_(False)
def test_robots(self):
# XXX useragent
try:
import robotparser
except ImportError:
return # skip test
else:
from mechanize import HTTPRobotRulesProcessor
opener = OpenerDirector()
rfpc = MockRobotFileParserClass()
h = HTTPRobotRulesProcessor(rfpc)
opener.add_handler(h)
url = "http://example.com:80/foo/bar.html"
req = Request(url)
# first time: initialise and set up robots.txt parser before checking
# whether OK to fetch URL
h.http_request(req)
self.assertEquals(rfpc.calls, [
"__call__",
("set_opener", opener),
("set_url", "http://example.com:80/robots.txt"),
("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
"read",
("can_fetch", "", url),
])
# second time: just use existing parser
rfpc.clear()
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
("can_fetch", "", url),
])
# different URL on same server: same again
rfpc.clear()
url = "http://example.com:80/blah.html"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
("can_fetch", "", url),
])
# disallowed URL
rfpc.clear()
rfpc._can_fetch = False
url = "http://example.com:80/rhubarb.html"
req = Request(url)
try:
h.http_request(req)
except mechanize.HTTPError, e:
self.assert_(e.request == req)
self.assert_(e.code == 403)
# new host: reload robots.txt (even though the host and port are
# unchanged, we treat this as a new host because
# "example.com" != "example.com:80")
rfpc.clear()
rfpc._can_fetch = True
url = "http://example.com/rhubarb.html"
req = Request(url)
h.http_request(req)
self.assertEquals(rfpc.calls, [
"__call__",
("set_opener", opener),
("set_url", "http://example.com/robots.txt"),
("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
"read",
("can_fetch", "", url),
])
# https url -> should fetch robots.txt from https url too
rfpc.clear()
url = "https://example.org/rhubarb.html"
req = Request(url)
h.http_request(req)
self.assertEquals(rfpc.calls, [
"__call__",
("set_opener", opener),
("set_url", "https://example.org/robots.txt"),
("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
"read",
("can_fetch", "", url),
])
# non-HTTP URL -> ignore robots.txt
rfpc.clear()
url = "ftp://example.com/"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [])
def test_redirected_robots_txt(self):
# redirected robots.txt fetch shouldn't result in another attempted
# robots.txt fetch to check the redirection is allowed!
import mechanize
from mechanize import build_opener, HTTPHandler, \
HTTPDefaultErrorHandler, HTTPRedirectHandler, \
HTTPRobotRulesProcessor
class MockHTTPHandler(mechanize.BaseHandler):
def __init__(self):
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if req.get_full_url() == "http://example.com/robots.txt":
hdr = "Location: http://example.com/en/robots.txt\r\n\r\n"
msg = mimetools.Message(StringIO(hdr))
return self.parent.error(
"http", req, test_response(), 302, "Blah", msg)
else:
return test_response("Allow: *", [], req.get_full_url())
hh = MockHTTPHandler()
hdeh = HTTPDefaultErrorHandler()
hrh = HTTPRedirectHandler()
rh = HTTPRobotRulesProcessor()
o = build_test_opener(hh, hdeh, hrh, rh)
o.open("http://example.com/")
self.assertEqual([req.get_full_url() for req in hh.requests],
["http://example.com/robots.txt",
"http://example.com/en/robots.txt",
"http://example.com/",
])
def test_cookies(self):
cj = MockCookieJar()
h = HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assert_(cj.ach_req is req is newreq)
self.assert_(req.origin_req_host == "example.com")
self.assert_(cj.ach_u == False)
newr = h.http_response(req, r)
self.assert_(cj.ec_req is req)
self.assert_(cj.ec_r is r is newr)
self.assert_(cj.ec_u == False)
def test_seekable(self):
hide_deprecations()
try:
h = SeekableProcessor()
finally:
reset_deprecations()
o = h.parent = MockOpener()
req = mechanize.Request("http://example.com/")
class MockUnseekableResponse:
code = 200
msg = "OK"
def info(self): pass
def geturl(self): return ""
r = MockUnseekableResponse()
newr = h.any_response(req, r)
self.assert_(not hasattr(r, "seek"))
self.assert_(hasattr(newr, "seek"))
def test_http_equiv(self):
from mechanize import _response
h = HTTPEquivProcessor()
o = h.parent = MockOpener()
data = ('<html><head>'
'<meta http-equiv="Refresh" content="spam&eggs">'
'</head></html>'
)
headers = [("Foo", "Bar"),
("Content-type", "text/html"),
("Refresh", "blah"),
]
url = "http://example.com/"
req = Request(url)
r = _response.make_response(data, headers, url, 200, "OK")
newr = h.http_response(req, r)
new_headers = newr.info()
self.assertEqual(new_headers["Foo"], "Bar")
self.assertEqual(new_headers["Refresh"], "spam&eggs")
self.assertEqual(new_headers.getheaders("Refresh"),
["blah", "spam&eggs"])
def test_refresh(self):
# XXX test processor constructor optional args
h = HTTPRefreshProcessor(max_time=None, honor_time=False)
for val, valid in [
('0; url="http://example.com/foo/"', True),
("2", True),
# in the past, this failed with UnboundLocalError
('0; "http://example.com/foo/"', False),
]:
o = h.parent = MockOpener()
req = Request("http://example.com/")
headers = http_message({"refresh": val})
r = MockResponse(200, "OK", headers, "", "http://example.com/")
newr = h.http_response(req, r)
if valid:
self.assertEqual(o.proto, "http")
self.assertEqual(o.args, (req, r, "refresh", "OK", headers))
def test_refresh_honor_time(self):
class SleepTester:
def __init__(self, test, seconds):
self._test = test
                if seconds == 0:
seconds = None # don't expect a sleep for 0 seconds
self._expected = seconds
self._got = None
def sleep(self, seconds):
self._got = seconds
def verify(self):
self._test.assertEqual(self._expected, self._got)
class Opener:
called = False
def error(self, *args, **kwds):
self.called = True
def test(rp, header, refresh_after):
expect_refresh = refresh_after is not None
opener = Opener()
rp.parent = opener
st = SleepTester(self, refresh_after)
rp._sleep = st.sleep
rp.http_response(Request("http://example.com"),
test_response(headers=[("Refresh", header)]),
)
self.assertEqual(expect_refresh, opener.called)
st.verify()
# by default, only zero-time refreshes are honoured
test(HTTPRefreshProcessor(), "0", 0)
test(HTTPRefreshProcessor(), "2", None)
# if requested, more than zero seconds are allowed
test(HTTPRefreshProcessor(max_time=None), "2", 2)
test(HTTPRefreshProcessor(max_time=30), "2", 2)
# no sleep if we don't "honor_time"
test(HTTPRefreshProcessor(max_time=30, honor_time=False), "2", 0)
# request for too-long wait before refreshing --> no refresh occurs
test(HTTPRefreshProcessor(max_time=30), "60", None)
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307, "refresh":
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.add_header("Nonsense", "viking=withhold")
- if data is not None:
- req.add_header("Content-length", str(len(data)))
req.add_unredirected_header("Spam", "spam")
req.origin_req_host = "example.com" # XXX
try:
method(req, MockFile(), code, "Blah",
http_message({"location": to_url}))
except mechanize.HTTPError:
# 307 in response to POST requires user OK
self.assert_(code == 307 and data is not None)
self.assert_(o.req.get_full_url() == to_url)
try:
self.assert_(o.req.get_method() == "GET")
except AttributeError:
self.assert_(not o.req.has_data())
-
- # now it's a GET, there should not be headers regarding content
- # (possibly dragged from before being a POST)
- self.assertFalse(o.req.has_header("Content-length"))
- self.assertFalse(o.req.has_header("Content-type"))
-
self.assert_(o.req.headers["Nonsense"] == "viking=withhold")
self.assert_(not o.req.headers.has_key("Spam"))
self.assert_(not o.req.unredirected_hdrs.has_key("Spam"))
# loop detection
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
http_message({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url)
req.origin_req_host = "example.com"
count = 0
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except mechanize.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assert_(count == HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url)
req.origin_req_host = "example.com"
count = 0
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except mechanize.HTTPError:
self.assert_(count == HTTPRedirectHandler.max_redirections)
def test_redirect_bad_uri(self):
# bad URIs should be cleaned up before redirection
from mechanize._response import test_html_response
from_url = "http://example.com/a.html"
bad_to_url = "http://example.com/b. |html"
good_to_url = "http://example.com/b.%20%7Chtml"
h = HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
h.http_error_302(req, test_html_response(), 302, "Blah",
http_message({"location": bad_to_url}),
)
self.assertEqual(o.req.get_full_url(), good_to_url)
def test_refresh_bad_uri(self):
# bad URIs should be cleaned up before redirection
from mechanize._response import test_html_response
from_url = "http://example.com/a.html"
bad_to_url = "http://example.com/b. |html"
good_to_url = "http://example.com/b.%20%7Chtml"
h = HTTPRefreshProcessor(max_time=None, honor_time=False)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = test_html_response(
headers=[("refresh", '0; url="%s"' % bad_to_url)])
newr = h.http_response(req, r)
headers = o.args[-1]
self.assertEqual(headers["Location"], good_to_url)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
import mechanize
from mechanize import CookieJar, build_opener, HTTPHandler, \
HTTPCookieProcessor, HTTPError, HTTPDefaultErrorHandler, \
HTTPRedirectHandler
from test_cookies import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = HTTPDefaultErrorHandler()
hrh = HTTPRedirectHandler()
cp = HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assert_(not hh.req.has_header("Cookie"))
def test_proxy(self):
o = OpenerDirector()
ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o._maybe_reindex_handlers()
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_basic_auth(self):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = mechanize.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = mechanize.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/1479302, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(mechanize.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
mechanize.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(mechanize.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
mechanize.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(digest_handler)
opener.add_handler(basic_handler)
opener.add_handler(http_handler)
opener._maybe_reindex_handlers()
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64, httplib
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
r = opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.failIf(http_handler.requests[0].has_header(auth_header))
userpass = '%s:%s' % (user, password)
auth_hdr_value = 'Basic '+base64.encodestring(userpass).strip()
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
r = opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.failIf(http_handler.requests[0].has_header(auth_header))
class HeadParserTests(unittest.TestCase):
def test(self):
# XXX XHTML
from mechanize import HeadParser
htmls = [
("""<meta http-equiv="refresh" content="1; http://example.com/">
""",
[("refresh", "1; http://example.com/")]
),
("""
<html><head>
<meta http-equiv="refresh" content="1; http://example.com/">
<meta name="spam" content="eggs">
<meta http-equiv="foo" content="bar">
<p> <!-- p is not allowed in head, so parsing should stop here -->
<meta http-equiv="moo" content="cow">
</html>
""",
[("refresh", "1; http://example.com/"), ("foo", "bar")]),
("""<meta http-equiv="refresh">
""",
[])
]
for html, result in htmls:
self.assertEqual(parse_head(StringIO.StringIO(html), HeadParser()), result)
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(mechanize.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
msg = mimetools.Message(StringIO(self.headers))
return self.parent.error(
"http", req, test_response(), self.code, "Blah", msg)
else:
self.req = req
return test_response("", [], req.get_full_url())
class MyHTTPHandler(HTTPHandler): pass
class FooHandler(mechanize.BaseHandler):
def foo_open(self): pass
class BarHandler(mechanize.BaseHandler):
def bar_open(self): pass
class A:
def a(self): pass
class B(A):
def a(self): pass
def b(self): pass
class C(A):
def c(self): pass
class D(C, B):
def a(self): pass
def d(self): pass
class FunctionTests(unittest.TestCase):
def test_build_opener(self):
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, HTTPHandler)
o = build_opener(HTTPHandler)
self.opener_has_handler(o, HTTPHandler)
o = build_opener(HTTPHandler())
self.opener_has_handler(o, HTTPHandler)
def opener_has_handler(self, opener, handler_class):
for h in opener.handlers:
if h.__class__ == handler_class:
break
else:
self.assert_(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
|
Almad/Mechanize
|
ebc5b80471d72a607768a3bec55c03dd306ed566
|
Port upstream urllib2 fix for issue1401 (r60648)
|
diff --git a/mechanize/_http.py b/mechanize/_http.py
index 1b80e2b..3b3ad42 100644
--- a/mechanize/_http.py
+++ b/mechanize/_http.py
@@ -1,612 +1,615 @@
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import time, htmlentitydefs, logging, socket, \
urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _clientcookie import CookieJar
from _headersutil import is_html
from _html import unescape, unescape_charref
from _request import Request
from _response import closeable_response, response_seek_wrapper
import _rfc3986
import _sockettimeout
debug = logging.getLogger("mechanize").debug
debug_robots = logging.getLogger("mechanize.robots").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
try:
socket._fileobject("fake socket", close=True)
except TypeError:
# python <= 2.4
create_readline_wrapper = socket._fileobject
else:
def create_readline_wrapper(fh):
return socket._fileobject(fh, close=True)
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
# Implementation notes:
# To avoid the server sending us into an infinite loop, the request
# object needs to track what URLs we have already seen. Do this by
# adding a handler-specific attribute to the Request object. The value
# of the dict is used to count the number of times the same URL has
# been visited. This is needed because visiting the same URL twice
# does not necessarily imply a loop, thanks to state introduced by
# cookies.
# Always unhandled redirection codes:
# 300 Multiple Choices: should not handle this here.
# 304 Not Modified: no need to handle here: only of interest to caches
# that do conditional GETs
# 305 Use Proxy: probably not worth dealing with here
    # 306 Unused: what was this for in previous versions of the protocol?
def redirect_request(self, newurl, req, fp, code, msg, headers):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a redirection
response is received. If a redirection should take place, return a
new Request to allow http_error_30x to perform the redirect;
otherwise, return None to indicate that an HTTPError should be
raised.
"""
if code in (301, 302, 303, "refresh") or \
(code == 307 and not req.has_data()):
+ new_headers = dict((k, v) for k, v in req.headers.items()
+ if k.lower() not in
+ ["content-length", "content-type"])
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# XXX really refresh redirections should be visiting; tricky to
# fix, so this will wait until post-stable release
new = Request(newurl,
- headers=req.headers,
+ headers=new_headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True,
visit=False,
)
new._origin_req = getattr(req, "_origin_req", req)
return new
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
        # (so probably the same goes for URI).  Use the first header.
if headers.has_key('location'):
newurl = headers.getheaders('location')[0]
elif headers.has_key('uri'):
newurl = headers.getheaders('uri')[0]
else:
return
newurl = _rfc3986.clean_url(newurl, "latin-1")
newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(newurl, req, fp, code, msg, headers)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new)
http_error_301 = http_error_303 = http_error_307 = http_error_302
http_error_refresh = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
    # only these elements are allowed in or before the HEAD of the document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
        unescaped_attrs = {}
        for key, val in attrs.items():
            unescaped_attrs[key] = self.unescape_attr(val)
        return unescaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
try:
import HTMLParser
except ImportError:
pass
else:
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response,
self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
class HTTPCookieProcessor(BaseHandler):
"""Handle HTTP cookies.
Public attributes:
cookiejar: CookieJar instance
"""
def __init__(self, cookiejar=None):
if cookiejar is None:
cookiejar = CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
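# --- Illustrative sketch, not part of mechanize: sharing one CookieJar
# between the processor and application code.  The build_opener wiring is
# an assumption for demonstration; the loop just inspects the jar.
#   import mechanize
#   jar = mechanize.CookieJar()
#   opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(jar))
#   opener.open("http://example.com/")  # Set-Cookie responses land in jar
#   for cookie in jar:
#       print cookie.name, cookie.value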
try:
import robotparser
except ImportError:
pass
else:
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
self._timeout = _sockettimeout._GLOBAL_DEFAULT_TIMEOUT
def set_opener(self, opener=None):
import _opener
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def set_timeout(self, timeout):
self._timeout = timeout
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False,
timeout=self._timeout)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
debug_robots("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
debug_robots("disallow all")
elif status >= 400:
self.allow_all = True
debug_robots("allow all")
elif status == 200 and lines:
debug_robots("parse lines")
self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
def __init__(self, request, *args):
            urllib2.HTTPError.__init__(self, *args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
        except ImportError:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.set_timeout(request.timeout)
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
    This only makes sense if you use each RefererProcessor for a single
    chain of requests (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
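# --- Illustrative example, with an assumed result: Firefox-style quoting
# is stripped before delegating to _rfc3986.clean_url, so (assuming
# clean_url leaves an already-clean URL untouched)
#   clean_refresh_url("'http://example.com/'") -> "http://example.com/"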
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses.
    The purpose of this handler is to allow other response processors a
look-in by removing the call to parent.error() from
AbstractHTTPHandler.
For non-200 error codes, this just passes the job on to the
Handler.<proto>_error_<code> methods, via the OpenerDirector.error
method. Eventually, urllib2.HTTPDefaultErrorHandler will raise an
HTTPError if no other handler handles the error.
"""
handler_order = 1000 # after all other processors
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
        if code != 200:
            # hardcoded http is NOT a bug
            response = self.parent.error(
                "http", request, response, code, msg, hdrs)
        return response

    https_response = http_response
diff --git a/test/test_urllib2.py b/test/test_urllib2.py
index 86101f9..dfe808c 100644
--- a/test/test_urllib2.py
+++ b/test/test_urllib2.py
@@ -472,859 +472,867 @@ class HandlerTests(unittest.TestCase):
]:
request = Request(url)
r = h.ftp_open(request)
# ftp authentication not yet implemented by FTPHandler
self.assert_(h.user == h.passwd == "")
self.assert_(h.host == socket.gethostbyname(host))
self.assert_(h.port == port)
self.assert_(h.dirs == dirs)
if sys.version_info >= (2, 6):
self.assertEquals(h.timeout, timeout)
self.assert_(h.ftpwrapper.filename == filename)
self.assert_(h.ftpwrapper.filetype == type_)
headers = r.info()
self.assert_(headers["Content-type"] == mimetype)
self.assert_(int(headers["Content-length"]) == len(data))
def test_file(self):
import time, rfc822, socket
h = mechanize.FileHandler()
o = h.parent = MockOpener()
#TESTFN = test_support.TESTFN
TESTFN = "test.txt"
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
try:
fqdn = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
fqdn = "localhost"
for url in [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
"file://%s%s" % (fqdn, urlpath)
]:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
newurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = rfc822.formatdate(stats.st_mtime)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
for url in [
"file://localhost:80%s" % urlpath,
# XXXX bug: these fail with socket.gaierror, should be URLError
## "file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
## os.getcwd(), TESTFN),
## "file://somerandomhost.ontheinternet.com%s/%s" %
## (os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(mechanize.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = mechanize.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", True),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (mechanize.URLError, OSError):
self.assert_(not ftp)
else:
self.assert_(o.req is req)
self.assertEqual(req.type, "ftp")
def test_http(self):
h = AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", "blah")]:
req = Request(url, data, {"Foo": "bar"})
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
self.assertEqual((r.code, r.msg), (200, "OK"))  # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.has_key # r.info() gives dict from .getreply()
self.assert_(r.geturl() == url)
self.assert_(http.host == "example.com")
self.assert_(http.level == 0)
self.assert_(http.method == method)
self.assert_(http.selector == "/")
http.req_headers.sort()
self.assert_(http.req_headers == [
("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assert_(http.data == data)
# check socket.error converted to URLError
http.raise_on_endheaders = True
self.assertRaises(mechanize.URLError, h.do_open, http, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in "", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assert_("Content-length" not in req.unredirected_hdrs)
self.assert_("Content-type" not in req.unredirected_hdrs)
else: # POST
self.assert_(req.unredirected_hdrs["Content-length"] == "0")
self.assert_(req.unredirected_hdrs["Content-type"] ==
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assert_(req.unredirected_hdrs["Host"] == "example.com")
self.assert_(req.unredirected_hdrs["Spam"] == "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assert_(req.unredirected_hdrs["Content-length"] == "foo")
self.assert_(req.unredirected_hdrs["Content-type"] == "bar")
self.assert_(req.unredirected_hdrs["Host"] == "baz")
self.assert_(req.unredirected_hdrs["Spam"] == "foo")
def test_request_upgrade(self):
import urllib2
new_req_class = hasattr(urllib2.Request, "has_header")
h = HTTPRequestUpgradeProcessor()
o = h.parent = MockOpener()
# urllib2.Request gets upgraded, unless it's the new Request
# class from 2.4
req = urllib2.Request("http://example.com/")
newreq = h.http_request(req)
if new_req_class:
self.assert_(newreq is req)
else:
self.assert_(newreq is not req)
if new_req_class:
self.assert_(newreq.__class__ is not Request)
else:
self.assert_(newreq.__class__ is Request)
# ClientCookie._urllib2_support.Request doesn't get upgraded
req = Request("http://example.com/")
newreq = h.http_request(req)
self.assert_(newreq is req)
self.assert_(newreq.__class__ is Request)
def test_referer(self):
h = HTTPRefererProcessor()
o = h.parent = MockOpener()
# normal case
url = "http://example.com/"
req = Request(url)
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(h.referer == url)
newreq = h.http_request(req)
self.assert_(req is newreq)
self.assert_(req.unredirected_hdrs["Referer"] == url)
# don't clobber existing Referer
ref = "http://set.by.user.com/"
req.add_unredirected_header("Referer", ref)
newreq = h.http_request(req)
self.assert_(req is newreq)
self.assert_(req.unredirected_hdrs["Referer"] == ref)
def test_errors(self):
from mechanize import _response
h = HTTPErrorProcessor()
o = h.parent = MockOpener()
req = Request("http://example.com")
# 200 OK is passed through
r = _response.test_response()
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(not hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = _response.test_response(code=201, msg="Created")
self.assert_(h.http_response(req, r) is None)
self.assert_(o.proto == "http") # o.error called
self.assert_(o.args == (req, r, 201, "Created", AlwaysEqual()))
def test_raise_http_errors(self):
# HTTPDefaultErrorHandler should raise HTTPError if no error handler
# handled the error response
from mechanize import _response
h = mechanize.HTTPDefaultErrorHandler()
url = "http://example.com"; code = 500; msg = "Error"
request = mechanize.Request(url)
response = _response.test_response(url=url, code=code, msg=msg)
# case 1. it's not an HTTPError
try:
h.http_error_default(
request, response, code, msg, response.info())
except mechanize.HTTPError, exc:
self.assert_(exc is not response)
self.assert_(exc.fp is response)
else:
self.assert_(False)
# case 2. response object is already an HTTPError, so just re-raise it
error = mechanize.HTTPError(
url, code, msg, "fake headers", response)
try:
h.http_error_default(
request, error, code, msg, error.info())
except mechanize.HTTPError, exc:
self.assert_(exc is error)
else:
self.assert_(False)
def test_robots(self):
# XXX useragent
try:
import robotparser
except ImportError:
return # skip test
else:
from mechanize import HTTPRobotRulesProcessor
opener = OpenerDirector()
rfpc = MockRobotFileParserClass()
h = HTTPRobotRulesProcessor(rfpc)
opener.add_handler(h)
url = "http://example.com:80/foo/bar.html"
req = Request(url)
# first time: initialise and set up robots.txt parser before checking
# whether OK to fetch URL
h.http_request(req)
self.assertEquals(rfpc.calls, [
"__call__",
("set_opener", opener),
("set_url", "http://example.com:80/robots.txt"),
("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
"read",
("can_fetch", "", url),
])
# second time: just use existing parser
rfpc.clear()
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
("can_fetch", "", url),
])
# different URL on same server: same again
rfpc.clear()
url = "http://example.com:80/blah.html"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
("can_fetch", "", url),
])
# disallowed URL
rfpc.clear()
rfpc._can_fetch = False
url = "http://example.com:80/rhubarb.html"
req = Request(url)
try:
h.http_request(req)
except mechanize.HTTPError, e:
self.assert_(e.request == req)
self.assert_(e.code == 403)
# new host: reload robots.txt (even though the host and port are
# unchanged, we treat this as a new host because
# "example.com" != "example.com:80")
rfpc.clear()
rfpc._can_fetch = True
url = "http://example.com/rhubarb.html"
req = Request(url)
h.http_request(req)
self.assertEquals(rfpc.calls, [
"__call__",
("set_opener", opener),
("set_url", "http://example.com/robots.txt"),
("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
"read",
("can_fetch", "", url),
])
# https url -> should fetch robots.txt from https url too
rfpc.clear()
url = "https://example.org/rhubarb.html"
req = Request(url)
h.http_request(req)
self.assertEquals(rfpc.calls, [
"__call__",
("set_opener", opener),
("set_url", "https://example.org/robots.txt"),
("set_timeout", _sockettimeout._GLOBAL_DEFAULT_TIMEOUT),
"read",
("can_fetch", "", url),
])
# non-HTTP URL -> ignore robots.txt
rfpc.clear()
url = "ftp://example.com/"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [])
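# A sketch of the behaviour exercised above, outside the mock framework
# (hypothetical URL; needs network access): HTTPRobotRulesProcessor
# fetches and caches robots.txt per host, and a disallowed URL surfaces
# as an HTTPError with code 403.
#
#   opener = mechanize.build_opener(HTTPRobotRulesProcessor)
#   try:
#       opener.open("http://example.com/rhubarb.html")
#   except mechanize.HTTPError, e:
#       assert e.code == 403  # fetch forbidden by robots.txt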
def test_redirected_robots_txt(self):
# redirected robots.txt fetch shouldn't result in another attempted
# robots.txt fetch to check the redirection is allowed!
import mechanize
from mechanize import build_opener, HTTPHandler, \
HTTPDefaultErrorHandler, HTTPRedirectHandler, \
HTTPRobotRulesProcessor
class MockHTTPHandler(mechanize.BaseHandler):
def __init__(self):
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if req.get_full_url() == "http://example.com/robots.txt":
hdr = "Location: http://example.com/en/robots.txt\r\n\r\n"
msg = mimetools.Message(StringIO(hdr))
return self.parent.error(
"http", req, test_response(), 302, "Blah", msg)
else:
return test_response("Allow: *", [], req.get_full_url())
hh = MockHTTPHandler()
hdeh = HTTPDefaultErrorHandler()
hrh = HTTPRedirectHandler()
rh = HTTPRobotRulesProcessor()
o = build_test_opener(hh, hdeh, hrh, rh)
o.open("http://example.com/")
self.assertEqual([req.get_full_url() for req in hh.requests],
["http://example.com/robots.txt",
"http://example.com/en/robots.txt",
"http://example.com/",
])
def test_cookies(self):
cj = MockCookieJar()
h = HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assert_(cj.ach_req is req is newreq)
self.assert_(req.origin_req_host == "example.com")
self.assert_(cj.ach_u == False)
newr = h.http_response(req, r)
self.assert_(cj.ec_req is req)
self.assert_(cj.ec_r is r is newr)
self.assert_(cj.ec_u == False)
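# Typical (non-mock) wiring for the processor under test, as a sketch:
# a single CookieJar shared by every request made through the opener.
#
#   cj = mechanize.CookieJar()
#   opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
#   # opener.open(...) now stores and replays cookies via cj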
def test_seekable(self):
hide_deprecations()
try:
h = SeekableProcessor()
finally:
reset_deprecations()
o = h.parent = MockOpener()
req = mechanize.Request("http://example.com/")
class MockUnseekableResponse:
code = 200
msg = "OK"
def info(self): pass
def geturl(self): return ""
r = MockUnseekableResponse()
newr = h.any_response(req, r)
self.assert_(not hasattr(r, "seek"))
self.assert_(hasattr(newr, "seek"))
def test_http_equiv(self):
from mechanize import _response
h = HTTPEquivProcessor()
o = h.parent = MockOpener()
data = ('<html><head>'
'<meta http-equiv="Refresh" content="spam&eggs">'
'</head></html>'
)
headers = [("Foo", "Bar"),
("Content-type", "text/html"),
("Refresh", "blah"),
]
url = "http://example.com/"
req = Request(url)
r = _response.make_response(data, headers, url, 200, "OK")
newr = h.http_response(req, r)
new_headers = newr.info()
self.assertEqual(new_headers["Foo"], "Bar")
self.assertEqual(new_headers["Refresh"], "spam&eggs")
self.assertEqual(new_headers.getheaders("Refresh"),
["blah", "spam&eggs"])
def test_refresh(self):
# XXX test processor constructor optional args
h = HTTPRefreshProcessor(max_time=None, honor_time=False)
for val, valid in [
('0; url="http://example.com/foo/"', True),
("2", True),
# in the past, this failed with UnboundLocalError
('0; "http://example.com/foo/"', False),
]:
o = h.parent = MockOpener()
req = Request("http://example.com/")
headers = http_message({"refresh": val})
r = MockResponse(200, "OK", headers, "", "http://example.com/")
newr = h.http_response(req, r)
if valid:
self.assertEqual(o.proto, "http")
self.assertEqual(o.args, (req, r, "refresh", "OK", headers))
def test_refresh_honor_time(self):
class SleepTester:
def __init__(self, test, seconds):
self._test = test
if seconds == 0:
seconds = None # don't expect a sleep for 0 seconds
self._expected = seconds
self._got = None
def sleep(self, seconds):
self._got = seconds
def verify(self):
self._test.assertEqual(self._expected, self._got)
class Opener:
called = False
def error(self, *args, **kwds):
self.called = True
def test(rp, header, refresh_after):
expect_refresh = refresh_after is not None
opener = Opener()
rp.parent = opener
st = SleepTester(self, refresh_after)
rp._sleep = st.sleep
rp.http_response(Request("http://example.com"),
test_response(headers=[("Refresh", header)]),
)
self.assertEqual(expect_refresh, opener.called)
st.verify()
# by default, only zero-time refreshes are honoured
test(HTTPRefreshProcessor(), "0", 0)
test(HTTPRefreshProcessor(), "2", None)
# if requested, more than zero seconds are allowed
test(HTTPRefreshProcessor(max_time=None), "2", 2)
test(HTTPRefreshProcessor(max_time=30), "2", 2)
# no sleep if we don't "honor_time"
test(HTTPRefreshProcessor(max_time=30, honor_time=False), "2", 0)
# request for too-long wait before refreshing --> no refresh occurs
test(HTTPRefreshProcessor(max_time=30), "60", None)
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307, "refresh":
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.add_header("Nonsense", "viking=withhold")
+ if data is not None:
+ req.add_header("Content-length", str(len(data)))
req.add_unredirected_header("Spam", "spam")
req.origin_req_host = "example.com" # XXX
try:
method(req, MockFile(), code, "Blah",
http_message({"location": to_url}))
except mechanize.HTTPError:
# 307 in response to POST requires user OK
self.assert_(code == 307 and data is not None)
self.assert_(o.req.get_full_url() == to_url)
try:
self.assert_(o.req.get_method() == "GET")
except AttributeError:
self.assert_(not o.req.has_data())
+
+ # now it's a GET, there should not be headers regarding content
+ # (possibly dragged from before being a POST)
+ self.assertFalse(o.req.has_header("Content-length"))
+ self.assertFalse(o.req.has_header("Content-type"))
+
self.assert_(o.req.headers["Nonsense"] == "viking=withhold")
self.assert_(not o.req.headers.has_key("Spam"))
self.assert_(not o.req.unredirected_hdrs.has_key("Spam"))
# loop detection
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
http_message({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url)
req.origin_req_host = "example.com"
count = 0
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except mechanize.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assert_(count == HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url)
req.origin_req_host = "example.com"
count = 0
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except mechanize.HTTPError:
self.assert_(count == HTTPRedirectHandler.max_redirections)
def test_redirect_bad_uri(self):
# bad URIs should be cleaned up before redirection
from mechanize._response import test_html_response
from_url = "http://example.com/a.html"
bad_to_url = "http://example.com/b. |html"
good_to_url = "http://example.com/b.%20%7Chtml"
h = HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
h.http_error_302(req, test_html_response(), 302, "Blah",
http_message({"location": bad_to_url}),
)
self.assertEqual(o.req.get_full_url(), good_to_url)
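# The cleanup under test: unsafe bytes in a redirect Location get
# percent-encoded before the new request is made. A rough illustration
# with the standard library (mechanize's own escaping may differ in
# detail):
#
#   import urllib
#   urllib.quote("http://example.com/b. |html", safe=":/")
#   # -> 'http://example.com/b.%20%7Chtml'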
def test_refresh_bad_uri(self):
# bad URIs should be cleaned up before redirection
from mechanize._response import test_html_response
from_url = "http://example.com/a.html"
bad_to_url = "http://example.com/b. |html"
good_to_url = "http://example.com/b.%20%7Chtml"
h = HTTPRefreshProcessor(max_time=None, honor_time=False)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = test_html_response(
headers=[("refresh", '0; url="%s"' % bad_to_url)])
newr = h.http_response(req, r)
headers = o.args[-1]
self.assertEqual(headers["Location"], good_to_url)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
import mechanize
from mechanize import CookieJar, build_opener, HTTPHandler, \
HTTPCookieProcessor, HTTPError, HTTPDefaultErrorHandler, \
HTTPRedirectHandler
from test_cookies import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = HTTPDefaultErrorHandler()
hrh = HTTPRedirectHandler()
cp = HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assert_(not hh.req.has_header("Cookie"))
def test_proxy(self):
o = OpenerDirector()
ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o._maybe_reindex_handlers()
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_basic_auth(self):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = mechanize.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = mechanize.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/1479302, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(mechanize.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
mechanize.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(mechanize.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
mechanize.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(digest_handler)
opener.add_handler(basic_handler)
opener.add_handler(http_handler)
opener._maybe_reindex_handlers()
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64, httplib
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
r = opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.failIf(http_handler.requests[0].has_header(auth_header))
userpass = '%s:%s' % (user, password)
auth_hdr_value = 'Basic '+base64.encodestring(userpass).strip()
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
r = opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.failIf(http_handler.requests[0].has_header(auth_header))
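# Non-mock equivalent of the flow tested above (hypothetical URL and
# credentials): register a password for a realm/URI, open the URL, and
# the handler answers the 401 challenge with an Authorization header.
#
#   auth_handler = mechanize.HTTPBasicAuthHandler()
#   auth_handler.add_password(realm="ACME Widget Store",
#                             uri="http://acme.example.com/protected",
#                             user="wile", passwd="coyote")
#   opener = mechanize.build_opener(auth_handler)
#   # r = opener.open("http://acme.example.com/protected")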
class HeadParserTests(unittest.TestCase):
def test(self):
# XXX XHTML
from mechanize import HeadParser
htmls = [
("""<meta http-equiv="refresh" content="1; http://example.com/">
""",
[("refresh", "1; http://example.com/")]
),
("""
<html><head>
<meta http-equiv="refresh" content="1; http://example.com/">
<meta name="spam" content="eggs">
<meta http-equiv="foo" content="bar">
<p> <!-- p is not allowed in head, so parsing should stop here-->
<meta http-equiv="moo" content="cow">
</html>
""",
[("refresh", "1; http://example.com/"), ("foo", "bar")]),
("""<meta http-equiv="refresh">
""",
[])
]
for html, result in htmls:
self.assertEqual(parse_head(StringIO.StringIO(html), HeadParser()), result)
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(mechanize.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
msg = mimetools.Message(StringIO(self.headers))
return self.parent.error(
"http", req, test_response(), self.code, "Blah", msg)
else:
self.req = req
return test_response("", [], req.get_full_url())
class MyHTTPHandler(HTTPHandler): pass
class FooHandler(mechanize.BaseHandler):
def foo_open(self): pass
class BarHandler(mechanize.BaseHandler):
def bar_open(self): pass
class A:
def a(self): pass
class B(A):
def a(self): pass
def b(self): pass
class C(A):
def c(self): pass
class D(C, B):
def a(self): pass
def d(self): pass
class FunctionTests(unittest.TestCase):
def test_build_opener(self):
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, HTTPHandler)
o = build_opener(HTTPHandler)
self.opener_has_handler(o, HTTPHandler)
o = build_opener(HTTPHandler())
self.opener_has_handler(o, HTTPHandler)
def opener_has_handler(self, opener, handler_class):
for h in opener.handlers:
if h.__class__ == handler_class:
break
else:
self.assert_(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
|
Almad/Mechanize
|
38b4a3c2a3ea31051dde0562247d60c40c672510
|
Make functional_tests -l exit if local test server exits unexpectedly (use module subprocess instead of os.spawnv)
|
diff --git a/test-tools/testprogram.py b/test-tools/testprogram.py
index fa12ff8..02de3ee 100644
--- a/test-tools/testprogram.py
+++ b/test-tools/testprogram.py
@@ -1,314 +1,330 @@
"""Local server and cgitb support."""
import cgitb
#cgitb.enable(format="text")
-import sys, os, traceback, logging, glob, time
+import errno  # kill_posix below needs errno.ECHILD
+import glob
+import logging
+import os
+import subprocess
+import sys
+import time
+import traceback
from unittest import defaultTestLoader, TextTestRunner, TestSuite, TestCase, \
_TextTestResult
+class ServerStartupError(Exception):
+
+ pass
+
+
class ServerProcess:
def __init__(self, filename, name=None):
if filename is None:
raise ValueError('filename arg must be a string')
if name is None:
name = filename
self.name = os.path.basename(name)
self.port = None
self.report_hook = lambda msg: None
self._filename = filename
+ self._args = None
+ self._process = None
def _get_args(self):
"""Return list of command line arguments.
Override me.
"""
return []
def start(self):
- self.report_hook("starting (%s)" % (
- [sys.executable, self._filename]+self._get_args()))
- self._pid = os.spawnv(
- os.P_NOWAIT,
- sys.executable,
- [sys.executable, self._filename]+self._get_args())
+ self._args = [sys.executable, self._filename]+self._get_args()
+ self.report_hook("starting (%s)" % (self._args,))
+ self._process = subprocess.Popen(self._args)
self.report_hook("waiting for startup")
self._wait_for_startup()
self.report_hook("running")
def _wait_for_startup(self):
import socket
def connect():
+ self._process.poll()
+ if self._process.returncode is not None:
+ message = ("server exited on startup with status %d: %r" %
+ (self._process.returncode, self._args))
+ raise ServerStartupError(message)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(1.0)
try:
sock.connect(('127.0.0.1', self.port))
finally:
sock.close()
backoff(connect, (socket.error,))
def stop(self):
"""Kill process (forcefully if necessary)."""
+ pid = self._process.pid
if os.name == 'nt':
- kill_windows(self._pid, self.report_hook)
+ kill_windows(pid, self.report_hook)
else:
- kill_posix(self._pid, self.report_hook)
+ kill_posix(pid, self.report_hook)
def backoff(func, errors,
initial_timeout=1., hard_timeout=60., factor=1.2):
starttime = time.time()
timeout = initial_timeout
while time.time() < starttime + hard_timeout - 0.01:
try:
func()
except errors, exc:
time.sleep(timeout)
timeout *= factor
hard_limit = hard_timeout - (time.time() - starttime)
timeout = min(timeout, hard_limit)
else:
break
else:
raise
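# Sketch of how backoff is used by _wait_for_startup above: retry an
# action that raises while the server is still coming up, sleeping 1s,
# 1.2s, 1.44s, ... until it succeeds or ~60s elapse, then re-raise.
#
#   import socket
#   def connect():
#       sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       try:
#           sock.connect(("127.0.0.1", 8000))
#       finally:
#           sock.close()
#   backoff(connect, (socket.error,))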
def kill_windows(handle, report_hook):
try:
import win32api
except ImportError:
import ctypes
ctypes.windll.kernel32.TerminateProcess(int(handle), -1)
else:
win32api.TerminateProcess(int(handle), -1)
def kill_posix(pid, report_hook):
import signal
os.kill(pid, signal.SIGTERM)
timeout = 10.
starttime = time.time()
report_hook("waiting for exit")
def do_nothing(*args):
pass
old_handler = signal.signal(signal.SIGCHLD, do_nothing)
try:
while time.time() < starttime + timeout - 0.01:
# waitpid returns (0, 0) while the child is still running; don't
# clobber pid, which is needed for the SIGKILL fallback below
exited_pid, sts = os.waitpid(pid, os.WNOHANG)
if exited_pid != 0:
# exited, or error
break
# never pass a negative value to time.sleep()
newtimeout = max(timeout - (time.time() - starttime) - 1., 0.)
time.sleep(newtimeout) # wait for signal
else:
report_hook("forcefully killing")
try:
os.kill(pid, signal.SIGKILL)
except OSError, exc:
if exc.errno != errno.ECHILD:
raise
finally:
signal.signal(signal.SIGCHLD, old_handler)
class TwistedServerProcess(ServerProcess):
def __init__(self, name=None):
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
path = os.path.join(top_level_dir, "test-tools/twisted-localserver.py")
ServerProcess.__init__(self, path, name)
def _get_args(self):
return [str(self.port)]
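# Driving the server process by hand, as a sketch (port and report_hook
# are normally filled in by TestProgram below):
#
#   server = TwistedServerProcess()
#   server.port = 8000
#   server.report_hook = lambda msg: sys.stdout.write(msg + "\n")
#   server.start()  # raises ServerStartupError if the child exits early
#   try:
#       pass  # run functional tests against http://127.0.0.1:8000/
#   finally:
#       server.stop()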
class CgitbTextResult(_TextTestResult):
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
# cgitb.text formats the whole traceback, so assert*() frames need
# no special trimming here
return cgitb.text((exctype, value, tb))
class CgitbTextTestRunner(TextTestRunner):
def _makeResult(self):
return CgitbTextResult(self.stream, self.descriptions, self.verbosity)
def add_uri_attribute_to_test_cases(suite, uri):
for test in suite._tests:
if isinstance(test, TestCase):
test.uri = uri
else:
try:
add_uri_attribute_to_test_cases(test, uri)
except AttributeError:
pass
class TestProgram:
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = """\
Usage: %(progName)s [options] [test] [...]
Note that not all the functional tests take note of the --uri argument yet --
some currently always access the internet regardless of the --uri and
--run-local-server options.
Options:
-l, --run-local-server
Run a local Twisted HTTP server for the functional
tests. You need Twisted installed for this to work.
The server is run on the port given in the --uri
option. If --run-local-server is given but no --uri is
given, http://127.0.0.1:8000 is used as the base URI.
Also, if you're on Windows and don't have pywin32 or
ctypes installed, this option won't work, and you'll
have to start up test-tools/localserver.py manually.
--uri=URL Base URI for functional tests
(test.py does not access the network, unless you tell
it to run module functional_tests;
functional_tests.py does access the network)
e.g. --uri=http://127.0.0.1:8000/
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
The following options are only available through test.py (you can still run the
functional tests through test.py, just give 'functional_tests' as the module
name to run):
-u Skip plain (non-doctest) unittests
-d Skip doctests
-c Run coverage (requires coverage.py, seems buggy)
-t Display tracebacks using cgitb's text mode
"""
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s test_cookies
- run module 'test_cookies'
%(progName)s test_cookies.CookieTests
- run all 'test*' test methods in test_cookies.CookieTests
%(progName)s test_cookies.CookieTests.test_expires
- run test_cookies.CookieTests.test_expires
%(progName)s functional_tests
- run the functional tests
%(progName)s -l functional_tests
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
"""
def __init__(self, moduleNames, localServerProcess, defaultTest=None,
argv=None, testRunner=None, testLoader=defaultTestLoader,
defaultUri="http://wwwsearch.sourceforge.net/",
usageExamples=USAGE_EXAMPLES,
):
self.modules = []
for moduleName in moduleNames:
module = __import__(moduleName)
for part in moduleName.split('.')[1:]:
module = getattr(module, part)
self.modules.append(module)
self.uri = None
self._defaultUri = defaultUri
if argv is None:
argv = sys.argv
self.verbosity = 1
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.usageExamples = usageExamples
self.runLocalServer = False
self.parseArgs(argv)
if self.runLocalServer:
import urllib
from mechanize._rfc3986 import urlsplit
authority = urlsplit(self.uri)[1]
host, port = urllib.splitport(authority)
if port is None:
port = "80"
try:
port = int(port)
except ValueError:
self.usageExit("port in --uri value must be an integer "
"(try --uri=http://127.0.0.1:8000/)")
self._serverProcess = localServerProcess
def report(msg):
print "%s: %s" % (localServerProcess.name, msg)
localServerProcess.port = port
localServerProcess.report_hook = report
def usageExit(self, msg=None):
if msg: print msg
print (self.USAGE + self.usageExamples) % self.__dict__
sys.exit(2)
def parseArgs(self, argv):
import getopt
try:
options, args = getopt.getopt(
argv[1:],
'hHvql',
['help','verbose','quiet', 'uri=', 'run-local-server'],
)
uri = None
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('--uri',):
uri = value
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-l', '--run-local-server'):
self.runLocalServer = True
if uri is None:
if self.runLocalServer:
uri = "http://127.0.0.1:8000"
else:
uri = self._defaultUri
self.uri = uri
if len(args) == 0 and self.defaultTest is None:
suite = TestSuite()
for module in self.modules:
test = self.testLoader.loadTestsFromModule(module)
suite.addTest(test)
self.test = suite
add_uri_attribute_to_test_cases(self.test, self.uri)
return
if len(args) > 0:
self.testNames = args
else:
self.testNames = (self.defaultTest,)
self.createTests()
add_uri_attribute_to_test_cases(self.test, self.uri)
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
self.test = self.testLoader.loadTestsFromNames(self.testNames)
def runTests(self):
if self.testRunner is None:
self.testRunner = TextTestRunner(verbosity=self.verbosity)
if self.runLocalServer:
self._serverProcess.start()
try:
result = self.testRunner.run(self.test)
finally:
if self.runLocalServer:
self._serverProcess.stop()
return result
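# Typical wiring at the bottom of a test runner script (a sketch):
#
#   if __name__ == "__main__":
#       prog = TestProgram(["functional_tests"], TwistedServerProcess())
#       result = prog.runTests()
#       sys.exit(not result.wasSuccessful())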
|
Almad/Mechanize
|
dc507f33e1145452a85bb7a43df3d26ec59481a9
|
* Don't change CookieJar state in .make_cookies(). * Fix AttributeError in case where .make_cookies() or .cookies_for_request() is called before other methods like .extract_cookies() or .make_cookie_header()
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index 45cf970..8306240 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -550,1155 +550,1159 @@ class DefaultCookiePolicy(CookiePolicy):
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
rules for Netscape cookies:
DomainStrictNoDots: when setting cookies, host prefix must not contain a
dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
www.foo contains a dot)
DomainStrictNonDomain: cookies that did not explicitly specify a Domain
cookie-attribute can only be returned to a domain that string-compares
equal to the domain that set the cookie (eg. rockets.acme.com won't
be returned cookies from acme.com that had no Domain cookie-attribute)
DomainRFC2965Match: when setting cookies, require a full RFC 2965
domain-match
DomainLiberal and DomainStrict are the most useful combinations of the
above flags, for convenience
strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
have names starting with '$'
strict_ns_set_path: don't allow setting cookies whose path doesn't
path-match request URI
"""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
# WARNING: this argument will change or go away if it is not
# accepted into the Python standard library in this form!
# default, ie. treat 2109 as netscape iff not rfc2965
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""
Constructor arguments should be used as keyword arguments only.
blocked_domains: sequence of domain names that we never accept cookies
from, nor return cookies to
allowed_domains: if not None, this is a sequence of the only domains
for which we accept and return cookies
For other arguments, see CookiePolicy.__doc__ and
DefaultCookiePolicy.__doc__.
"""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override set_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
debug(" - checking cookie %s", cookie)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
debug(" Set-Cookie2 without version attribute (%s)", cookie)
return False
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host_lc(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host_lc(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host_lc(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
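# Configuring the policy, as a sketch with hypothetical domains: block
# one ad network and apply the strict Netscape domain-matching rules
# described in the class docstring.
#
#   policy = DefaultCookiePolicy(
#       blocked_domains=[".doubleclick.net"],
#       strict_ns_domain=DefaultCookiePolicy.DomainStrict)
#   jar = CookieJar(policy=policy)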
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
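# Example: leaves of a nested mapping come out depth-first, in sorted
# key order.
#
#   >>> list(MappingIterator({"b": 2, "a": {"x": 1}}))
#   [1, 2]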
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
get_policy()
set_policy(policy)
cookies_for_request(request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def get_policy(self):
return self._policy
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
+
def cookies_for_request(self, request):
"""Return a list of cookies to be returned to server.
The returned list of cookie instances is sorted in the order they
should appear in the Cookie: header for return to the server.
See add_cookie_header.__doc__ for the interface required of the
request argument.
New in version 0.1.10
"""
+ self._policy._now = self._now = int(time.time())
cookies = self._cookies_for_request(request)
# add cookies in order of most specific (i.e. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
# this method still exists (alongside cookies_for_request) because it
# is part of an implied protected interface for subclasses of cookiejar
# XXX document that implied interface, or provide another way of
# implementing cookiejars than subclassing
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
The $Version attribute is also added when appropriate (currently only
once per request).
>>> jar = CookieJar()
>>> ns_cookie = Cookie(0, "foo", '"bar"', None, False,
... "example.com", False, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([ns_cookie])
['foo="bar"']
>>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False,
... ".example.com", True, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([rfc2965_cookie])
['$Version=1', 'foo=bar', '$Domain="example.com"']
"""
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, is_unverifiable, get_type,
has_header, get_header, header_items and add_unredirected_header, as
documented by urllib2, and the port attribute (the port number).
Actually, RequestUpgradeProcessor will automatically upgrade your
Request object to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
- self._policy._now = self._now = int(time.time())
-
cookies = self.cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
if v is None:
debug(" missing value for max-age attribute")
bad_cookie = True
break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules.  Remember that a zero Max-Age is a
# request to discard the (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host_lc(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
- elif expires <= self._now:
- # Expiry date in past is request to delete cookie. This can't be
- # in DefaultCookiePolicy, because can't delete cookies there.
- try:
- self.clear(domain, path, name)
- except KeyError:
- pass
- debug("Expiring cookie, domain='%s', path='%s', name='%s'",
- domain, path, name)
- return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
- def make_cookies(self, response, request):
- """Return sequence of Cookie objects extracted from response object.
-
- See extract_cookies.__doc__ for the interface required of the
- response and request arguments.
-
- """
+ def _make_cookies(self, response, request):
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
+ def make_cookies(self, response, request):
+ """Return sequence of Cookie objects extracted from response object.
+
+ See extract_cookies.__doc__ for the interface required of the
+ response and request arguments.
+
+ """
+ self._policy._now = self._now = int(time.time())
+ return [cookie for cookie in self._make_cookies(response, request)
+ if cookie.expires is None or not cookie.expires <= self._now]
+
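# A hedged, doctest-style sketch of the split above: the public
# make_cookies() parses headers and filters out already-expired cookies,
# but never mutates jar state; storing is left to the caller.
#
#   >>> cookies = jar.make_cookies(response, request)  # jar unchanged
#   >>> for cookie in cookies:
#   ...     jar.set_cookie(cookie)                     # store explicitly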
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
object' may be any object that provides a getheaders method).
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_type, get_host, and is_unverifiable, as
documented by urllib2, and the port attribute (the port number). The
request is used to set default values for cookie-attributes as well as
for checking that the cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
- for cookie in self.make_cookies(response, request):
- if self._policy.set_ok(cookie, request):
+ for cookie in self._make_cookies(response, request):
+ if cookie.expires is not None and cookie.expires <= self._now:
+ # Expiry date in past is request to delete cookie. This can't be
+ # in DefaultCookiePolicy, because can't delete cookies there.
+ try:
+ self.clear(cookie.domain, cookie.path, cookie.name)
+ except KeyError:
+ pass
+ debug("Expiring cookie, domain='%s', path='%s', name='%s'",
+ cookie.domain, cookie.path, cookie.name)
+ elif self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Discards all cookies held by object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
closing the browser window.
Note that the save method won't save session cookies anyway, unless you
ask otherwise by passing a true ignore_discard argument.
"""
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the save
method won't save expired cookies anyway (unless you ask otherwise by
passing a true ignore_expires argument).
"""
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
def __getitem__(self, i):
if i == 0:
self._getitem_iterator = self.__iter__()
elif self._prev_getitem_index != i-1: raise IndexError(
"CookieJar.__getitem__ only supports sequential iteration")
self._prev_getitem_index = i
try:
return self._getitem_iterator.next()
except StopIteration:
raise IndexError()
def __iter__(self):
return MappingIterator(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
class LoadError(Exception): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file.
Additional methods
save(filename=None, ignore_discard=False, ignore_expires=False)
load(filename=None, ignore_discard=False, ignore_expires=False)
revert(filename=None, ignore_discard=False, ignore_expires=False)
Additional public attributes
filename: filename for loading and saving cookies
Additional public readable attributes
delayload: request that cookies are lazily loaded from disk; this is only
a hint since this only affects performance, not behaviour (unless the
cookies on disk are changing); a CookieJar object may ignore it (in fact,
only MSIECookieJar lazily loads cookies at the moment)
"""
def __init__(self, filename=None, delayload=False, policy=None):
"""
See FileCookieJar.__doc__ for argument documentation.
Cookies are NOT loaded from the named file until either the load or
revert method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file.
filename: name of file in which to save cookies
ignore_discard: save even cookies set to be discarded
ignore_expires: save even cookies that have expired
The file is overwritten if it already exists, thus wiping all its
cookies. Saved cookies can be restored later using the load or revert
methods. If filename is not specified, self.filename is used; if
self.filename is None, ValueError is raised.
"""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file.
Old cookies are kept unless overwritten by newly loaded ones.
Arguments are as for .save().
If filename is not specified, self.filename is used; if self.filename
is None, ValueError is raised. The named file must be in the format
understood by the class, or LoadError will be raised. This format will
be identical to that written by the save method, unless the load format
is not sufficiently well understood (as is the case for MSIECookieJar).
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
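# A minimal usage sketch of the FileCookieJar interface via the
# MozillaCookieJar subclass (FileCookieJar itself leaves save() abstract);
# the filename "cookies.txt" is an illustrative choice.
def _example_file_cookiejar():
    from mechanize import MozillaCookieJar
    jar = MozillaCookieJar("cookies.txt")
    jar.save(ignore_discard=True)    # write current cookies to disk
    jar.clear()                      # drop all in-memory cookies ...
    jar.revert(ignore_discard=True)  # ... then reload them from the file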
diff --git a/test/test_cookies.py b/test/test_cookies.py
index 0a41afb..0232566 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,752 +1,776 @@
"""Tests for _ClientCookie."""
import sys, urllib2, re, os, StringIO, mimetools, time, tempfile, errno, inspect
from time import localtime
from unittest import TestCase
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
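# Illustrative use of the helpers above (not one of the test cases): set a
# Netscape cookie for a site, then confirm the next request to the same
# site carries it in the Cookie: header.
def _example_interact():
    from mechanize import CookieJar
    jar = CookieJar()
    interact_netscape(jar, "http://www.example.com/", "sid=abc123")
    assert interact_netscape(jar, "http://www.example.com/") == "sid=abc123"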
class TempfileTestMixin:
def setUp(self):
self._tempfiles = []
def tearDown(self):
for fn in self._tempfiles:
try:
os.remove(fn)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def mktemp(self):
fn = tempfile.mktemp()
self._tempfiles.append(fn)
return fn
def caller():
return sys._getframe().f_back.f_back.f_code.co_name
def attribute_names(obj):
return set([spec[0] for spec in inspect.getmembers(obj)
if not spec[0].startswith("__")])
class CookieJarInterfaceTests(TestCase):
def test_add_cookie_header(self):
from mechanize import CookieJar
# verify only these methods are used
class MockRequest(object):
def __init__(self):
self.added_headers = []
self.called = set()
def log_called(self):
self.called.add(caller())
def get_full_url(self):
self.log_called()
return "https://example.com:443"
def get_host(self):
self.log_called()
return "example.com:443"
def get_type(self):
self.log_called()
return "https"
def has_header(self, header_name):
self.log_called()
return False
def get_header(self, header_name, default=None):
self.log_called()
pass # currently not called
def header_items(self):
self.log_called()
pass # currently not called
def add_unredirected_header(self, key, val):
self.log_called()
self.added_headers.append((key, val))
def is_unverifiable(self):
self.log_called()
return False
@property
def port(self):
import traceback; traceback.print_stack()
self.log_called()
pass # currently not used, since urllib2 always sets .port None
jar = CookieJar()
interact_netscape(jar, "https://example.com:443",
"foo=bar; port=443; secure")
request = MockRequest()
jar.add_cookie_header(request)
expect_called = attribute_names(MockRequest) - set(
["port", "get_header", "header_items", "log_called"])
self.assertEquals(request.called, expect_called)
self.assertEquals(request.added_headers, [("Cookie", "foo=bar")])
def test_extract_cookies(self):
from mechanize import CookieJar
# verify only these methods are used
class StubMessage(object):
def getheaders(self, name):
return ["foo=bar; port=443"]
class StubResponse(object):
def info(self):
return StubMessage()
class StubRequest(object):
def __init__(self):
self.added_headers = []
self.called = set()
def log_called(self):
self.called.add(caller())
def get_full_url(self):
self.log_called()
return "https://example.com:443"
def get_host(self):
self.log_called()
return "example.com:443"
def is_unverifiable(self):
self.log_called()
return False
@property
def port(self):
import traceback; traceback.print_stack()
self.log_called()
pass # currently not used, since urllib2 always sets .port None
jar = CookieJar()
response = StubResponse()
request = StubRequest()
jar.extract_cookies(response, request)
expect_called = attribute_names(StubRequest) - set(
["port", "log_called"])
self.assertEquals(request.called, expect_called)
self.assertEquals([(cookie.name, cookie.value) for cookie in jar],
[("foo", "bar")])
def test_unverifiable(self):
from mechanize._clientcookie import request_is_unverifiable
# .unverifiable was added in mechanize, .is_unverifiable() later got
# added in cookielib. XXX deprecate .unverifiable
class StubRequest(object):
def __init__(self, attrs):
self._attrs = attrs
self.accessed = set()
def __getattr__(self, name):
self.accessed.add(name)
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
request = StubRequest(dict(is_unverifiable=lambda: False))
self.assertEquals(request_is_unverifiable(request), False)
request = StubRequest(dict(is_unverifiable=lambda: False,
unverifiable=True))
self.assertEquals(request_is_unverifiable(request), False)
request = StubRequest(dict(unverifiable=False))
self.assertEquals(request_is_unverifiable(request), False)
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_policy(self):
import mechanize
policy = mechanize.DefaultCookiePolicy()
jar = mechanize.CookieJar()
jar.set_policy(policy)
self.assertEquals(jar.get_policy(), policy)
+ def test_make_cookies_doesnt_change_jar_state(self):
+ from mechanize import CookieJar, Request, Cookie
+ from mechanize._util import time2netscape
+ from mechanize._response import test_response
+ cookie = Cookie(0, "spam", "eggs",
+ "80", False,
+ "example.com", False, False,
+ "/", False,
+ False,
+ None,
+ False,
+ "",
+ "",
+ {})
+ jar = CookieJar()
+ jar._policy._now = jar._now = int(time.time())
+ jar.set_cookie(cookie)
+ self.assertEquals(len(jar), 1)
+        set_cookie = "spam=eggs; expires=%s" % time2netscape(time.time() - 1000)
+ url = "http://example.com/"
+ response = test_response(url=url, headers=[("Set-Cookie", set_cookie)])
+ jar.make_cookies(response, Request(url))
+ self.assertEquals(len(jar), 1)
+
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. We regard it as a missing VALUE.
filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host_lc(self):
from mechanize import Request
from mechanize._clientcookie import request_host_lc
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host_lc(req) == "www.acme.com"
assert request_host_lc(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host_lc(req) == "www.acme.com"
# not actually sure this one is a valid Request object, so maybe we should
# remove test for no host in url in request_host_lc function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host_lc(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host_lc(req) == "www.acme.com"
# the _lc function lower-cases the result
req = Request("http://EXAMPLE.com")
assert request_host_lc(req) == "example.com"
def test_effective_request_host(self):
from mechanize import Request, effective_request_host
self.assertEquals(
effective_request_host(Request("http://www.EXAMPLE.com/spam")),
"www.EXAMPLE.com")
self.assertEquals(
effective_request_host(Request("http://bob/spam")),
"bob.local")
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
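# A short sketch (mirroring test_domain_allow above) of the parse/store
# split: make_cookies only turns Set-Cookie headers into Cookie objects,
# while set_cookie stores one unconditionally, bypassing the policy.
def _example_make_then_set():
    from mechanize import CookieJar, Request
    jar = CookieJar()
    req = Request("http://www.example.com/")
    res = FakeResponse(["Set-Cookie: spam=eggs"],
                       "http://www.example.com/")
    for cookie in jar.make_cookies(res, req):
        jar.set_cookie(cookie)
    assert len(jar) == 1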
|
Almad/Mechanize
|
5f3cc188068d1842951548a671c9d237d7719d1a
|
Add new public function effective_request_host
|
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index 3f786ec..4bb20aa 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,139 +1,140 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
import logging
import sys
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
- CookieJar, FileCookieJar, LoadError, request_host
+ CookieJar, FileCookieJar, LoadError, request_host_lc as request_host, \
+ effective_request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
# 2.4 raises SyntaxError due to generator / try/finally use
if sys.version_info[:2] > (2,4):
try:
import sqlite3
except ImportError:
pass
else:
from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
logger = logging.getLogger("mechanize")
if logger.level is logging.NOTSET:
logger.setLevel(logging.CRITICAL)
del logger
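# Example use of the newly exported helper (illustrative only): for a
# dotless request-host, the effective request-host gains a ".local"
# suffix, as RFC 2965 requires.
def _example_effective_request_host():
    import mechanize
    request = mechanize.Request("http://bob/spam")
    assert mechanize.effective_request_host(request) == "bob.local"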
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index 7f265dd..45cf970 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -1,1699 +1,1704 @@
"""HTTP cookie handling for web clients.
This module originally developed from my port of Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
Comments to John J Lee <[email protected]>.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
Copyright 2002-2003 Johnny Lee (original MSIE Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import sys, re, copy, time, urllib, types, logging
try:
import threading
_threading = threading; del threading
except ImportError:
import dummy_threading
_threading = dummy_threading; del dummy_threading
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
DEFAULT_HTTP_PORT = "80"
from _headersutil import split_header_words, parse_ns_headers
from _util import isstringlike
import _rfc3986
debug = logging.getLogger("mechanize.cookies").debug
def reraise_unmasked_exceptions(unmasked=()):
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways.
# This function re-raises some exceptions we don't want to trap.
import mechanize, warnings
if not mechanize.USE_BARE_EXCEPT:
raise
unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError)
etype = sys.exc_info()[0]
if issubclass(etype, unmasked):
raise
# swallowed an exception
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("mechanize bug!\n%s" % msg, stacklevel=2)
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
# at other uses of IPV4_RE also, if change this.
return not (IPV4_RE.search(text) or
text == "" or
text[0] == "." or text[-1] == ".")
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
has_form_nb = not (i == -1 or i == 0)
return (
has_form_nb and
B.startswith(".") and
is_HDN(B[1:])
)
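# Quick illustration of the non-commutativity described in the docstring:
# a.b.c.com domain-matches .c.com, but .c.com does not match a.b.c.com.
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")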
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
return not IPV4_RE.search(text)
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = _rfc3986.urlsplit(url)[1]
if host is None:
host = request.get_header("Host", "")
-
# remove port, if present
- host = cut_port_re.sub("", host, 1)
- return host.lower()
-
-def eff_request_host(request):
- """Return a tuple (request-host, effective request-host name).
+ return cut_port_re.sub("", host, 1)
- As defined by RFC 2965, except both are lowercased.
+def request_host_lc(request):
+ return request_host(request).lower()
- """
+def eff_request_host(request):
+ """Return a tuple (request-host, effective request-host name)."""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
+def eff_request_host_lc(request):
+ req_host, erhn = eff_request_host(request)
+ return req_host.lower(), erhn.lower()
+
+def effective_request_host(request):
+ """Return the effective request-host, as defined by RFC 2965."""
+ return eff_request_host(request)[1]
+
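# A small sketch of the split above (illustrative): request_host() now
# preserves case, request_host_lc() lower-cases it, and eff_request_host()
# appends ".local" to dotless hosts.
def _example_request_hosts():
    from mechanize import Request
    assert request_host_lc(Request("http://EXAMPLE.com/")) == "example.com"
    assert eff_request_host(Request("http://bob/spam")) == ("bob", "bob.local")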
def request_path(request):
"""request-URI, as defined by RFC 2965."""
url = request.get_full_url()
path, query, frag = _rfc3986.urlsplit(url)[2:]
path = escape_path(path)
req_path = _rfc3986.urlunsplit((None, None, path, query, frag))
if not req_path.startswith("/"):
req_path = "/"+req_path
return req_path
def request_port(request):
host = request.get_host()
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
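# Illustration: the port is taken from the request URL (via get_host()),
# not from any Host: header, and defaults to the string "80" when absent.
def _example_request_port():
    from mechanize import Request
    assert request_port(Request("http://www.acme.com:1234/")) == "1234"
    assert request_port(Request("http://www.acme.com/")) == DEFAULT_HTTP_PORT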
def request_is_unverifiable(request):
try:
return request.is_unverifiable()
except AttributeError:
if hasattr(request, "unverifiable"):
return request.unverifiable
else:
raise
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
if isinstance(path, types.UnicodeType):
path = path.encode("utf-8")
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
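# Illustration of escape_path(): unsafe characters are %-escaped, existing
# escapes are uppercased, and already-safe text is left untouched.
assert escape_path("/fo%7do/bar") == "/fo%7Do/bar"
assert escape_path("/foo\031/bar") == "/foo%19/bar"
assert escape_path("/foo/bar&") == "/foo/bar&"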
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
- req_host = request_host(request)
+ req_host = request_host_lc(request)
# the origin request's request-host was stuffed into request by
# _urllib2_support.AbstractHTTPHandler
return not domain_match(req_host, reach(request.origin_req_host))
class Cookie:
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
version: integer;
name: string;
value: string (may be None);
port: string; None indicates no attribute was supplied (eg. "Port", rather
than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list
string (eg. "80,8080")
port_specified: boolean; true if a value was supplied with the Port
cookie-attribute
domain: string;
domain_specified: boolean; true if Domain was explicitly set
domain_initial_dot: boolean; true if Domain as set in HTTP header by server
started with a dot (yes, this really is necessary!)
path: string;
path_specified: boolean; true if Path was explicitly set
secure: boolean; true if should only be returned over secure connection
expires: integer; seconds since epoch (RFC 2965 cookies should calculate
this value from the Max-Age attribute)
discard: boolean, true if this is a session cookie; (if no expires value,
this should be true)
comment: string;
comment_url: string;
rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not
Set-Cookie2:) header, but had a version cookie-attribute of 1
rest: mapping of other cookie-attributes
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return self._rest.has_key(name)
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def nonstandard_attr_keys(self):
return self._rest.keys()
def is_expired(self, now=None):
if now is None: now = time.time()
return (self.expires is not None) and (self.expires <= now)
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
def __repr__(self):
args = []
for name in ["version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
]:
attr = getattr(self, name)
args.append("%s=%s" % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
As well as implementing set_ok and return_ok, implementations of this
interface must also supply the following attributes, indicating which
protocols should be used, and how. These can be read and set at any time,
though whether that makes complete sense from the protocol point of view is
doubtful.
Public attributes:
netscape: implement netscape protocol
rfc2965: implement RFC 2965 protocol
rfc2109_as_netscape:
WARNING: This argument will change or go away if it is not accepted into
the Python standard library in this form!
If true, treat RFC 2109 cookies as though they were Netscape cookies. The
default is for this attribute to be None, which means treat 2109 cookies
as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is,
by default), and as Netscape cookies otherwise.
hide_cookie2: don't add Cookie2 header to requests (the presence of
this header indicates to the server that we understand RFC 2965
cookies)
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.extract_cookies.__doc__
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.add_cookie_header.__doc__
"""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
This is here as an optimization, to remove the need for checking every
cookie with a particular domain (which may involve reading many files).
The default implementations of domain_return_ok and path_return_ok
(return True) leave all the work to return_ok.
If domain_return_ok returns true for the cookie domain, path_return_ok
is called for the cookie path. Otherwise, path_return_ok and return_ok
are never called for that cookie domain. If path_return_ok returns
true, return_ok is called with the Cookie object itself for a full
check. Otherwise, return_ok is never called for that cookie path.
Note that domain_return_ok is called for every *cookie* domain, not
just for the *request* domain. For example, the function might be
called with both ".acme.com" and "www.acme.com" if the request domain
is "www.acme.com". The same goes for path_return_ok.
For argument documentation, see the docstring for return_ok.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
See the docstring for domain_return_ok.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies.
Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is
switched off by default.
The easiest way to provide your own policy is to override this class and
call its methods in your overridden implementations before adding your own
additional checks.
import mechanize
class MyCookiePolicy(mechanize.DefaultCookiePolicy):
def set_ok(self, cookie, request):
if not mechanize.DefaultCookiePolicy.set_ok(
self, cookie, request):
return False
if i_dont_want_to_store_this_cookie():
return False
return True
In addition to the features required to implement the CookiePolicy
interface, this class allows you to block and allow domains from setting
and receiving cookies. There are also some strictness switches that allow
you to tighten up the rather loose Netscape protocol rules a little bit (at
the cost of blocking some benign cookies).
A domain blacklist and whitelist are provided (both off by default). Only
domains not in the blacklist and present in the whitelist (if the whitelist
is active) participate in cookie setting and returning. Use the
blocked_domains constructor argument, and blocked_domains and
set_blocked_domains methods (and the corresponding argument and methods for
allowed_domains). If you set a whitelist, you can turn it off again by
setting it to None.
Domains in block or allow lists that do not start with a dot must
string-compare equal. For example, "acme.com" matches a blacklist entry of
"acme.com", but "www.acme.com" does not. Domains that do start with a dot
are matched by more specific domains too. For example, both "www.acme.com"
and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does
not). IP addresses are an exception, and must match exactly. For example,
if blocked_domains contains "192.168.1.2" and ".168.1.2", then 192.168.1.2 is
blocked, but 193.168.1.2 is not.
Additional Public Attributes:
General strictness switches
strict_domain: don't allow sites to set two-component domains with
country-code top-level domains like .co.uk, .gov.uk, .co.nz. etc.
This is far from perfect and isn't guaranteed to work!
RFC 2965 protocol strictness switches
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
rules for Netscape cookies:
DomainStrictNoDots: when setting cookies, host prefix must not contain a
dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
www.foo contains a dot)
DomainStrictNonDomain: cookies that did not explicitly specify a Domain
cookie-attribute can only be returned to a domain that string-compares
equal to the domain that set the cookie (eg. rockets.acme.com won't
be returned cookies from acme.com that had no Domain cookie-attribute)
DomainRFC2965Match: when setting cookies, require a full RFC 2965
domain-match
DomainLiberal and DomainStrict are the most useful combinations of the
above flags, for convenience
strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
have names starting with '$'
strict_ns_set_path: don't allow setting cookies whose path doesn't
path-match request URI
"""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
# WARNING: this argument will change or go away if it is not
# accepted into the Python standard library in this form!
# default, ie. treat 2109 as netscape iff not rfc2965
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""
Constructor arguments should be used as keyword arguments only.
blocked_domains: sequence of domain names that we never accept cookies
from, nor return cookies to
allowed_domains: if not None, this is a sequence of the only domains
for which we accept and return cookies
For other arguments, see CookiePolicy.__doc__ and
DefaultCookiePolicy.__doc__.
"""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override set_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
debug(" - checking cookie %s", cookie)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
debug(" Set-Cookie2 without version attribute (%s)", cookie)
return False
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
- req_host, erhn = eff_request_host(request)
+ req_host, erhn = eff_request_host_lc(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
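# Illustrative example (assumed values): a cookie set with
# 'Port="80,8080"' passes the check above for a request on port 8080,
# but fails for a request on port 443, which is then logged and refused.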
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
- req_host, erhn = eff_request_host(request)
+ req_host, erhn = eff_request_host_lc(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
- dotted_req_host, dotted_erhn = eff_request_host(request)
+ dotted_req_host, dotted_erhn = eff_request_host_lc(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
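# --- Illustrative sketch, not part of mechanize: using the block and
# allow lists documented in DefaultCookiePolicy.__doc__.  The domain
# names are made up for demonstration.
def _example_blocked_domains():
    policy = DefaultCookiePolicy(
        blocked_domains=[".tracker.test", "ads.example.com"])
    # dot-initial entries match more specific domains too
    assert policy.is_blocked("pixel.tracker.test")
    # entries without a leading dot must string-compare equal
    assert policy.is_blocked("ads.example.com")
    assert not policy.is_blocked("www.ads.example.com")
    return policy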
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
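# --- Illustrative sketch, not part of mechanize: MappingIterator walks
# a nested mapping depth-first in key order and yields only the leaf
# (non-mapping) values -- the shape CookieJar._cookies uses
# (domain -> path -> name -> Cookie).  The values here are made up.
def _example_mapping_iteration():
    nested = {"b.com": {"/": {"spam": 1}}, "a.com": {"/": {"eggs": 2}}}
    return list(MappingIterator(nested))  # [2, 1]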
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
get_policy()
set_policy(policy)
cookies_for_request(request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def get_policy(self):
return self._policy
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
def cookies_for_request(self, request):
"""Return a list of cookies to be returned to server.
The returned list of cookie instances is sorted in the order they
should appear in the Cookie: header for return to the server.
See add_cookie_header.__doc__ for the interface required of the
request argument.
New in version 0.1.10
"""
cookies = self._cookies_for_request(request)
# add cookies in order of most specific (i.e. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
# this method still exists (alongside cookies_for_request) because it
# is part of an implied protected interface for subclasses of cookiejar
# XXX document that implied interface, or provide another way of
# implementing cookiejars than subclassing
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
The $Version attribute is also added when appropriate (currently only
once per request).
>>> jar = CookieJar()
>>> ns_cookie = Cookie(0, "foo", '"bar"', None, False,
... "example.com", False, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([ns_cookie])
['foo="bar"']
>>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False,
... ".example.com", True, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([rfc2965_cookie])
['$Version=1', 'foo=bar', '$Domain="example.com"']
"""
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; it was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, is_unverifiable, get_type,
has_header, get_header, header_items and add_unredirected_header, as
documented by urllib2, and the port attribute (the port number).
Actually, RequestUpgradeProcessor will automatically upgrade your
Request object to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
self._policy._now = self._now = int(time.time())
cookies = self.cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
if v is None:
debug(" missing value for max-age attribute")
bad_cookie = True
break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
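# Illustrative example (assumed values): for the header
#   Set-Cookie: spam=eggs; domain=.acme.com; secure
# attrs_set is [[("spam", "eggs"), ("domain", ".acme.com"),
# ("secure", None)]], and the resulting normalised tuple is
#   ("spam", "eggs", {"domain": ".acme.com", "secure": True}, {}).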
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
- req_host, erhn = eff_request_host(request)
+ req_host, erhn = eff_request_host_lc(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because we can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object.
See extract_cookies.__doc__ for the interface required of the
response and request arguments.
"""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
object' may be any object that provides a getheaders method).
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_type, get_host, and is_unverifiable, as
documented by urllib2, and the port attribute (the port number). The
request is used to set default values for cookie-attributes as well as
for checking that the cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Discards all cookies held by object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
closing the browser window.
Note that the save method won't save session cookies anyway, unless you
ask otherwise by passing a true ignore_discard argument.
"""
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the save
method won't save expired cookies anyway (unless you ask otherwise by
passing a true ignore_expires argument).
"""
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
def __getitem__(self, i):
if i == 0:
self._getitem_iterator = self.__iter__()
elif self._prev_getitem_index != i-1: raise IndexError(
"CookieJar.__getitem__ only supports sequential iteration")
self._prev_getitem_index = i
try:
return self._getitem_iterator.next()
except StopIteration:
raise IndexError()
def __iter__(self):
return MappingIterator(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
class LoadError(Exception): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file.
Additional methods
save(filename=None, ignore_discard=False, ignore_expires=False)
load(filename=None, ignore_discard=False, ignore_expires=False)
revert(filename=None, ignore_discard=False, ignore_expires=False)
Additional public attributes
filename: filename for loading and saving cookies
Additional public readable attributes
delayload: request that cookies are lazily loaded from disk; this is only
a hint since this only affects performance, not behaviour (unless the
cookies on disk are changing); a CookieJar object may ignore it (in fact,
only MSIECookieJar lazily loads cookies at the moment)
"""
def __init__(self, filename=None, delayload=False, policy=None):
"""
See FileCookieJar.__doc__ for argument documentation.
Cookies are NOT loaded from the named file until either the load or
revert method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file.
filename: name of file in which to save cookies
ignore_discard: save even cookies set to be discarded
ignore_expires: save even cookies that have expired
The file is overwritten if it already exists, thus wiping all its
cookies. Saved cookies can be restored later using the load or revert
methods. If filename is not specified, self.filename is used; if
self.filename is None, ValueError is raised.
"""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file.
Old cookies are kept unless overwritten by newly loaded ones.
Arguments are as for .save().
If filename is not specified, self.filename is used; if self.filename
is None, ValueError is raised. The named file must be in the format
understood by the class, or LoadError will be raised. This format will
be identical to that written by the save method, unless the load format
is not sufficiently well understood (as is the case for MSIECookieJar).
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
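# --- Illustrative sketch, not part of mechanize: persisting cookies
# with a concrete FileCookieJar subclass.  LWPCookieJar is part of
# mechanize's public API; the filename is made up.
def _example_save_and_reload(filename="cookies.lwp"):
    from mechanize import LWPCookieJar
    jar = LWPCookieJar(filename)
    # ... browse and collect cookies here ...
    jar.save(ignore_discard=True)    # also keep session cookies
    fresh = LWPCookieJar(filename)
    fresh.load(ignore_discard=True)
    return fresh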
diff --git a/mechanize/_request.py b/mechanize/_request.py
index 63998a4..e946c41 100644
--- a/mechanize/_request.py
+++ b/mechanize/_request.py
@@ -1,84 +1,84 @@
"""Integration with Python standard library module urllib2: Request class.
Copyright 2004-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import urllib2, urllib, logging
-from _clientcookie import request_host
+from _clientcookie import request_host_lc
import _rfc3986
warn = logging.getLogger("mechanize").warning
class Request(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, visit=None):
# In mechanize 0.2, the interpretation of a unicode url argument will
# change: A unicode url argument will be interpreted as an IRI, and a
# bytestring as a URI. For now, we accept unicode or bytestring. We
# don't insist that the value is always a URI (specifically, must only
# contain characters which are legal), because that might break working
# code (who knows what bytes some servers want to see, especially with
# browser plugins for internationalised URIs).
if not _rfc3986.is_clean_uri(url):
warn("url argument is not a URI "
"(contains illegal characters) %r" % url)
urllib2.Request.__init__(self, url, data, headers)
self.selector = None
self.unredirected_hdrs = {}
self.visit = visit
# All the terminology below comes from RFC 2965.
self.unverifiable = unverifiable
# Set request-host of origin transaction.
# The origin request-host is needed in order to decide whether
# unverifiable sub-requests (automatic redirects, images embedded
# in HTML, etc.) are to third-party hosts. If they are, the
# resulting transactions might need to be conducted with cookies
# turned off.
if origin_req_host is None:
- origin_req_host = request_host(self)
+ origin_req_host = request_host_lc(self)
self.origin_req_host = origin_req_host
def get_selector(self):
return urllib.splittag(self.__r_host)[0]
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_unredirected_header(self, key, val):
"""Add a header that will not be added to a redirected request."""
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
"""True iff request has named header (regular or unredirected)."""
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
def __str__(self):
return "<Request for %s>" % self.get_full_url()
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
diff --git a/test/test_cookies.py b/test/test_cookies.py
index b5db35e..0a41afb 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,1039 +1,1051 @@
"""Tests for _ClientCookie."""
import sys, urllib2, re, os, StringIO, mimetools, time, tempfile, errno, inspect
from time import localtime
from unittest import TestCase
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
class TempfileTestMixin:
def setUp(self):
self._tempfiles = []
def tearDown(self):
for fn in self._tempfiles:
try:
os.remove(fn)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def mktemp(self):
fn = tempfile.mktemp()
self._tempfiles.append(fn)
return fn
def caller():
return sys._getframe().f_back.f_back.f_code.co_name
def attribute_names(obj):
return set([spec[0] for spec in inspect.getmembers(obj)
if not spec[0].startswith("__")])
class CookieJarInterfaceTests(TestCase):
def test_add_cookie_header(self):
from mechanize import CookieJar
# verify only these methods are used
class MockRequest(object):
def __init__(self):
self.added_headers = []
self.called = set()
def log_called(self):
self.called.add(caller())
def get_full_url(self):
self.log_called()
return "https://example.com:443"
def get_host(self):
self.log_called()
return "example.com:443"
def get_type(self):
self.log_called()
return "https"
def has_header(self, header_name):
self.log_called()
return False
def get_header(self, header_name, default=None):
self.log_called()
pass # currently not called
def header_items(self):
self.log_called()
pass # currently not called
def add_unredirected_header(self, key, val):
self.log_called()
self.added_headers.append((key, val))
def is_unverifiable(self):
self.log_called()
return False
@property
def port(self):
import traceback; traceback.print_stack()
self.log_called()
pass # currently not used, since urllib2 always sets .port None
jar = CookieJar()
interact_netscape(jar, "https://example.com:443",
"foo=bar; port=443; secure")
request = MockRequest()
jar.add_cookie_header(request)
expect_called = attribute_names(MockRequest) - set(
["port", "get_header", "header_items", "log_called"])
self.assertEquals(request.called, expect_called)
self.assertEquals(request.added_headers, [("Cookie", "foo=bar")])
def test_extract_cookies(self):
from mechanize import CookieJar
# verify only these methods are used
class StubMessage(object):
def getheaders(self, name):
return ["foo=bar; port=443"]
class StubResponse(object):
def info(self):
return StubMessage()
class StubRequest(object):
def __init__(self):
self.added_headers = []
self.called = set()
def log_called(self):
self.called.add(caller())
def get_full_url(self):
self.log_called()
return "https://example.com:443"
def get_host(self):
self.log_called()
return "example.com:443"
def is_unverifiable(self):
self.log_called()
return False
@property
def port(self):
import traceback; traceback.print_stack()
self.log_called()
pass # currently not used, since urllib2 always sets .port None
jar = CookieJar()
response = StubResponse()
request = StubRequest()
jar.extract_cookies(response, request)
expect_called = attribute_names(StubRequest) - set(
["port", "log_called"])
self.assertEquals(request.called, expect_called)
self.assertEquals([(cookie.name, cookie.value) for cookie in jar],
[("foo", "bar")])
def test_unverifiable(self):
from mechanize._clientcookie import request_is_unverifiable
# .unverifiable was added in mechanize, .is_unverifiable() later got
# added in cookielib. XXX deprecate .unverifiable
class StubRequest(object):
def __init__(self, attrs):
self._attrs = attrs
self.accessed = set()
def __getattr__(self, name):
self.accessed.add(name)
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
request = StubRequest(dict(is_unverifiable=lambda: False))
self.assertEquals(request_is_unverifiable(request), False)
request = StubRequest(dict(is_unverifiable=lambda: False,
unverifiable=True))
self.assertEquals(request_is_unverifiable(request), False)
request = StubRequest(dict(unverifiable=False))
self.assertEquals(request_is_unverifiable(request), False)
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_policy(self):
import mechanize
policy = mechanize.DefaultCookiePolicy()
jar = mechanize.CookieJar()
jar.set_policy(policy)
self.assertEquals(jar.get_policy(), policy)
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. We regard it as a missing VALUE.
filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
- def test_request_host(self):
+ def test_request_host_lc(self):
from mechanize import Request
- from mechanize._clientcookie import request_host
+ from mechanize._clientcookie import request_host_lc
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
- #assert request_host(req) == "www.acme.com"
- assert request_host(req) == "1.1.1.1"
+ #assert request_host_lc(req) == "www.acme.com"
+ assert request_host_lc(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
- assert request_host(req) == "www.acme.com"
+ assert request_host_lc(req) == "www.acme.com"
# not actually sure this one is a valid Request object, so maybe should
- # remove test for no host in url in request_host function?
+ # remove test for no host in url in request_host_lc function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
- assert request_host(req) == "www.acme.com"
+ assert request_host_lc(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
- assert request_host(req) == "www.acme.com"
+ assert request_host_lc(req) == "www.acme.com"
+ # the _lc function lower-cases the result
+ req = Request("http://EXAMPLE.com")
+ assert request_host_lc(req) == "example.com"
+
+ def test_effective_request_host(self):
+ from mechanize import Request, effective_request_host
+ self.assertEquals(
+ effective_request_host(Request("http://www.EXAMPLE.com/spam")),
+ "www.EXAMPLE.com")
+ self.assertEquals(
+ effective_request_host(Request("http://bob/spam")),
+ "bob.local")
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if DefaultCookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
assert (req.has_header("Cookie") and
req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_secure(self):
from mechanize import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
assert not c._cookies["www.acme.com"]["/"]["foo1"].secure, \
"non-secure cookie registered secure"
assert c._cookies["www.acme.com"]["/"]["foo2"].secure, \
"secure cookie registered non-secure"
def test_quote_cookie_value(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
assert h == r'$Version=1; foo=\\b\"a\"r'
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from mechanize import CookieJar, Request, DefaultCookiePolicy
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
assert len(c) == 1
c.add_cookie_header(req)
assert req.has_header("Cookie")
def test_domain_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Domain") == -1, \
"absent domain returned with domain present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
assert h.find('$Domain=".bar.com"') != -1, \
"domain not returned"
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
assert h.find('$Domain="bar.com"') != -1, \
"domain not returned"
def test_path_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Path") == -1, \
"absent path returned with path present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
assert h.find('$Path="/"') != -1, "path not returned"
def test_port_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Port") == -1, \
"absent port returned with port present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
assert re.search(r"\$Port([^=]|$)", h), \
"port with no value not returned with no value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
assert h.find('$Port="80"') != -1, \
"port with single value not returned with single value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
assert h.find('$Port="80,8080"') != -1, \
"port with multiple values not returned with multiple values"
def test_no_return_comment(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
assert h.find("Comment") == -1, \
"Comment or CommentURL cookie-attributes returned to server"
# just pondering security here -- this isn't really a test (yet)
## def test_hack(self):
## from mechanize import CookieJar
## c = CookieJar()
## interact_netscape(c, "http://victim.mall.com/",
## 'prefs="foo"')
## interact_netscape(c, "http://cracker.mall.com/",
## 'prefs="bar"; Domain=.mall.com')
## interact_netscape(c, "http://cracker.mall.com/",
## '$Version="1"; Domain=.mall.com')
## h = interact_netscape(c, "http://victim.mall.com/")
## print h
def test_Cookie_iterator(self):
from mechanize import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
# sequential iteration (repeated to check that iteration restarts correctly)
for i in range(4):
i = 0
for c in cs:
assert isinstance(c, Cookie)
assert c.version == versions[i]
assert c.name == names[i]
assert c.domain == domains[i]
assert c.path == paths[i]
i = i + 1
self.assertRaises(IndexError, lambda cs=cs : cs[5])
# can't skip
cs[0]
cs[1]
self.assertRaises(IndexError, lambda cs=cs : cs[3])
# can't go backwards
cs[0]
cs[1]
cs[2]
self.assertRaises(IndexError, lambda cs=cs : cs[1])
def test_parse_ns_headers(self):
from mechanize._headersutil import parse_ns_headers
# missing domain value (invalid cookie)
assert parse_ns_headers(["foo=bar; path=/; domain"]) == [
[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
# invalid expires value
assert parse_ns_headers(
["foo=bar; expires=Foo Bar 12 33:22:11 2000"]) == \
[[("foo", "bar"), ("expires", None), ("version", "0")]]
# missing cookie name (valid cookie)
assert parse_ns_headers(["foo"]) == [[("foo", None), ("version", "0")]]
# shouldn't add version if header is empty
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
# bad version
["Set-Cookie: b=foo; version=spam"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
def test_cookies_for_request(self):
from mechanize import CookieJar, Request
cj = CookieJar()
interact_netscape(cj, "http://example.com/", "short=path")
interact_netscape(cj, "http://example.com/longer/path", "longer=path")
for_short_path = cj.cookies_for_request(Request("http://example.com/"))
self.assertEquals([cookie.name for cookie in for_short_path],
["short"])
for_long_path = cj.cookies_for_request(Request(
"http://example.com/longer/path"))
self.assertEquals([cookie.name for cookie in for_long_path],
["longer", "short"])
class CookieJarPersistenceTests(TempfileTestMixin, TestCase):
|
Almad/Mechanize
|
341971ba1d37e6ba0cc89eaefae56540ab15132f
|
Add .get_policy() method to CookieJar
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index 06328d2..7f265dd 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -485,1059 +485,1063 @@ class CookiePolicy:
"""Return false if cookies should not be returned, given cookie path.
See the docstring for domain_return_ok.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies.
Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is
switched off by default.
The easiest way to provide your own policy is to override this class and
call its methods in your overridden implementations before adding your own
additional checks.
import mechanize
class MyCookiePolicy(mechanize.DefaultCookiePolicy):
def set_ok(self, cookie, request):
if not mechanize.DefaultCookiePolicy.set_ok(
self, cookie, request):
return False
if i_dont_want_to_store_this_cookie():
return False
return True
In addition to the features required to implement the CookiePolicy
interface, this class allows you to block and allow domains from setting
and receiving cookies. There are also some strictness switches that allow
you to tighten up the rather loose Netscape protocol rules a little bit (at
the cost of blocking some benign cookies).
A domain blacklist and whitelist are provided (both off by default).  Only
domains not in the blacklist and present in the whitelist (if the whitelist
is active) participate in cookie setting and returning. Use the
blocked_domains constructor argument, and blocked_domains and
set_blocked_domains methods (and the corresponding argument and methods for
allowed_domains). If you set a whitelist, you can turn it off again by
setting it to None.
Domains in block or allow lists that do not start with a dot must
string-compare equal. For example, "acme.com" matches a blacklist entry of
"acme.com", but "www.acme.com" does not. Domains that do start with a dot
are matched by more specific domains too. For example, both "www.acme.com"
and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does
not). IP addresses are an exception, and must match exactly. For example,
if blocked_domains contains "192.168.1.2" and ".168.1.2", 192.168.1.2 is
blocked, but 193.168.1.2 is not.
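As a minimal sketch of these rules (domain names invented for
illustration):
    policy = DefaultCookiePolicy(blocked_domains=[".acme.com"])
    policy.is_blocked("www.acme.com")   # True: matches the dotted entry
    policy.is_blocked("acme.com")       # False: would need an exact match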
Additional Public Attributes:
General strictness switches
strict_domain: don't allow sites to set two-component domains with
country-code top-level domains like .co.uk, .gov.uk, .co.nz, etc.
This is far from perfect and isn't guaranteed to work!
RFC 2965 protocol strictness switches
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
rules for Netscape cookies:
DomainStrictNoDots: when setting cookies, host prefix must not contain a
dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
www.foo contains a dot)
DomainStrictNonDomain: cookies that did not explicitly specify a Domain
cookie-attribute can only be returned to a domain that string-compares
equal to the domain that set the cookie (eg. rockets.acme.com won't
be returned cookies from acme.com that had no Domain cookie-attribute)
DomainRFC2965Match: when setting cookies, require a full RFC 2965
domain-match
DomainLiberal and DomainStrict are the most useful combinations of the
above flags, for convenience
strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
have names starting with '$'
strict_ns_set_path: don't allow setting cookies whose path doesn't
path-match request URI
"""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
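# For illustration (a hypothetical policy, not one defined in this module):
# the flags combine with bitwise OR, just as DomainStrict does above:
#     pol = DefaultCookiePolicy(
#         strict_ns_domain=(DefaultCookiePolicy.DomainStrictNoDots |
#                           DefaultCookiePolicy.DomainRFC2965Match))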
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
# WARNING: this argument will change or go away if it is not
# accepted into the Python standard library in this form!
# default, ie. treat 2109 as netscape iff not rfc2965
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""
Constructor arguments should be used as keyword arguments only.
blocked_domains: sequence of domain names that we never accept cookies
from, nor return cookies to
allowed_domains: if not None, this is a sequence of the only domains
for which we accept and return cookies
For other arguments, see CookiePolicy.__doc__ and
DefaultCookiePolicy.__doc__.
"""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override set_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
debug(" - checking cookie %s", cookie)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
debug(" Set-Cookie2 without version attribute (%s)", cookie)
return False
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
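# A sketch of MappingIterator's depth-first, key-sorted traversal (the
# nested dict is invented for illustration):
#     >>> nested = {"b": {"y": 2, "x": 1}, "a": 0}
#     >>> [item for item in MappingIterator(nested)]
#     [0, 1, 2]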
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
+ get_policy()
set_policy(policy)
cookies_for_request(request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
+ def get_policy(self):
+ return self._policy
+
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
def cookies_for_request(self, request):
"""Return a list of cookies to be returned to server.
The returned list of cookie instances is sorted in the order they
should appear in the Cookie: header for return to the server.
See add_cookie_header.__doc__ for the interface required of the
request argument.
New in version 0.1.10
"""
cookies = self._cookies_for_request(request)
# add cookies in order of most specific (i.e. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
# this method still exists (alongside cookies_for_request) because it
# is part of an implied protected interface for subclasses of cookiejar
# XXX document that implied interface, or provide another way of
# implementing cookiejars than subclassing
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
The $Version attribute is also added when appropriate (currently only
once per request).
>>> jar = CookieJar()
>>> ns_cookie = Cookie(0, "foo", '"bar"', None, False,
... "example.com", False, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([ns_cookie])
['foo="bar"']
>>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False,
... ".example.com", True, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([rfc2965_cookie])
['$Version=1', 'foo=bar', '$Domain="example.com"']
"""
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, is_unverifiable, get_type,
has_header, get_header, header_items and add_unredirected_header, as
documented by urllib2, and the port attribute (the port number).
Actually, RequestUpgradeProcessor will automatically upgrade your
Request object to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
self._policy._now = self._now = int(time.time())
cookies = self.cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
if v is None:
debug(" missing value for max-age attribute")
bad_cookie = True
break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules.  Remember that zero Max-Age is a
# request to discard the (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
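# Roughly, for attrs_set parsed from a made-up header such as
# 'foo=bar; version="1"; path="/"', the method above returns
# [("foo", "bar", {"version": "1", "path": "/"}, {})].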
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None:
try:
version = int(version)
except ValueError:
return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because cookies can't be deleted there.
try:
self.clear(domain, path, name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object.
See extract_cookies.__doc__ for the interface required of the
response and request arguments.
"""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
object' may be any object that provides a getheaders method).
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_type, get_host, and is_unverifiable, as
documented by urllib2, and the port attribute (the port number). The
request is used to set default values for cookie-attributes as well as
for checking that the cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Discards all cookies held by object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
diff --git a/test/test_cookies.py b/test/test_cookies.py
index ce15222..b5db35e 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,745 +1,752 @@
"""Tests for _ClientCookie."""
import sys, urllib2, re, os, StringIO, mimetools, time, tempfile, errno, inspect
from time import localtime
from unittest import TestCase
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
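# For example (a sketch): interact_netscape(jar, "http://example.com/", "k=v")
# returns the Cookie: header the jar would send for that URL, then stores
# k=v in the jar via a fake Set-Cookie response.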
class TempfileTestMixin:
def setUp(self):
self._tempfiles = []
def tearDown(self):
for fn in self._tempfiles:
try:
os.remove(fn)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def mktemp(self):
fn = tempfile.mktemp()
self._tempfiles.append(fn)
return fn
def caller():
return sys._getframe().f_back.f_back.f_code.co_name
def attribute_names(obj):
return set([spec[0] for spec in inspect.getmembers(obj)
if not spec[0].startswith("__")])
class CookieJarInterfaceTests(TestCase):
def test_add_cookie_header(self):
from mechanize import CookieJar
# verify only these methods are used
class MockRequest(object):
def __init__(self):
self.added_headers = []
self.called = set()
def log_called(self):
self.called.add(caller())
def get_full_url(self):
self.log_called()
return "https://example.com:443"
def get_host(self):
self.log_called()
return "example.com:443"
def get_type(self):
self.log_called()
return "https"
def has_header(self, header_name):
self.log_called()
return False
def get_header(self, header_name, default=None):
self.log_called()
pass # currently not called
def header_items(self):
self.log_called()
pass # currently not called
def add_unredirected_header(self, key, val):
self.log_called()
self.added_headers.append((key, val))
def is_unverifiable(self):
self.log_called()
return False
@property
def port(self):
import traceback; traceback.print_stack()
self.log_called()
pass # currently not used, since urllib2 always sets .port to None
jar = CookieJar()
interact_netscape(jar, "https://example.com:443",
"foo=bar; port=443; secure")
request = MockRequest()
jar.add_cookie_header(request)
expect_called = attribute_names(MockRequest) - set(
["port", "get_header", "header_items", "log_called"])
self.assertEquals(request.called, expect_called)
self.assertEquals(request.added_headers, [("Cookie", "foo=bar")])
def test_extract_cookies(self):
from mechanize import CookieJar
# verify only these methods are used
class StubMessage(object):
def getheaders(self, name):
return ["foo=bar; port=443"]
class StubResponse(object):
def info(self):
return StubMessage()
class StubRequest(object):
def __init__(self):
self.added_headers = []
self.called = set()
def log_called(self):
self.called.add(caller())
def get_full_url(self):
self.log_called()
return "https://example.com:443"
def get_host(self):
self.log_called()
return "example.com:443"
def is_unverifiable(self):
self.log_called()
return False
@property
def port(self):
import traceback; traceback.print_stack()
self.log_called()
pass # currently not used, since urllib2 always sets .port to None
jar = CookieJar()
response = StubResponse()
request = StubRequest()
jar.extract_cookies(response, request)
expect_called = attribute_names(StubRequest) - set(
["port", "log_called"])
self.assertEquals(request.called, expect_called)
self.assertEquals([(cookie.name, cookie.value) for cookie in jar],
[("foo", "bar")])
def test_unverifiable(self):
from mechanize._clientcookie import request_is_unverifiable
# .unverifiable was added in mechanize, .is_unverifiable() later got
# added in cookielib. XXX deprecate .unverifiable
class StubRequest(object):
def __init__(self, attrs):
self._attrs = attrs
self.accessed = set()
def __getattr__(self, name):
self.accessed.add(name)
try:
return self._attrs[name]
except KeyError:
raise AttributeError(name)
request = StubRequest(dict(is_unverifiable=lambda: False))
self.assertEquals(request_is_unverifiable(request), False)
request = StubRequest(dict(is_unverifiable=lambda: False,
unverifiable=True))
self.assertEquals(request_is_unverifiable(request), False)
request = StubRequest(dict(unverifiable=False))
self.assertEquals(request_is_unverifiable(request), False)
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
##   commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
##   starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
##   including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
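## An illustrative sketch of the default-path rule above (a hedged example,
## not part of the original suite; it assumes the interact_netscape helper
## defined elsewhere in this module):
##
##   c = CookieJar()
##   interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
##   # no path attribute given, so the path defaults to the request path
##   # up to, but not including, the last '/':
##   assert c._cookies["www.acme.com"].has_key("/blah")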
+ def test_policy(self):
+ import mechanize
+ policy = mechanize.DefaultCookiePolicy()
+ jar = mechanize.CookieJar()
+ jar.set_policy(policy)
+ self.assertEquals(jar.get_policy(), policy)
+
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. We regard it as a missing VALUE.
filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this one is valid Request object, so maybe should
# remove test for no host in url in request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
|
Almad/Mechanize
|
ee93b1301d7d5b2db0bf569fcbd51be7751a9639
|
Don't choke on non-integer version cookie-attribute (http://bugs.python.org/issue3924) * Strip quotes from version attribute value * Ignore cookies with bad versions
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index 531c190..06328d2 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -785,907 +785,911 @@ class DefaultCookiePolicy(CookiePolicy):
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
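# A minimal sketch of MappingIterator behaviour (illustrative only):
#
#   >>> list(MappingIterator({"b": {"y": 2, "x": 1}, "a": 0}))
#   [0, 1, 2]
#
# Leaf values come out depth-first, sorted by key at each nesting level.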
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
set_policy(policy)
cookies_for_request(request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
def cookies_for_request(self, request):
"""Return a list of cookies to be returned to server.
The returned list of cookie instances is sorted in the order they
should appear in the Cookie: header for return to the server.
See add_cookie_header.__doc__ for the interface required of the
request argument.
New in version 0.1.10
"""
cookies = self._cookies_for_request(request)
# add cookies in order of most specific (i.e. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
return cookies
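# Ordering sketch (hedged example): given cookies stored for paths "/",
# "/foo" and "/foo/bar", cookies_for_request returns the "/foo/bar" cookie
# first and the "/" cookie last, so the most specific path wins when the
# Cookie: header is built.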
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
# this method still exists (alongside cookies_for_request) because it
# is part of an implied protected interface for subclasses of cookiejar
# XXX document that implied interface, or provide another way of
# implementing cookiejars than subclassing
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
The $Version attribute is also added when appropriate (currently only
once per request).
>>> jar = CookieJar()
>>> ns_cookie = Cookie(0, "foo", '"bar"', None, False,
... "example.com", False, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([ns_cookie])
['foo="bar"']
>>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False,
... ".example.com", True, False,
... "/", False, False, None, True,
... None, None, {})
>>> jar._cookie_attrs([rfc2965_cookie])
['$Version=1', 'foo=bar', '$Domain="example.com"']
"""
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, is_unverifiable, get_type,
has_header, get_header, header_items and add_unredirected_header, as
documented by urllib2, and the port attribute (the port number).
Actually, RequestUpgradeProcessor will automatically upgrade your
Request object to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
self._policy._now = self._now = int(time.time())
cookies = self.cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
if v is None:
debug(" missing value for max-age attribute")
bad_cookie = True
break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
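# Sketch of the normalised tuple format (hedged example):
#
#   attrs_set = [[("spam", "eggs"), ("path", "/"), ("secure", None)]]
#   # would normalise to roughly:
#   #   [("spam", "eggs", {"path": "/", "secure": True}, {})]
#
# i.e. a valueless boolean attribute becomes True and lands in standard.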
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
- if version is not None: version = int(version)
+ if version is not None:
+ try:
+ version = int(version)
+ except ValueError:
+ return None # invalid version, ignore cookie
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
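# Behaviour sketch (hedged, mirroring test_rfc2109_handling): a Set-Cookie
# header carrying Version=1 parses as an RFC 2109 cookie; with
# rfc2109_as_netscape in effect its version is downgraded to 0 here, but
# cookie.rfc2109 stays True, so its origin is not lost.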
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object.
See extract_cookies.__doc__ for the interface required of the
response and request arguments.
"""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
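# Merge sketch (hedged example): if Set-Cookie2 yields a cookie keyed
# (".example.com", "/", "spam") and Set-Cookie yields a Netscape cookie
# with the same (domain, path, name) key, only the RFC 2965 cookie is
# kept, per RFC 2965 section 9.1.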
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
object' may be any object that provides a getheaders method).
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_type, get_host, and is_unverifiable, as
documented by urllib2, and the port attribute (the port number). The
request is used to set default values for cookie-attributes as well as
for checking that the cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
Discards all cookies held by object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
closing the browser window.
Note that the save method won't save session cookies anyway, unless you
ask otherwise by passing a true ignore_discard argument.
"""
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the save
method won't save expired cookies anyway (unless you ask otherwise by
passing a true ignore_expires argument).
"""
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
def __getitem__(self, i):
if i == 0:
self._getitem_iterator = self.__iter__()
elif self._prev_getitem_index != i-1: raise IndexError(
"CookieJar.__getitem__ only supports sequential iteration")
self._prev_getitem_index = i
try:
return self._getitem_iterator.next()
except StopIteration:
raise IndexError()
def __iter__(self):
return MappingIterator(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
class LoadError(Exception): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file.
Additional methods
save(filename=None, ignore_discard=False, ignore_expires=False)
load(filename=None, ignore_discard=False, ignore_expires=False)
revert(filename=None, ignore_discard=False, ignore_expires=False)
Additional public attributes
filename: filename for loading and saving cookies
Additional public readable attributes
delayload: request that cookies are lazily loaded from disk; this is only
a hint, since it affects only performance, not behaviour (unless the
cookies on disk are changing); a CookieJar object may ignore it (in fact,
only MSIECookieJar lazily loads cookies at the moment)
"""
def __init__(self, filename=None, delayload=False, policy=None):
"""
See FileCookieJar.__doc__ for argument documentation.
Cookies are NOT loaded from the named file until either the load or
revert method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file.
filename: name of file in which to save cookies
ignore_discard: save even cookies set to be discarded
ignore_expires: save even cookies that have expired
The file is overwritten if it already exists, thus wiping all its
cookies. Saved cookies can be restored later using the load or revert
methods. If filename is not specified, self.filename is used; if
self.filename is None, ValueError is raised.
"""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file.
Old cookies are kept unless overwritten by newly loaded ones.
Arguments are as for .save().
If filename is not specified, self.filename is used; if self.filename
is None, ValueError is raised. The named file must be in the format
understood by the class, or LoadError will be raised. This format will
be identical to that written by the save method, unless the load format
is not sufficiently well understood (as is the case for MSIECookieJar).
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
diff --git a/mechanize/_headersutil.py b/mechanize/_headersutil.py
index d8fe47a..49ba5de 100644
--- a/mechanize/_headersutil.py
+++ b/mechanize/_headersutil.py
@@ -1,226 +1,232 @@
"""Utility functions for HTTP header value parsing and construction.
Copyright 1997-1998, Gisle Aas
Copyright 2002-2006, John J. Lee
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import os, re
from types import StringType
from types import UnicodeType
STRING_TYPES = StringType, UnicodeType
from _util import http2time
import _rfc3986
def is_html(ct_headers, url, allow_xhtml=False):
"""
ct_headers: Sequence of Content-Type headers
url: Response URL
"""
if not ct_headers:
# guess
ext = os.path.splitext(_rfc3986.urlsplit(url)[2])[1]
html_exts = [".htm", ".html"]
if allow_xhtml:
html_exts += [".xhtml"]
return ext in html_exts
# use first header
ct = split_header_words(ct_headers)[0][0][0]
html_types = ["text/html"]
if allow_xhtml:
html_types += [
"text/xhtml", "text/xml",
"application/xml", "application/xhtml+xml",
]
return ct in html_types
def unmatched(match):
"""Return unmatched part of re.Match object."""
start, end = match.span(0)
return match.string[:start]+match.string[end:]
token_re = re.compile(r"^\s*([^=\s;,]+)")
quoted_value_re = re.compile(r"^\s*=\s*\"([^\"\\]*(?:\\.[^\"\\]*)*)\"")
value_re = re.compile(r"^\s*=\s*([^\s;,]*)")
escape_re = re.compile(r"\\(.)")
def split_header_words(header_values):
r"""Parse header values into a list of lists containing key,value pairs.
The function knows how to deal with ",", ";" and "=" as well as quoted
values after "=". A list of space separated tokens are parsed as if they
were separated by ";".
If the header_values passed as argument contains multiple values, then they
are treated as if they were a single value separated by comma ",".
This means that this function is useful for parsing header fields that
follow this syntax (BNF as from the HTTP/1.1 specification, but we relax
the requirement for tokens).
headers = #header
header = (token | parameter) *( [";"] (token | parameter))
token = 1*<any CHAR except CTLs or separators>
separators = "(" | ")" | "<" | ">" | "@"
| "," | ";" | ":" | "\" | <">
| "/" | "[" | "]" | "?" | "="
| "{" | "}" | SP | HT
quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
qdtext = <any TEXT except <">>
quoted-pair = "\" CHAR
parameter = attribute "=" value
attribute = token
value = token | quoted-string
Each header is represented by a list of key/value pairs. The value for a
simple token (not part of a parameter) is None. Syntactically incorrect
headers will not necessarily be parsed as you would want.
This is easier to describe with some examples:
>>> split_header_words(['foo="bar"; port="80,81"; discard, bar=baz'])
[[('foo', 'bar'), ('port', '80,81'), ('discard', None)], [('bar', 'baz')]]
>>> split_header_words(['text/html; charset="iso-8859-1"'])
[[('text/html', None), ('charset', 'iso-8859-1')]]
>>> split_header_words([r'Basic realm="\"foo\bar\""'])
[[('Basic', None), ('realm', '"foobar"')]]
"""
assert type(header_values) not in STRING_TYPES
result = []
for text in header_values:
orig_text = text
pairs = []
while text:
m = token_re.search(text)
if m:
text = unmatched(m)
name = m.group(1)
m = quoted_value_re.search(text)
if m: # quoted value
text = unmatched(m)
value = m.group(1)
value = escape_re.sub(r"\1", value)
else:
m = value_re.search(text)
if m: # unquoted value
text = unmatched(m)
value = m.group(1)
value = value.rstrip()
else:
# no value, a lone token
value = None
pairs.append((name, value))
elif text.lstrip().startswith(","):
# concatenated headers, as per RFC 2616 section 4.2
text = text.lstrip()[1:]
if pairs: result.append(pairs)
pairs = []
else:
# skip junk
non_junk, nr_junk_chars = re.subn("^[=\s;]*", "", text)
assert nr_junk_chars > 0, (
"split_header_words bug: '%s', '%s', %s" %
(orig_text, text, pairs))
text = non_junk
if pairs: result.append(pairs)
return result
join_escape_re = re.compile(r"([\"\\])")
def join_header_words(lists):
"""Do the inverse of the conversion done by split_header_words.
Takes a list of lists of (key, value) pairs and produces a single header
value. Attribute values are quoted if needed.
>>> join_header_words([[("text/plain", None), ("charset", "iso-8859/1")]])
'text/plain; charset="iso-8859/1"'
>>> join_header_words([[("text/plain", None)], [("charset", "iso-8859/1")]])
'text/plain, charset="iso-8859/1"'
"""
headers = []
for pairs in lists:
attr = []
for k, v in pairs:
if v is not None:
if not re.search(r"^\w+$", v):
v = join_escape_re.sub(r"\\\1", v) # escape " and \
v = '"%s"' % v
if k is None: # Netscape cookies may have no name
k = v
else:
k = "%s=%s" % (k, v)
attr.append(k)
if attr: headers.append("; ".join(attr))
return ", ".join(headers)
+def strip_quotes(text):
+ if text.startswith('"'):
+ text = text[1:]
+ if text.endswith('"'):
+ text = text[:-1]
+ return text
+
def parse_ns_headers(ns_headers):
"""Ad-hoc parser for Netscape protocol cookie-attributes.
The old Netscape cookie format for Set-Cookie can for instance contain
an unquoted "," in the expires field, so we have to use this ad-hoc
parser instead of split_header_words.
XXX This may not make the best possible effort to parse all the crap
that Netscape Cookie headers contain. Ronald Tschalar's HTTPClient
parser is probably better, so we could do worse than following that if
this ever gives any trouble.
Currently, this is also used for parsing RFC 2109 cookies.
"""
known_attrs = ("expires", "domain", "path", "secure",
# RFC 2109 attrs (may turn up in Netscape cookies, too)
- "port", "max-age")
+ "version", "port", "max-age")
result = []
for ns_header in ns_headers:
pairs = []
version_set = False
params = re.split(r";\s*", ns_header)
for ii in range(len(params)):
param = params[ii]
param = param.rstrip()
if param == "": continue
if "=" not in param:
k, v = param, None
else:
k, v = re.split(r"\s*=\s*", param, 1)
k = k.lstrip()
if ii != 0:
lc = k.lower()
if lc in known_attrs:
k = lc
if k == "version":
# This is an RFC 2109 cookie.
+ v = strip_quotes(v)
version_set = True
if k == "expires":
# convert expires date to seconds since epoch
- if v.startswith('"'): v = v[1:]
- if v.endswith('"'): v = v[:-1]
- v = http2time(v) # None if invalid
+ v = http2time(strip_quotes(v)) # None if invalid
pairs.append((k, v))
if pairs:
if not version_set:
pairs.append(("version", "0"))
result.append(pairs)
return result
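# Illustrative sketch (hedged, not from the original source):
#
#   >>> parse_ns_headers(['foo=bar; path=/; HttpOnly'])
#   [[('foo', 'bar'), ('path', '/'), ('HttpOnly', None), ('version', '0')]]
#
# Unknown attribute names keep their case, and a ("version", "0") pair is
# appended when the header did not set a version itself.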
def _test():
import doctest, _headersutil
return doctest.testmod(_headersutil)
if __name__ == "__main__":
_test()
diff --git a/test/test_cookies.py b/test/test_cookies.py
index 50f6b46..ce15222 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -492,1024 +492,1026 @@ class CookieTests(TestCase):
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this one is valid Request object, so maybe should
# remove test for no host in url in request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
assert (req.has_header("Cookie") and
req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_secure(self):
from mechanize import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
assert not c._cookies["www.acme.com"]["/"]["foo1"].secure, \
"non-secure cookie registered secure"
assert c._cookies["www.acme.com"]["/"]["foo2"].secure, \
"secure cookie registered non-secure"
def test_quote_cookie_value(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
assert h == r'$Version=1; foo=\\b\"a\"r'
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from mechanize import CookieJar, Request, DefaultCookiePolicy
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
assert len(c) == 1
c.add_cookie_header(req)
assert req.has_header("Cookie")
def test_domain_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find( "Domain") == -1, \
"absent domain returned with domain present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
assert h.find('$Domain=".bar.com"') != -1, \
"domain not returned"
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
assert h.find('$Domain="bar.com"') != -1, \
"domain not returned"
def test_path_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Path") == -1, \
"absent path returned with path present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
assert h.find('$Path="/"') != -1, "path not returned"
def test_port_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Port") == -1, \
"absent port returned with port present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
assert re.search("\$Port([^=]|$)", h), \
"port with no value not returned with no value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
assert h.find('$Port="80"') != -1, \
"port with single value not returned with single value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
assert h.find('$Port="80,8080"') != -1, \
"port with multiple values not returned with multiple values"
def test_no_return_comment(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
assert h.find("Comment") == -1, \
"Comment or CommentURL cookie-attributes returned to server"
# just pondering security here -- this isn't really a test (yet)
## def test_hack(self):
## from mechanize import CookieJar
## c = CookieJar()
## interact_netscape(c, "http://victim.mall.com/",
## 'prefs="foo"')
## interact_netscape(c, "http://cracker.mall.com/",
## 'prefs="bar"; Domain=.mall.com')
## interact_netscape(c, "http://cracker.mall.com/",
## '$Version="1"; Domain=.mall.com')
## h = interact_netscape(c, "http://victim.mall.com/")
## print h
def test_Cookie_iterator(self):
from mechanize import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
# sequential iteration
for i in range(4):
i = 0
for c in cs:
assert isinstance(c, Cookie)
assert c.version == versions[i]
assert c.name == names[i]
assert c.domain == domains[i]
assert c.path == paths[i]
i = i + 1
self.assertRaises(IndexError, lambda cs=cs : cs[5])
# can't skip
cs[0]
cs[1]
self.assertRaises(IndexError, lambda cs=cs : cs[3])
# can't go backwards
cs[0]
cs[1]
cs[2]
self.assertRaises(IndexError, lambda cs=cs : cs[1])
def test_parse_ns_headers(self):
from mechanize._headersutil import parse_ns_headers
# missing domain value (invalid cookie)
assert parse_ns_headers(["foo=bar; path=/; domain"]) == [
[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
# invalid expires value
assert parse_ns_headers(
["foo=bar; expires=Foo Bar 12 33:22:11 2000"]) == \
[[("foo", "bar"), ("expires", None), ("version", "0")]]
# missing cookie name (valid cookie)
assert parse_ns_headers(["foo"]) == [[("foo", None), ("version", "0")]]
# shouldn't add version if header is empty
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
+ # bad version
+ ["Set-Cookie: b=foo; version=spam"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
def test_cookies_for_request(self):
from mechanize import CookieJar, Request
cj = CookieJar()
interact_netscape(cj, "http://example.com/", "short=path")
interact_netscape(cj, "http://example.com/longer/path", "longer=path")
for_short_path = cj.cookies_for_request(Request("http://example.com/"))
self.assertEquals([cookie.name for cookie in for_short_path],
["short"])
for_long_path = cj.cookies_for_request(Request(
"http://example.com/longer/path"))
self.assertEquals([cookie.name for cookie in for_long_path],
["longer", "short"])
class CookieJarPersistenceTests(TempfileTestMixin, TestCase):
def _interact(self, cj):
year_plus_one = localtime(time.time())[0] + 1
interact_2965(cj, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(cj, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(cj, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(cj, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(cj, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(cj, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def test_firefox3_cookiejar_restore(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy
filename = self.mktemp()
def create_cookiejar():
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(
filename, policy=DefaultCookiePolicy(rfc2965=True))
finally:
reset_experimental_warnings()
cj.connect()
return cj
cj = create_cookiejar()
self._interact(cj)
self.assertEquals(len(cj), 6)
cj.close()
cj = create_cookiejar()
self.assert_("name='foo1', value='bar'" in repr(cj))
self.assertEquals(len(cj), 4)
def test_firefox3_cookiejar_iteration(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Cookie
filename = self.mktemp()
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(
filename, policy=DefaultCookiePolicy(rfc2965=True))
finally:
reset_experimental_warnings()
cj.connect()
self._interact(cj)
summary = "\n".join([str(cookie) for cookie in cj])
self.assertEquals(summary,
"""\
<Cookie foo2=bar for www.acme.com:80/>
<Cookie foo3=bar for www.acme.com/>
<Cookie foo1=bar for www.acme.com/>
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
def test_firefox3_cookiejar_clear(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Cookie
filename = self.mktemp()
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(
filename, policy=DefaultCookiePolicy(rfc2965=True))
finally:
reset_experimental_warnings()
cj.connect()
self._interact(cj)
cj.clear("www.acme.com", "/", "foo2")
def summary(): return "\n".join([str(cookie) for cookie in cj])
self.assertEquals(summary(),
"""\
<Cookie foo3=bar for www.acme.com/>
<Cookie foo1=bar for www.acme.com/>
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
cj.clear("www.acme.com")
self.assertEquals(summary(),
"""\
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
# if name is given, so must path and domain be
self.assertRaises(ValueError, cj.clear, domain=".foo.com",
name="foob")
# nonexistent domain
self.assertRaises(KeyError, cj.clear, domain=".spam.com")
def test_firefox3_cookiejar_add_cookie_header(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Request
filename = self.mktemp()
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(filename)
finally:
reset_experimental_warnings()
cj.connect()
# Session cookies (true .discard) and persistent cookies (false
# .discard) are stored differently. Check they both get sent.
year_plus_one = localtime(time.time())[0] + 1
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(cj, "http://www.foo.com/", "fooa=bar")
interact_netscape(cj, "http://www.foo.com/",
"foob=bar; %s" % expires)
ca, cb = cj
self.assert_(ca.discard)
self.assertFalse(cb.discard)
request = Request("http://www.foo.com/")
cj.add_cookie_header(request)
self.assertEquals(request.get_header("Cookie"),
"fooa=bar; foob=bar")
def test_mozilla_cookiejar(self):
# Save / load Mozilla/Netscape cookie file format.
from mechanize import MozillaCookieJar, DefaultCookiePolicy
filename = tempfile.mktemp()
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
self._interact(c)
def save_and_restore(cj, ignore_discard, filename=filename):
from mechanize import MozillaCookieJar, DefaultCookiePolicy
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
assert len(new_c) == 6 # none discarded
assert repr(new_c).find("name='foo1', value='bar'") != -1
new_c = save_and_restore(c, False)
assert len(new_c) == 4 # 2 of them discarded on save
assert repr(new_c).find("name='foo1', value='bar'") != -1
def test_mozilla_cookiejar_embedded_tab(self):
from mechanize import MozillaCookieJar
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
"a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
"a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
cj.revert(ignore_discard=True)
cookies = cj._cookies["a.com"]["/"]
self.assertEquals(cookies["name"].value, "val\tstillthevalue")
self.assertEquals(cookies["name2"].value, "value")
finally:
try:
os.remove(filename)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def test_mozilla_cookiejar_initial_dot_violation(self):
from mechanize import MozillaCookieJar, LoadError
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
self.assertRaises(LoadError, cj.revert, ignore_discard=True)
finally:
try:
os.remove(filename)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
class LWPCookieTests(TestCase, TempfileTestMixin):
# Tests taken from libwww-perl, with a few modifications.
def test_netscape_example_1(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = localtime(time.time())[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "CUSTOMER=WILE_E_COYOTE" and
req.get_header("Cookie2") == '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
not h.find("SHIPPING=FEDEX") != -1)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
from mechanize import CookieJar, Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
assert re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie"))
def test_ietf_example_1(self):
from mechanize import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
assert not cookie
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
assert re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie)
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
assert (re.search(r'^\$Version="?1"?;', cookie) and
re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
'\s*\$Path="\/acme"', cookie) and
re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
assert (re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
cookie.find("WILE_E_COYOTE") != -1)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from mechanize import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
diff --git a/test/test_headers.py b/test/test_headers.py
index 42d614f..8465470 100644
--- a/test/test_headers.py
+++ b/test/test_headers.py
@@ -1,128 +1,139 @@
"""Tests for ClientCookie._HeadersUtil."""
from unittest import TestCase
class IsHtmlTests(TestCase):
def test_is_html(self):
from mechanize._headersutil import is_html
for allow_xhtml in False, True:
for cths, ext, expect in [
(["text/html"], ".html", True),
(["text/html", "text/plain"], ".html", True),
# Content-type takes priority over file extension from URL
(["text/html"], ".txt", True),
(["text/plain"], ".html", False),
# use extension if no Content-Type
([], ".html", True),
([], ".gif", False),
# don't regard XHTML as HTML (unless user explicitly asks for it),
# since we don't yet handle XML properly
([], ".xhtml", allow_xhtml),
(["text/xhtml"], ".xhtml", allow_xhtml),
]:
url = "http://example.com/foo"+ext
self.assertEqual(expect, is_html(cths, url, allow_xhtml))
class HeaderTests(TestCase):
- def test_parse_ns_headers(self):
+ def test_parse_ns_headers_expires(self):
from mechanize._headersutil import parse_ns_headers
# quotes should be stripped
assert parse_ns_headers(['foo=bar; expires=01 Jan 2040 22:23:32 GMT']) == \
[[('foo', 'bar'), ('expires', 2209069412L), ('version', '0')]]
assert parse_ns_headers(['foo=bar; expires="01 Jan 2040 22:23:32 GMT"']) == \
[[('foo', 'bar'), ('expires', 2209069412L), ('version', '0')]]
+ def test_parse_ns_headers_version(self):
+ from mechanize._headersutil import parse_ns_headers
+
+ # quotes should be stripped
+ expected = [[('foo', 'bar'), ('version', '1')]]
+ for hdr in [
+ 'foo=bar; version="1"',
+ 'foo=bar; Version="1"',
+ ]:
+ self.assertEquals(parse_ns_headers([hdr]), expected)
+
def test_parse_ns_headers_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize._headersutil import parse_ns_headers
# Cookie with name 'expires'
hdr = 'expires=01 Jan 2040 22:23:32 GMT'
expected = [[("expires", "01 Jan 2040 22:23:32 GMT"), ("version", "0")]]
self.assertEquals(parse_ns_headers([hdr]), expected)
def test_join_header_words(self):
from mechanize._headersutil import join_header_words
assert join_header_words([[
("foo", None), ("bar", "baz"), (None, "value")
]]) == "foo; bar=baz; value"
assert join_header_words([[]]) == ""
def test_split_header_words(self):
from mechanize._headersutil import split_header_words
tests = [
("foo", [[("foo", None)]]),
("foo=bar", [[("foo", "bar")]]),
(" foo ", [[("foo", None)]]),
(" foo= ", [[("foo", "")]]),
(" foo=", [[("foo", "")]]),
(" foo= ; ", [[("foo", "")]]),
(" foo= ; bar= baz ", [[("foo", ""), ("bar", "baz")]]),
("foo=bar bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
# doesn't really matter if this next fails, but it works ATM
("foo= bar=baz", [[("foo", "bar=baz")]]),
("foo=bar;bar=baz", [[("foo", "bar"), ("bar", "baz")]]),
('foo bar baz', [[("foo", None), ("bar", None), ("baz", None)]]),
("a, b, c", [[("a", None)], [("b", None)], [("c", None)]]),
(r'foo; bar=baz, spam=, foo="\,\;\"", bar= ',
[[("foo", None), ("bar", "baz")],
[("spam", "")], [("foo", ',;"')], [("bar", "")]]),
]
for arg, expect in tests:
try:
result = split_header_words([arg])
except:
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
result = "(error -- traceback follows)\n\n%s" % f.getvalue()
assert result == expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
""" % (arg, expect, result)
def test_roundtrip(self):
from mechanize._headersutil import split_header_words, join_header_words
tests = [
("foo", "foo"),
("foo=bar", "foo=bar"),
(" foo ", "foo"),
("foo=", 'foo=""'),
("foo=bar bar=baz", "foo=bar; bar=baz"),
("foo=bar;bar=baz", "foo=bar; bar=baz"),
('foo bar baz', "foo; bar; baz"),
(r'foo="\"" bar="\\"', r'foo="\""; bar="\\"'),
('foo,,,bar', 'foo, bar'),
('foo=bar,bar=baz', 'foo=bar, bar=baz'),
('text/html; charset=iso-8859-1',
'text/html; charset="iso-8859-1"'),
('foo="bar"; port="80,81"; discard, bar=baz',
'foo=bar; port="80,81"; discard, bar=baz'),
(r'Basic realm="\"foo\\\\bar\""',
r'Basic; realm="\"foo\\\\bar\""')
]
for arg, expect in tests:
input = split_header_words([arg])
res = join_header_words(input)
assert res == expect, """
When parsing: '%s'
Expected: '%s'
Got: '%s'
Input was: '%s'""" % (arg, expect, res, input)
if __name__ == "__main__":
import unittest
unittest.main()
|
Almad/Mechanize
|
585c7a1ac7913feb1cfcb233e25cc6b85aedda5c
|
* Add method CookieJar.cookies_for_request() * Fix documented interface required of requests and responses (and add some tests for this!) * Allow either .is_unverifiable() or .unverifiable on request objects (preferring the former) * Replace example return value in mechanize._clientcookie.CookieJar._cookie_attrs.__doc__ with a doctest * Fix some comment / docstring typos * Remove a pointless dependency of _clientcookie.py on httplib
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index c8537ac..531c190 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -1,1656 +1,1691 @@
"""HTTP cookie handling for web clients.
This module originally developed from my port of Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
                        CookieJar____
                        /     \      \
            FileCookieJar      \      \
             /    |   \         \      \
 MozillaCookieJar | LWPCookieJar \      \
                  |               |      \
                  |   ---MSIEBase |       \
                  |  /      |     |        \
                  | /   MSIEDBCookieJar BSDDBCookieJar
                  |/
               MSIECookieJar
Comments to John J Lee <[email protected]>.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
Copyright 2002-2003 Johnny Lee (original MSIE Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import sys, re, copy, time, urllib, types, logging
try:
import threading
_threading = threading; del threading
except ImportError:
import dummy_threading
_threading = dummy_threading; del dummy_threading
-import httplib # only for the default HTTP port
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
-DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
+DEFAULT_HTTP_PORT = "80"
from _headersutil import split_header_words, parse_ns_headers
from _util import isstringlike
import _rfc3986
debug = logging.getLogger("mechanize.cookies").debug
def reraise_unmasked_exceptions(unmasked=()):
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways.
# This function re-raises some exceptions we don't want to trap.
import mechanize, warnings
if not mechanize.USE_BARE_EXCEPT:
raise
unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError)
etype = sys.exc_info()[0]
if issubclass(etype, unmasked):
raise
# swallowed an exception
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("mechanize bug!\n%s" % msg, stacklevel=2)
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
# at other uses of IPV4_RE also, if you change this.
return not (IPV4_RE.search(text) or
text == "" or
text[0] == "." or text[-1] == ".")
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
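Illustrative examples, derived from the rules above (mirrored by
test_domain_match):
>>> domain_match("x.y.com", ".Y.com")
True
>>> domain_match("x.y.com", "Y.com")
False
>>> domain_match(".c.com", "a.b.c.com")
False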
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
has_form_nb = not (i == -1 or i == 0)
return (
has_form_nb and
B.startswith(".") and
is_HDN(B[1:])
)
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
return not IPV4_RE.search(text)
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
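A few illustrative cases (mirrored by test_domain_match):
>>> user_domain_match("rhubarb.acme.com", ".acme.com")
True
>>> user_domain_match("acme.com", ".acme.com")
False
>>> user_domain_match("192.168.1.1", ".168.1.1")
False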
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = _rfc3986.urlsplit(url)[1]
if host is None:
host = request.get_header("Host", "")
# remove port, if present
host = cut_port_re.sub("", host, 1)
return host.lower()
def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
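For example, a request to http://example/ (a host name with no embedded
dot) has request-host "example" and effective request-host name
"example.local".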
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
def request_path(request):
"""request-URI, as defined by RFC 2965."""
url = request.get_full_url()
path, query, frag = _rfc3986.urlsplit(url)[2:]
path = escape_path(path)
req_path = _rfc3986.urlunsplit((None, None, path, query, frag))
if not req_path.startswith("/"):
req_path = "/"+req_path
return req_path
def request_port(request):
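# Returns the port as a string (e.g. "8080" for host "www.acme.com:8080"),
# DEFAULT_HTTP_PORT if the host has no explicit port, or None if the port
# is not numeric.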
host = request.get_host()
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
+def request_is_unverifiable(request):
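+ # Prefer the is_unverifiable() method; fall back to the older
+ # .unverifiable attribute for request objects that only provide that.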
+ try:
+ return request.is_unverifiable()
+ except AttributeError:
+ if hasattr(request, "unverifiable"):
+ return request.unverifiable
+ else:
+ raise
+
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
if isinstance(path, types.UnicodeType):
path = path.encode("utf-8")
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
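An IP address is not an HDN, so it is its own reach:
>>> reach("192.168.0.1")
'192.168.0.1'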
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
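For example (hosts are illustrative): if the origin transaction was with
www.acme.com, whose reach is .acme.com, an unverifiable request to
images.acme.com is not third-party, but one to tracker.example.com is.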
"""
req_host = request_host(request)
# the origin request's request-host was stuffed into request by
# _urllib2_support.AbstractHTTPHandler
return not domain_match(req_host, reach(request.origin_req_host))
class Cookie:
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
version: integer;
name: string;
value: string (may be None);
port: string; None indicates no attribute was supplied (eg. "Port", rather
than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list
string (eg. "80,8080")
port_specified: boolean; true if a value was supplied with the Port
cookie-attribute
domain: string;
domain_specified: boolean; true if Domain was explicitly set
domain_initial_dot: boolean; true if Domain as set in HTTP header by server
started with a dot (yes, this really is necessary!)
path: string;
path_specified: boolean; true if Path was explicitly set
secure: boolean; true if should only be returned over secure connection
expires: integer; seconds since epoch (RFC 2965 cookies should calculate
this value from the Max-Age attribute)
discard: boolean, true if this is a session cookie; (if no expires value,
this should be true)
comment: string;
comment_url: string;
rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not
Set-Cookie2:) header, but had a version cookie-attribute of 1
rest: mapping of other cookie-attributes
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return self._rest.has_key(name)
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def nonstandard_attr_keys(self):
return self._rest.keys()
def is_expired(self, now=None):
if now is None: now = time.time()
return (self.expires is not None) and (self.expires <= now)
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
def __repr__(self):
args = []
for name in ["version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
]:
attr = getattr(self, name)
args.append("%s=%s" % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
As well as implementing set_ok and return_ok, implementations of this
interface must also supply the following attributes, indicating which
protocols should be used, and how. These can be read and set at any time,
though whether that makes complete sense from the protocol point of view is
doubtful.
Public attributes:
netscape: implement netscape protocol
rfc2965: implement RFC 2965 protocol
rfc2109_as_netscape:
WARNING: This argument will change or go away if it is not accepted into
the Python standard library in this form!
If true, treat RFC 2109 cookies as though they were Netscape cookies. The
default is for this attribute to be None, which means treat 2109 cookies
as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is,
by default), and as Netscape cookies otherwise.
hide_cookie2: don't add Cookie2 header to requests (the presence of
this header indicates to the server that we understand RFC 2965
cookies)
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.extract_cookies.__doc__
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.add_cookie_header.__doc__
"""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
This is here as an optimization, to remove the need for checking every
cookie with a particular domain (which may involve reading many files).
The default implementations of domain_return_ok and path_return_ok
(return True) leave all the work to return_ok.
If domain_return_ok returns true for the cookie domain, path_return_ok
is called for the cookie path. Otherwise, path_return_ok and return_ok
are never called for that cookie domain. If path_return_ok returns
true, return_ok is called with the Cookie object itself for a full
check. Otherwise, return_ok is never called for that cookie path.
Note that domain_return_ok is called for every *cookie* domain, not
just for the *request* domain. For example, the function might be
called with both ".acme.com" and "www.acme.com" if the request domain
is "www.acme.com". The same goes for path_return_ok.
For argument documentation, see the docstring for return_ok.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
See the docstring for domain_return_ok.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies.
Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is
switched off by default.
The easiest way to provide your own policy is to override this class and
call its methods in your overridden implementations before adding your own
additional checks.
import mechanize
class MyCookiePolicy(mechanize.DefaultCookiePolicy):
def set_ok(self, cookie, request):
if not mechanize.DefaultCookiePolicy.set_ok(
self, cookie, request):
return False
if i_dont_want_to_store_this_cookie():
return False
return True
In addition to the features required to implement the CookiePolicy
interface, this class allows you to block and allow domains from setting
and receiving cookies. There are also some strictness switches that allow
you to tighten up the rather loose Netscape protocol rules a little bit (at
the cost of blocking some benign cookies).
A domain blacklist and whitelist is provided (both off by default). Only
domains not in the blacklist and present in the whitelist (if the whitelist
is active) participate in cookie setting and returning. Use the
blocked_domains constructor argument, and blocked_domains and
set_blocked_domains methods (and the corresponding argument and methods for
allowed_domains). If you set a whitelist, you can turn it off again by
setting it to None.
Domains in block or allow lists that do not start with a dot must
string-compare equal. For example, "acme.com" matches a blacklist entry of
"acme.com", but "www.acme.com" does not. Domains that do start with a dot
are matched by more specific domains too. For example, both "www.acme.com"
and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does
not). IP addresses are an exception, and must match exactly. For example,
if blocked_domains contains "192.168.1.2" and ".168.1.2", then 192.168.1.2 is
blocked, but 193.168.1.2 is not.
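A minimal usage sketch (the domain names are illustrative):
import mechanize
policy = mechanize.DefaultCookiePolicy(
blocked_domains=[".ads.example.com"])
jar = mechanize.CookieJar(policy=policy)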
Additional Public Attributes:
General strictness switches
strict_domain: don't allow sites to set two-component domains with
country-code top-level domains like .co.uk, .gov.uk, .co.nz, etc.
This is far from perfect and isn't guaranteed to work!
RFC 2965 protocol strictness switches
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
rules for Netscape cookies:
DomainStrictNoDots: when setting cookies, host prefix must not contain a
dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
www.foo contains a dot)
DomainStrictNonDomain: cookies that did not explicitly specify a Domain
cookie-attribute can only be returned to a domain that string-compares
equal to the domain that set the cookie (eg. rockets.acme.com won't
be returned cookies from acme.com that had no Domain cookie-attribute)
DomainRFC2965Match: when setting cookies, require a full RFC 2965
domain-match
DomainLiberal and DomainStrict are the most useful combinations of the
above flags, for convenience
strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
have names starting with '$'
strict_ns_set_path: don't allow setting cookies whose path doesn't
path-match request URI
"""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
# WARNING: this argument will change or go away if it is not
# accepted into the Python standard library in this form!
# default, ie. treat 2109 as netscape iff not rfc2965
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""
Constructor arguments should be used as keyword arguments only.
blocked_domains: sequence of domain names that we never accept cookies
from, nor return cookies to
allowed_domains: if not None, this is a sequence of the only domains
for which we accept and return cookies
For other arguments, see CookiePolicy.__doc__ and
DefaultCookiePolicy.__doc__.
"""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override set_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
debug(" - checking cookie %s", cookie)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
debug(" Set-Cookie2 without version attribute (%s)", cookie)
return False
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
- if request.unverifiable and is_third_party(request):
+ if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
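# Sketch of the port check above (values illustrative): the request port
# must appear in the cookie's comma-separated Port list for the cookie to
# be set.
req_port = "8080"
cookie_ports = "80,8080".split(",")
assert req_port in cookie_ports
assert "443" not in cookie_ports  # a request on port 443 would be refused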
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
- if request.unverifiable and is_third_party(request):
+ if request_is_unverifiable(request) and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
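# Sketch (not part of the patch): vals_sorted_by_key returns the mapping's
# values ordered by sorted key, giving MappingIterator below a
# deterministic traversal order.
assert vals_sorted_by_key({"b": 2, "c": 3, "a": 1}) == [1, 2, 3]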
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
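# Sketch of the sentinel idiom: passing Absent as the dict.get default
# tells a genuinely missing key apart from one stored with value None.
d = {"domain": None}
assert d.get("domain", Absent) is None  # key present, value is None
assert d.get("path", Absent) is Absent  # key really absent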
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
+ set_policy(policy)
+ cookies_for_request(request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
+ def cookies_for_request(self, request):
+ """Return a list of cookies to be returned to server.
+
+ The returned list of cookie instances is sorted in the order they
+ should appear in the Cookie: header for return to the server.
+
+ See add_cookie_header.__doc__ for the interface required of the
+ request argument.
+
+ New in version 0.1.10
+
+ """
+ cookies = self._cookies_for_request(request)
+ # add cookies in order of most specific (i.e. longest) path first
+ def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
+ cookies.sort(decreasing_size)
+ return cookies
+
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
+ # this method still exists (alongside cookies_for_request) because it
+ # is part of an implied protected interface for subclasses of cookiejar
+ # XXX document that implied interface, or provide another way of
+ # implementing cookiejars than subclassing
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
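# Sketch (not part of the patch) of the most-specific-path-first ordering
# that cookies_for_request applies; _C is a hypothetical stand-in for a
# cookie with only a path attribute.
class _C:
    def __init__(self, path): self.path = path
cs = [_C("/"), _C("/acme/ammo/"), _C("/acme/")]
cs.sort(lambda a, b: cmp(len(b.path), len(a.path)))
assert [c.path for c in cs] == ["/acme/ammo/", "/acme/", "/"]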
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
- like ['foo="bar"; $Path="/"', ...]
-
The $Version attribute is also added when appropriate (currently only
once per request).
- """
- # add cookies in order of most specific (ie. longest) path first
- def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
- cookies.sort(decreasing_size)
+ >>> jar = CookieJar()
+ >>> ns_cookie = Cookie(0, "foo", '"bar"', None, False,
+ ... "example.com", False, False,
+ ... "/", False, False, None, True,
+ ... None, None, {})
+ >>> jar._cookie_attrs([ns_cookie])
+ ['foo="bar"']
+ >>> rfc2965_cookie = Cookie(1, "foo", "bar", None, False,
+ ... ".example.com", True, False,
+ ... "/", False, False, None, True,
+ ... None, None, {})
+ >>> jar._cookie_attrs([rfc2965_cookie])
+ ['$Version=1', 'foo=bar', '$Domain="example.com"']
+ """
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
- the methods get_full_url, get_host, get_type, has_header, get_header,
- header_items and add_unredirected_header, as documented by urllib2, and
- the port attribute (the port number). Actually,
- RequestUpgradeProcessor will automatically upgrade your Request object
- to one with has_header, get_header, header_items and
+ the methods get_full_url, get_host, is_unverifiable, get_type,
+ has_header, get_header, header_items and add_unredirected_header, as
+ documented by urllib2, and the port attribute (the port number).
+ Actually, RequestUpgradeProcessor will automatically upgrade your
+ Request object to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
self._policy._now = self._now = int(time.time())
- req_host, erhn = eff_request_host(request)
- strict_non_domain = (
- self._policy.strict_ns_domain & self._policy.DomainStrictNonDomain)
-
- cookies = self._cookies_for_request(request)
+ cookies = self.cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
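# Usage sketch (not part of the patch; the URL is illustrative): cookies
# captured from one response are returned on a later request to a
# matching URL.
import urllib2
from mechanize import CookieJar
jar = CookieJar()
req = urllib2.Request("http://example.com/")
# ... jar.extract_cookies(response, req) on an earlier response, then:
jar.add_cookie_header(req)  # adds Cookie: (and maybe Cookie2:) headers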
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
if v is None:
debug(" missing value for max-age attribute")
bad_cookie = True
break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a request to
# discard (old and new) cookies, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
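# Sketch of the Max-Age normalisation above: a relative Max-Age becomes
# an absolute "expires" value in seconds since the epoch (numbers
# illustrative).
import time
now = int(time.time())
standard = {"expires": now + 3600}  # as produced from "max-age=3600"
assert standard["expires"] > now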
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None: version = int(version)
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# An expiry date in the past is a request to delete the cookie. This
# can't be done in DefaultCookiePolicy, because cookies can't be
# deleted there.
try:
self.clear(domain, path, name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
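# Sketch of the default-path rule above, for a request to /blah/rhubarb
# carrying no Path cookie-attribute:
path = "/blah/rhubarb"
i = path.rfind("/")
assert path[:i] == "/blah"      # version 0 (Netscape) default
assert path[:i+1] == "/blah/"   # version > 0 (RFC 2965) default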
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object.
- See extract_cookies.__doc__ for the interfaces required of the
+ See extract_cookies.__doc__ for the interface required of the
response and request arguments.
"""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
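# Sketch (keys illustrative) of the shadowing rule above: Netscape cookies
# whose (domain, path, name) matches an RFC 2965 cookie are dropped.
lookup = {(".example.com", "/", "foo"): None}
ns_keys = [(".example.com", "/", "foo"), (".example.com", "/", "bar")]
kept = [k for k in ns_keys if not lookup.has_key(k)]
assert kept == [(".example.com", "/", "bar")]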
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
- object' may be any object that provides a getallmatchingheaders
- method).
+ object' may be any object that provides a getheaders method).
The request object (usually a urllib2.Request instance) must support
- the methods get_full_url and get_host, as documented by urllib2, and
- the port attribute (the port number). The request is used to set
- default values for cookie-attributes as well as for checking that the
- cookie is OK to be set.
+ the methods get_full_url, get_type, get_host, and is_unverifiable, as
+ documented by urllib2, and the port attribute (the port number). The
+ request is used to set default values for cookie-attributes as well as
+ for checking that the cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
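# Usage sketch for clear(); the domain/path/name values are illustrative,
# and KeyError is raised when nothing matches:
from mechanize import CookieJar
jar = CookieJar()
jar.clear()                              # remove all cookies
# jar.clear("www.acme.com")              # only that domain
# jar.clear("www.acme.com", "/")         # only that path in the domain
# jar.clear("www.acme.com", "/", "foo")  # exactly one cookie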
def clear_session_cookies(self):
"""Discard all session cookies.
Discards all cookies held by object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
closing the browser window.
Note that the save method won't save session cookies anyway, unless you
ask otherwise by passing a true ignore_discard argument.
"""
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the save
method won't save expired cookies anyway (unless you ask otherwise by
passing a true ignore_expires argument).
"""
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
def __getitem__(self, i):
if i == 0:
self._getitem_iterator = self.__iter__()
elif self._prev_getitem_index != i-1: raise IndexError(
"CookieJar.__getitem__ only supports sequential iteration")
self._prev_getitem_index = i
try:
return self._getitem_iterator.next()
except StopIteration:
raise IndexError()
def __iter__(self):
return MappingIterator(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
class LoadError(Exception): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file.
Additional methods
save(filename=None, ignore_discard=False, ignore_expires=False)
load(filename=None, ignore_discard=False, ignore_expires=False)
revert(filename=None, ignore_discard=False, ignore_expires=False)
Additional public attributes
filename: filename for loading and saving cookies
Additional public readable attributes
delayload: request that cookies are lazily loaded from disk; this is only
a hint, since it affects performance, not behaviour (unless the cookies
on disk are changing); a CookieJar object may ignore it (in fact, only
MSIECookieJar lazily loads cookies at the moment)
"""
def __init__(self, filename=None, delayload=False, policy=None):
"""
See FileCookieJar.__doc__ for argument documentation.
Cookies are NOT loaded from the named file until either the load or
revert method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file.
filename: name of file in which to save cookies
ignore_discard: save even cookies set to be discarded
ignore_expires: save even cookies that have expired
The file is overwritten if it already exists, thus wiping all its
cookies. Saved cookies can be restored later using the load or revert
methods. If filename is not specified, self.filename is used; if
self.filename is None, ValueError is raised.
"""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file.
Old cookies are kept unless overwritten by newly loaded ones.
Arguments are as for .save().
If filename is not specified, self.filename is used; if self.filename
is None, ValueError is raised. The named file must be in the format
understood by the class, or LoadError will be raised. This format will
be identical to that written by the save method, unless the load format
is not sufficiently well understood (as is the case for MSIECookieJar).
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
diff --git a/test/test_cookies.py b/test/test_cookies.py
index 1a62831..50f6b46 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,1400 +1,1540 @@
"""Tests for _ClientCookie."""
-import urllib2, re, os, StringIO, mimetools, time, tempfile, errno
+import sys, urllib2, re, os, StringIO, mimetools, time, tempfile, errno, inspect
from time import localtime
from unittest import TestCase
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
class TempfileTestMixin:
def setUp(self):
self._tempfiles = []
def tearDown(self):
for fn in self._tempfiles:
try:
os.remove(fn)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def mktemp(self):
fn = tempfile.mktemp()
self._tempfiles.append(fn)
return fn
+def caller():
+ return sys._getframe().f_back.f_back.f_code.co_name
+
+def attribute_names(obj):
+ return set([spec[0] for spec in inspect.getmembers(obj)
+ if not spec[0].startswith("__")])
+
+class CookieJarInterfaceTests(TestCase):
+
+ def test_add_cookie_header(self):
+ from mechanize import CookieJar
+ # verify only these methods are used
+ class MockRequest(object):
+ def __init__(self):
+ self.added_headers = []
+ self.called = set()
+ def log_called(self):
+ self.called.add(caller())
+ def get_full_url(self):
+ self.log_called()
+ return "https://example.com:443"
+ def get_host(self):
+ self.log_called()
+ return "example.com:443"
+ def get_type(self):
+ self.log_called()
+ return "https"
+ def has_header(self, header_name):
+ self.log_called()
+ return False
+ def get_header(self, header_name, default=None):
+ self.log_called()
+ pass # currently not called
+ def header_items(self):
+ self.log_called()
+ pass # currently not called
+ def add_unredirected_header(self, key, val):
+ self.log_called()
+ self.added_headers.append((key, val))
+ def is_unverifiable(self):
+ self.log_called()
+ return False
+ @property
+ def port(self):
+ self.log_called()
+ pass # currently not used, since urllib2 always sets .port None
+ jar = CookieJar()
+ interact_netscape(jar, "https://example.com:443",
+ "foo=bar; port=443; secure")
+ request = MockRequest()
+ jar.add_cookie_header(request)
+ expect_called = attribute_names(MockRequest) - set(
+ ["port", "get_header", "header_items", "log_called"])
+ self.assertEquals(request.called, expect_called)
+ self.assertEquals(request.added_headers, [("Cookie", "foo=bar")])
+
+ def test_extract_cookies(self):
+ from mechanize import CookieJar
+
+ # verify only these methods are used
+
+ class StubMessage(object):
+ def getheaders(self, name):
+ return ["foo=bar; port=443"]
+
+ class StubResponse(object):
+ def info(self):
+ return StubMessage()
+
+ class StubRequest(object):
+ def __init__(self):
+ self.added_headers = []
+ self.called = set()
+ def log_called(self):
+ self.called.add(caller())
+ def get_full_url(self):
+ self.log_called()
+ return "https://example.com:443"
+ def get_host(self):
+ self.log_called()
+ return "example.com:443"
+ def is_unverifiable(self):
+ self.log_called()
+ return False
+ @property
+ def port(self):
+ self.log_called()
+ pass # currently not used, since urllib2 always sets .port None
+ jar = CookieJar()
+ response = StubResponse()
+ request = StubRequest()
+ jar.extract_cookies(response, request)
+ expect_called = attribute_names(StubRequest) - set(
+ ["port", "log_called"])
+ self.assertEquals(request.called, expect_called)
+ self.assertEquals([(cookie.name, cookie.value) for cookie in jar],
+ [("foo", "bar")])
+
+ def test_unverifiable(self):
+ from mechanize._clientcookie import request_is_unverifiable
+ # .unverifiable was added in mechanize, .is_unverifiable() later got
+ # added in cookielib. XXX deprecate .unverifiable
+ class StubRequest(object):
+ def __init__(self, attrs):
+ self._attrs = attrs
+ self.accessed = set()
+ def __getattr__(self, name):
+ self.accessed.add(name)
+ try:
+ return self._attrs[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ request = StubRequest(dict(is_unverifiable=lambda: False))
+ self.assertEquals(request_is_unverifiable(request), False)
+
+ request = StubRequest(dict(is_unverifiable=lambda: False,
+ unverifiable=True))
+ self.assertEquals(request_is_unverifiable(request), False)
+
+ request = StubRequest(dict(unverifiable=False))
+ self.assertEquals(request_is_unverifiable(request), False)
+
+
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. We regard it as a missing VALUE.
filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this one is a valid Request object, so maybe the
# check for no host in the URL should be removed from the request_host
# function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
assert (req.has_header("Cookie") and
req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
- # ... and check is doesn't get returned
+ # ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_secure(self):
from mechanize import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
assert not c._cookies["www.acme.com"]["/"]["foo1"].secure, \
"non-secure cookie registered secure"
assert c._cookies["www.acme.com"]["/"]["foo2"].secure, \
"secure cookie registered non-secure"
def test_quote_cookie_value(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
assert h == r'$Version=1; foo=\\b\"a\"r'
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from mechanize import CookieJar, Request, DefaultCookiePolicy
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
assert len(c) == 1
c.add_cookie_header(req)
assert req.has_header("Cookie")
def test_domain_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find( "Domain") == -1, \
"absent domain returned with domain present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
assert h.find('$Domain=".bar.com"') != -1, \
"domain not returned"
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
assert h.find('$Domain="bar.com"') != -1, \
"domain not returned"
def test_path_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Path") == -1, \
"absent path returned with path present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
assert h.find('$Path="/"') != -1, "path not returned"
def test_port_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Port") == -1, \
"absent port returned with port present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
assert re.search("\$Port([^=]|$)", h), \
"port with no value not returned with no value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
assert h.find('$Port="80"') != -1, \
"port with single value not returned with single value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
assert h.find('$Port="80,8080"') != -1, \
"port with multiple values not returned with multiple values"
def test_no_return_comment(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
assert h.find("Comment") == -1, \
"Comment or CommentURL cookie-attributes returned to server"
# just pondering security here -- this isn't really a test (yet)
## def test_hack(self):
## from mechanize import CookieJar
## c = CookieJar()
## interact_netscape(c, "http://victim.mall.com/",
## 'prefs="foo"')
## interact_netscape(c, "http://cracker.mall.com/",
## 'prefs="bar"; Domain=.mall.com')
## interact_netscape(c, "http://cracker.mall.com/",
## '$Version="1"; Domain=.mall.com')
## h = interact_netscape(c, "http://victim.mall.com/")
## print h
def test_Cookie_iterator(self):
from mechanize import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
# sequential iteration
for i in range(4):
i = 0
for c in cs:
assert isinstance(c, Cookie)
assert c.version == versions[i]
assert c.name == names[i]
assert c.domain == domains[i]
assert c.path == paths[i]
i = i + 1
self.assertRaises(IndexError, lambda cs=cs : cs[5])
# can't skip
cs[0]
cs[1]
self.assertRaises(IndexError, lambda cs=cs : cs[3])
# can't go backwards
cs[0]
cs[1]
cs[2]
self.assertRaises(IndexError, lambda cs=cs : cs[1])
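# Hedged editor's note on the indexing asserts above: CookieJar's
# (deprecated) __getitem__ is backed by the same iterator as the
# for-loop, so indices must be requested sequentially from 0;
# skipping ahead or going backwards raises IndexError, as checked here.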
def test_parse_ns_headers(self):
from mechanize._headersutil import parse_ns_headers
# missing domain value (invalid cookie)
assert parse_ns_headers(["foo=bar; path=/; domain"]) == [
[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
# invalid expires value
assert parse_ns_headers(
["foo=bar; expires=Foo Bar 12 33:22:11 2000"]) == \
[[("foo", "bar"), ("expires", None), ("version", "0")]]
# missing cookie name (valid cookie)
assert parse_ns_headers(["foo"]) == [[("foo", None), ("version", "0")]]
# shouldn't add version if header is empty
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
+ def test_cookies_for_request(self):
+ from mechanize import CookieJar, Request
+
+ cj = CookieJar()
+ interact_netscape(cj, "http://example.com/", "short=path")
+ interact_netscape(cj, "http://example.com/longer/path", "longer=path")
+ for_short_path = cj.cookies_for_request(Request("http://example.com/"))
+ self.assertEquals([cookie.name for cookie in for_short_path],
+ ["short"])
+ for_long_path = cj.cookies_for_request(Request(
+ "http://example.com/longer/path"))
+ self.assertEquals([cookie.name for cookie in for_long_path],
+ ["longer", "short"])
+
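# Hedged sketch (editor's illustration, not part of the patch) of the
# cookies_for_request() contract the new test exercises, assuming
# mechanize.Cookie mirrors the stdlib cookielib.Cookie constructor:
import mechanize

def make_cookie(name, value, domain, path):
    return mechanize.Cookie(
        0, name, value, None, False,        # version, name, value, port, port_specified
        domain, True, False,                # domain, domain_specified, domain_initial_dot
        path, True,                         # path, path_specified
        False, None, True, None, None, {})  # secure, expires, discard, comment, comment_url, rest

cj = mechanize.CookieJar()
cj.set_cookie(make_cookie("short", "path", "example.com", "/"))
cj.set_cookie(make_cookie("longer", "path", "example.com", "/longer/"))
request = mechanize.Request("http://example.com/longer/path")
# most specific (longest) path first:
print([cookie.name for cookie in cj.cookies_for_request(request)])  # ['longer', 'short']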
class CookieJarPersistenceTests(TempfileTestMixin, TestCase):
def _interact(self, cj):
year_plus_one = localtime(time.time())[0] + 1
interact_2965(cj, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(cj, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(cj, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(cj, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(cj, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(cj, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def test_firefox3_cookiejar_restore(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy
filename = self.mktemp()
def create_cookiejar():
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(
filename, policy=DefaultCookiePolicy(rfc2965=True))
finally:
reset_experimental_warnings()
cj.connect()
return cj
cj = create_cookiejar()
self._interact(cj)
self.assertEquals(len(cj), 6)
cj.close()
cj = create_cookiejar()
self.assert_("name='foo1', value='bar'" in repr(cj))
self.assertEquals(len(cj), 4)
def test_firefox3_cookiejar_iteration(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Cookie
filename = self.mktemp()
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(
filename, policy=DefaultCookiePolicy(rfc2965=True))
finally:
reset_experimental_warnings()
cj.connect()
self._interact(cj)
summary = "\n".join([str(cookie) for cookie in cj])
self.assertEquals(summary,
"""\
<Cookie foo2=bar for www.acme.com:80/>
<Cookie foo3=bar for www.acme.com/>
<Cookie foo1=bar for www.acme.com/>
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
def test_firefox3_cookiejar_clear(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Cookie
filename = self.mktemp()
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(
filename, policy=DefaultCookiePolicy(rfc2965=True))
finally:
reset_experimental_warnings()
cj.connect()
self._interact(cj)
cj.clear("www.acme.com", "/", "foo2")
def summary(): return "\n".join([str(cookie) for cookie in cj])
self.assertEquals(summary(),
"""\
<Cookie foo3=bar for www.acme.com/>
<Cookie foo1=bar for www.acme.com/>
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
cj.clear("www.acme.com")
self.assertEquals(summary(),
"""\
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
# if name is given, so must path and domain
self.assertRaises(ValueError, cj.clear, domain=".foo.com",
name="foob")
# nonexistent domain
self.assertRaises(KeyError, cj.clear, domain=".spam.com")
def test_firefox3_cookiejar_add_cookie_header(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Request
filename = self.mktemp()
hide_experimental_warnings()
try:
cj = Firefox3CookieJar(filename)
finally:
reset_experimental_warnings()
cj.connect()
# Session cookies (true .discard) and persistent cookies (false
# .discard) are stored differently. Check they both get sent.
year_plus_one = localtime(time.time())[0] + 1
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(cj, "http://www.foo.com/", "fooa=bar")
interact_netscape(cj, "http://www.foo.com/",
"foob=bar; %s" % expires)
ca, cb = cj
self.assert_(ca.discard)
self.assertFalse(cb.discard)
request = Request("http://www.foo.com/")
cj.add_cookie_header(request)
self.assertEquals(request.get_header("Cookie"),
"fooa=bar; foob=bar")
def test_mozilla_cookiejar(self):
# Save / load Mozilla/Netscape cookie file format.
from mechanize import MozillaCookieJar, DefaultCookiePolicy
filename = tempfile.mktemp()
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
self._interact(c)
def save_and_restore(cj, ignore_discard, filename=filename):
from mechanize import MozillaCookieJar, DefaultCookiePolicy
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
assert len(new_c) == 6 # none discarded
assert repr(new_c).find("name='foo1', value='bar'") != -1
new_c = save_and_restore(c, False)
assert len(new_c) == 4 # 2 of them discarded on save
assert repr(new_c).find("name='foo1', value='bar'") != -1
def test_mozilla_cookiejar_embedded_tab(self):
from mechanize import MozillaCookieJar
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
"a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
"a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
cj.revert(ignore_discard=True)
cookies = cj._cookies["a.com"]["/"]
self.assertEquals(cookies["name"].value, "val\tstillthevalue")
self.assertEquals(cookies["name2"].value, "value")
finally:
try:
os.remove(filename)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
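# Hedged editor's aside on the cookies.txt format exercised above: each
# line carries seven tab-separated fields -- domain, TRUE/FALSE domain
# flag, path, TRUE/FALSE secure flag, expires (empty means session
# cookie), name, value -- so a tab embedded in the value yields an
# extra field, which the jar must fold back into the value.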
def test_mozilla_cookiejar_initial_dot_violation(self):
from mechanize import MozillaCookieJar, LoadError
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
self.assertRaises(LoadError, cj.revert, ignore_discard=True)
finally:
try:
os.remove(filename)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
class LWPCookieTests(TestCase, TempfileTestMixin):
# Tests taken from libwww-perl, with a few modifications.
def test_netscape_example_1(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = localtime(time.time())[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "CUSTOMER=WILE_E_COYOTE" and
req.get_header("Cookie2") == '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
not h.find("SHIPPING=FEDEX") != -1)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
h.startswith("SHIPPING=FEDEX;"))
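# Hedged editor's illustration of the ordering rule the comments above
# describe: cookies with longer (more specific) paths must come first
# in the Cookie header. Plain Python, hypothetical data:
pairs = [("CUSTOMER", "/"), ("PART_NUMBER", "/"), ("SHIPPING", "/foo")]
ordered = sorted(pairs, key=lambda pair: len(pair[1]), reverse=True)
print([name for name, path in ordered])  # ['SHIPPING', 'CUSTOMER', 'PART_NUMBER']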
def test_netscape_example_2(self):
from mechanize import CookieJar, Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
assert re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie"))
def test_ietf_example_1(self):
from mechanize import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
assert not cookie
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
assert re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie)
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
assert (re.search(r'^\$Version="?1"?;', cookie) and
re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
'\s*\$Path="\/acme"', cookie) and
re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
assert (re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
cookie.find("WILE_E_COYOTE") != -1)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from mechanize import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
assert re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie)
|
Almad/Mechanize
|
6ac75b3eec18d1ac1e8c952382faf506a77084b1
|
Add a -l option to test runner to print debug logs
|
diff --git a/test.py b/test.py
index fa5d0f6..8e497ee 100755
--- a/test.py
+++ b/test.py
@@ -1,146 +1,151 @@
#!/usr/bin/env python
"""Test runner.
For further help, enter this at a command prompt:
python test.py --help
"""
# Modules containing tests to run -- a test is anything named *Tests, which
# should be classes deriving from unittest.TestCase.
MODULE_NAMES = ["test_date", "test_browser", "test_response", "test_cookies",
"test_headers", "test_urllib2", "test_pullparser",
"test_useragent", "test_html", "test_opener",
]
import sys, os, logging, glob
-#level = logging.DEBUG
-#level = logging.INFO
-#level = logging.WARNING
-#level = logging.NOTSET
-#logging.getLogger("mechanize").setLevel(level)
-#logging.getLogger("mechanize").addHandler(logging.StreamHandler(sys.stdout))
-
if __name__ == "__main__":
# XXX
# temporary stop-gap to run doctests &c.
# should switch to nose or something
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# XXXX coverage output seems incorrect ATM
run_coverage = "-c" in sys.argv
if run_coverage:
sys.argv.remove("-c")
use_cgitb = "-t" in sys.argv
if use_cgitb:
sys.argv.remove("-t")
run_doctests = "-d" not in sys.argv
if not run_doctests:
sys.argv.remove("-d")
run_unittests = "-u" not in sys.argv
if not run_unittests:
sys.argv.remove("-u")
+ log = "-l" in sys.argv
+ if log:
+ sys.argv.remove("-l")
+ level = logging.DEBUG
+# level = logging.INFO
+# level = logging.WARNING
+# level = logging.NOTSET
+ logger = logging.getLogger("mechanize")
+ logger.setLevel(level)
+ handler = logging.StreamHandler(sys.stdout)
+ handler.setLevel(level)
+ logger.addHandler(handler)
# import local copy of Python 2.5 doctest
assert os.path.isdir("test")
sys.path.insert(0, "test")
# needed for recent doctest / linecache -- this is only for testing
# purposes, these don't get installed
# doctest.py revision 45701 and linecache.py revision 45940. Since
# linecache is used by Python itself, linecache.py is renamed
# linecache_copy.py, and this copy of doctest is modified (only) to use
# that renamed module.
sys.path.insert(0, "test-tools")
import doctest
import testprogram
if run_coverage:
import coverage
print 'running coverage'
coverage.erase()
coverage.start()
import mechanize
class DefaultResult:
def wasSuccessful(self):
return True
result = DefaultResult()
if run_doctests:
# run .doctest files needing special support
common_globs = {"mechanize": mechanize}
pm_doctest_filename = os.path.join(
"test", "test_password_manager.special_doctest")
for globs in [
{"mgr_class": mechanize.HTTPPasswordMgr},
{"mgr_class": mechanize.HTTPProxyPasswordMgr},
]:
globs.update(common_globs)
doctest.testfile(pm_doctest_filename, globs=globs)
try:
import robotparser
except ImportError:
pass
else:
doctest.testfile(os.path.join(
"test", "test_robotfileparser.special_doctest"))
# run .doctest files
doctest_files = glob.glob(os.path.join("test", "*.doctest"))
for df in doctest_files:
doctest.testfile(df)
# run doctests in docstrings
from mechanize import _headersutil, _auth, _clientcookie, _pullparser, \
_http, _rfc3986, _useragent
doctest.testmod(_headersutil)
doctest.testmod(_rfc3986)
doctest.testmod(_auth)
doctest.testmod(_clientcookie)
doctest.testmod(_pullparser)
doctest.testmod(_http)
doctest.testmod(_useragent)
if run_unittests:
# run vanilla unittest tests
import unittest
test_path = os.path.join(os.path.dirname(sys.argv[0]), "test")
sys.path.insert(0, test_path)
test_runner = None
if use_cgitb:
test_runner = testprogram.CgitbTextTestRunner()
prog = testprogram.TestProgram(
MODULE_NAMES,
testRunner=test_runner,
localServerProcess=testprogram.TwistedServerProcess(),
)
result = prog.runTests()
if run_coverage:
# HTML coverage report
import colorize
try:
os.mkdir("coverage")
except OSError:
pass
private_modules = glob.glob("mechanize/_*.py")
private_modules.remove("mechanize/__init__.py")
for module_filename in private_modules:
module_name = module_filename.replace("/", ".")[:-3]
print module_name
module = sys.modules[module_name]
f, s, m, mf = coverage.analysis(module)
fo = open(os.path.join('coverage', os.path.basename(f)+'.html'), 'wb')
colorize.colorize_file(f, outstream=fo, not_covered=mf)
fo.close()
coverage.report(module)
#print coverage.analysis(module)
# XXX exit status is wrong -- does not take account of doctests
sys.exit(not result.wasSuccessful())
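# Hedged usage note for the new flag (given the patch above):
#   python test.py -l      # run the suite with mechanize DEBUG logs on stdout
#   python test.py -l -u   # same, but skip the vanilla unittest tests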
|
Almad/Mechanize
|
a537abd1023746fdb452a63c37ad606800209b31
|
Silence the logging module's "no handlers could be found for logger mechanize" warning in a way that doesn't sometimes clobber attempts to set the log level
|
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index f7fca78..3f786ec 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,133 +1,139 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
+import logging
import sys
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
# 2.4 raises SyntaxError due to generator / try/finally use
if sys.version_info[:2] > (2,4):
try:
import sqlite3
except ImportError:
pass
else:
from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
+
+logger = logging.getLogger("mechanize")
+if logger.level is logging.NOTSET:
+ logger.setLevel(logging.CRITICAL)
+del logger
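# Hedged editor's sketch: because the level above is defaulted only
# when it is still NOTSET, an application can opt back in to
# mechanize's logging without being clobbered:
import logging, sys
logger = logging.getLogger("mechanize")
logger.setLevel(logging.DEBUG)                        # overrides the CRITICAL default
logger.addHandler(logging.StreamHandler(sys.stdout))  # and actually emit the records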
diff --git a/mechanize/_request.py b/mechanize/_request.py
index 4322348..63998a4 100644
--- a/mechanize/_request.py
+++ b/mechanize/_request.py
@@ -1,86 +1,84 @@
"""Integration with Python standard library module urllib2: Request class.
Copyright 2004-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import urllib2, urllib, logging
from _clientcookie import request_host
import _rfc3986
warn = logging.getLogger("mechanize").warning
-# don't complain about missing logging handler
-logging.getLogger("mechanize").setLevel(logging.ERROR)
class Request(urllib2.Request):
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False, visit=None):
# In mechanize 0.2, the interpretation of a unicode url argument will
# change: A unicode url argument will be interpreted as an IRI, and a
# bytestring as a URI. For now, we accept unicode or bytestring. We
# don't insist that the value is always a URI (specifically, must only
# contain characters which are legal), because that might break working
# code (who knows what bytes some servers want to see, especially with
# browser plugins for internationalised URIs).
if not _rfc3986.is_clean_uri(url):
warn("url argument is not a URI "
"(contains illegal characters) %r" % url)
urllib2.Request.__init__(self, url, data, headers)
self.selector = None
self.unredirected_hdrs = {}
self.visit = visit
# All the terminology below comes from RFC 2965.
self.unverifiable = unverifiable
# Set request-host of origin transaction.
# The origin request-host is needed in order to decide whether
# unverifiable sub-requests (automatic redirects, images embedded
# in HTML, etc.) are to third-party hosts. If they are, the
# resulting transactions might need to be conducted with cookies
# turned off.
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
def get_selector(self):
return urllib.splittag(self.__r_host)[0]
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_unredirected_header(self, key, val):
"""Add a header that will not be added to a redirected request."""
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
"""True iff request has named header (regular or unredirected)."""
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
def __str__(self):
return "<Request for %s>" % self.get_full_url()
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
|
Almad/Mechanize
|
c0ee6247017794923e3a662120c1de11ee6de1ac
|
Don't use a private attribute of the request in the request upgrade handler (what was I thinking??)
|
diff --git a/mechanize/_upgrade.py b/mechanize/_upgrade.py
index 73a447c..df59c01 100644
--- a/mechanize/_upgrade.py
+++ b/mechanize/_upgrade.py
@@ -1,40 +1,40 @@
from urllib2 import BaseHandler
from _request import Request
from _response import upgrade_response
from _util import deprecation
class HTTPRequestUpgradeProcessor(BaseHandler):
# upgrade urllib2.Request to this module's Request
# yuck!
handler_order = 0 # before anything else
def http_request(self, request):
if not hasattr(request, "add_unredirected_header"):
- newrequest = Request(request._Request__original, request.data,
+ newrequest = Request(request.get_full_url(), request.data,
request.headers)
try: newrequest.origin_req_host = request.origin_req_host
except AttributeError: pass
try: newrequest.unverifiable = request.unverifiable
except AttributeError: pass
try: newrequest.visit = request.visit
except AttributeError: pass
request = newrequest
return request
https_request = http_request
class ResponseUpgradeProcessor(BaseHandler):
# upgrade responses to be .close()able without becoming unusable
handler_order = 0 # before anything else
def __init__(self):
deprecation(
"See http://wwwsearch.sourceforge.net/mechanize/doc.html#seekable")
def any_response(self, request, response):
if not hasattr(response, 'closeable_response'):
response = upgrade_response(response)
return response
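# Hedged editor's aside on the bug fixed above: double-underscore
# attributes are name-mangled with the *defining* class's name, so
# reaching into another class's private state ties the caller to that
# exact class name:
class Original(object):
    def __init__(self, url):
        self.__original = url   # actually stored as _Original__original

r = Original("http://example.com/")
print(r._Original__original)    # fragile: breaks if the class is renamed
# ...which is why the patch switches to the public request.get_full_url().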
|
Almad/Mechanize
|
69d224959a7ae54cd8690a9e300e4e2e2c6aa39b
|
* Add Python version classifiers to setup.py * Don't call setup() on import of setup.py
|
diff --git a/setup.py b/setup.py
index e61de2d..e1ae03b 100755
--- a/setup.py
+++ b/setup.py
@@ -1,147 +1,156 @@
#!/usr/bin/env python
"""Stateful programmatic web browsing.
Stateful programmatic web browsing, after Andy Lester's Perl module
WWW::Mechanize.
The library is layered: mechanize.Browser (stateful web browser),
mechanize.UserAgent (configurable URL opener), plus urllib2 handlers.
Features include: ftp:, http: and file: URL schemes, browser history,
high-level hyperlink and HTML form support, HTTP cookies, HTTP-EQUIV and
Refresh, Referer [sic] header, robots.txt, redirections, proxies, and
Basic and Digest HTTP authentication. mechanize's response objects are
(lazily-) .seek()able and still work after .close().
Much of the code originally derived from Perl code by Gisle Aas
(libwww-perl), Johnny Lee (MSIE Cookie support) and last but not least
Andy Lester (WWW::Mechanize). urllib2 was written by Jeremy Hylton.
"""
def unparse_version(tup):
major, minor, bugfix, state_char, pre = tup
fmt = "%s.%s.%s"
args = [major, minor, bugfix]
if state_char is not None:
fmt += "%s"
args.append(state_char)
if pre is not None:
fmt += "-pre%s"
args.append(pre)
return fmt % tuple(args)
def str_to_tuple(text):
if text.startswith("("):
text = text[1:-1]
els = [el.strip() for el in text.split(",")]
newEls = []
for ii in range(len(els)):
el = els[ii]
if el == "None":
newEls.append(None)
elif 0 <= ii < 3:
newEls.append(int(el))
else:
if el.startswith("'") or el.startswith('"'):
el = el[1:-1]
newEls.append(el)
return tuple(newEls)
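# Hedged usage sketch for the two helpers above:
print(unparse_version((0, 1, 10, None, None)))  # -> 0.1.10
print(unparse_version((0, 1, 10, "b", 2)))      # -> 0.1.10b-pre2
print(str_to_tuple("(0, 1, 10, None, None)"))   # -> (0, 1, 10, None, None)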
import re
## VERSION_MATCH = re.search(r'__version__ = \((.*)\)',
## open("mechanize/_mechanize.py").read())
## VERSION = unparse_version(str_to_tuple(VERSION_MATCH.group(1)))
VERSION = "0.1.10"
INSTALL_REQUIRES = ["ClientForm>=0.2.6, ==dev"]
NAME = "mechanize"
PACKAGE = True
LICENSE = "BSD" # or ZPL 2.1
PLATFORMS = ["any"]
ZIP_SAFE = True
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: BSD License
License :: OSI Approved :: Zope Public License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python
+Programming Language :: Python :: 2
+Programming Language :: Python :: 2.3
+Programming Language :: Python :: 2.4
+Programming Language :: Python :: 2.5
Topic :: Internet
Topic :: Internet :: File Transfer Protocol (FTP)
Topic :: Internet :: WWW/HTTP
Topic :: Internet :: WWW/HTTP :: Browsers
Topic :: Internet :: WWW/HTTP :: Indexing/Search
Topic :: Internet :: WWW/HTTP :: Site Management
Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking
Topic :: Software Development :: Libraries
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Software Development :: Testing
Topic :: Software Development :: Testing :: Traffic Generation
Topic :: System :: Archiving :: Mirroring
Topic :: System :: Networking :: Monitoring
Topic :: System :: Systems Administration
Topic :: Text Processing
Topic :: Text Processing :: Markup
Topic :: Text Processing :: Markup :: HTML
Topic :: Text Processing :: Markup :: XML
"""
#-------------------------------------------------------
# the rest is constant for most of my released packages:
import sys
if PACKAGE:
packages, py_modules = [NAME], None
else:
packages, py_modules = None, [NAME]
doclines = __doc__.split("\n")
if not hasattr(sys, "version_info") or sys.version_info < (2, 3):
from distutils.core import setup
_setup = setup
def setup(**kwargs):
for key in [
# distutils >= Python 2.3 args
# XXX probably download_url came in earlier than 2.3
"classifiers", "download_url",
# setuptools args
"install_requires", "zip_safe", "test_suite",
]:
if kwargs.has_key(key):
del kwargs[key]
# Only want packages keyword if this is a package,
# only want py_modules keyword if this is a single-file module,
# so get rid of packages or py_modules keyword as appropriate.
if kwargs["packages"] is None:
del kwargs["packages"]
else:
del kwargs["py_modules"]
apply(_setup, (), kwargs)
else:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
-setup(
- name = NAME,
- version = VERSION,
- license = LICENSE,
- platforms = PLATFORMS,
- classifiers = [c for c in CLASSIFIERS.split("\n") if c],
- install_requires = INSTALL_REQUIRES,
- zip_safe = ZIP_SAFE,
- test_suite = "test",
- author = "John J. Lee",
- author_email = "[email protected]",
- description = doclines[0],
- long_description = "\n".join(doclines[2:]),
- url = "http://wwwsearch.sourceforge.net/%s/" % NAME,
- download_url = ("http://wwwsearch.sourceforge.net/%s/src/"
- "%s-%s.tar.gz" % (NAME, NAME, VERSION)),
- py_modules = py_modules,
- packages = packages,
- )
+def main():
+ setup(
+ name = NAME,
+ version = VERSION,
+ license = LICENSE,
+ platforms = PLATFORMS,
+ classifiers = [c for c in CLASSIFIERS.split("\n") if c],
+ install_requires = INSTALL_REQUIRES,
+ zip_safe = ZIP_SAFE,
+ test_suite = "test",
+ author = "John J. Lee",
+ author_email = "[email protected]",
+ description = doclines[0],
+ long_description = "\n".join(doclines[2:]),
+ url = "http://wwwsearch.sourceforge.net/%s/" % NAME,
+ download_url = ("http://wwwsearch.sourceforge.net/%s/src/"
+ "%s-%s.tar.gz" % (NAME, NAME, VERSION)),
+ py_modules = py_modules,
+ packages = packages,
+ )
+
+
+if __name__ == "__main__":
+ main()
|
Almad/Mechanize
|
1ff146c5c304a29b273d1374fd8388eeb1d32710
|
Fix typo in MANIFEST.in
|
diff --git a/MANIFEST.in b/MANIFEST.in
index 013ddf8..a43d4ef 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,16 +1,16 @@
include MANIFEST.in
include COPYING.txt
include INSTALL.txt
include GeneralFAQ.html
include README.html.in
include README.html
include README.txt
include doc.html.in
include doc.html
include ChangeLog.txt
-include 0.1.0-changes.txt
+include 0.1-changes.txt
include *.py
prune docs-in-progress
recursive-include examples *.py
recursive-include attic *.py
recursive-include test-tools *.py
|
Almad/Mechanize
|
2e763f20a3577cbdb9d8660feeb8dd35c03eb841
|
* Fix ImportError if sqlite3 not available * Fix a couple of functional tests so they don't wait 5 seconds each
|
diff --git a/functional_tests.py b/functional_tests.py
index 38a8800..2b2952e 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,513 +1,514 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
import os, sys, urllib, tempfile, errno
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
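# Hedged example: on POSIX, sanepathname2url("/tmp/some file.html")
# returns "/tmp/some%20file.html", ready to be prefixed with "file://"
# (as test_file_url below does with an absolute path).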
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_refresh(self):
def refresh_request(seconds):
uri = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
val = urllib.quote_plus('%d; url="%s"' % (seconds, self.uri))
return uri + ("?refresh=%s" % val)
+ self.browser.set_handle_refresh(True, honor_time=False)
r = self.browser.open(refresh_request(5))
self.assertEqual(r.geturl(), self.uri)
# Set a maximum refresh time of 30 seconds (these long refreshes tend
# to be there only because the website owner wants you to see the
# latest news, or whatever -- they're not essential to the operation of
# the site, and not really useful or appropriate when scraping).
refresh_uri = refresh_request(60)
self.browser.set_handle_refresh(True, max_time=30., honor_time=True)
r = self.browser.open(refresh_uri)
self.assertEqual(r.geturl(), refresh_uri)
# allow long refreshes (but don't actually wait 60 seconds)
self.browser.set_handle_refresh(True, max_time=None, honor_time=False)
r = self.browser.open(refresh_request(60))
self.assertEqual(r.geturl(), self.uri)
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_local_file(self):
# Since the file: URL scheme is not well standardised, Browser has a
# special method to open files by name, for convenience:
br = mechanize.Browser()
response = br.open_local_file("mechanize/_mechanize.py")
self.assert_("def open_local_file(self, filename):" in
response.get_data())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_seek_wrapper_class_name(self):
opener = mechanize.UserAgent()
opener.set_seekable_responses(True)
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_("HTTPError instance" in repr(exc))
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
+ br.set_handle_refresh(True, honor_time=False)
referer = urljoin(self.uri, "bits/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
-
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CookieJarTests(TestCase):
def test_mozilla_cookiejar(self):
filename = tempfile.mktemp()
try:
def get_cookiejar():
cj = mechanize.MozillaCookieJar(filename=filename)
try:
cj.revert()
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
return cj
def commit(cj):
cj.save()
self._test_cookiejar(get_cookiejar, commit)
finally:
try:
os.remove(filename)
except OSError, exc:
if exc.errno != errno.ENOENT:
raise
def test_firefox3_cookiejar(self):
try:
mechanize.Firefox3CookieJar
except AttributeError:
- # firefox 3 cookiejar is only supported in Python 2.5 and later
- self.assert_(sys.version_info[:2] < (2, 5))
+ # firefox 3 cookiejar is only supported in Python 2.5 and later;
+ # also, sqlite3 must be available
return
filename = tempfile.mktemp()
try:
def get_cookiejar():
hide_experimental_warnings()
try:
cj = mechanize.Firefox3CookieJar(filename=filename)
finally:
reset_experimental_warnings()
cj.connect()
return cj
def commit(cj):
pass
self._test_cookiejar(get_cookiejar, commit)
finally:
os.remove(filename)
def _test_cookiejar(self, get_cookiejar, commit):
cookiejar = get_cookiejar()
br = mechanize.Browser()
br.set_cookiejar(cookiejar)
br.set_handle_refresh(False)
url = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
# no cookie was set on the first request
html = br.open(url).read()
self.assertEquals(html.find("Your browser supports cookies!"), -1)
self.assertEquals(len(cookiejar), 1)
# ... but now we have the cookie
html = br.open(url).read()
self.assert_("Your browser supports cookies!" in html)
commit(cookiejar)
# should still have the cookie when we load afresh
cookiejar = get_cookiejar()
br.set_cookiejar(cookiejar)
html = br.open(url).read()
self.assert_("Your browser supports cookies!" in html)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
If this option doesn't work on Windows/Mac, somebody please
tell me about it, or I'll never find out...
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index 3e17c09..f7fca78 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,128 +1,133 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
import sys
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
# 2.4 raises SyntaxError due to generator / try/finally use
if sys.version_info[:2] > (2,4):
- from _firefox3cookiejar import Firefox3CookieJar
+ try:
+ import sqlite3
+ except ImportError:
+ pass
+ else:
+ from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
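The hunk above makes Firefox3CookieJar an optional export: on Python 2.5 and later it is imported only when the sqlite3 module is actually available. A minimal standalone sketch of that import-guard pattern, with names mirroring the diff (an illustration, not the packaged file):

import sys

# Guarded optional import: only bind Firefox3CookieJar when sqlite3
# (new in Python 2.5) can be imported; otherwise the name is simply
# never defined in the package namespace.
if sys.version_info[:2] > (2, 4):
    try:
        import sqlite3
    except ImportError:
        pass  # no sqlite3: skip the sqlite-backed cookie jar entirely
    else:
        from _firefox3cookiejar import Firefox3CookieJar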
diff --git a/mechanize/_firefox3cookiejar.py b/mechanize/_firefox3cookiejar.py
index 17ffba8..34fe979 100644
--- a/mechanize/_firefox3cookiejar.py
+++ b/mechanize/_firefox3cookiejar.py
@@ -1,253 +1,249 @@
"""Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
+import sqlite3
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike, experimental
debug = logging.getLogger("mechanize.cookies").debug
-try:
- import sqlite3
-except ImportError:
- pass
-else:
- class Firefox3CookieJar(CookieJar):
+class Firefox3CookieJar(CookieJar):
- """Firefox 3 cookie jar.
+ """Firefox 3 cookie jar.
- The cookies are stored in Firefox 3's "cookies.sqlite" format.
+ The cookies are stored in Firefox 3's "cookies.sqlite" format.
- Constructor arguments:
+ Constructor arguments:
- filename: filename of cookies.sqlite (typically found at the top level
- of a firefox profile directory)
- autoconnect: as a convenience, connect to the SQLite cookies database at
- Firefox3CookieJar construction time (default True)
- policy: an object satisfying the mechanize.CookiePolicy interface
+ filename: filename of cookies.sqlite (typically found at the top level
+ of a firefox profile directory)
+ autoconnect: as a convenience, connect to the SQLite cookies database at
+ Firefox3CookieJar construction time (default True)
+ policy: an object satisfying the mechanize.CookiePolicy interface
- Note that this is NOT a FileCookieJar, and there are no .load(),
- .save() or .restore() methods. The database is in sync with the
- cookiejar object's state after each public method call.
+ Note that this is NOT a FileCookieJar, and there are no .load(),
+ .save() or .restore() methods. The database is in sync with the
+ cookiejar object's state after each public method call.
- Following Firefox's own behaviour, session cookies are never saved to
- the database.
+ Following Firefox's own behaviour, session cookies are never saved to
+ the database.
- The file is created, and an sqlite database written to it, if it does
- not already exist. The moz_cookies database table is created if it does
- not already exist.
- """
+ The file is created, and an sqlite database written to it, if it does
+ not already exist. The moz_cookies database table is created if it does
+ not already exist.
+ """
- # XXX
- # handle DatabaseError exceptions
- # add a FileCookieJar (explicit .save() / .revert() / .load() methods)
+ # XXX
+ # handle DatabaseError exceptions
+ # add a FileCookieJar (explicit .save() / .revert() / .load() methods)
- def __init__(self, filename, autoconnect=True, policy=None):
- experimental("Firefox3CookieJar is experimental code")
- CookieJar.__init__(self, policy)
- if filename is not None and not isstringlike(filename):
- raise ValueError("filename must be string-like")
- self.filename = filename
- self._conn = None
- if autoconnect:
- self.connect()
+ def __init__(self, filename, autoconnect=True, policy=None):
+ experimental("Firefox3CookieJar is experimental code")
+ CookieJar.__init__(self, policy)
+ if filename is not None and not isstringlike(filename):
+ raise ValueError("filename must be string-like")
+ self.filename = filename
+ self._conn = None
+ if autoconnect:
+ self.connect()
- def connect(self):
- self._conn = sqlite3.connect(self.filename)
- self._conn.isolation_level = "DEFERRED"
- self._create_table_if_necessary()
+ def connect(self):
+ self._conn = sqlite3.connect(self.filename)
+ self._conn.isolation_level = "DEFERRED"
+ self._create_table_if_necessary()
- def close(self):
- self._conn.close()
+ def close(self):
+ self._conn.close()
- def _transaction(self, func):
- try:
- cur = self._conn.cursor()
- try:
- result = func(cur)
- finally:
- cur.close()
- except:
- self._conn.rollback()
- raise
- else:
- self._conn.commit()
- return result
-
- def _execute(self, query, params=()):
- return self._transaction(lambda cur: cur.execute(query, params))
-
- def _query(self, query, params=()):
- # XXX should we bother with a transaction?
+ def _transaction(self, func):
+ try:
cur = self._conn.cursor()
try:
- cur.execute(query, params)
- for row in cur.fetchall():
- yield row
+ result = func(cur)
finally:
cur.close()
-
- def _create_table_if_necessary(self):
- self._execute("""\
+ except:
+ self._conn.rollback()
+ raise
+ else:
+ self._conn.commit()
+ return result
+
+ def _execute(self, query, params=()):
+ return self._transaction(lambda cur: cur.execute(query, params))
+
+ def _query(self, query, params=()):
+ # XXX should we bother with a transaction?
+ cur = self._conn.cursor()
+ try:
+ cur.execute(query, params)
+ for row in cur.fetchall():
+ yield row
+ finally:
+ cur.close()
+
+ def _create_table_if_necessary(self):
+ self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT,expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
- def _cookie_from_row(self, row):
- (pk, name, value, domain, path, expires,
- last_accessed, secure, http_only) = row
-
- version = 0
- domain = domain.encode("ascii", "ignore")
- path = path.encode("ascii", "ignore")
- name = name.encode("ascii", "ignore")
- value = value.encode("ascii", "ignore")
- secure = bool(secure)
-
- # last_accessed isn't a cookie attribute, so isn't added to rest
- rest = {}
- if http_only:
- rest["HttpOnly"] = None
-
- if name == "":
- name = value
- value = None
-
- initial_dot = domain.startswith(".")
- domain_specified = initial_dot
-
- discard = False
- if expires == "":
- expires = None
- discard = True
-
- return Cookie(version, name, value,
- None, False,
- domain, domain_specified, initial_dot,
- path, False,
- secure,
- expires,
- discard,
- None,
- None,
- rest)
-
- def clear(self, domain=None, path=None, name=None):
- CookieJar.clear(self, domain, path, name)
- where_parts = []
- sql_params = []
- if domain is not None:
- where_parts.append("host = ?")
- sql_params.append(domain)
- if path is not None:
- where_parts.append("path = ?")
- sql_params.append(path)
- if name is not None:
- where_parts.append("name = ?")
- sql_params.append(name)
- where = " AND ".join(where_parts)
- if where:
- where = " WHERE " + where
- def clear(cur):
- cur.execute("DELETE FROM moz_cookies%s" % where,
- tuple(sql_params))
- self._transaction(clear)
-
- def _row_from_cookie(self, cookie, cur):
- expires = cookie.expires
- if cookie.discard:
- expires = ""
-
- domain = unicode(cookie.domain)
- path = unicode(cookie.path)
- name = unicode(cookie.name)
- value = unicode(cookie.value)
- secure = bool(int(cookie.secure))
-
- if value is None:
- value = name
- name = ""
-
- last_accessed = int(time.time())
- http_only = cookie.has_nonstandard_attr("HttpOnly")
-
- query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
- pk = query.fetchone()[0]
- if pk is None:
- pk = 1
-
- return (pk, name, value, domain, path, expires,
- last_accessed, secure, http_only)
-
- def set_cookie(self, cookie):
- if cookie.discard:
- CookieJar.set_cookie(self, cookie)
- return
-
- def set_cookie(cur):
- # XXX
- # is this RFC 2965-correct?
- # could this do an UPDATE instead?
- row = self._row_from_cookie(cookie, cur)
- name, unused, domain, path = row[1:5]
- cur.execute("""\
+ def _cookie_from_row(self, row):
+ (pk, name, value, domain, path, expires,
+ last_accessed, secure, http_only) = row
+
+ version = 0
+ domain = domain.encode("ascii", "ignore")
+ path = path.encode("ascii", "ignore")
+ name = name.encode("ascii", "ignore")
+ value = value.encode("ascii", "ignore")
+ secure = bool(secure)
+
+ # last_accessed isn't a cookie attribute, so isn't added to rest
+ rest = {}
+ if http_only:
+ rest["HttpOnly"] = None
+
+ if name == "":
+ name = value
+ value = None
+
+ initial_dot = domain.startswith(".")
+ domain_specified = initial_dot
+
+ discard = False
+ if expires == "":
+ expires = None
+ discard = True
+
+ return Cookie(version, name, value,
+ None, False,
+ domain, domain_specified, initial_dot,
+ path, False,
+ secure,
+ expires,
+ discard,
+ None,
+ None,
+ rest)
+
+ def clear(self, domain=None, path=None, name=None):
+ CookieJar.clear(self, domain, path, name)
+ where_parts = []
+ sql_params = []
+ if domain is not None:
+ where_parts.append("host = ?")
+ sql_params.append(domain)
+ if path is not None:
+ where_parts.append("path = ?")
+ sql_params.append(path)
+ if name is not None:
+ where_parts.append("name = ?")
+ sql_params.append(name)
+ where = " AND ".join(where_parts)
+ if where:
+ where = " WHERE " + where
+ def clear(cur):
+ cur.execute("DELETE FROM moz_cookies%s" % where,
+ tuple(sql_params))
+ self._transaction(clear)
+
+ def _row_from_cookie(self, cookie, cur):
+ expires = cookie.expires
+ if cookie.discard:
+ expires = ""
+
+ domain = unicode(cookie.domain)
+ path = unicode(cookie.path)
+ name = unicode(cookie.name)
+ value = unicode(cookie.value)
+ secure = bool(int(cookie.secure))
+
+ if value is None:
+ value = name
+ name = ""
+
+ last_accessed = int(time.time())
+ http_only = cookie.has_nonstandard_attr("HttpOnly")
+
+ query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
+ pk = query.fetchone()[0]
+ if pk is None:
+ pk = 1
+
+ return (pk, name, value, domain, path, expires,
+ last_accessed, secure, http_only)
+
+ def set_cookie(self, cookie):
+ if cookie.discard:
+ CookieJar.set_cookie(self, cookie)
+ return
+
+ def set_cookie(cur):
+ # XXX
+ # is this RFC 2965-correct?
+ # could this do an UPDATE instead?
+ row = self._row_from_cookie(cookie, cur)
+ name, unused, domain, path = row[1:5]
+ cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
- (domain, path, name))
- cur.execute("""\
+ (domain, path, name))
+ cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
- self._transaction(set_cookie)
-
- def __iter__(self):
- # session (non-persistent) cookies
- for cookie in MappingIterator(self._cookies):
- yield cookie
- # persistent cookies
- for row in self._query("""\
+ self._transaction(set_cookie)
+
+ def __iter__(self):
+ # session (non-persistent) cookies
+ for cookie in MappingIterator(self._cookies):
+ yield cookie
+ # persistent cookies
+ for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
- yield self._cookie_from_row(row)
-
- def _cookies_for_request(self, request):
- session_cookies = CookieJar._cookies_for_request(self, request)
- def get_cookies(cur):
- query = cur.execute("SELECT host from moz_cookies")
- domains = [row[0] for row in query.fetchmany()]
- cookies = []
- for domain in domains:
- cookies += self._persistent_cookies_for_domain(domain,
- request, cur)
- return cookies
- persistent_coookies = self._transaction(get_cookies)
- return session_cookies + persistent_coookies
-
- def _persistent_cookies_for_domain(self, domain, request, cur):
+ yield self._cookie_from_row(row)
+
+ def _cookies_for_request(self, request):
+ session_cookies = CookieJar._cookies_for_request(self, request)
+ def get_cookies(cur):
+ query = cur.execute("SELECT host from moz_cookies")
+ domains = [row[0] for row in query.fetchmany()]
cookies = []
- if not self._policy.domain_return_ok(domain, request):
- return []
- debug("Checking %s for cookies to return", domain)
- query = cur.execute("""\
+ for domain in domains:
+ cookies += self._persistent_cookies_for_domain(domain,
+ request, cur)
+ return cookies
+        persistent_cookies = self._transaction(get_cookies)
+        return session_cookies + persistent_cookies
+
+ def _persistent_cookies_for_domain(self, domain, request, cur):
+ cookies = []
+ if not self._policy.domain_return_ok(domain, request):
+ return []
+ debug("Checking %s for cookies to return", domain)
+ query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
- (domain,))
- cookies = [self._cookie_from_row(row) for row in query.fetchmany()]
- last_path = None
- r = []
- for cookie in cookies:
- if (cookie.path != last_path and
- not self._policy.path_return_ok(cookie.path, request)):
- last_path = cookie.path
- continue
- if not self._policy.return_ok(cookie, request):
- debug(" not returning cookie")
- continue
- debug(" it's a match")
- r.append(cookie)
- return r
+ (domain,))
+ cookies = [self._cookie_from_row(row) for row in query.fetchmany()]
+ last_path = None
+ r = []
+ for cookie in cookies:
+ if (cookie.path != last_path and
+ not self._policy.path_return_ok(cookie.path, request)):
+ last_path = cookie.path
+ continue
+ if not self._policy.return_ok(cookie, request):
+ debug(" not returning cookie")
+ continue
+ debug(" it's a match")
+ r.append(cookie)
+ return r
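With the guard moved into __init__.py, the class body above is dedented one level but otherwise unchanged. For orientation, a hypothetical usage sketch (the profile path is a placeholder; the class is reachable as an attribute of the mechanize package when sqlite3 is available, though it is not listed in __all__):

import mechanize

# Placeholder path: point this at a real Firefox 3 profile directory.
cj = mechanize.Firefox3CookieJar("/path/to/profile/cookies.sqlite")

br = mechanize.Browser()
br.set_cookiejar(cj)  # requests now read and write Firefox's cookie database
br.open("http://www.example.com/")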
|
Almad/Mechanize
|
1d5485e27d2e5f7eac2b284ca003015c698b5da4
|
Fix web page regarding beta status
|
diff --git a/README.html.in b/README.html.in
index 0df011b..2b7df0f 100644
--- a/README.html.in
+++ b/README.html.in
@@ -1,606 +1,606 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
@# This file is processed by EmPy: do not edit
@# http://wwwsearch.sf.net/bits/colorize.py
@{
from colorize import colorize
import time
import release
last_modified = release.svn_id_to_time("$Id$")
try:
base
except NameError:
base = False
}
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<meta name="author" content="John J. Lee <jjl@@pobox.com>">
<meta name="date" content="@(time.strftime("%Y-%m-%d", last_modified))">
<meta name="keywords" content="Python,HTML,HTTP,browser,stateful,web,client,client-side,mechanize,cookie,form,META,HTTP-EQUIV,Refresh,ClientForm,ClientCookie,pullparser,WWW::Mechanize">
<meta name="keywords" content="cookie,HTTP,Python,web,client,client-side,HTML,META,HTTP-EQUIV,Refresh">
<title>mechanize</title>
<style type="text/css" media="screen">@@import "../styles/style.css";</style>
@[if base]<base href="http://wwwsearch.sourceforge.net/mechanize/">@[end if]
</head>
<body>
<div id="sf"><a href="http://sourceforge.net">
<img src="http://sourceforge.net/sflogo.php?group_id=48205&type=2"
width="125" height="37" alt="SourceForge.net Logo"></a></div>
<!--<img src="../images/sflogo.png"-->
<h1>mechanize</h1>
<div id="Content">
<p>Stateful programmatic web browsing in Python, after Andy Lester's Perl
module <a
href="http://search.cpan.org/dist/WWW-Mechanize/"><code>WWW::Mechanize</code>
</a>.
<ul>
<li><code>mechanize.Browser</code> is a subclass of
<code>mechanize.UserAgentBase</code>, which is, in turn, a subclass of
<code>urllib2.OpenerDirector</code> (in fact, of
<code>mechanize.OpenerDirector</code>), so:
<ul>
<li>any URL can be opened, not just <code>http:</code>
<li><code>mechanize.UserAgentBase</code> offers easy dynamic
configuration of user-agent features like protocol, cookie,
redirection and <code>robots.txt</code> handling, without having
to make a new <code>OpenerDirector</code> each time, e.g. by
calling <code>build_opener()</code>.
</ul>
<li>Easy HTML form filling, using <a href="../ClientForm/">ClientForm</a>
interface.
<li>Convenient link parsing and following.
<li>Browser history (<code>.back()</code> and <code>.reload()</code>
methods).
<li>The <code>Referer</code> HTTP header is added properly (optional).
<li>Automatic observance of <a
href="http://www.robotstxt.org/wc/norobots.html">
<code>robots.txt</code></a>.
<li>Automatic handling of HTTP-Equiv and Refresh.
</ul>
<a name="examples"></a>
<h2>Examples</h2>
<p class="docwarning">This documentation is in need of reorganisation and
extension!</p>
<p>The two examples below are just to give the gist.  There are also some <a
href="./#tests">actual working examples</a>.
@{colorize(r"""
import re
from mechanize import Browser
br = Browser()
br.open("http://www.example.com/")
# follow second link with element text matching regular expression
response1 = br.follow_link(text_regex=r"cheese\s*shop", nr=1)
assert br.viewing_html()
print br.title()
print response1.geturl()
print response1.info() # headers
print response1.read() # body
response1.close() # (shown for clarity; in fact Browser does this for you)
br.select_form(name="order")
# Browser passes through unknown attributes (including methods)
# to the selected HTMLForm (from ClientForm).
br["cheeses"] = ["mozzarella", "caerphilly"] # (the method here is __setitem__)
response2 = br.submit() # submit current form
# print currently selected form (don't call .submit() on this, use br.submit())
print br.form
response3 = br.back() # back to cheese shop (same data as response1)
# the history mechanism returns cached response objects
# we can still use the response, even though we closed it:
response3.seek(0)
response3.read()
response4 = br.reload() # fetches from server
for form in br.forms():
print form
# .links() optionally accepts the keyword args of .follow_/.find_link()
for link in br.links(url_regex="python.org"):
print link
br.follow_link(link) # takes EITHER Link instance OR keyword args
br.back()
""")}
<p>You may control the browser's policy by using the methods of
<code>mechanize.Browser</code>'s base class, <code>mechanize.UserAgent</code>.
For example:
@{colorize("""
br = Browser()
# Explicitly configure proxies (Browser will attempt to set good defaults).
# Note the userinfo ("joe:password@") and port number (":3128") are optional.
br.set_proxies({"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com",
})
# Add HTTP Basic/Digest auth username and password for HTTP proxy access.
# (equivalent to using "joe:password@..." form above)
br.add_proxy_password("joe", "password")
# Add HTTP Basic/Digest auth username and password for website access.
br.add_password("http://example.com/protected/", "joe", "password")
# Don't handle HTTP-EQUIV headers (HTTP headers embedded in HTML).
br.set_handle_equiv(False)
# Ignore robots.txt. Do not do this without thought and consideration.
br.set_handle_robots(False)
# Don't add Referer (sic) header
br.set_handle_referer(False)
# Don't handle Refresh redirections
br.set_handle_refresh(False)
# Don't handle cookies
br.set_cookiejar()
# Supply your own mechanize.CookieJar (NOTE: cookie handling is ON by
# default: no need to do this unless you have some reason to use a
# particular cookiejar)
br.set_cookiejar(cj)
# Log information about HTTP redirects and Refreshes.
br.set_debug_redirects(True)
# Log HTTP response bodies (ie. the HTML, most of the time).
br.set_debug_responses(True)
# Print HTTP headers.
br.set_debug_http(True)
# To make sure you're seeing all debug output:
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
# Sometimes it's useful to process bad headers or bad HTML:
response = br.response() # this is a copy of response
headers = response.info() # currently, this is a mimetools.Message
headers["Content-type"] = "text/html; charset=utf-8"
response.set_data(response.get_data().replace("<!---", "<!--"))
br.set_response(response)
""")}
<p>mechanize exports the complete interface of <code>urllib2</code>:
@{colorize("""
import mechanize
response = mechanize.urlopen("http://www.example.com/")
print response.read()
""")}
<p>so anything you would normally import from <code>urllib2</code> can
(and should, by preference, to insulate you from future changes) be
imported from mechanize instead. In many cases if you import an
object from mechanize it will be the very same object you would get if
you imported from urllib2. In many other cases, though, the
implementation comes from mechanize, either because bug fixes have
been applied or the functionality of urllib2 has been extended in some
way.
<a name="useragentbase"></a>
<h2>UserAgent vs UserAgentBase</h2>
<p><code>mechanize.UserAgent</code> is a trivial subclass of
<code>mechanize.UserAgentBase</code>, adding just one method,
<code>.set_seekable_responses()</code> (see the <a
href="./doc.html#seekable">documentation on seekable responses</a>).
<p>The reason for the extra class is that
<code>mechanize.Browser</code> depends on seekable response objects
(because response objects are used to implement the browser history).
<a name="compatnotes"></a>
<h2>Compatibility</h2>
<p>These notes explain the relationship between mechanize, ClientCookie,
<code>cookielib</code> and <code>urllib2</code>, and which to use when. If
you're just using mechanize, and not any of those other libraries, you can
ignore this section.
<ol>
<li>mechanize works with Python 2.3, Python 2.4 and Python 2.5.
<li>ClientCookie is no longer maintained as a separate package. The code is
now part of mechanize, and its interface is now exported through module
mechanize (since mechanize 0.1.0). Old code can simply be changed to
<code>import mechanize as ClientCookie</code> and should continue to
work.
<li>The cookie handling parts of mechanize are in Python 2.4 standard library
as module <code>cookielib</code> and extensions to module
<code>urllib2</code>.
</ol>
<p><strong>IMPORTANT:</strong> The following are the ONLY cases where
<code>mechanize</code> and <code>urllib2</code> code are intended to work
together. For all other code, use mechanize
<em><strong>exclusively</strong></em>: do NOT mix use of mechanize and
<code>urllib2</code>!
<ol>
<li>Handler classes that are missing from 2.4's <code>urllib2</code>
(e.g. <code>HTTPRefreshProcessor</code>, <code>HTTPEquivProcessor</code>,
<code>HTTPRobotRulesProcessor</code>) may be used with the
<code>urllib2</code> of Python 2.4 or newer. There are not currently any
functional tests for this in mechanize, however, so this feature may be
broken.
<li>If you want to use <code>mechanize.RefreshProcessor</code> with Python >=
2.4's <code>urllib2</code>, you must also use
<code>mechanize.HTTPRedirectHandler</code>.
<li><code>mechanize.HTTPRefererProcessor</code> requires special support from
<code>mechanize.Browser</code>, so cannot be used with vanilla
<code>urllib2</code>.
<li><code>mechanize.HTTPRequestUpgradeProcessor</code> and
<code>mechanize.ResponseUpgradeProcessor</code> are not useful outside of
mechanize.
<li>Request and response objects from code based on <code>urllib2</code> work
with mechanize, and vice-versa.
<li>The classes and functions exported by mechanize in its public interface
that come straight from <code>urllib2</code>
(e.g. <code>FTPHandler</code>, at the time of writing) do work with
mechanize (duh ;-). Exactly which of these classes and functions come
straight from <code>urllib2</code> without extension or modification will
change over time, though, so don't rely on it; instead, just import
everything you need from mechanize, never from <code>urllib2</code>. The
exception is usage as described in the first item in this list, which is
explicitly OK (though not well tested ATM), subject to the other
restrictions in the list above.
</ol>
<a name="docs"></a>
<h2>Documentation</h2>
<p>Full documentation is in the docstrings.
<p>The documentation in the web pages is in need of reorganisation at the
moment, after the merge of ClientCookie into mechanize.
<a name="credits"></a>
<h2>Credits</h2>
<p>Thanks to all the too-numerous-to-list people who reported bugs and provided
patches. Also thanks to Ian Bicking, for persuading me that a
<code>UserAgent</code> class would be useful, and to Ronald Tschalar for advice
on Netscape cookies.
<p>A lot of credit must go to Gisle Aas, who wrote libwww-perl, from which
large parts of mechanize originally derived, and Andy Lester for the original,
<a href="http://search.cpan.org/dist/WWW-Mechanize/"><code>WWW::Mechanize</code>
</a>. Finally, thanks to the (coincidentally-named) Johnny Lee for the MSIE
CookieJar Perl code from which mechanize's support for that is derived.
<a name="todo"></a>
<h2>To do</h2>
<p>Contributions welcome!
<p>The documentation to-do list has moved to the new "docs-in-progress"
directory in SVN.
<p><em>This is <strong>very</strong> roughly in order of priority</em>
<ul>
<li>Test <code>.any_response()</code> two handlers case: ordering.
<li>Test referer bugs (frags and don't add in redirect unless orig
req had Referer)
<li>Remove use of urlparse from _auth.py.
<li>Proper XHTML support!
<li>Fix BeautifulSoup support to use a single BeautifulSoup instance
per page.
<li>Test BeautifulSoup support better / fix encoding issue.
<li>Support BeautifulSoup 3.
<li>Add another History implementation or two and finalise interface.
<li>History cache expiration.
<li>Investigate possible leak further (see Balazs Ree's list posting).
<li>Make <code>EncodingFinder</code> public, I guess (but probably
improve it first). (For example: support Mark Pilgrim's universal
encoding detector?)
<li>Add two-way links between BeautifulSoup & ClientForm object
models.
<li>In 0.2: switch to Python unicode strings everywhere appropriate
(HTTP level should still use byte strings, of course).
<li><code>clean_url()</code>: test browser behaviour. I <em>think</em>
this is correct...
<li>Use a nicer RFC 3986 join / split / unsplit implementation.
<li>Figure out the Right Thing (if such a thing exists) for %-encoding.
<li>How do IRIs fit into the world?
<li>IDNA -- must read about security stuff first.
<li>Unicode support in general.
<li>Provide per-connection access to timeouts.
<li>Keep-alive / connection caching.
<li>Pipelining??
<li>Content negotiation.
<li>gzip transfer encoding (there's already a handler for this in
mechanize, but it's poorly implemented ATM).
<li>proxy.pac parsing (I don't think this needs JS interpretation)
<li>Topological sort for handlers, instead of .handler_order
attribute. Ordering and other dependencies (where unavoidable)
should be defined separate from handlers themselves. Add new
build_opener and deprecate the old one? Actually, _useragent is
probably not far off what I'd have in mind (would just need a
method or two and a base class adding I think), and it's not a high
priority since I guess most people will just use the UserAgent and
Browser classes.
</ul>
<a name="download"></a>
<h2>Getting mechanize</h2>
<p>You can install the <a href="./#source">old-fashioned way</a>, or using <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall</a>. I
recommend the latter even though EasyInstall is still in alpha, because it will
automatically ensure you have the necessary dependencies, downloading if
necessary.
<p><a href="./#svn">Subversion (SVN) access</a> is also available.
<p>Since EasyInstall is new, I include some instructions below, but mechanize
follows standard EasyInstall / <code>setuptools</code> conventions, so you
should refer to the <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall</a> and
<a href="http://peak.telecommunity.com/DevCenter/setuptools">setuptools</a>
documentation if you need more detailed or up-to-date instructions.
<h2>EasyInstall / setuptools</h2>
<p>The benefit of EasyInstall and the new <code>setuptools</code>-supporting
<code>setup.py</code> is that they grab all dependencies for you. Also, using
EasyInstall is a one-liner for the common case, to be compared with the usual
download-unpack-install cycle with <code>setup.py</code>.
<h3>Using EasyInstall to download and install mechanize</h3>
<ol>
<li><a href="http://peak.telecommunity.com/DevCenter/EasyInstall#installing-easy-install">
Install easy_install</a>
<li><code>easy_install mechanize</code>
</ol>
<p>If you're on a Unix-like OS, you may need root permissions for that last
step (or see the <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall
documentation</a> for other installation options).
<p>If you already have mechanize installed as a <a
href="http://peak.telecommunity.com/DevCenter/PythonEggs">Python Egg</a> (as
you do if you installed using EasyInstall, or using <code>setup.py
install</code> from mechanize 0.0.10a or newer), you can upgrade to the latest
version using:
<pre>easy_install --upgrade mechanize</pre>
<p>You may want to read up on the <code>-m</code> option to
<code>easy_install</code>, which lets you install multiple versions of a
package.
<a name="svnhead"></a>
<h3>Using EasyInstall to download and install the latest in-development (SVN HEAD) version of mechanize</h3>
<pre>easy_install "mechanize==dev"</pre>
<p>Note that that will not necessarily grab the SVN versions of dependencies,
such as ClientForm: It will use SVN to fetch dependencies if and only if the
SVN HEAD version of mechanize declares itself to depend on the SVN versions of
those dependencies; even then, those declared dependencies won't necessarily be
on SVN HEAD, but rather a particular revision. If you want SVN HEAD for a
dependency project, you should ask for it explicitly by running
<code>easy_install "projectname==dev"</code> for that project.
<p>Note also that you can still carry on using a plain old SVN checkout as
usual if you like.
<h3>Using setup.py from a .tar.gz, .zip or an SVN checkout to download and install mechanize</h3>
<p><code>setup.py</code> should correctly resolve and download dependencies:
<pre>python setup.py install</pre>
<p>Or, to get access to the same options that <code>easy_install</code>
accepts, use the <code>easy_install</code> distutils command instead of
<code>install</code> (see <code>python setup.py --help easy_install</code>)
<pre>python setup.py easy_install mechanize</pre>
<a name="source"></a>
<h2>Download</h2>
<p>All documentation (including this web page) is included in the distribution.
-<p>This is a beta release: there will be bugs.
+<p>This is a stable release.
<p><em>Development release.</em>
<ul>
@{version = "0.1.8"}
<li><a href="./src/mechanize-@(version).tar.gz">mechanize-@(version).tar.gz</a>
<li><a href="./src/mechanize-@(version).zip">mechanize-@(version).zip</a>
<li><a href="./src/ChangeLog.txt">Change Log</a> (included in distribution)
<li><a href="./src/">Older versions.</a>
</ul>
<p>For old-style installation instructions, see the INSTALL file included in
the distribution. Better, <a href="./#download">use EasyInstall</a>.
<a name="svn"></a>
<h2>Subversion</h2>
<p>The <a href="http://subversion.tigris.org/">Subversion (SVN)</a> trunk is <a href="http://codespeak.net/svn/wwwsearch/mechanize/trunk#egg=mechanize-dev">http://codespeak.net/svn/wwwsearch/mechanize/trunk</a>, so to check out the source:
<pre>
svn co http://codespeak.net/svn/wwwsearch/mechanize/trunk mechanize
</pre>
<a name="tests"></a>
<h2>Tests and examples</h2>
<h3>Examples</h3>
<p>The <code>examples</code> directory in the <a href="./#source">source
packages</a> contains a couple of silly, but working, scripts to demonstrate
basic use of the module. Note that it's in the nature of web scraping for such
scripts to break, so don't be too surprised if that happens &ndash; do let me
know, though!
<p>It's worth knowing also that the examples on the <a
href="../ClientForm/">ClientForm web page</a> are useful for mechanize users,
and are now real runnable scripts rather than just documentation.
<h3>Functional tests</h3>
<p>To run the functional tests (which <strong>do</strong> access the network),
run the following
command:
<pre>python functional_tests.py</pre>
<h3>Unit tests</h3>
<p>Note that ClientForm (a dependency of mechanize) has its own unit tests,
which must be run separately.
<p>To run the unit tests (none of which access the network), run the following
command:
<pre>python test.py</pre>
<p>This runs the tests against the source files extracted from the
package. For help on command line options:
<pre>python test.py --help</pre>
<h2>See also</h2>
<p>There are several wrappers around mechanize designed for functional testing
of web applications:
<ul>
<li><a href="http://cheeseshop.python.org/pypi?:action=display&name=zope.testbrowser">
<code>zope.testbrowser</code></a> (or
<a href="http://cheeseshop.python.org/pypi?%3Aaction=display&name=ZopeTestbrowser">
<code>ZopeTestBrowser</code></a>, the standalone version).
<li><a href="http://www.idyll.org/~t/www-tools/twill.html">twill</a>.
</ul>
<p>Richard Jones' <a href="http://mechanicalcat.net/tech/webunit/">webunit</a>
(this is not the same as Steven Purcell's <a
href="http://webunit.sourceforge.net/">code of the same name</a>). webunit and
mechanize are quite similar. On the minus side, webunit is missing things like
browser history, high-level forms and links handling, thorough cookie handling,
refresh redirection, adding of the Referer header, observance of robots.txt and
easy extensibility. On the plus side, webunit has a bunch of utility functions
bound up in its WebFetcher class, which look useful for writing tests (though
they'd be easy to duplicate using mechanize). In general, webunit has more of
a frameworky emphasis, with aims limited to writing tests, where mechanize and
the modules it depends on try hard to be general-purpose libraries.
<p>There are many related links in the <a
href="../bits/GeneralFAQ.html">General FAQ</a> page, too.
<a name="faq"></a>
<h2>FAQs - pre install</h2>
<ul>
<li>Which version of Python do I need?
<p>2.3 or above.
<li>What else do I need?
<p>mechanize depends on <a href="../ClientForm/">ClientForm</a>.
<li>Does mechanize depend on BeautifulSoup?
<p>No.  mechanize offers a few (still rather experimental) classes that make
use of BeautifulSoup, but these classes are not required to use mechanize.
mechanize bundles BeautifulSoup version 2, so that module is no longer
required. A future version of mechanize will support BeautifulSoup
version 3, at which point mechanize will likely no longer bundle the
module.
<p>The versions of those required modules are listed in the
<code>setup.py</code> for mechanize (included with the download). The
dependencies are automatically fetched by <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall</a>
(or by <a href="./#source">downloading</a> a mechanize source package and
running <code>python setup.py install</code>). If you like you can fetch
and install them manually, instead – see the <code>INSTALL.txt</code>
file (included with the distribution).
<li>Which license?
<p>mechanize is dual-licensed: you may pick either the
<a href="http://www.opensource.org/licenses/bsd-license.php">BSD license</a>,
or the <a href="http://www.zope.org/Resources/ZPL">ZPL 2.1</a> (both are
included in the distribution).
</ul>
<a name="usagefaq"></a>
<h2>FAQs - usage</h2>
<ul>
<li>I'm not getting the HTML page I expected to see.
<ul>
<li><a href="http://wwwsearch.sourceforge.net/mechanize/doc.html#debugging">Debugging tips</a>
<li><a href="http://wwwsearch.sourceforge.net/bits/GeneralFAQ.html">More tips</a>
</ul>
<li>I'm <strong><em>sure</em></strong> this page is HTML, why does
<code>mechanize.Browser</code> think otherwise?
@{colorize("""
b = mechanize.Browser(
# mechanize's XHTML support needs work, so is currently switched off. If
# we want to get our work done, we have to turn it on by supplying a
# mechanize.Factory (with XHTML support turned on):
factory=mechanize.DefaultFactory(i_want_broken_xhtml_support=True)
)
""")}
</ul>
<p>I prefer questions and comments to be sent to the <a
href="http://lists.sourceforge.net/lists/listinfo/wwwsearch-general">
mailing list</a> rather than direct to me.
<p><a href="mailto:jjl@@pobox.com">John J. Lee</a>,
@(time.strftime("%B %Y", last_modified)).
<hr>
</div>
<div id="Menu">
@(release.navbar('mechanize'))
<br>
<a href="./#examples">Examples</a><br>
<a href="./#compatnotes">Compatibility</a><br>
<a href="./#docs">Documentation</a><br>
<a href="./#todo">To-do</a><br>
<a href="./#download">Download</a><br>
<a href="./#svn">Subversion</a><br>
<a href="./#tests">More examples</a><br>
<a href="./#faq">FAQs</a><br>
</div>
</body>
</html>
|
Almad/Mechanize
|
b990f9e1210be4665bad848d66b6b0fe5fb6ca94
|
Add a setup.cfg.release
|
diff --git a/setup.cfg.release b/setup.cfg.release
new file mode 100644
index 0000000..e69de29
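The added setup.cfg.release is empty (e69de29 is git's hash for the empty blob). A common setuptools convention, assumed here rather than shown by this commit, is for such a file to replace setup.cfg at release time and clear the egg_info development tag:

# Hypothetical setup.cfg.release contents (an assumption: the file this
# commit adds is empty).  Clearing tag_build drops any "dev"/"b" suffix
# that egg_info would otherwise append to the version.
[egg_info]
tag_build =
tag_svn_revision = 0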
|
Almad/Mechanize
|
52bdccb046b2e1869bc2f6ae98dc31b0934f6011
|
Make the next release version 0.1.8 (no "b" for beta)
|
diff --git a/README.html.in b/README.html.in
index bc5f721..0df011b 100644
--- a/README.html.in
+++ b/README.html.in
@@ -1,606 +1,606 @@
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
@# This file is processed by EmPy: do not edit
@# http://wwwsearch.sf.net/bits/colorize.py
@{
from colorize import colorize
import time
import release
last_modified = release.svn_id_to_time("$Id$")
try:
base
except NameError:
base = False
}
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=ISO-8859-1">
<meta name="author" content="John J. Lee <jjl@@pobox.com>">
<meta name="date" content="@(time.strftime("%Y-%m-%d", last_modified))">
<meta name="keywords" content="Python,HTML,HTTP,browser,stateful,web,client,client-side,mechanize,cookie,form,META,HTTP-EQUIV,Refresh,ClientForm,ClientCookie,pullparser,WWW::Mechanize">
<meta name="keywords" content="cookie,HTTP,Python,web,client,client-side,HTML,META,HTTP-EQUIV,Refresh">
<title>mechanize</title>
<style type="text/css" media="screen">@@import "../styles/style.css";</style>
@[if base]<base href="http://wwwsearch.sourceforge.net/mechanize/">@[end if]
</head>
<body>
<div id="sf"><a href="http://sourceforge.net">
<img src="http://sourceforge.net/sflogo.php?group_id=48205&type=2"
width="125" height="37" alt="SourceForge.net Logo"></a></div>
<!--<img src="../images/sflogo.png"-->
<h1>mechanize</h1>
<div id="Content">
<p>Stateful programmatic web browsing in Python, after Andy Lester's Perl
module <a
href="http://search.cpan.org/dist/WWW-Mechanize/"><code>WWW::Mechanize</code>
</a>.
<ul>
<li><code>mechanize.Browser</code> is a subclass of
<code>mechanize.UserAgentBase</code>, which is, in turn, a subclass of
<code>urllib2.OpenerDirector</code> (in fact, of
<code>mechanize.OpenerDirector</code>), so:
<ul>
<li>any URL can be opened, not just <code>http:</code>
<li><code>mechanize.UserAgentBase</code> offers easy dynamic
configuration of user-agent features like protocol, cookie,
redirection and <code>robots.txt</code> handling, without having
to make a new <code>OpenerDirector</code> each time, e.g. by
calling <code>build_opener()</code>.
</ul>
<li>Easy HTML form filling, using <a href="../ClientForm/">ClientForm</a>
interface.
<li>Convenient link parsing and following.
<li>Browser history (<code>.back()</code> and <code>.reload()</code>
methods).
<li>The <code>Referer</code> HTTP header is added properly (optional).
<li>Automatic observance of <a
href="http://www.robotstxt.org/wc/norobots.html">
<code>robots.txt</code></a>.
<li>Automatic handling of HTTP-Equiv and Refresh.
</ul>
<a name="examples"></a>
<h2>Examples</h2>
<p class="docwarning">This documentation is in need of reorganisation and
extension!</p>
<p>The two examples below are just to give the gist.  There are also some <a
href="./#tests">actual working examples</a>.
@{colorize(r"""
import re
from mechanize import Browser
br = Browser()
br.open("http://www.example.com/")
# follow second link with element text matching regular expression
response1 = br.follow_link(text_regex=r"cheese\s*shop", nr=1)
assert br.viewing_html()
print br.title()
print response1.geturl()
print response1.info() # headers
print response1.read() # body
response1.close() # (shown for clarity; in fact Browser does this for you)
br.select_form(name="order")
# Browser passes through unknown attributes (including methods)
# to the selected HTMLForm (from ClientForm).
br["cheeses"] = ["mozzarella", "caerphilly"] # (the method here is __setitem__)
response2 = br.submit() # submit current form
# print currently selected form (don't call .submit() on this, use br.submit())
print br.form
response3 = br.back() # back to cheese shop (same data as response1)
# the history mechanism returns cached response objects
# we can still use the response, even though we closed it:
response3.seek(0)
response3.read()
response4 = br.reload() # fetches from server
for form in br.forms():
print form
# .links() optionally accepts the keyword args of .follow_/.find_link()
for link in br.links(url_regex="python.org"):
print link
br.follow_link(link) # takes EITHER Link instance OR keyword args
br.back()
""")}
<p>You may control the browser's policy by using the methods of
<code>mechanize.Browser</code>'s base class, <code>mechanize.UserAgent</code>.
For example:
@{colorize("""
br = Browser()
# Explicitly configure proxies (Browser will attempt to set good defaults).
# Note the userinfo ("joe:password@") and port number (":3128") are optional.
br.set_proxies({"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com",
})
# Add HTTP Basic/Digest auth username and password for HTTP proxy access.
# (equivalent to using "joe:password@..." form above)
br.add_proxy_password("joe", "password")
# Add HTTP Basic/Digest auth username and password for website access.
br.add_password("http://example.com/protected/", "joe", "password")
# Don't handle HTTP-EQUIV headers (HTTP headers embedded in HTML).
br.set_handle_equiv(False)
# Ignore robots.txt. Do not do this without thought and consideration.
br.set_handle_robots(False)
# Don't add Referer (sic) header
br.set_handle_referer(False)
# Don't handle Refresh redirections
br.set_handle_refresh(False)
# Don't handle cookies
br.set_cookiejar()
# Supply your own mechanize.CookieJar (NOTE: cookie handling is ON by
# default: no need to do this unless you have some reason to use a
# particular cookiejar)
br.set_cookiejar(cj)
# Log information about HTTP redirects and Refreshes.
br.set_debug_redirects(True)
# Log HTTP response bodies (ie. the HTML, most of the time).
br.set_debug_responses(True)
# Print HTTP headers.
br.set_debug_http(True)
# To make sure you're seeing all debug output:
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
# Sometimes it's useful to process bad headers or bad HTML:
response = br.response() # this is a copy of response
headers = response.info() # currently, this is a mimetools.Message
headers["Content-type"] = "text/html; charset=utf-8"
response.set_data(response.get_data().replace("<!---", "<!--"))
br.set_response(response)
""")}
<p>mechanize exports the complete interface of <code>urllib2</code>:
@{colorize("""
import mechanize
response = mechanize.urlopen("http://www.example.com/")
print response.read()
""")}
<p>so anything you would normally import from <code>urllib2</code> can
(and should, by preference, to insulate you from future changes) be
imported from mechanize instead. In many cases if you import an
object from mechanize it will be the very same object you would get if
you imported from urllib2. In many other cases, though, the
implementation comes from mechanize, either because bug fixes have
been applied or the functionality of urllib2 has been extended in some
way.
<a name="useragentbase"></a>
<h2>UserAgent vs UserAgentBase</h2>
<p><code>mechanize.UserAgent</code> is a trivial subclass of
<code>mechanize.UserAgentBase</code>, adding just one method,
<code>.set_seekable_responses()</code> (see the <a
href="./doc.html#seekable">documentation on seekable responses</a>).
<p>The reason for the extra class is that
<code>mechanize.Browser</code> depends on seekable response objects
(because response objects are used to implement the browser history).
<a name="compatnotes"></a>
<h2>Compatibility</h2>
<p>These notes explain the relationship between mechanize, ClientCookie,
<code>cookielib</code> and <code>urllib2</code>, and which to use when. If
you're just using mechanize, and not any of those other libraries, you can
ignore this section.
<ol>
<li>mechanize works with Python 2.3, Python 2.4 and Python 2.5.
<li>ClientCookie is no longer maintained as a separate package. The code is
now part of mechanize, and its interface is now exported through module
mechanize (since mechanize 0.1.0). Old code can simply be changed to
<code>import mechanize as ClientCookie</code> and should continue to
work.
<li>The cookie handling parts of mechanize are in Python 2.4 standard library
as module <code>cookielib</code> and extensions to module
<code>urllib2</code>.
</ol>
<p><strong>IMPORTANT:</strong> The following are the ONLY cases where
<code>mechanize</code> and <code>urllib2</code> code are intended to work
together. For all other code, use mechanize
<em><strong>exclusively</strong></em>: do NOT mix use of mechanize and
<code>urllib2</code>!
<ol>
<li>Handler classes that are missing from 2.4's <code>urllib2</code>
(e.g. <code>HTTPRefreshProcessor</code>, <code>HTTPEquivProcessor</code>,
<code>HTTPRobotRulesProcessor</code>) may be used with the
<code>urllib2</code> of Python 2.4 or newer. There are not currently any
functional tests for this in mechanize, however, so this feature may be
broken.
<li>If you want to use <code>mechanize.RefreshProcessor</code> with Python >=
2.4's <code>urllib2</code>, you must also use
<code>mechanize.HTTPRedirectHandler</code>.
<li><code>mechanize.HTTPRefererProcessor</code> requires special support from
<code>mechanize.Browser</code>, so cannot be used with vanilla
<code>urllib2</code>.
<li><code>mechanize.HTTPRequestUpgradeProcessor</code> and
<code>mechanize.ResponseUpgradeProcessor</code> are not useful outside of
mechanize.
<li>Request and response objects from code based on <code>urllib2</code> work
with mechanize, and vice-versa.
<li>The classes and functions exported by mechanize in its public interface
that come straight from <code>urllib2</code>
(e.g. <code>FTPHandler</code>, at the time of writing) do work with
mechanize (duh ;-). Exactly which of these classes and functions come
straight from <code>urllib2</code> without extension or modification will
change over time, though, so don't rely on it; instead, just import
everything you need from mechanize, never from <code>urllib2</code>. The
exception is usage as described in the first item in this list, which is
explicitly OK (though not well tested ATM), subject to the other
restrictions in the list above.
</ol>
<a name="docs"></a>
<h2>Documentation</h2>
<p>Full documentation is in the docstrings.
<p>The documentation in the web pages is in need of reorganisation at the
moment, after the merge of ClientCookie into mechanize.
<a name="credits"></a>
<h2>Credits</h2>
<p>Thanks to all the too-numerous-to-list people who reported bugs and provided
patches. Also thanks to Ian Bicking, for persuading me that a
<code>UserAgent</code> class would be useful, and to Ronald Tschalar for advice
on Netscape cookies.
<p>A lot of credit must go to Gisle Aas, who wrote libwww-perl, from which
large parts of mechanize originally derived, and Andy Lester for the original,
<a href="http://search.cpan.org/dist/WWW-Mechanize/"><code>WWW::Mechanize</code>
</a>. Finally, thanks to the (coincidentally-named) Johnny Lee for the MSIE
CookieJar Perl code from which mechanize's support for that is derived.
<a name="todo"></a>
<h2>To do</h2>
<p>Contributions welcome!
<p>The documentation to-do list has moved to the new "docs-in-progress"
directory in SVN.
<p><em>This is <strong>very</strong> roughly in order of priority</em>
<ul>
<li>Test <code>.any_response()</code> two handlers case: ordering.
<li>Test referer bugs (frags and don't add in redirect unless orig
req had Referer)
<li>Remove use of urlparse from _auth.py.
<li>Proper XHTML support!
<li>Fix BeautifulSoup support to use a single BeautifulSoup instance
per page.
<li>Test BeautifulSoup support better / fix encoding issue.
<li>Support BeautifulSoup 3.
<li>Add another History implementation or two and finalise interface.
<li>History cache expiration.
<li>Investigate possible leak further (see Balazs Ree's list posting).
<li>Make <code>EncodingFinder</code> public, I guess (but probably
improve it first). (For example: support Mark Pilgrim's universal
encoding detector?)
<li>Add two-way links between BeautifulSoup & ClientForm object
models.
<li>In 0.2: switch to Python unicode strings everywhere appropriate
(HTTP level should still use byte strings, of course).
<li><code>clean_url()</code>: test browser behaviour. I <em>think</em>
this is correct...
<li>Use a nicer RFC 3986 join / split / unsplit implementation.
<li>Figure out the Right Thing (if such a thing exists) for %-encoding.
<li>How do IRIs fit into the world?
<li>IDNA -- must read about security stuff first.
<li>Unicode support in general.
<li>Provide per-connection access to timeouts.
<li>Keep-alive / connection caching.
<li>Pipelining??
<li>Content negotiation.
<li>gzip transfer encoding (there's already a handler for this in
mechanize, but it's poorly implemented ATM).
<li>proxy.pac parsing (I don't think this needs JS interpretation)
<li>Topological sort for handlers, instead of .handler_order
attribute. Ordering and other dependencies (where unavoidable)
should be defined separate from handlers themselves. Add new
build_opener and deprecate the old one? Actually, _useragent is
probably not far off what I'd have in mind (would just need a
method or two and a base class adding I think), and it's not a high
priority since I guess most people will just use the UserAgent and
Browser classes.
</ul>
<a name="download"></a>
<h2>Getting mechanize</h2>
<p>You can install the <a href="./#source">old-fashioned way</a>, or using <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall</a>. I
recommend the latter even though EasyInstall is still in alpha, because it will
automatically ensure you have the necessary dependencies, downloading if
necessary.
<p><a href="./#svn">Subversion (SVN) access</a> is also available.
<p>Since EasyInstall is new, I include some instructions below, but mechanize
follows standard EasyInstall / <code>setuptools</code> conventions, so you
should refer to the <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall</a> and
<a href="http://peak.telecommunity.com/DevCenter/setuptools">setuptools</a>
documentation if you need more detailed or up-to-date instructions.
<h2>EasyInstall / setuptools</h2>
<p>The benefit of EasyInstall and the new <code>setuptools</code>-supporting
<code>setup.py</code> is that they grab all dependencies for you. Also, using
EasyInstall is a one-liner for the common case, to be compared with the usual
download-unpack-install cycle with <code>setup.py</code>.
<h3>Using EasyInstall to download and install mechanize</h3>
<ol>
<li><a href="http://peak.telecommunity.com/DevCenter/EasyInstall#installing-easy-install">
Install easy_install</a>
<li><code>easy_install mechanize</code>
</ol>
<p>If you're on a Unix-like OS, you may need root permissions for that last
step (or see the <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall
documentation</a> for other installation options).
<p>If you already have mechanize installed as a <a
href="http://peak.telecommunity.com/DevCenter/PythonEggs">Python Egg</a> (as
you do if you installed using EasyInstall, or using <code>setup.py
install</code> from mechanize 0.0.10a or newer), you can upgrade to the latest
version using:
<pre>easy_install --upgrade mechanize</pre>
<p>You may want to read up on the <code>-m</code> option to
<code>easy_install</code>, which lets you install multiple versions of a
package.
<a name="svnhead"></a>
<h3>Using EasyInstall to download and install the latest in-development (SVN HEAD) version of mechanize</h3>
<pre>easy_install "mechanize==dev"</pre>
<p>Note that that will not necessarily grab the SVN versions of dependencies,
such as ClientForm: It will use SVN to fetch dependencies if and only if the
SVN HEAD version of mechanize declares itself to depend on the SVN versions of
those dependencies; even then, those declared dependencies won't necessarily be
on SVN HEAD, but rather a particular revision. If you want SVN HEAD for a
dependency project, you should ask for it explicitly by running
<code>easy_install "projectname==dev"</code> for that project.
<p>Note also that you can still carry on using a plain old SVN checkout as
usual if you like.
<h3>Using setup.py from a .tar.gz, .zip or an SVN checkout to download and install mechanize</h3>
<p><code>setup.py</code> should correctly resolve and download dependencies:
<pre>python setup.py install</pre>
<p>Or, to get access to the same options that <code>easy_install</code>
accepts, use the <code>easy_install</code> distutils command instead of
<code>install</code> (see <code>python setup.py --help easy_install</code>)
<pre>python setup.py easy_install mechanize</pre>
<a name="source"></a>
<h2>Download</h2>
<p>All documentation (including this web page) is included in the distribution.
<p>This is a beta release: there will be bugs.
<p><em>Development release.</em>
<ul>
-@{version = "0.1.8b"}
+@{version = "0.1.8"}
<li><a href="./src/mechanize-@(version).tar.gz">mechanize-@(version).tar.gz</a>
<li><a href="./src/mechanize-@(version).zip">mechanize-@(version).zip</a>
<li><a href="./src/ChangeLog.txt">Change Log</a> (included in distribution)
<li><a href="./src/">Older versions.</a>
</ul>
<p>For old-style installation instructions, see the INSTALL file included in
the distribution. Better, <a href="./#download">use EasyInstall</a>.
<a name="svn"></a>
<h2>Subversion</h2>
<p>The <a href="http://subversion.tigris.org/">Subversion (SVN)</a> trunk is <a href="http://codespeak.net/svn/wwwsearch/mechanize/trunk#egg=mechanize-dev">http://codespeak.net/svn/wwwsearch/mechanize/trunk</a>, so to check out the source:
<pre>
svn co http://codespeak.net/svn/wwwsearch/mechanize/trunk mechanize
</pre>
<a name="tests"></a>
<h2>Tests and examples</h2>
<h3>Examples</h3>
<p>The <code>examples</code> directory in the <a href="./#source">source
packages</a> contains a couple of silly, but working, scripts to demonstrate
basic use of the module. Note that it's in the nature of web scraping for such
scripts to break, so don't be too surprised if that happens &ndash; do let me
know, though!
<p>It's worth knowing also that the examples on the <a
href="../ClientForm/">ClientForm web page</a> are useful for mechanize users,
and are now real runnable scripts rather than just documentation.
<h3>Functional tests</h3>
<p>To run the functional tests (which <strong>do</strong> access the network),
run the following
command:
<pre>python functional_tests.py</pre>
<h3>Unit tests</h3>
<p>Note that ClientForm (a dependency of mechanize) has its own unit tests,
which must be run separately.
<p>To run the unit tests (none of which access the network), run the following
command:
<pre>python test.py</pre>
<p>This runs the tests against the source files extracted from the
package. For help on command line options:
<pre>python test.py --help</pre>
<h2>See also</h2>
<p>There are several wrappers around mechanize designed for functional testing
of web applications:
<ul>
<li><a href="http://cheeseshop.python.org/pypi?:action=display&name=zope.testbrowser">
<code>zope.testbrowser</code></a> (or
<a href="http://cheeseshop.python.org/pypi?%3Aaction=display&name=ZopeTestbrowser">
<code>ZopeTestBrowser</code></a>, the standalone version).
<li><a href="http://www.idyll.org/~t/www-tools/twill.html">twill</a>.
</ul>
<p>Richard Jones' <a href="http://mechanicalcat.net/tech/webunit/">webunit</a>
(this is not the same as Steven Purcell's <a
href="http://webunit.sourceforge.net/">code of the same name</a>). webunit and
mechanize are quite similar. On the minus side, webunit is missing things like
browser history, high-level forms and links handling, thorough cookie handling,
refresh redirection, adding of the Referer header, observance of robots.txt and
easy extensibility. On the plus side, webunit has a bunch of utility functions
bound up in its WebFetcher class, which look useful for writing tests (though
they'd be easy to duplicate using mechanize). In general, webunit has more of
a frameworky emphasis, with aims limited to writing tests, where mechanize and
the modules it depends on try hard to be general-purpose libraries.
<p>There are many related links in the <a
href="../bits/GeneralFAQ.html">General FAQ</a> page, too.
<a name="faq"></a>
<h2>FAQs - pre install</h2>
<ul>
<li>Which version of Python do I need?
<p>2.3 or above.
<li>What else do I need?
<p>mechanize depends on <a href="../ClientForm/">ClientForm</a>.
<li>Does mechanize depend on BeautifulSoup?
<p>No.  mechanize offers a few (still rather experimental) classes that make
use of BeautifulSoup, but these classes are not required to use mechanize.
mechanize bundles BeautifulSoup version 2, so that module is no longer
required. A future version of mechanize will support BeautifulSoup
version 3, at which point mechanize will likely no longer bundle the
module.
<p>The versions of those required modules are listed in the
<code>setup.py</code> for mechanize (included with the download). The
dependencies are automatically fetched by <a
href="http://peak.telecommunity.com/DevCenter/EasyInstall">EasyInstall</a>
(or by <a href="./#source">downloading</a> a mechanize source package and
running <code>python setup.py install</code>). If you like you can fetch
and install them manually, instead – see the <code>INSTALL.txt</code>
file (included with the distribution).
<li>Which license?
<p>mechanize is dual-licensed: you may pick either the
<a href="http://www.opensource.org/licenses/bsd-license.php">BSD license</a>,
or the <a href="http://www.zope.org/Resources/ZPL">ZPL 2.1</a> (both are
included in the distribution).
</ul>
<a name="usagefaq"></a>
<h2>FAQs - usage</h2>
<ul>
<li>I'm not getting the HTML page I expected to see.
<ul>
<li><a href="http://wwwsearch.sourceforge.net/mechanize/doc.html#debugging">Debugging tips</a>
<li><a href="http://wwwsearch.sourceforge.net/bits/GeneralFAQ.html">More tips</a>
</ul>
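<p>One quick check is to turn on mechanize's logging (this sketch sends
everything from the "mechanize" logger to stdout):
@{colorize("""
import sys, logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
""")}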
<li>I'm <strong><em>sure</em></strong> this page is HTML, why does
<code>mechanize.Browser</code> think otherwise?
@{colorize("""
b = mechanize.Browser(
# mechanize's XHTML support needs work, so is currently switched off. If
# we want to get our work done, we have to turn it on by supplying a
# mechanize.Factory (with XHTML support turned on):
factory=mechanize.DefaultFactory(i_want_broken_xhtml_support=True)
)
""")}
</ul>
<p>I prefer questions and comments to be sent to the <a
href="http://lists.sourceforge.net/lists/listinfo/wwwsearch-general">
mailing list</a> rather than direct to me.
<p><a href="mailto:jjl@@pobox.com">John J. Lee</a>,
@(time.strftime("%B %Y", last_modified)).
<hr>
</div>
<div id="Menu">
@(release.navbar('mechanize'))
<br>
<a href="./#examples">Examples</a><br>
<a href="./#compatnotes">Compatibility</a><br>
<a href="./#docs">Documentation</a><br>
<a href="./#todo">To-do</a><br>
<a href="./#download">Download</a><br>
<a href="./#svn">Subversion</a><br>
<a href="./#tests">More examples</a><br>
<a href="./#faq">FAQs</a><br>
</div>
</body>
</html>
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index 6e51f90..bbc9a83 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,533 +1,533 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import urllib2, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
-__version__ = (0, 1, 8, "b", None) # 0.1.8b
+__version__ = (0, 1, 8, None, None) # 0.1.8
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - e.g., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
request_class: Request class to use. Defaults to mechanize.Request
for Pythons older than 2.4, and to urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
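# To summarise the rules implemented above (a restatement, not new
# behaviour): Referer is only added for http and https requests, never when
# moving from an https page to a non-https one, and the source URL's
# fragment is stripped first (RFC 2616 14.36).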
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
Browser state (including request, response, history, forms and links)
is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
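# A minimal usage sketch (hypothetical URL): fetch an image without
# disturbing history, forms or links:
#   br = Browser()
#   br.open("http://example.com/")
#   img = br.open_novisit("http://example.com/logo.png")
#   data = img.read()  # br.geturl() still reports http://example.com/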
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
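# A navigation sketch (hypothetical URLs), assuming br is a Browser:
#   br.open("http://example.com/a")
#   br.open("http://example.com/b")
#   r = br.back()   # r is the response for /a again
#   br.geturl()     # "http://example.com/a"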
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants
of any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
r"""Return title, or None if there is no title element in the document.
Treatment of any tag children of <title> attempts to follow Firefox and IE
(currently, tags are preserved).
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
first). Note that form 0 is the first form matching all the other
arguments (if supplied); it is not necessarily the first form in the
document. The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
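# A form-filling sketch (hypothetical form and control names), using the
# HTMLForm interface described in the docstring above:
#   br.select_form(name="login")        # or nr=0, or predicate=...
#   br.set_value("alice", name="user")
#   response = br.submit()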
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
diff --git a/setup.py b/setup.py
index 509f253..003b3f9 100755
--- a/setup.py
+++ b/setup.py
@@ -1,147 +1,147 @@
#!/usr/bin/env python
"""Stateful programmatic web browsing.
Stateful programmatic web browsing, after Andy Lester's Perl module
WWW::Mechanize.
The library is layered: mechanize.Browser (stateful web browser),
mechanize.UserAgent (configurable URL opener), plus urllib2 handlers.
Features include: ftp:, http: and file: URL schemes, browser history,
high-level hyperlink and HTML form support, HTTP cookies, HTTP-EQUIV and
Refresh, Referer [sic] header, robots.txt, redirections, proxies, and
Basic and Digest HTTP authentication. mechanize's response objects are
(lazily-) .seek()able and still work after .close().
Much of the code originally derived from Perl code by Gisle Aas
(libwww-perl), Johnny Lee (MSIE Cookie support) and last but not least
Andy Lester (WWW::Mechanize). urllib2 was written by Jeremy Hylton.
"""
def unparse_version(tup):
major, minor, bugfix, state_char, pre = tup
fmt = "%s.%s.%s"
args = [major, minor, bugfix]
if state_char is not None:
fmt += "%s"
args.append(state_char)
if pre is not None:
fmt += "-pre%s"
args.append(pre)
return fmt % tuple(args)
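# Sanity sketch of the formats this produces:
#   unparse_version((0, 1, 8, None, None))  ->  "0.1.8"
#   unparse_version((0, 1, 8, "b", None))   ->  "0.1.8b"
#   unparse_version((0, 1, 8, "b", 1))      ->  "0.1.8b-pre1"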
def str_to_tuple(text):
if text.startswith("("):
text = text[1:-1]
els = [el.strip() for el in text.split(",")]
newEls = []
for ii in range(len(els)):
el = els[ii]
if el == "None":
newEls.append(None)
elif 0 <= ii < 3:
newEls.append(int(el))
else:
if el.startswith("'") or el.startswith('"'):
el = el[1:-1]
newEls.append(el)
return tuple(newEls)
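# e.g. str_to_tuple("(0, 1, 8, 'b', None)") -> (0, 1, 8, 'b', None), the
# inverse of the __version__ tuple format in mechanize/_mechanize.py.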
import re
## VERSION_MATCH = re.search(r'__version__ = \((.*)\)',
## open("mechanize/_mechanize.py").read())
## VERSION = unparse_version(str_to_tuple(VERSION_MATCH.group(1)))
-VERSION = "0.1.8b"
+VERSION = "0.1.8"
INSTALL_REQUIRES = ["ClientForm>=0.2.6, ==dev"]
NAME = "mechanize"
PACKAGE = True
LICENSE = "BSD" # or ZPL 2.1
PLATFORMS = ["any"]
ZIP_SAFE = True
CLASSIFIERS = """\
-Development Status :: 4 - Beta
+Development Status :: 5 - Production/Stable
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: BSD License
License :: OSI Approved :: Zope Public License
Natural Language :: English
Operating System :: OS Independent
Programming Language :: Python
Topic :: Internet
Topic :: Internet :: File Transfer Protocol (FTP)
Topic :: Internet :: WWW/HTTP
Topic :: Internet :: WWW/HTTP :: Browsers
Topic :: Internet :: WWW/HTTP :: Indexing/Search
Topic :: Internet :: WWW/HTTP :: Site Management
Topic :: Internet :: WWW/HTTP :: Site Management :: Link Checking
Topic :: Software Development :: Libraries
Topic :: Software Development :: Libraries :: Python Modules
Topic :: Software Development :: Testing
Topic :: Software Development :: Testing :: Traffic Generation
Topic :: System :: Archiving :: Mirroring
Topic :: System :: Networking :: Monitoring
Topic :: System :: Systems Administration
Topic :: Text Processing
Topic :: Text Processing :: Markup
Topic :: Text Processing :: Markup :: HTML
Topic :: Text Processing :: Markup :: XML
"""
#-------------------------------------------------------
# the rest is constant for most of my released packages:
import sys
if PACKAGE:
packages, py_modules = [NAME], None
else:
packages, py_modules = None, [NAME]
doclines = __doc__.split("\n")
if not hasattr(sys, "version_info") or sys.version_info < (2, 3):
from distutils.core import setup
_setup = setup
def setup(**kwargs):
for key in [
# distutils >= Python 2.3 args
# XXX probably download_url came in earlier than 2.3
"classifiers", "download_url",
# setuptools args
"install_requires", "zip_safe", "test_suite",
]:
if kwargs.has_key(key):
del kwargs[key]
# Only want packages keyword if this is a package,
# only want py_modules keyword if this is a single-file module,
# so get rid of packages or py_modules keyword as appropriate.
if kwargs["packages"] is None:
del kwargs["packages"]
else:
del kwargs["py_modules"]
apply(_setup, (), kwargs)
else:
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup
setup(
name = NAME,
version = VERSION,
license = LICENSE,
platforms = PLATFORMS,
classifiers = [c for c in CLASSIFIERS.split("\n") if c],
install_requires = INSTALL_REQUIRES,
zip_safe = ZIP_SAFE,
test_suite = "test",
author = "John J. Lee",
author_email = "[email protected]",
description = doclines[0],
long_description = "\n".join(doclines[2:]),
url = "http://wwwsearch.sourceforge.net/%s/" % NAME,
download_url = ("http://wwwsearch.sourceforge.net/%s/src/"
"%s-%s.tar.gz" % (NAME, NAME, VERSION)),
py_modules = py_modules,
packages = packages,
)
|
Almad/Mechanize
|
25ddc62c0a446568dc3fcf18af599f45a83742f1
|
Revert r44828 and update tests appropriately. This reverts changes to Refresh redirection defaults, back to how they were in the last release (0.1.7b).
|
diff --git a/functional_tests.py b/functional_tests.py
index 82fb832..38a8800 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,513 +1,513 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
import os, sys, urllib, tempfile, errno
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
from mechanize._util import hide_experimental_warnings, \
reset_experimental_warnings
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_refresh(self):
def refresh_request(seconds):
uri = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
val = urllib.quote_plus('%d; url="%s"' % (seconds, self.uri))
return uri + ("?refresh=%s" % val)
r = self.browser.open(refresh_request(5))
self.assertEqual(r.geturl(), self.uri)
- # Refresh with pause > 30 seconds is ignored by default (these long
- # refreshes tend to be there only because the website owner wants you
- # to see the latest news, or whatever -- they're not essential to the
- # operation of the site, and not really useful or appropriate when
- # scraping).
+ # Set a maximum refresh time of 30 seconds (these long refreshes tend
+ # to be there only because the website owner wants you to see the
+ # latest news, or whatever -- they're not essential to the operation of
+ # the site, and not really useful or appropriate when scraping).
refresh_uri = refresh_request(60)
+ self.browser.set_handle_refresh(True, max_time=30., honor_time=True)
r = self.browser.open(refresh_uri)
self.assertEqual(r.geturl(), refresh_uri)
- # allow long refreshes (note we don't actually wait 60 seconds by default)
- self.browser.set_handle_refresh(True, max_time=None)
+ # allow long refreshes (but don't actually wait 60 seconds)
+ self.browser.set_handle_refresh(True, max_time=None, honor_time=False)
r = self.browser.open(refresh_request(60))
self.assertEqual(r.geturl(), self.uri)
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_local_file(self):
# Since the file: URL scheme is not well standardised, Browser has a
# special method to open files by name, for convenience:
br = mechanize.Browser()
response = br.open_local_file("mechanize/_mechanize.py")
self.assert_("def open_local_file(self, filename):" in
response.get_data())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_seek_wrapper_class_name(self):
opener = mechanize.UserAgent()
opener.set_seekable_responses(True)
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_("HTTPError instance" in repr(exc))
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = urljoin(self.uri, "bits/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CookieJarTests(TestCase):
def test_mozilla_cookiejar(self):
filename = tempfile.mktemp()
try:
def get_cookiejar():
cj = mechanize.MozillaCookieJar(filename=filename)
try:
cj.revert()
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
return cj
def commit(cj):
cj.save()
self._test_cookiejar(get_cookiejar, commit)
finally:
try:
os.remove(filename)
except OSError, exc:
if exc.errno != errno.ENOENT:
raise
def test_firefox3_cookiejar(self):
try:
mechanize.Firefox3CookieJar
except AttributeError:
# firefox 3 cookiejar is only supported in Python 2.5 and later
self.assert_(sys.version_info[:2] < (2, 5))
return
filename = tempfile.mktemp()
try:
def get_cookiejar():
hide_experimental_warnings()
try:
cj = mechanize.Firefox3CookieJar(filename=filename)
finally:
reset_experimental_warnings()
cj.connect()
return cj
def commit(cj):
pass
self._test_cookiejar(get_cookiejar, commit)
finally:
os.remove(filename)
def _test_cookiejar(self, get_cookiejar, commit):
cookiejar = get_cookiejar()
br = mechanize.Browser()
br.set_cookiejar(cookiejar)
br.set_handle_refresh(False)
url = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
# no cookie was set on the first request
html = br.open(url).read()
self.assertEquals(html.find("Your browser supports cookies!"), -1)
self.assertEquals(len(cookiejar), 1)
# ... but now we have the cookie
html = br.open(url).read()
self.assert_("Your browser supports cookies!" in html)
commit(cookiejar)
# should still have the cookie when we load afresh
cookiejar = get_cookiejar()
br.set_cookiejar(cookiejar)
html = br.open(url).read()
self.assert_("Your browser supports cookies!" in html)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
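# Note on the reporthook protocol this verifies: urlretrieve-style callbacks
# receive (block_nr, block_size, total_size), with block_nr counting up from
# zero; that is the invariant asserted above.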
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
If this option doesn't work on Windows/Mac, somebody please
tell me about it, or I'll never find out...
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/mechanize/_useragent.py b/mechanize/_useragent.py
index 1e4c084..5206d45 100644
--- a/mechanize/_useragent.py
+++ b/mechanize/_useragent.py
@@ -1,348 +1,348 @@
"""Convenient HTTP UserAgent class.
This is a subclass of urllib2.OpenerDirector.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import warnings
import _opener
import _urllib2
import _auth
import _gzip
import _response
class UserAgentBase(_opener.OpenerDirector):
"""Convenient user-agent class.
Do not use .add_handler() to add a handler for something already dealt with
by this code.
The only reason at present for the distinction between UserAgent and
UserAgentBase is so that classes that depend on .seek()able responses
(e.g. mechanize.Browser) can inherit from UserAgentBase. The subclass
UserAgent exposes a .set_seekable_responses() method that allows switching
off the adding of a .seek() method to responses.
Public attributes:
addheaders: list of (name, value) pairs specifying headers to send with
every request, unless they are overridden in the Request instance.
>>> ua = UserAgentBase()
>>> ua.addheaders = [
... ("User-agent", "Mozilla/5.0 (compatible)"),
... ("From", "[email protected]")]
"""
handler_classes = {
# scheme handlers
"http": _urllib2.HTTPHandler,
# CacheFTPHandler is buggy, at least in 2.3, so we don't use it
"ftp": _urllib2.FTPHandler,
"file": _urllib2.FileHandler,
# other handlers
"_unknown": _urllib2.UnknownHandler,
# HTTP{S,}Handler depend on HTTPErrorProcessor too
"_http_error": _urllib2.HTTPErrorProcessor,
"_http_request_upgrade": _urllib2.HTTPRequestUpgradeProcessor,
"_http_default_error": _urllib2.HTTPDefaultErrorHandler,
# feature handlers
"_basicauth": _urllib2.HTTPBasicAuthHandler,
"_digestauth": _urllib2.HTTPDigestAuthHandler,
"_redirect": _urllib2.HTTPRedirectHandler,
"_cookies": _urllib2.HTTPCookieProcessor,
"_refresh": _urllib2.HTTPRefreshProcessor,
"_equiv": _urllib2.HTTPEquivProcessor,
"_proxy": _urllib2.ProxyHandler,
"_proxy_basicauth": _urllib2.ProxyBasicAuthHandler,
"_proxy_digestauth": _urllib2.ProxyDigestAuthHandler,
"_robots": _urllib2.HTTPRobotRulesProcessor,
"_gzip": _gzip.HTTPGzipProcessor, # experimental!
# debug handlers
"_debug_redirect": _urllib2.HTTPRedirectDebugProcessor,
"_debug_response_body": _urllib2.HTTPResponseDebugProcessor,
}
default_schemes = ["http", "ftp", "file"]
default_others = ["_unknown", "_http_error", "_http_request_upgrade",
"_http_default_error",
]
default_features = ["_redirect", "_cookies",
"_refresh", "_equiv",
"_basicauth", "_digestauth",
"_proxy", "_proxy_basicauth", "_proxy_digestauth",
"_robots",
]
if hasattr(_urllib2, 'HTTPSHandler'):
handler_classes["https"] = _urllib2.HTTPSHandler
default_schemes.append("https")
def __init__(self):
_opener.OpenerDirector.__init__(self)
ua_handlers = self._ua_handlers = {}
for scheme in (self.default_schemes+
self.default_others+
self.default_features):
klass = self.handler_classes[scheme]
ua_handlers[scheme] = klass()
for handler in ua_handlers.itervalues():
self.add_handler(handler)
# Yuck.
# Ensure correct default constructor args were passed to
# HTTPRefreshProcessor and HTTPEquivProcessor.
if "_refresh" in ua_handlers:
self.set_handle_refresh(True)
if "_equiv" in ua_handlers:
self.set_handle_equiv(True)
# Ensure default password managers are installed.
pm = ppm = None
if "_basicauth" in ua_handlers or "_digestauth" in ua_handlers:
pm = _urllib2.HTTPPasswordMgrWithDefaultRealm()
if ("_proxy_basicauth" in ua_handlers or
"_proxy_digestauth" in ua_handlers):
ppm = _auth.HTTPProxyPasswordMgr()
self.set_password_manager(pm)
self.set_proxy_password_manager(ppm)
# set default certificate manager
if "https" in ua_handlers:
cm = _urllib2.HTTPSClientCertMgr()
self.set_client_cert_manager(cm)
def close(self):
_opener.OpenerDirector.close(self)
self._ua_handlers = None
# XXX
## def set_timeout(self, timeout):
## self._timeout = timeout
## def set_http_connection_cache(self, conn_cache):
## self._http_conn_cache = conn_cache
## def set_ftp_connection_cache(self, conn_cache):
## # XXX ATM, FTP has cache as part of handler; should it be separate?
## self._ftp_conn_cache = conn_cache
def set_handled_schemes(self, schemes):
"""Set sequence of URL scheme (protocol) strings.
For example: ua.set_handled_schemes(["http", "ftp"])
If this fails (with ValueError) because you've passed an unknown
scheme, the set of handled schemes will not be changed.
"""
want = {}
for scheme in schemes:
if scheme.startswith("_"):
raise ValueError("not a scheme '%s'" % scheme)
if scheme not in self.handler_classes:
raise ValueError("unknown scheme '%s'" % scheme)
want[scheme] = None
# get rid of scheme handlers we don't want
for scheme, oldhandler in self._ua_handlers.items():
if scheme.startswith("_"): continue # not a scheme handler
if scheme not in want:
self._replace_handler(scheme, None)
else:
del want[scheme] # already got it
# add the scheme handlers that are missing
for scheme in want.keys():
self._set_handler(scheme, True)
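# e.g. (a sketch): ua.set_handled_schemes(["http", "https"]) keeps only the
# HTTP and HTTPS handlers, dropping the default ftp: and file: ones.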
def set_cookiejar(self, cookiejar):
"""Set a mechanize.CookieJar, or None."""
self._set_handler("_cookies", obj=cookiejar)
# XXX could use Greg Stein's httpx for some of this instead?
# or httplib2??
def set_proxies(self, proxies):
"""Set a dictionary mapping URL scheme to proxy specification, or None.
e.g. {"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com"}
"""
self._set_handler("_proxy", obj=proxies)
def add_password(self, url, user, password, realm=None):
self._password_manager.add_password(realm, url, user, password)
def add_proxy_password(self, user, password, hostport=None, realm=None):
self._proxy_password_manager.add_password(
realm, hostport, user, password)
def add_client_certificate(self, url, key_file, cert_file):
"""Add an SSL client certificate, for HTTPS client auth.
key_file and cert_file must be filenames of the key and certificate
files, in PEM format. You can use e.g. OpenSSL to convert a p12 (PKCS
12) file to PEM format:
openssl pkcs12 -clcerts -nokeys -in cert.p12 -out cert.pem
openssl pkcs12 -nocerts -in cert.p12 -out key.pem
Note that client certificate password input is very inflexible ATM. At
the moment this seems to be console only, which is presumably the
default behaviour of libopenssl. In future mechanize may support
third-party libraries that (I assume) allow more options here.
"""
self._client_cert_manager.add_key_cert(url, key_file, cert_file)
# the following are rarely useful -- use add_password / add_proxy_password
# instead
def set_password_manager(self, password_manager):
"""Set a mechanize.HTTPPasswordMgrWithDefaultRealm, or None."""
self._password_manager = password_manager
self._set_handler("_basicauth", obj=password_manager)
self._set_handler("_digestauth", obj=password_manager)
def set_proxy_password_manager(self, password_manager):
"""Set a mechanize.HTTPProxyPasswordMgr, or None."""
self._proxy_password_manager = password_manager
self._set_handler("_proxy_basicauth", obj=password_manager)
self._set_handler("_proxy_digestauth", obj=password_manager)
def set_client_cert_manager(self, cert_manager):
"""Set a mechanize.HTTPClientCertMgr, or None."""
self._client_cert_manager = cert_manager
handler = self._ua_handlers["https"]
handler.client_cert_manager = cert_manager
# these methods all take a boolean parameter
def set_handle_robots(self, handle):
"""Set whether to observe rules from robots.txt."""
self._set_handler("_robots", handle)
def set_handle_redirect(self, handle):
"""Set whether to handle HTTP 30x redirections."""
self._set_handler("_redirect", handle)
- def set_handle_refresh(self, handle, max_time=30.0, honor_time=False):
+ def set_handle_refresh(self, handle, max_time=None, honor_time=True):
"""Set whether to handle HTTP Refresh headers."""
self._set_handler("_refresh", handle, constructor_kwds=
{"max_time": max_time, "honor_time": honor_time})
def set_handle_equiv(self, handle, head_parser_class=None):
"""Set whether to treat HTML http-equiv headers like HTTP headers.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
if head_parser_class is not None:
constructor_kwds = {"head_parser_class": head_parser_class}
else:
constructor_kwds={}
self._set_handler("_equiv", handle, constructor_kwds=constructor_kwds)
def set_handle_gzip(self, handle):
"""Handle gzip transfer encoding.
"""
if handle:
warnings.warn(
"gzip transfer encoding is experimental!", stacklevel=2)
self._set_handler("_gzip", handle)
def set_debug_redirects(self, handle):
"""Log information about HTTP redirects (including refreshes).
Logging is performed using module logging. The logger name is
"mechanize.http_redirects". To actually print some debug output,
eg:
import sys, logging
logger = logging.getLogger("mechanize.http_redirects")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
Other logger names relevant to this module:
"mechanize.http_responses"
"mechanize.cookies" (or "cookielib" if running Python 2.4)
To turn on everything:
import sys, logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
"""
self._set_handler("_debug_redirect", handle)
def set_debug_responses(self, handle):
"""Log HTTP response bodies.
See docstring for .set_debug_redirects() for details of logging.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
self._set_handler("_debug_response_body", handle)
def set_debug_http(self, handle):
"""Print HTTP headers to sys.stdout."""
level = int(bool(handle))
for scheme in "http", "https":
h = self._ua_handlers.get(scheme)
if h is not None:
h.set_http_debuglevel(level)
def _set_handler(self, name, handle=None, obj=None,
constructor_args=(), constructor_kwds={}):
if handle is None:
handle = obj is not None
if handle:
handler_class = self.handler_classes[name]
if obj is not None:
newhandler = handler_class(obj)
else:
newhandler = handler_class(
*constructor_args, **constructor_kwds)
else:
newhandler = None
self._replace_handler(name, newhandler)
def _replace_handler(self, name, newhandler=None):
# first, if handler was previously added, remove it
if name is not None:
handler = self._ua_handlers.get(name)
if handler:
try:
self.handlers.remove(handler)
except ValueError:
pass
# then add the replacement, if any
if newhandler is not None:
self.add_handler(newhandler)
self._ua_handlers[name] = newhandler
class UserAgent(UserAgentBase):
def __init__(self):
UserAgentBase.__init__(self)
self._seekable = False
def set_seekable_responses(self, handle):
"""Make response objects .seek()able."""
self._seekable = bool(handle)
def open(self, fullurl, data=None):
if self._seekable:
def bound_open(fullurl, data=None):
return UserAgentBase.open(self, fullurl, data)
response = _opener.wrapped_open(
bound_open, _response.seek_wrapped_response, fullurl, data)
else:
response = UserAgentBase.open(self, fullurl, data)
return response
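# A usage sketch (hypothetical URL): ask for seekable responses explicitly,
# then rewind after reading:
#   ua = UserAgent()
#   ua.set_seekable_responses(True)
#   r = ua.open("http://example.com/")
#   data = r.read()
#   r.seek(0)
#   assert r.read() == data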
|
Almad/Mechanize
|
96c495728b59f3476bca2a15e3b94cb441f29fe4
|
Firefox 3 cookiejar support is only supported in Python 2.5 and later, and is experimental code. * Issue a warning on construction of mechanize.Firefox3CookieJar to indicate it's experimental * Make mechanize.Firefox3CookieJar unavailable in Python 2.4 / 2.3 rather than crashing on "import mechanize" with SyntaxError
|
diff --git a/functional_tests.py b/functional_tests.py
index 4bf028c..82fb832 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,500 +1,513 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
import os, sys, urllib, tempfile, errno
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
+from mechanize._util import hide_experimental_warnings, \
+ reset_experimental_warnings
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_refresh(self):
def refresh_request(seconds):
uri = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
val = urllib.quote_plus('%d; url="%s"' % (seconds, self.uri))
return uri + ("?refresh=%s" % val)
r = self.browser.open(refresh_request(5))
self.assertEqual(r.geturl(), self.uri)
# Refresh with pause > 30 seconds is ignored by default (these long
# refreshes tend to be there only because the website owner wants you
# to see the latest news, or whatever -- they're not essential to the
# operation of the site, and not really useful or appropriate when
# scraping).
refresh_uri = refresh_request(60)
r = self.browser.open(refresh_uri)
self.assertEqual(r.geturl(), refresh_uri)
# allow long refreshes (note we don't actually wait 60 seconds by default)
self.browser.set_handle_refresh(True, max_time=None)
r = self.browser.open(refresh_request(60))
self.assertEqual(r.geturl(), self.uri)
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_local_file(self):
# Since the file: URL scheme is not well standardised, Browser has a
# special method to open files by name, for convenience:
br = mechanize.Browser()
response = br.open_local_file("mechanize/_mechanize.py")
self.assert_("def open_local_file(self, filename):" in
response.get_data())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_seek_wrapper_class_name(self):
opener = mechanize.UserAgent()
opener.set_seekable_responses(True)
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_("HTTPError instance" in repr(exc))
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = urljoin(self.uri, "bits/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = build_opener(*handlers)
try:
install_opener(o)
try:
r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CookieJarTests(TestCase):
def test_mozilla_cookiejar(self):
filename = tempfile.mktemp()
try:
def get_cookiejar():
cj = mechanize.MozillaCookieJar(filename=filename)
try:
cj.revert()
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
return cj
def commit(cj):
cj.save()
self._test_cookiejar(get_cookiejar, commit)
finally:
try:
os.remove(filename)
except OSError, exc:
if exc.errno != errno.ENOENT:
raise
def test_firefox3_cookiejar(self):
+ try:
+ mechanize.Firefox3CookieJar
+ except AttributeError:
+ # firefox 3 cookiejar is only supported in Python 2.5 and later
+ self.assert_(sys.version_info[:2] < (2, 5))
+ return
+
filename = tempfile.mktemp()
try:
def get_cookiejar():
- cj = mechanize.Firefox3CookieJar(filename=filename)
+ hide_experimental_warnings()
+ try:
+ cj = mechanize.Firefox3CookieJar(filename=filename)
+ finally:
+ reset_experimental_warnings()
cj.connect()
return cj
def commit(cj):
pass
self._test_cookiejar(get_cookiejar, commit)
finally:
os.remove(filename)
def _test_cookiejar(self, get_cookiejar, commit):
cookiejar = get_cookiejar()
br = mechanize.Browser()
br.set_cookiejar(cookiejar)
br.set_handle_refresh(False)
url = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
# no cookie was set on the first request
html = br.open(url).read()
self.assertEquals(html.find("Your browser supports cookies!"), -1)
self.assertEquals(len(cookiejar), 1)
# ... but now we have the cookie
html = br.open(url).read()
self.assert_("Your browser supports cookies!" in html)
commit(cookiejar)
# should still have the cookie when we load afresh
cookiejar = get_cookiejar()
br.set_cookiejar(cookiejar)
html = br.open(url).read()
self.assert_("Your browser supports cookies!" in html)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
If this option doesn't work on Windows/Mac, somebody please
tell me about it, or I'll never find out...
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index b14cc7a..3e17c09 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,124 +1,128 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
+import sys
+
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
-from _firefox3cookiejar import Firefox3CookieJar
+# 2.4 raises SyntaxError due to generator / try/finally use
+if sys.version_info[:2] > (2,4):
+ from _firefox3cookiejar import Firefox3CookieJar
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
diff --git a/mechanize/_firefox3cookiejar.py b/mechanize/_firefox3cookiejar.py
index 1ac8d30..17ffba8 100644
--- a/mechanize/_firefox3cookiejar.py
+++ b/mechanize/_firefox3cookiejar.py
@@ -1,252 +1,253 @@
"""Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
-from _util import isstringlike
+from _util import isstringlike, experimental
debug = logging.getLogger("mechanize.cookies").debug
try:
import sqlite3
except ImportError:
pass
else:
class Firefox3CookieJar(CookieJar):
"""Firefox 3 cookie jar.
The cookies are stored in Firefox 3's "cookies.sqlite" format.
Constructor arguments:
filename: filename of cookies.sqlite (typically found at the top level
of a firefox profile directory)
autoconnect: as a convenience, connect to the SQLite cookies database at
Firefox3CookieJar construction time (default True)
policy: an object satisfying the mechanize.CookiePolicy interface
Note that this is NOT a FileCookieJar, and there are no .load(),
.save() or .restore() methods. The database is in sync with the
cookiejar object's state after each public method call.
Following Firefox's own behaviour, session cookies are never saved to
the database.
The file is created, and an sqlite database written to it, if it does
not already exist. The moz_cookies database table is created if it does
not already exist.
"""
# XXX
# handle DatabaseError exceptions
# add a FileCookieJar (explicit .save() / .revert() / .load() methods)
def __init__(self, filename, autoconnect=True, policy=None):
+ experimental("Firefox3CookieJar is experimental code")
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self._conn = None
if autoconnect:
self.connect()
def connect(self):
self._conn = sqlite3.connect(self.filename)
self._conn.isolation_level = "DEFERRED"
self._create_table_if_necessary()
def close(self):
self._conn.close()
def _transaction(self, func):
try:
cur = self._conn.cursor()
try:
result = func(cur)
finally:
cur.close()
except:
self._conn.rollback()
raise
else:
self._conn.commit()
return result
def _execute(self, query, params=()):
return self._transaction(lambda cur: cur.execute(query, params))
def _query(self, query, params=()):
# XXX should we bother with a transaction?
cur = self._conn.cursor()
try:
cur.execute(query, params)
for row in cur.fetchall():
yield row
finally:
cur.close()
def _create_table_if_necessary(self):
self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT, expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
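# The column order here must stay in sync with the row tuples used by
# _cookie_from_row() and _row_from_cookie() below:
# (id, name, value, host, path, expiry, lastAccessed, isSecure, isHttpOnly)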
def _cookie_from_row(self, row):
(pk, name, value, domain, path, expires,
last_accessed, secure, http_only) = row
version = 0
domain = domain.encode("ascii", "ignore")
path = path.encode("ascii", "ignore")
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
secure = bool(secure)
# last_accessed isn't a cookie attribute, so isn't added to rest
rest = {}
if http_only:
rest["HttpOnly"] = None
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
domain_specified = initial_dot
discard = False
if expires == "":
expires = None
discard = True
return Cookie(version, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
rest)
def clear(self, domain=None, path=None, name=None):
CookieJar.clear(self, domain, path, name)
where_parts = []
sql_params = []
if domain is not None:
where_parts.append("host = ?")
sql_params.append(domain)
if path is not None:
where_parts.append("path = ?")
sql_params.append(path)
if name is not None:
where_parts.append("name = ?")
sql_params.append(name)
where = " AND ".join(where_parts)
if where:
where = " WHERE " + where
def clear(cur):
cur.execute("DELETE FROM moz_cookies%s" % where,
tuple(sql_params))
self._transaction(clear)
def _row_from_cookie(self, cookie, cur):
expires = cookie.expires
if cookie.discard:
expires = ""
domain = unicode(cookie.domain)
path = unicode(cookie.path)
name = unicode(cookie.name)
value = unicode(cookie.value)
secure = bool(int(cookie.secure))
if value is None:
value = name
name = ""
last_accessed = int(time.time())
http_only = cookie.has_nonstandard_attr("HttpOnly")
query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
pk = query.fetchone()[0]
if pk is None:
pk = 1
return (pk, name, value, domain, path, expires,
last_accessed, secure, http_only)
def set_cookie(self, cookie):
if cookie.discard:
CookieJar.set_cookie(self, cookie)
return
def set_cookie(cur):
# XXX
# is this RFC 2965-correct?
# could this do an UPDATE instead?
row = self._row_from_cookie(cookie, cur)
name, unused, domain, path = row[1:5]
cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
(domain, path, name))
cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
self._transaction(set_cookie)
def __iter__(self):
# session (non-persistent) cookies
for cookie in MappingIterator(self._cookies):
yield cookie
# persistent cookies
for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
yield self._cookie_from_row(row)
def _cookies_for_request(self, request):
session_cookies = CookieJar._cookies_for_request(self, request)
def get_cookies(cur):
query = cur.execute("SELECT host from moz_cookies")
domains = [row[0] for row in query.fetchmany()]
cookies = []
for domain in domains:
cookies += self._persistent_cookies_for_domain(domain,
request, cur)
return cookies
persistent_cookies = self._transaction(get_cookies)
return session_cookies + persistent_cookies
def _persistent_cookies_for_domain(self, domain, request, cur):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
(domain,))
cookies = [self._cookie_from_row(row) for row in query.fetchmany()]
last_path = None
r = []
for cookie in cookies:
if (cookie.path != last_path and
not self._policy.path_return_ok(cookie.path, request)):
last_path = cookie.path
continue
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
r.append(cookie)
return r
diff --git a/mechanize/_util.py b/mechanize/_util.py
index ef34af2..dcdefa9 100644
--- a/mechanize/_util.py
+++ b/mechanize/_util.py
@@ -1,280 +1,291 @@
"""Utility functions and date/time routines.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re, time, warnings
+
+class ExperimentalWarning(UserWarning):
+ pass
+
+def experimental(message):
+ warnings.warn(message, ExperimentalWarning, stacklevel=3)
+def hide_experimental_warnings():
+ warnings.filterwarnings("ignore", category=ExperimentalWarning)
+def reset_experimental_warnings():
+ warnings.filterwarnings("default", category=ExperimentalWarning)
+
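+# Suggested usage pattern (illustrative, mirroring the tests in this change):
+# silence the warning only around the experimental construction, then restore
+# the default filter so later warnings still show:
+#
+#   hide_experimental_warnings()
+#   try:
+#       jar = mechanize.Firefox3CookieJar("cookies.sqlite")
+#   finally:
+#       reset_experimental_warnings()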
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def hide_deprecations():
- warnings.filterwarnings('ignore', category=DeprecationWarning)
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
def reset_deprecations():
- warnings.filterwarnings('default', category=DeprecationWarning)
+ warnings.filterwarnings("default", category=DeprecationWarning)
def isstringlike(x):
try: x+""
except: return False
else: return True
## def caller():
## try:
## raise SyntaxError
## except:
## import sys
## return sys.exc_traceback.tb_frame.f_back.f_back.f_code.co_name
from calendar import timegm
# Date/time conversion routines for formats used by the HTTP protocol.
EPOCH = 1970
def my_timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
months_lower = []
for month in months: months_lower.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
days[wday], mday, months[mon-1], year, hour, min, sec)
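# Example (illustrative): time2netscape(760832612) returns
# "Wed 09-Feb-1994 22:23:32 GMT" -- note the format string above emits no
# comma after the weekday.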
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
timezone_re = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
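# Examples (illustrative): offset_from_tz_string("UTC") == 0,
# offset_from_tz_string("-0800") == -28800, and a string this function does
# not recognise (e.g. "EST") returns None.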
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = months_lower.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = my_timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
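# Two-digit year example (illustrative, relative to a current year of 2008):
# yr=94 becomes 1994 (2094 would be more than 50 years away), while yr=3
# becomes 2003.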
strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
wkday_re = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
loose_http_re = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of the string is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = strict_re.search(text)
if m:
g = m.groups()
mon = months_lower.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return my_timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = wkday_re.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = loose_http_re.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
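# Example (illustrative): the strictly conforming form takes the fast path,
# e.g. http2time("Wed, 09 Feb 1994 22:23:32 GMT") == 760832612; the other
# formats listed in the docstring go through the loose regexp parse.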
iso_re = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = iso_re.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
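# Example (illustrative): compact and expanded ISO forms of the same instant
# agree, e.g.
#   iso2time("19940203T141529Z") == iso2time("1994-02-03 14:15:29") == 760284929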
diff --git a/test/test_cookies.py b/test/test_cookies.py
index c51894d..1a62831 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,1500 +1,1520 @@
"""Tests for _ClientCookie."""
import urllib2, re, os, StringIO, mimetools, time, tempfile, errno
from time import localtime
from unittest import TestCase
+from mechanize._util import hide_experimental_warnings, \
+ reset_experimental_warnings
+
+
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
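# Illustrative round trip (hypothetical URL and cookie): interact_netscape(
# jar, "http://example.com/", "foo=bar") attaches the jar's cookies to a
# request for that URL, extracts foo=bar from the fake response, and returns
# the Cookie: header that was sent (an empty string on the first call).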
-class TempfileTestMixin():
+class TempfileTestMixin:
def setUp(self):
self._tempfiles = []
def tearDown(self):
for fn in self._tempfiles:
try:
os.remove(fn)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def mktemp(self):
fn = tempfile.mktemp()
self._tempfiles.append(fn)
return fn
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. WE regard it as a missing VALUE.
filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this one is valid Request object, so maybe should
# remove test for no host in url in request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
assert (req.has_header("Cookie") and
req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_secure(self):
from mechanize import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
assert not c._cookies["www.acme.com"]["/"]["foo1"].secure, \
"non-secure cookie registered secure"
assert c._cookies["www.acme.com"]["/"]["foo2"].secure, \
"secure cookie registered non-secure"
def test_quote_cookie_value(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
assert h == r'$Version=1; foo=\\b\"a\"r'
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from mechanize import CookieJar, Request, DefaultCookiePolicy
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
assert len(c) == 1
c.add_cookie_header(req)
assert req.has_header("Cookie")
def test_domain_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find( "Domain") == -1, \
"absent domain returned with domain present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
assert h.find('$Domain=".bar.com"') != -1, \
"domain not returned"
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
assert h.find('$Domain="bar.com"') != -1, \
"domain not returned"
def test_path_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Path") == -1, \
"absent path returned with path present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
assert h.find('$Path="/"') != -1, "path not returned"
def test_port_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Port") == -1, \
"absent port returned with port present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
assert re.search("\$Port([^=]|$)", h), \
"port with no value not returned with no value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
assert h.find('$Port="80"') != -1, \
"port with single value not returned with single value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
assert h.find('$Port="80,8080"') != -1, \
"port with multiple values not returned with multiple values"
def test_no_return_comment(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
assert h.find("Comment") == -1, \
"Comment or CommentURL cookie-attributes returned to server"
# just pondering security here -- this isn't really a test (yet)
## def test_hack(self):
## from mechanize import CookieJar
## c = CookieJar()
## interact_netscape(c, "http://victim.mall.com/",
## 'prefs="foo"')
## interact_netscape(c, "http://cracker.mall.com/",
## 'prefs="bar"; Domain=.mall.com')
## interact_netscape(c, "http://cracker.mall.com/",
## '$Version="1"; Domain=.mall.com')
## h = interact_netscape(c, "http://victim.mall.com/")
## print h
def test_Cookie_iterator(self):
from mechanize import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
# sequential iteration (repeated, to check the jar can be iterated more than once)
for i in range(4):
i = 0
for c in cs:
assert isinstance(c, Cookie)
assert c.version == versions[i]
assert c.name == names[i]
assert c.domain == domains[i]
assert c.path == paths[i]
i = i + 1
self.assertRaises(IndexError, lambda cs=cs : cs[5])
# can't skip
cs[0]
cs[1]
self.assertRaises(IndexError, lambda cs=cs : cs[3])
# can't go backwards
cs[0]
cs[1]
cs[2]
self.assertRaises(IndexError, lambda cs=cs : cs[1])
def test_parse_ns_headers(self):
from mechanize._headersutil import parse_ns_headers
# missing domain value (invalid cookie)
assert parse_ns_headers(["foo=bar; path=/; domain"]) == [
[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
# invalid expires value
assert parse_ns_headers(
["foo=bar; expires=Foo Bar 12 33:22:11 2000"]) == \
[[("foo", "bar"), ("expires", None), ("version", "0")]]
# missing cookie name (valid cookie)
assert parse_ns_headers(["foo"]) == [[("foo", None), ("version", "0")]]
# shouldn't add version if header is empty
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
class CookieJarPersistenceTests(TempfileTestMixin, TestCase):
def _interact(self, cj):
year_plus_one = localtime(time.time())[0] + 1
interact_2965(cj, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(cj, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(cj, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(cj, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(cj, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(cj, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def test_firefox3_cookiejar_restore(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy
filename = self.mktemp()
def create_cookiejar():
- cj = Firefox3CookieJar(filename,
- policy=DefaultCookiePolicy(rfc2965=True))
+ hide_experimental_warnings()
+ try:
+ cj = Firefox3CookieJar(
+ filename, policy=DefaultCookiePolicy(rfc2965=True))
+ finally:
+ reset_experimental_warnings()
cj.connect()
return cj
cj = create_cookiejar()
self._interact(cj)
self.assertEquals(len(cj), 6)
cj.close()
cj = create_cookiejar()
self.assert_("name='foo1', value='bar'" in repr(cj))
self.assertEquals(len(cj), 4)
def test_firefox3_cookiejar_iteration(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Cookie
filename = self.mktemp()
- cj = Firefox3CookieJar(filename,
- policy=DefaultCookiePolicy(rfc2965=True))
+ hide_experimental_warnings()
+ try:
+ cj = Firefox3CookieJar(
+ filename, policy=DefaultCookiePolicy(rfc2965=True))
+ finally:
+ reset_experimental_warnings()
cj.connect()
self._interact(cj)
summary = "\n".join([str(cookie) for cookie in cj])
self.assertEquals(summary,
"""\
<Cookie foo2=bar for www.acme.com:80/>
<Cookie foo3=bar for www.acme.com/>
<Cookie foo1=bar for www.acme.com/>
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
def test_firefox3_cookiejar_clear(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Cookie
filename = self.mktemp()
- cj = Firefox3CookieJar(filename,
- policy=DefaultCookiePolicy(rfc2965=True))
+ hide_experimental_warnings()
+ try:
+ cj = Firefox3CookieJar(
+ filename, policy=DefaultCookiePolicy(rfc2965=True))
+ finally:
+ reset_experimental_warnings()
cj.connect()
self._interact(cj)
cj.clear("www.acme.com", "/", "foo2")
def summary(): return "\n".join([str(cookie) for cookie in cj])
self.assertEquals(summary(),
"""\
<Cookie foo3=bar for www.acme.com/>
<Cookie foo1=bar for www.acme.com/>
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
cj.clear("www.acme.com")
self.assertEquals(summary(),
"""\
<Cookie fooa=bar for www.foo.com/>
<Cookie foob=bar for .foo.com/>
<Cookie fooc=bar for .www.foo.com/>""")
# if name is given, so must path and domain
self.assertRaises(ValueError, cj.clear, domain=".foo.com",
name="foob")
# nonexistent domain
self.assertRaises(KeyError, cj.clear, domain=".spam.com")
def test_firefox3_cookiejar_add_cookie_header(self):
try:
from mechanize import Firefox3CookieJar
except ImportError:
pass
else:
from mechanize import DefaultCookiePolicy, Request
filename = self.mktemp()
- cj = Firefox3CookieJar(filename)
+ hide_experimental_warnings()
+ try:
+ cj = Firefox3CookieJar(filename)
+ finally:
+ reset_experimental_warnings()
cj.connect()
# Session cookies (true .discard) and persistent cookies (false
# .discard) are stored differently. Check they both get sent.
year_plus_one = localtime(time.time())[0] + 1
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(cj, "http://www.foo.com/", "fooa=bar")
interact_netscape(cj, "http://www.foo.com/",
"foob=bar; %s" % expires)
ca, cb = cj
self.assert_(ca.discard)
self.assertFalse(cb.discard)
request = Request("http://www.foo.com/")
cj.add_cookie_header(request)
self.assertEquals(request.get_header("Cookie"),
"fooa=bar; foob=bar")
def test_mozilla_cookiejar(self):
# Save / load Mozilla/Netscape cookie file format.
from mechanize import MozillaCookieJar, DefaultCookiePolicy
filename = tempfile.mktemp()
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
self._interact(c)
def save_and_restore(cj, ignore_discard, filename=filename):
from mechanize import MozillaCookieJar, DefaultCookiePolicy
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
assert len(new_c) == 6 # none discarded
assert repr(new_c).find("name='foo1', value='bar'") != -1
new_c = save_and_restore(c, False)
assert len(new_c) == 4 # 2 of them discarded on save
assert repr(new_c).find("name='foo1', value='bar'") != -1
def test_mozilla_cookiejar_embedded_tab(self):
from mechanize import MozillaCookieJar
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
"a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
"a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
cj.revert(ignore_discard=True)
cookies = cj._cookies["a.com"]["/"]
self.assertEquals(cookies["name"].value, "val\tstillthevalue")
self.assertEquals(cookies["name2"].value, "value")
finally:
try:
os.remove(filename)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
def test_mozilla_cookiejar_initial_dot_violation(self):
from mechanize import MozillaCookieJar, LoadError
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
self.assertRaises(LoadError, cj.revert, ignore_discard=True)
finally:
try:
os.remove(filename)
except IOError, exc:
if exc.errno != errno.ENOENT:
raise
class LWPCookieTests(TestCase, TempfileTestMixin):
# Tests taken from libwww-perl, with a few modifications.
def test_netscape_example_1(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = localtime(time.time())[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "CUSTOMER=WILE_E_COYOTE" and
req.get_header("Cookie2") == '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
not h.find("SHIPPING=FEDEX") != -1)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
from mechanize import CookieJar, Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
assert re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie"))
def test_ietf_example_1(self):
from mechanize import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
assert not cookie
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
assert re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie)
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
assert (re.search(r'^\$Version="?1"?;', cookie) and
re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
r'\s*\$Path="\/acme"', cookie) and
re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
assert (re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
cookie.find("WILE_E_COYOTE") != -1)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from mechanize import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
assert re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie)
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
assert (cookie.find("Rocket_Launcher_0001") != -1 and
not cookie.find("Riding_Rocket_0023") != -1)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
from mechanize import LWPCookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
assert not c
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
assert len(c) == 1
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
assert len(c) == 1
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
assert len(c) == 2
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
assert len(c) == 2
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 2
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 3
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
assert len(c) == 3
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
assert len(c) == 4
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
assert len(c) == 5
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
assert len(c) == 6
# save and restore
filename = tempfile.mktemp()
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
assert old == repr(c)
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345",
'bar=baz; path="/foo/"; version=1')
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
|
Almad/Mechanize
|
73b324e33b639dc3fd1d618f3b12d1ea2a3e7f49
|
Close sockets. This only affects Python 2.5 (and later) - earlier versions of Python were unaffected.
|
diff --git a/mechanize/_http.py b/mechanize/_http.py
index 96fd405..aa45242 100644
--- a/mechanize/_http.py
+++ b/mechanize/_http.py
@@ -1,733 +1,743 @@
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import time, htmlentitydefs, logging, socket, \
urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _request import Request
from _response import closeable_response, response_seek_wrapper
from _html import unescape, unescape_charref
from _headersutil import is_html
from _clientcookie import CookieJar
import _rfc3986
debug = logging.getLogger("mechanize").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
+try:
+ socket._fileobject("fake socket", close=True)
+except TypeError:
+ # python <= 2.4
+ create_readline_wrapper = socket._fileobject
+else:
+ def create_readline_wrapper(fh):
+ return socket._fileobject(fh, close=True)
+
+
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
# Implementation notes:
# To avoid the server sending us into an infinite loop, the request
# object needs to track what URLs we have already seen. Do this by
# adding a handler-specific attribute to the Request object. The value
# of the dict is used to count the number of times the same URL has
# been visited. This is needed because visiting the same URL twice
# does not necessarily imply a loop, thanks to state introduced by
# cookies.
# Always unhandled redirection codes:
# 300 Multiple Choices: should not handle this here.
# 304 Not Modified: no need to handle here: only of interest to caches
# that do conditional GETs
# 305 Use Proxy: probably not worth dealing with here
# 306 Unused: what was this for in previous versions of the protocol?
def redirect_request(self, newurl, req, fp, code, msg, headers):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a redirection
response is received. If a redirection should take place, return a
new Request to allow http_error_30x to perform the redirect;
otherwise, return None to indicate that an HTTPError should be
raised.
"""
if code in (301, 302, 303, "refresh") or \
(code == 307 and not req.has_data()):
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# XXX really refresh redirections should be visiting; tricky to
# fix, so this will wait until post-stable release
new = Request(newurl,
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True,
visit=False,
)
new._origin_req = getattr(req, "_origin_req", req)
return new
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if headers.has_key('location'):
newurl = headers.getheaders('location')[0]
elif headers.has_key('uri'):
newurl = headers.getheaders('uri')[0]
else:
return
newurl = _rfc3986.clean_url(newurl, "latin-1")
newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(newurl, req, fp, code, msg, headers)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new)
http_error_301 = http_error_303 = http_error_307 = http_error_302
http_error_refresh = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
escaped_attrs[key] = self.unescape_attr(val)
return escaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
try:
import HTMLParser
except ImportError:
pass
else:
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
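# Hedged sketch of parse_head() in isolation: feed a small HTML head through
# HeadParser and collect the http-equiv pairs. The markup is illustrative.
def _demo_parse_head():
    from cStringIO import StringIO
    html = ('<html><head>'
            '<meta http-equiv="refresh" content="0; url=/next">'
            '</head><body></body></html>')
    # expected: [('refresh', '0; url=/next')]
    return parse_head(StringIO(html), HeadParser())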
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response,
self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
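# Hedged usage sketch: installing HTTPEquivProcessor on an opener so that
# META HTTP-EQUIV pairs appear alongside real headers via response.info().
# The URL is an illustrative assumption.
def _demo_http_equiv():
    import mechanize
    opener = mechanize.build_opener(mechanize.HTTPEquivProcessor())
    response = opener.open("http://www.example.com/")
    return response.info().getheaders("refresh")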
class HTTPCookieProcessor(BaseHandler):
"""Handle HTTP cookies.
Public attributes:
cookiejar: CookieJar instance
"""
def __init__(self, cookiejar=None):
if cookiejar is None:
cookiejar = CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
try:
import robotparser
except ImportError:
pass
else:
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
def set_opener(self, opener=None):
import _opener
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
robotparser._debug("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
robotparser._debug("disallow all")
elif status >= 400:
self.allow_all = True
robotparser._debug("allow all")
elif status == 200 and lines:
robotparser._debug("parse lines")
self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
def __init__(self, request, *args):
apply(urllib2.HTTPError.__init__, (self,)+args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
except:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
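# Hedged distillation of the robots.txt fetch policy in read() above:
# 401/403 means disallow everything, any other >= 400 means allow
# everything, and a 200 with content gets parsed. Names are hypothetical.
def _robots_policy_for_status(status, lines):
    if status in (401, 403):
        return "disallow_all"
    elif status >= 400:
        return "allow_all"
    elif status == 200 and lines:
        return "parse"
    return "default"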
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
This only makes sense if you use each RefererProcessor for a single
chain of requests (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
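# Hedged sketch, per the caveat in the docstring above: use one
# HTTPRefererProcessor per chain of requests. URLs are illustrative, and we
# assume mechanize exports HTTPRefererProcessor at package level.
def _demo_referer_chain(urls):
    import mechanize
    opener = mechanize.build_opener(mechanize.HTTPRefererProcessor())
    for url in urls:
        opener.open(url)  # each response becomes the next request's Referer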
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
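# Hedged sketch: follow arbitrarily long Refresh pauses but skip the actual
# time.sleep(), mirroring the max_time / honor_time knobs described above.
def _refresh_following_opener():
    import mechanize
    return mechanize.build_opener(
        mechanize.HTTPRefreshProcessor(max_time=None, honor_time=False))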
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses.
The purpose of this handler is to allow other response processors a
look-in by removing the call to parent.error() from
AbstractHTTPHandler.
For non-200 error codes, this just passes the job on to the
Handler.<proto>_error_<code> methods, via the OpenerDirector.error
method. Eventually, urllib2.HTTPDefaultErrorHandler will raise an
HTTPError if no other handler handles the error.
"""
handler_order = 1000 # after all other processors
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code != 200:
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
# why these error methods took the code, msg, headers args in the first
# place rather than a response object, I don't know, but to avoid
# multiple wrapping, we're discarding them
if isinstance(fp, urllib2.HTTPError):
response = fp
else:
response = urllib2.HTTPError(
req.get_full_url(), code, msg, hdrs, fp)
assert code == response.code
assert msg == response.msg
assert hdrs == response.hdrs
raise response
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
scheme, sel = urllib.splittype(request.get_selector())
sel_host, sel_path = urllib.splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host or host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
h = http_class(host) # will parse host:port
h.set_debuglevel(self._debuglevel)
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
[(name.title(), val) for name, val in headers.items()])
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
r = h.getresponse()
except socket.error, err: # XXX what error?
raise URLError(err)
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
- fp = socket._fileobject(r)
+ fp = create_readline_wrapper(r)
resp = closeable_response(fp, r.msg, req.get_full_url(),
r.status, r.reason)
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSConnectionFactory:
def __init__(self, key_file, cert_file):
self._key_file = key_file
self._cert_file = cert_file
def __call__(self, hostport):
return httplib.HTTPSConnection(
hostport,
key_file=self._key_file, cert_file=self._cert_file)
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, client_cert_manager=None):
AbstractHTTPHandler.__init__(self)
self.client_cert_manager = client_cert_manager
def https_open(self, req):
if self.client_cert_manager is not None:
key_file, cert_file = self.client_cert_manager.find_key_cert(
req.get_full_url())
conn_factory = HTTPSConnectionFactory(key_file, cert_file)
else:
conn_factory = httplib.HTTPSConnection
return self.do_open(conn_factory, req)
https_request = AbstractHTTPHandler.do_request_
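# Hedged recap of the compatibility shim introduced at the top of this file:
# probe whether socket._fileobject() accepts close=True (Python 2.5 and
# later) and fall back to the older signature, so that closing the wrapper
# also closes the wrapped HTTP response where the platform supports it.
def _make_readline_wrapper(fh):
    import socket
    try:
        return socket._fileobject(fh, close=True)  # Python >= 2.5
    except TypeError:
        return socket._fileobject(fh)              # Python <= 2.4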
|
Almad/Mechanize
|
b505541f4e5ecb8f9be302f969867a605bb335ad
|
Add note re work to do on Firefox 3 cookies support
|
diff --git a/mechanize/_firefox3cookiejar.py b/mechanize/_firefox3cookiejar.py
index 5a63894..1ac8d30 100644
--- a/mechanize/_firefox3cookiejar.py
+++ b/mechanize/_firefox3cookiejar.py
@@ -1,250 +1,252 @@
"""Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike
debug = logging.getLogger("mechanize.cookies").debug
try:
import sqlite3
except ImportError:
pass
else:
class Firefox3CookieJar(CookieJar):
"""Firefox 3 cookie jar.
The cookies are stored in Firefox 3's "cookies.sqlite" format.
Constructor arguments:
filename: filename of cookies.sqlite (typically found at the top level
of a firefox profile directory)
autoconnect: as a convenience, connect to the SQLite cookies database at
Firefox3CookieJar construction time (default True)
policy: an object satisfying the mechanize.CookiePolicy interface
Note that this is NOT a FileCookieJar, and there are no .load(),
.save() or .restore() methods. The database is in sync with the
cookiejar object's state after each public method call.
Following Firefox's own behaviour, session cookies are never saved to
the database.
The file is created, and an sqlite database written to it, if it does
not already exist. The moz_cookies database table is created if it does
not already exist.
"""
- # XXX handle DatabaseError exceptions
+ # XXX
+ # handle DatabaseError exceptions
+ # add a FileCookieJar (explicit .save() / .revert() / .load() methods)
def __init__(self, filename, autoconnect=True, policy=None):
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self._conn = None
if autoconnect:
self.connect()
def connect(self):
self._conn = sqlite3.connect(self.filename)
self._conn.isolation_level = "DEFERRED"
self._create_table_if_necessary()
def close(self):
self._conn.close()
def _transaction(self, func):
try:
cur = self._conn.cursor()
try:
result = func(cur)
finally:
cur.close()
except:
self._conn.rollback()
raise
else:
self._conn.commit()
return result
def _execute(self, query, params=()):
return self._transaction(lambda cur: cur.execute(query, params))
def _query(self, query, params=()):
# XXX should we bother with a transaction?
cur = self._conn.cursor()
try:
cur.execute(query, params)
for row in cur.fetchall():
yield row
finally:
cur.close()
def _create_table_if_necessary(self):
self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT, expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
def _cookie_from_row(self, row):
(pk, name, value, domain, path, expires,
last_accessed, secure, http_only) = row
version = 0
domain = domain.encode("ascii", "ignore")
path = path.encode("ascii", "ignore")
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
secure = bool(secure)
# last_accessed isn't a cookie attribute, so isn't added to rest
rest = {}
if http_only:
rest["HttpOnly"] = None
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
domain_specified = initial_dot
discard = False
if expires == "":
expires = None
discard = True
return Cookie(version, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
rest)
def clear(self, domain=None, path=None, name=None):
CookieJar.clear(self, domain, path, name)
where_parts = []
sql_params = []
if domain is not None:
where_parts.append("host = ?")
sql_params.append(domain)
if path is not None:
where_parts.append("path = ?")
sql_params.append(path)
if name is not None:
where_parts.append("name = ?")
sql_params.append(name)
where = " AND ".join(where_parts)
if where:
where = " WHERE " + where
def clear(cur):
cur.execute("DELETE FROM moz_cookies%s" % where,
tuple(sql_params))
self._transaction(clear)
def _row_from_cookie(self, cookie, cur):
expires = cookie.expires
if cookie.discard:
expires = ""
domain = unicode(cookie.domain)
path = unicode(cookie.path)
name = unicode(cookie.name)
value = unicode(cookie.value)
secure = bool(int(cookie.secure))
if value is None:
value = name
name = ""
last_accessed = int(time.time())
http_only = cookie.has_nonstandard_attr("HttpOnly")
query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
pk = query.fetchone()[0]
if pk is None:
pk = 1
return (pk, name, value, domain, path, expires,
last_accessed, secure, http_only)
def set_cookie(self, cookie):
if cookie.discard:
CookieJar.set_cookie(self, cookie)
return
def set_cookie(cur):
# XXX
# is this RFC 2965-correct?
# could this do an UPDATE instead?
row = self._row_from_cookie(cookie, cur)
name, unused, domain, path = row[1:5]
cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
(domain, path, name))
cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
self._transaction(set_cookie)
def __iter__(self):
# session (non-persistent) cookies
for cookie in MappingIterator(self._cookies):
yield cookie
# persistent cookies
for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
yield self._cookie_from_row(row)
def _cookies_for_request(self, request):
session_cookies = CookieJar._cookies_for_request(self, request)
def get_cookies(cur):
query = cur.execute("SELECT host from moz_cookies")
domains = [row[0] for row in query.fetchmany()]
cookies = []
for domain in domains:
cookies += self._persistent_cookies_for_domain(domain,
request, cur)
return cookies
persistent_cookies = self._transaction(get_cookies)
return session_cookies + persistent_cookies
def _persistent_cookies_for_domain(self, domain, request, cur):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
(domain,))
cookies = [self._cookie_from_row(row) for row in query.fetchmany()]
last_path = None
r = []
for cookie in cookies:
if (cookie.path != last_path and
not self._policy.path_return_ok(cookie.path, request)):
last_path = cookie.path
continue
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
r.append(cookie)
return r
|
Almad/Mechanize
|
4aa12da154a0a1bea26399e9699451fac177434e
|
Note a couple of issues with Firefox 3 cookie jar implementation
|
diff --git a/mechanize/_firefox3cookiejar.py b/mechanize/_firefox3cookiejar.py
index d7388e2..5a63894 100644
--- a/mechanize/_firefox3cookiejar.py
+++ b/mechanize/_firefox3cookiejar.py
@@ -1,245 +1,250 @@
"""Firefox 3 "cookies.sqlite" cookie persistence.
Copyright 2008 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import logging
import time
from _clientcookie import CookieJar, Cookie, MappingIterator
from _util import isstringlike
debug = logging.getLogger("mechanize.cookies").debug
try:
import sqlite3
except ImportError:
pass
else:
class Firefox3CookieJar(CookieJar):
"""Firefox 3 cookie jar.
The cookies are stored in Firefox 3's "cookies.sqlite" format.
Constructor arguments:
filename: filename of cookies.sqlite (typically found at the top level
of a firefox profile directory)
autoconnect: as a convenience, connect to the SQLite cookies database at
Firefox3CookieJar construction time (default True)
policy: an object satisfying the mechanize.CookiePolicy interface
Note that this is NOT a FileCookieJar, and there are no .load(),
.save() or .restore() methods. The database is in sync with the
cookiejar object's state after each public method call.
Following Firefox's own behaviour, session cookies are never saved to
the database.
The file is created, and an sqlite database written to it, if it does
not already exist. The moz_cookies database table is created if it does
not already exist.
"""
+ # XXX handle DatabaseError exceptions
+
def __init__(self, filename, autoconnect=True, policy=None):
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self._conn = None
if autoconnect:
self.connect()
def connect(self):
self._conn = sqlite3.connect(self.filename)
self._conn.isolation_level = "DEFERRED"
self._create_table_if_necessary()
def close(self):
self._conn.close()
def _transaction(self, func):
try:
cur = self._conn.cursor()
try:
result = func(cur)
finally:
cur.close()
except:
self._conn.rollback()
raise
else:
self._conn.commit()
return result
def _execute(self, query, params=()):
return self._transaction(lambda cur: cur.execute(query, params))
def _query(self, query, params=()):
# XXX should we bother with a transaction?
cur = self._conn.cursor()
try:
cur.execute(query, params)
for row in cur.fetchall():
yield row
finally:
cur.close()
def _create_table_if_necessary(self):
self._execute("""\
CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
value TEXT, host TEXT, path TEXT, expiry INTEGER,
lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
def _cookie_from_row(self, row):
(pk, name, value, domain, path, expires,
last_accessed, secure, http_only) = row
version = 0
domain = domain.encode("ascii", "ignore")
path = path.encode("ascii", "ignore")
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
secure = bool(secure)
# last_accessed isn't a cookie attribute, so isn't added to rest
rest = {}
if http_only:
rest["HttpOnly"] = None
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
domain_specified = initial_dot
discard = False
if expires == "":
expires = None
discard = True
return Cookie(version, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
rest)
def clear(self, domain=None, path=None, name=None):
CookieJar.clear(self, domain, path, name)
where_parts = []
sql_params = []
if domain is not None:
where_parts.append("host = ?")
sql_params.append(domain)
if path is not None:
where_parts.append("path = ?")
sql_params.append(path)
if name is not None:
where_parts.append("name = ?")
sql_params.append(name)
where = " AND ".join(where_parts)
if where:
where = " WHERE " + where
def clear(cur):
cur.execute("DELETE FROM moz_cookies%s" % where,
tuple(sql_params))
self._transaction(clear)
def _row_from_cookie(self, cookie, cur):
expires = cookie.expires
if cookie.discard:
expires = ""
domain = unicode(cookie.domain)
path = unicode(cookie.path)
name = unicode(cookie.name)
value = unicode(cookie.value)
secure = bool(int(cookie.secure))
if value is None:
value = name
name = ""
last_accessed = int(time.time())
http_only = cookie.has_nonstandard_attr("HttpOnly")
query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
pk = query.fetchone()[0]
if pk is None:
pk = 1
return (pk, name, value, domain, path, expires,
last_accessed, secure, http_only)
def set_cookie(self, cookie):
if cookie.discard:
CookieJar.set_cookie(self, cookie)
return
def set_cookie(cur):
+ # XXX
+ # is this RFC 2965-correct?
+ # could this do an UPDATE instead?
row = self._row_from_cookie(cookie, cur)
name, unused, domain, path = row[1:5]
cur.execute("""\
DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
(domain, path, name))
cur.execute("""\
INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
""", row)
self._transaction(set_cookie)
def __iter__(self):
# session (non-persistent) cookies
for cookie in MappingIterator(self._cookies):
yield cookie
# persistent cookies
for row in self._query("""\
SELECT * FROM moz_cookies ORDER BY name, path, host"""):
yield self._cookie_from_row(row)
def _cookies_for_request(self, request):
session_cookies = CookieJar._cookies_for_request(self, request)
def get_cookies(cur):
query = cur.execute("SELECT host from moz_cookies")
domains = [row[0] for row in query.fetchmany()]
cookies = []
for domain in domains:
cookies += self._persistent_cookies_for_domain(domain,
request, cur)
return cookies
persistent_cookies = self._transaction(get_cookies)
return session_cookies + persistent_cookies
def _persistent_cookies_for_domain(self, domain, request, cur):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
query = cur.execute("""\
SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
(domain,))
cookies = [self._cookie_from_row(row) for row in query.fetchmany()]
last_path = None
r = []
for cookie in cookies:
if (cookie.path != last_path and
not self._policy.path_return_ok(cookie.path, request)):
last_path = cookie.path
continue
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
r.append(cookie)
return r
|
Almad/Mechanize
|
576f9a20bf58aabf5078915eafaa5af0d7977282
|
* Add support for Firefox 3 cookie jars ("cookies.sqlite") * Fix a couple of test typos re tempfile cleanup * Add a functional test for seek-wrapped HTTPError repr
|
diff --git a/functional_tests.py b/functional_tests.py
index 483dc88..4bf028c 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,433 +1,500 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
-import os, sys, urllib
+import os, sys, urllib, tempfile, errno
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_refresh(self):
def refresh_request(seconds):
uri = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
val = urllib.quote_plus('%d; url="%s"' % (seconds, self.uri))
return uri + ("?refresh=%s" % val)
r = self.browser.open(refresh_request(5))
self.assertEqual(r.geturl(), self.uri)
# Refresh with pause > 30 seconds is ignored by default (these long
# refreshes tend to be there only because the website owner wants you
# to see the latest news, or whatever -- they're not essential to the
# operation of the site, and not really useful or appropriate when
# scraping).
refresh_uri = refresh_request(60)
r = self.browser.open(refresh_uri)
self.assertEqual(r.geturl(), refresh_uri)
# allow long refreshes (note we don't actually wait 60 seconds by default)
self.browser.set_handle_refresh(True, max_time=None)
r = self.browser.open(refresh_request(60))
self.assertEqual(r.geturl(), self.uri)
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_local_file(self):
# Since the file: URL scheme is not well standardised, Browser has a
# special method to open files by name, for convenience:
br = mechanize.Browser()
response = br.open_local_file("mechanize/_mechanize.py")
self.assert_("def open_local_file(self, filename):" in
response.get_data())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
+ def test_seek_wrapper_class_name(self):
+ opener = mechanize.UserAgent()
+ opener.set_seekable_responses(True)
+ try:
+ opener.open(urljoin(self.uri, "nonexistent"))
+ except mechanize.HTTPError, exc:
+ self.assert_("HTTPError instance" in repr(exc))
+
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = urljoin(self.uri, "bits/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = build_opener(*handlers)
try:
install_opener(o)
try:
r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
+
+class CookieJarTests(TestCase):
+
+ def test_mozilla_cookiejar(self):
+ filename = tempfile.mktemp()
+ try:
+ def get_cookiejar():
+ cj = mechanize.MozillaCookieJar(filename=filename)
+ try:
+ cj.revert()
+ except IOError, exc:
+ if exc.errno != errno.ENOENT:
+ raise
+ return cj
+ def commit(cj):
+ cj.save()
+ self._test_cookiejar(get_cookiejar, commit)
+ finally:
+ try:
+ os.remove(filename)
+ except OSError, exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ def test_firefox3_cookiejar(self):
+ filename = tempfile.mktemp()
+ try:
+ def get_cookiejar():
+ cj = mechanize.Firefox3CookieJar(filename=filename)
+ cj.connect()
+ return cj
+ def commit(cj):
+ pass
+ self._test_cookiejar(get_cookiejar, commit)
+ finally:
+ os.remove(filename)
+
+ def _test_cookiejar(self, get_cookiejar, commit):
+ cookiejar = get_cookiejar()
+ br = mechanize.Browser()
+ br.set_cookiejar(cookiejar)
+ br.set_handle_refresh(False)
+ url = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
+ # no cookie was sent with the first request (but the response sets one)
+ html = br.open(url).read()
+ self.assertEquals(html.find("Your browser supports cookies!"), -1)
+ self.assertEquals(len(cookiejar), 1)
+ # ... but now we have the cookie
+ html = br.open(url).read()
+ self.assert_("Your browser supports cookies!" in html)
+ commit(cookiejar)
+
+ # should still have the cookie when we load afresh
+ cookiejar = get_cookiejar()
+ br.set_cookiejar(cookiejar)
+ html = br.open(url).read()
+ self.assert_("Your browser supports cookies!" in html)
+
+
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
If this option doesn't work on Windows/Mac, somebody please
tell me about it, or I'll never find out...
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
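The CookieJarTests added above share one round-trip pattern: build a jar, let
the server set a cookie, commit to disk, rebuild the jar from the same file,
and check the cookie is still sent. A minimal standalone sketch of that
pattern (the URL and filename here are placeholders, not part of the suite):

    # sketch only -- assumes a server that sets a persistent cookie,
    # such as the cookietest.cgi used by these tests
    import mechanize

    filename = "cookies.txt"
    cj = mechanize.MozillaCookieJar(filename=filename)
    br = mechanize.Browser()
    br.set_cookiejar(cj)
    br.open("http://localhost/cgi-bin/cookietest.cgi")  # response sets a cookie
    cj.save()  # the Mozilla format needs an explicit commit ...

    cj2 = mechanize.MozillaCookieJar(filename=filename)
    cj2.load()  # ... after which a fresh jar gets the cookie back

Firefox3CookieJar, tested above with an empty commit function, keeps its
sqlite database in sync after every call, so it needs no explicit save step.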
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index 983ca7b..b14cc7a 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,123 +1,124 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
+'Firefox3CookieJar',
'FormNotFoundError',
'FormsFactory',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
+try:
+    from _firefox3cookiejar import Firefox3CookieJar
+except ImportError:
+    # Firefox3CookieJar is only defined when the sqlite3 module
+    # (bundled with Python 2.5+) is available
+    pass
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
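Since _firefox3cookiejar only defines Firefox3CookieJar when the sqlite3
module can be imported, callers can feature-test at import time. A minimal
sketch, assuming nothing beyond what the package exports:

    # sketch: degrade gracefully when sqlite3 is unavailable
    try:
        from mechanize import Firefox3CookieJar
    except ImportError:
        Firefox3CookieJar = None  # fall back to e.g. MozillaCookieJar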
diff --git a/mechanize/_firefox3cookiejar.py b/mechanize/_firefox3cookiejar.py
new file mode 100644
index 0000000..d7388e2
--- /dev/null
+++ b/mechanize/_firefox3cookiejar.py
@@ -0,0 +1,245 @@
+"""Firefox 3 "cookies.sqlite" cookie persistence.
+
+Copyright 2008 John J Lee <[email protected]>
+
+This code is free software; you can redistribute it and/or modify it
+under the terms of the BSD or ZPL 2.1 licenses (see the file
+COPYING.txt included with the distribution).
+
+"""
+
+import logging
+import time
+
+from _clientcookie import CookieJar, Cookie, MappingIterator
+from _util import isstringlike
+debug = logging.getLogger("mechanize.cookies").debug
+
+
+try:
+ import sqlite3
+except ImportError:
+ pass
+else:
+ class Firefox3CookieJar(CookieJar):
+
+ """Firefox 3 cookie jar.
+
+ The cookies are stored in Firefox 3's "cookies.sqlite" format.
+
+ Constructor arguments:
+
+ filename: filename of cookies.sqlite (typically found at the top level
+ of a firefox profile directory)
+ autoconnect: as a convenience, connect to the SQLite cookies database at
+ Firefox3CookieJar construction time (default True)
+ policy: an object satisfying the mechanize.CookiePolicy interface
+
+ Note that this is NOT a FileCookieJar, and there are no .load(),
+ .save() or .restore() methods. The database is in sync with the
+ cookiejar object's state after each public method call.
+
+ Following Firefox's own behaviour, session cookies are never saved to
+ the database.
+
+ The file is created, and an sqlite database written to it, if it does
+ not already exist. The moz_cookies database table is created if it does
+ not already exist.
+ """
+
+ def __init__(self, filename, autoconnect=True, policy=None):
+ CookieJar.__init__(self, policy)
+ if filename is not None and not isstringlike(filename):
+ raise ValueError("filename must be string-like")
+ self.filename = filename
+ self._conn = None
+ if autoconnect:
+ self.connect()
+
+ def connect(self):
+ self._conn = sqlite3.connect(self.filename)
+ self._conn.isolation_level = "DEFERRED"
+ self._create_table_if_necessary()
+
+ def close(self):
+ self._conn.close()
+
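+ # All writes funnel through _transaction: run func(cursor) and commit on
+ # success, roll back on any exception, and always close the cursor, so
+ # the database can't end up out of step with the in-memory jar.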
+ def _transaction(self, func):
+ try:
+ cur = self._conn.cursor()
+ try:
+ result = func(cur)
+ finally:
+ cur.close()
+ except:
+ self._conn.rollback()
+ raise
+ else:
+ self._conn.commit()
+ return result
+
+ def _execute(self, query, params=()):
+ return self._transaction(lambda cur: cur.execute(query, params))
+
+ def _query(self, query, params=()):
+ # XXX should we bother with a transaction?
+ cur = self._conn.cursor()
+ try:
+ cur.execute(query, params)
+ for row in cur.fetchall():
+ yield row
+ finally:
+ cur.close()
+
+ def _create_table_if_necessary(self):
+ self._execute("""\
+CREATE TABLE IF NOT EXISTS moz_cookies (id INTEGER PRIMARY KEY, name TEXT,
+ value TEXT, host TEXT, path TEXT,expiry INTEGER,
+ lastAccessed INTEGER, isSecure INTEGER, isHttpOnly INTEGER)""")
+
+ def _cookie_from_row(self, row):
+ (pk, name, value, domain, path, expires,
+ last_accessed, secure, http_only) = row
+
+ version = 0
+ domain = domain.encode("ascii", "ignore")
+ path = path.encode("ascii", "ignore")
+ name = name.encode("ascii", "ignore")
+ value = value.encode("ascii", "ignore")
+ secure = bool(secure)
+
+ # last_accessed isn't a cookie attribute, so isn't added to rest
+ rest = {}
+ if http_only:
+ rest["HttpOnly"] = None
+
+ if name == "":
+ name = value
+ value = None
+
+ initial_dot = domain.startswith(".")
+ domain_specified = initial_dot
+
+ discard = False
+ if expires == "":
+ expires = None
+ discard = True
+
+ return Cookie(version, name, value,
+ None, False,
+ domain, domain_specified, initial_dot,
+ path, False,
+ secure,
+ expires,
+ discard,
+ None,
+ None,
+ rest)
+
+ def clear(self, domain=None, path=None, name=None):
+ CookieJar.clear(self, domain, path, name)
+ where_parts = []
+ sql_params = []
+ if domain is not None:
+ where_parts.append("host = ?")
+ sql_params.append(domain)
+ if path is not None:
+ where_parts.append("path = ?")
+ sql_params.append(path)
+ if name is not None:
+ where_parts.append("name = ?")
+ sql_params.append(name)
+ where = " AND ".join(where_parts)
+ if where:
+ where = " WHERE " + where
+ def clear(cur):
+ cur.execute("DELETE FROM moz_cookies%s" % where,
+ tuple(sql_params))
+ self._transaction(clear)
+
+ def _row_from_cookie(self, cookie, cur):
+ expires = cookie.expires
+ if cookie.discard:
+ expires = ""
+
+ domain = unicode(cookie.domain)
+ path = unicode(cookie.path)
+ name = unicode(cookie.name)
+ value = unicode(cookie.value)
+ secure = bool(int(cookie.secure))
+
+ if value is None:
+ value = name
+ name = ""
+
+ last_accessed = int(time.time())
+ http_only = cookie.has_nonstandard_attr("HttpOnly")
+
+ query = cur.execute("""SELECT MAX(id) + 1 from moz_cookies""")
+ pk = query.fetchone()[0]
+ if pk is None:
+ pk = 1
+
+ return (pk, name, value, domain, path, expires,
+ last_accessed, secure, http_only)
+
+ def set_cookie(self, cookie):
+ if cookie.discard:
+ CookieJar.set_cookie(self, cookie)
+ return
+
+ def set_cookie(cur):
+ row = self._row_from_cookie(cookie, cur)
+ name, unused, domain, path = row[1:5]
+ cur.execute("""\
+DELETE FROM moz_cookies WHERE host = ? AND path = ? AND name = ?""",
+ (domain, path, name))
+ cur.execute("""\
+INSERT INTO moz_cookies VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
+""", row)
+ self._transaction(set_cookie)
+
+ def __iter__(self):
+ # session (non-persistent) cookies
+ for cookie in MappingIterator(self._cookies):
+ yield cookie
+ # persistent cookies
+ for row in self._query("""\
+SELECT * FROM moz_cookies ORDER BY name, path, host"""):
+ yield self._cookie_from_row(row)
+
+ def _cookies_for_request(self, request):
+ session_cookies = CookieJar._cookies_for_request(self, request)
+ def get_cookies(cur):
+ query = cur.execute("SELECT host from moz_cookies")
+ domains = [row[0] for row in query.fetchall()]
+ cookies = []
+ for domain in domains:
+ cookies += self._persistent_cookies_for_domain(domain,
+ request, cur)
+ return cookies
+ persistent_cookies = self._transaction(get_cookies)
+ return session_cookies + persistent_cookies
+
+ def _persistent_cookies_for_domain(self, domain, request, cur):
+ cookies = []
+ if not self._policy.domain_return_ok(domain, request):
+ return []
+ debug("Checking %s for cookies to return", domain)
+ query = cur.execute("""\
+SELECT * from moz_cookies WHERE host = ? ORDER BY path""",
+ (domain,))
+ cookies = [self._cookie_from_row(row) for row in query.fetchall()]
+ last_path = None
+ r = []
+ for cookie in cookies:
+ if (cookie.path != last_path and
+ not self._policy.path_return_ok(cookie.path, request)):
+ last_path = cookie.path
+ continue
+ if not self._policy.return_ok(cookie, request):
+ debug(" not returning cookie")
+ continue
+ debug(" it's a match")
+ r.append(cookie)
+ return r
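A minimal usage sketch for the new jar, assuming sqlite3 is available (see
the guarded import in __init__.py above); the database path is a placeholder,
but pointing it at a real Firefox 3 profile's cookies.sqlite shares cookies
with the browser:

    import mechanize

    cj = mechanize.Firefox3CookieJar("cookies.sqlite")  # autoconnect=True
    br = mechanize.Browser()
    br.set_cookiejar(cj)
    br.open("http://example.com/")
    # no save step: set_cookie() already wrote any persistent cookies to
    # the database; session cookies stay in memory only
    cj.close()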
diff --git a/test-tools/cookietest.cgi b/test-tools/cookietest.cgi
index c171904..d1da002 100755
--- a/test-tools/cookietest.cgi
+++ b/test-tools/cookietest.cgi
@@ -1,54 +1,58 @@
#!/usr/bin/python
# -*-python-*-
# This is used by functional_tests.py
#import cgitb; cgitb.enable()
+import time
+
print "Content-Type: text/html"
-print "Set-Cookie: foo=bar\n"
+year_plus_one = time.localtime(time.time())[0] + 1
+expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
+print "Set-Cookie: foo=bar; %s\n" % expires
import sys, os, string, cgi, Cookie, urllib
from xml.sax import saxutils
from types import ListType
print "<html><head><title>Cookies and form submission parameters</title>"
cookie = Cookie.SimpleCookie()
cookieHdr = os.environ.get("HTTP_COOKIE", "")
cookie.load(cookieHdr)
form = cgi.FieldStorage()
refresh_value = None
if form.has_key("refresh"):
refresh = form["refresh"]
if not isinstance(refresh, ListType):
refresh_value = refresh.value
if refresh_value is not None:
print '<meta http-equiv="refresh" content=%s>' % (
saxutils.quoteattr(urllib.unquote_plus(refresh_value)))
elif not cookie.has_key("foo"):
print '<meta http-equiv="refresh" content="5">'
print "</head>"
print "<p>Received cookies:</p>"
print "<pre>"
print cgi.escape(os.environ.get("HTTP_COOKIE", ""))
print "</pre>"
if cookie.has_key("foo"):
print "Your browser supports cookies!"
print "<p>Referer:</p>"
print "<pre>"
print cgi.escape(os.environ.get("HTTP_REFERER", ""))
print "</pre>"
print "<p>Received parameters:</p>"
print "<pre>"
for k in form.keys():
v = form[k]
if isinstance(v, ListType):
vs = []
for item in v:
vs.append(item.value)
text = string.join(vs, ", ")
else:
text = v.value
print "%s: %s" % (cgi.escape(k), cgi.escape(text))
print "</pre></html>"
diff --git a/test/test_cookies.py b/test/test_cookies.py
index dde6b71..c51894d 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,1565 +1,1689 @@
"""Tests for _ClientCookie."""
import urllib2, re, os, StringIO, mimetools, time, tempfile, errno
from time import localtime
from unittest import TestCase
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
+class TempfileTestMixin:
+
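+ # NOTE: subclasses must list this mixin before TestCase in their bases
+ # (as CookieJarPersistenceTests does below) so that these setUp/tearDown
+ # methods, rather than TestCase's no-op versions, are found first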
+ def setUp(self):
+ self._tempfiles = []
+
+ def tearDown(self):
+ for fn in self._tempfiles:
+ try:
+ os.remove(fn)
+ except OSError, exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ def mktemp(self):
+ fn = tempfile.mktemp()
+ self._tempfiles.append(fn)
+ return fn
+
+
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. WE regard it as a missing VALUE.
filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this one is valid Request object, so maybe should
# remove test for no host in url in request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
assert (req.has_header("Cookie") and
req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_secure(self):
from mechanize import CookieJar, DefaultCookiePolicy
for ns in True, False:
for whitespace in " ", "":
c = CookieJar()
if ns:
pol = DefaultCookiePolicy(rfc2965=False)
int = interact_netscape
vs = ""
else:
pol = DefaultCookiePolicy(rfc2965=True)
int = interact_2965
vs = "; Version=1"
c.set_policy(pol)
url = "http://www.acme.com/"
int(c, url, "foo1=bar%s%s" % (vs, whitespace))
int(c, url, "foo2=bar%s; secure%s" % (vs, whitespace))
assert not c._cookies["www.acme.com"]["/"]["foo1"].secure, \
"non-secure cookie registered secure"
assert c._cookies["www.acme.com"]["/"]["foo2"].secure, \
"secure cookie registered non-secure"
def test_quote_cookie_value(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/", r'foo=\b"a"r; Version=1')
h = interact_2965(c, "http://www.acme.com/")
assert h == r'$Version=1; foo=\\b\"a\"r'
def test_missing_final_slash(self):
# Missing slash from request URL's abs_path should be assumed present.
from mechanize import CookieJar, Request, DefaultCookiePolicy
url = "http://www.acme.com"
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, url, "foo=bar; Version=1")
req = Request(url)
assert len(c) == 1
c.add_cookie_header(req)
assert req.has_header("Cookie")
def test_domain_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find( "Domain") == -1, \
"absent domain returned with domain present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Domain=.bar.com')
h = interact_2965(c, url)
assert h.find('$Domain=".bar.com"') != -1, \
"domain not returned"
c = CookieJar(pol)
url = "http://foo.bar.com/"
# note missing initial dot in Domain
interact_2965(c, url, 'spam=eggs; Version=1; Domain=bar.com')
h = interact_2965(c, url)
assert h.find('$Domain="bar.com"') != -1, \
"domain not returned"
def test_path_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Path") == -1, \
"absent path returned with path present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Path=/')
h = interact_2965(c, url)
assert h.find('$Path="/"') != -1, "path not returned"
def test_port_mirror(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1")
h = interact_2965(c, url)
assert h.find("Port") == -1, \
"absent port returned with port present"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, "spam=eggs; Version=1; Port")
h = interact_2965(c, url)
assert re.search(r"\$Port([^=]|$)", h), \
"port with no value not returned with no value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80"')
h = interact_2965(c, url)
assert h.find('$Port="80"') != -1, \
"port with single value not returned with single value"
c = CookieJar(pol)
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; Port="80,8080"')
h = interact_2965(c, url)
assert h.find('$Port="80,8080"') != -1, \
"port with multiple values not returned with multiple values"
def test_no_return_comment(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
assert h.find("Comment") == -1, \
"Comment or CommentURL cookie-attributes returned to server"
# just pondering security here -- this isn't really a test (yet)
## def test_hack(self):
## from mechanize import CookieJar
## c = CookieJar()
## interact_netscape(c, "http://victim.mall.com/",
## 'prefs="foo"')
## interact_netscape(c, "http://cracker.mall.com/",
## 'prefs="bar"; Domain=.mall.com')
## interact_netscape(c, "http://cracker.mall.com/",
## '$Version="1"; Domain=.mall.com')
## h = interact_netscape(c, "http://victim.mall.com/")
## print h
def test_Cookie_iterator(self):
from mechanize import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
# sequential iteration
for i in range(4):
i = 0
for c in cs:
assert isinstance(c, Cookie)
assert c.version == versions[i]
assert c.name == names[i]
assert c.domain == domains[i]
assert c.path == paths[i]
i = i + 1
self.assertRaises(IndexError, lambda cs=cs : cs[5])
# can't skip
cs[0]
cs[1]
self.assertRaises(IndexError, lambda cs=cs : cs[3])
# can't go backwards
cs[0]
cs[1]
cs[2]
self.assertRaises(IndexError, lambda cs=cs : cs[1])
def test_parse_ns_headers(self):
from mechanize._headersutil import parse_ns_headers
# missing domain value (invalid cookie)
assert parse_ns_headers(["foo=bar; path=/; domain"]) == [
[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
# invalid expires value
assert parse_ns_headers(
["foo=bar; expires=Foo Bar 12 33:22:11 2000"]) == \
[[("foo", "bar"), ("expires", None), ("version", "0")]]
# missing cookie name (valid cookie)
assert parse_ns_headers(["foo"]) == [[("foo", None), ("version", "0")]]
# shouldn't add version if header is empty
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
-class LWPCookieTests(TestCase):
+class CookieJarPersistenceTests(TempfileTestMixin, TestCase):
+
+ def _interact(self, cj):
+ year_plus_one = localtime(time.time())[0] + 1
+ interact_2965(cj, "http://www.acme.com/",
+ "foo1=bar; max-age=100; Version=1")
+ interact_2965(cj, "http://www.acme.com/",
+ 'foo2=bar; port="80"; max-age=100; Discard; Version=1')
+ interact_2965(cj, "http://www.acme.com/", "foo3=bar; secure; Version=1")
+
+ expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
+ interact_netscape(cj, "http://www.foo.com/",
+ "fooa=bar; %s" % expires)
+ interact_netscape(cj, "http://www.foo.com/",
+ "foob=bar; Domain=.foo.com; %s" % expires)
+ interact_netscape(cj, "http://www.foo.com/",
+ "fooc=bar; Domain=www.foo.com; %s" % expires)
+
+ def test_firefox3_cookiejar_restore(self):
+ try:
+ from mechanize import Firefox3CookieJar
+ except ImportError:
+ pass
+ else:
+ from mechanize import DefaultCookiePolicy
+ filename = self.mktemp()
+ def create_cookiejar():
+ cj = Firefox3CookieJar(filename,
+ policy=DefaultCookiePolicy(rfc2965=True))
+ cj.connect()
+ return cj
+ cj = create_cookiejar()
+ self._interact(cj)
+ self.assertEquals(len(cj), 6)
+ cj.close()
+ cj = create_cookiejar()
+ self.assert_("name='foo1', value='bar'" in repr(cj))
+ self.assertEquals(len(cj), 4)
+
+ def test_firefox3_cookiejar_iteration(self):
+ try:
+ from mechanize import Firefox3CookieJar
+ except ImportError:
+ pass
+ else:
+ from mechanize import DefaultCookiePolicy, Cookie
+ filename = self.mktemp()
+ cj = Firefox3CookieJar(filename,
+ policy=DefaultCookiePolicy(rfc2965=True))
+ cj.connect()
+ self._interact(cj)
+ summary = "\n".join([str(cookie) for cookie in cj])
+ self.assertEquals(summary,
+ """\
+<Cookie foo2=bar for www.acme.com:80/>
+<Cookie foo3=bar for www.acme.com/>
+<Cookie foo1=bar for www.acme.com/>
+<Cookie fooa=bar for www.foo.com/>
+<Cookie foob=bar for .foo.com/>
+<Cookie fooc=bar for .www.foo.com/>""")
+
+ def test_firefox3_cookiejar_clear(self):
+ try:
+ from mechanize import Firefox3CookieJar
+ except ImportError:
+ pass
+ else:
+ from mechanize import DefaultCookiePolicy, Cookie
+ filename = self.mktemp()
+ cj = Firefox3CookieJar(filename,
+ policy=DefaultCookiePolicy(rfc2965=True))
+ cj.connect()
+ self._interact(cj)
+ cj.clear("www.acme.com", "/", "foo2")
+ def summary(): return "\n".join([str(cookie) for cookie in cj])
+ self.assertEquals(summary(),
+ """\
+<Cookie foo3=bar for www.acme.com/>
+<Cookie foo1=bar for www.acme.com/>
+<Cookie fooa=bar for www.foo.com/>
+<Cookie foob=bar for .foo.com/>
+<Cookie fooc=bar for .www.foo.com/>""")
+ cj.clear("www.acme.com")
+ self.assertEquals(summary(),
+ """\
+<Cookie fooa=bar for www.foo.com/>
+<Cookie foob=bar for .foo.com/>
+<Cookie fooc=bar for .www.foo.com/>""")
+ # if name is given, so must path and domain
+ self.assertRaises(ValueError, cj.clear, domain=".foo.com",
+ name="foob")
+ # nonexistent domain
+ self.assertRaises(KeyError, cj.clear, domain=".spam.com")
+
+ def test_firefox3_cookiejar_add_cookie_header(self):
+ try:
+ from mechanize import Firefox3CookieJar
+ except ImportError:
+ pass
+ else:
+ from mechanize import DefaultCookiePolicy, Request
+ filename = self.mktemp()
+ cj = Firefox3CookieJar(filename)
+ cj.connect()
+ # Session cookies (true .discard) and persistent cookies (false
+ # .discard) are stored differently. Check they both get sent.
+ year_plus_one = localtime(time.time())[0] + 1
+ expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
+ interact_netscape(cj, "http://www.foo.com/", "fooa=bar")
+ interact_netscape(cj, "http://www.foo.com/",
+ "foob=bar; %s" % expires)
+ ca, cb = cj
+ self.assert_(ca.discard)
+ self.assertFalse(cb.discard)
+ request = Request("http://www.foo.com/")
+ cj.add_cookie_header(request)
+ self.assertEquals(request.get_header("Cookie"),
+ "fooa=bar; foob=bar")
+
+ def test_mozilla_cookiejar(self):
+ # Save / load Mozilla/Netscape cookie file format.
+ from mechanize import MozillaCookieJar, DefaultCookiePolicy
+ filename = tempfile.mktemp()
+ c = MozillaCookieJar(filename,
+ policy=DefaultCookiePolicy(rfc2965=True))
+ self._interact(c)
+
+ def save_and_restore(cj, ignore_discard, filename=filename):
+ from mechanize import MozillaCookieJar, DefaultCookiePolicy
+ try:
+ cj.save(ignore_discard=ignore_discard)
+ new_c = MozillaCookieJar(filename,
+ DefaultCookiePolicy(rfc2965=True))
+ new_c.load(ignore_discard=ignore_discard)
+ finally:
+ try: os.unlink(filename)
+ except OSError: pass
+ return new_c
+
+ new_c = save_and_restore(c, True)
+ assert len(new_c) == 6 # none discarded
+ assert repr(new_c).find("name='foo1', value='bar'") != -1
+
+ new_c = save_and_restore(c, False)
+ assert len(new_c) == 4 # 2 of them discarded on save
+ assert repr(new_c).find("name='foo1', value='bar'") != -1
+
+ def test_mozilla_cookiejar_embedded_tab(self):
+ from mechanize import MozillaCookieJar
+ filename = tempfile.mktemp()
+ fh = open(filename, "w")
+ try:
+ fh.write(
+ MozillaCookieJar.header + "\n" +
+ "a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
+ "a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
+ fh.close()
+ cj = MozillaCookieJar(filename)
+ cj.revert(ignore_discard=True)
+ cookies = cj._cookies["a.com"]["/"]
+ self.assertEquals(cookies["name"].value, "val\tstillthevalue")
+ self.assertEquals(cookies["name2"].value, "value")
+ finally:
+ try:
+ os.remove(filename)
+            except OSError, exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+ def test_mozilla_cookiejar_initial_dot_violation(self):
+ from mechanize import MozillaCookieJar, LoadError
+ filename = tempfile.mktemp()
+ fh = open(filename, "w")
+ try:
+ fh.write(
+ MozillaCookieJar.header + "\n" +
+ ".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
+ fh.close()
+ cj = MozillaCookieJar(filename)
+ self.assertRaises(LoadError, cj.revert, ignore_discard=True)
+ finally:
+ try:
+ os.remove(filename)
+            except OSError, exc:
+ if exc.errno != errno.ENOENT:
+ raise
+
+
+
+class LWPCookieTests(TestCase, TempfileTestMixin):
# Tests taken from libwww-perl, with a few modifications.
def test_netscape_example_1(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = localtime(time.time())[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "CUSTOMER=WILE_E_COYOTE" and
req.get_header("Cookie2") == '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
not h.find("SHIPPING=FEDEX") != -1)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
from mechanize import CookieJar, Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
assert re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie"))
def test_ietf_example_1(self):
from mechanize import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
assert not cookie
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
assert re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie)
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
assert (re.search(r'^\$Version="?1"?;', cookie) and
re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
'\s*\$Path="\/acme"', cookie) and
re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
assert (re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
cookie.find("WILE_E_COYOTE") != -1)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
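        # A hedged sketch (not mechanize's actual code) of the ordering rule
        # referred to above and in test_netscape_example_1: the cookie with
        # the most specific (longest) Path is sent first, e.g.
        #     paths = ["/", "/acme", "/acme/ammo"]
        #     paths.sort(lambda a, b: cmp(len(b), len(a)))
        #     assert paths == ["/acme/ammo", "/acme", "/"]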
def test_ietf_example_2(self):
from mechanize import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
assert re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie)
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
assert (cookie.find("Rocket_Launcher_0001") != -1 and
not cookie.find("Riding_Rocket_0023") != -1)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
from mechanize import LWPCookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
assert not c
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
assert len(c) == 1
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
assert len(c) == 1
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
assert len(c) == 2
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
assert len(c) == 2
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 2
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 3
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
assert len(c) == 3
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
assert len(c) == 4
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
assert len(c) == 5
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
assert len(c) == 6
# save and restore
filename = tempfile.mktemp()
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
assert old == repr(c)
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345",
'bar=baz; path="/foo/"; version=1');
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
assert (cookie.find("foo=bar") != -1 and
version_re.search(cookie))
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anew\345/\346\370\345")
assert not cookie
# unicode URL doesn't raise exception, as it used to!
cookie = interact_2965(c, u"http://www.acme.com/\xfc")
- def test_mozilla(self):
- # Save / load Mozilla/Netscape cookie file format.
- from mechanize import MozillaCookieJar, DefaultCookiePolicy
-
- year_plus_one = localtime(time.time())[0] + 1
-
- filename = tempfile.mktemp()
-
- c = MozillaCookieJar(filename,
- policy=DefaultCookiePolicy(rfc2965=True))
- interact_2965(c, "http://www.acme.com/",
- "foo1=bar; max-age=100; Version=1")
- interact_2965(c, "http://www.acme.com/",
- 'foo2=bar; port="80"; max-age=100; Discard; Version=1')
- interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")
-
- expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
- interact_netscape(c, "http://www.foo.com/",
- "fooa=bar; %s" % expires)
- interact_netscape(c, "http://www.foo.com/",
- "foob=bar; Domain=.foo.com; %s" % expires)
- interact_netscape(c, "http://www.foo.com/",
- "fooc=bar; Domain=www.foo.com; %s" % expires)
-
- def save_and_restore(cj, ignore_discard, filename=filename):
- from mechanize import MozillaCookieJar, DefaultCookiePolicy
- try:
- cj.save(ignore_discard=ignore_discard)
- new_c = MozillaCookieJar(filename,
- DefaultCookiePolicy(rfc2965=True))
- new_c.load(ignore_discard=ignore_discard)
- finally:
- try: os.unlink(filename)
- except OSError: pass
- return new_c
-
- new_c = save_and_restore(c, True)
- assert len(new_c) == 6 # none discarded
- assert repr(new_c).find("name='foo1', value='bar'") != -1
-
- new_c = save_and_restore(c, False)
- assert len(new_c) == 4 # 2 of them discarded on save
- assert repr(new_c).find("name='foo1', value='bar'") != -1
-
- def test_mozilla_cookiejar_embedded_tab(self):
- from mechanize import MozillaCookieJar
- filename = tempfile.mktemp()
- fh = open(filename, "w")
- try:
- fh.write(
- MozillaCookieJar.header + "\n" +
- "a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
- "a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
- fh.close()
- cj = MozillaCookieJar(filename)
- cj.revert(ignore_discard=True)
- cookies = cj._cookies["a.com"]["/"]
- self.assertEquals(cookies["name"].value, "val\tstillthevalue")
- self.assertEquals(cookies["name2"].value, "value")
- finally:
- try:
- os.remove(filename)
- except OSError, exc:
- if exc.errno != errno.EEXIST:
- raise
-
- def test_mozilla_cookiejar_initial_dot_violation(self):
- from mechanize import MozillaCookieJar, LoadError
- filename = tempfile.mktemp()
- fh = open(filename, "w")
- try:
- fh.write(
- MozillaCookieJar.header + "\n" +
- ".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
- fh.close()
- cj = MozillaCookieJar(filename)
- self.assertRaises(LoadError, cj.revert, ignore_discard=True)
- finally:
- try:
- os.remove(filename)
- except OSError, exc:
- if exc.errno != errno.EEXIST:
- raise
-
def test_netscape_misc(self):
# Some additional Netscape cookies tests.
from mechanize import CookieJar, Request
c = CookieJar()
headers = []
req = Request("http://foo.bar.acme.com/foo")
# Netscape allows a host part that contains dots
headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
        # Netscape also allows a domain that equals the host, without a leading
        # dot being added to the domain. Should not quote even if strange chars are used
# in the cookie value.
headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
req = Request("http://foo.bar.acme.com/foo")
c.add_cookie_header(req)
assert (
req.get_header("Cookie").find("PART_NUMBER=3,4") != -1 and
req.get_header("Cookie").find("Customer=WILE_E_COYOTE") != -1)
def test_intranet_domains_2965(self):
# Test handling of local intranet hostnames without a dot.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://example/",
"foo1=bar; PORT; Discard; Version=1;")
cookie = interact_2965(c, "http://example/",
'foo2=bar; domain=".local"; Version=1')
assert cookie.find("foo1=bar") >= 0
interact_2965(c, "http://example/", 'foo3=bar; Version=1')
cookie = interact_2965(c, "http://example/")
assert cookie.find("foo2=bar") >= 0 and len(c) == 3
def test_intranet_domains_ns(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965 = False))
interact_netscape(c, "http://example/", "foo1=bar")
cookie = interact_netscape(c, "http://example/",
'foo2=bar; domain=.local')
assert len(c) == 2
assert cookie.find("foo1=bar") >= 0
cookie = interact_netscape(c, "http://example/")
assert cookie.find("foo2=bar") >= 0 and len(c) == 2
def test_empty_path(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
# Test for empty path
        # Broken web-server ORION/1.3.38 returns to the client a response like
#
# Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=
#
# ie. with Path set to nothing.
        # In this case, extract_cookies() must default the cookie path to / (root).
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
headers = []
req = Request("http://www.ants.com/")
headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=")
res = FakeResponse(headers, "http://www.ants.com/")
c.extract_cookies(res, req)
req = Request("http://www.ants.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "JSESSIONID=ABCDERANDOM123" and
req.get_header("Cookie2") == '$Version="1"')
# missing path in the request URI
req = Request("http://www.ants.com:8080")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "JSESSIONID=ABCDERANDOM123" and
req.get_header("Cookie2") == '$Version="1"')
# The correctness of this test is undefined, in the absence of RFC 2965 errata.
## def test_netscape_rfc2965_interop(self):
## # Test mixing of Set-Cookie and Set-Cookie2 headers.
## from mechanize import CookieJar
## # Example from http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl
## # which gives up these headers:
## #
## # HTTP/1.1 200 OK
## # Connection: close
## # Date: Fri, 20 Jul 2001 19:54:58 GMT
## # Server: Apache/1.3.19 (Unix) ApacheJServ/1.1.2
## # Content-Type: text/html
## # Content-Type: text/html; charset=iso-8859-1
## # Link: </trip/stylesheet.css>; rel="stylesheet"; type="text/css"
## # Servlet-Engine: Tomcat Web Server/3.2.1 (JSP 1.1; Servlet 2.2; Java 1.3.0; SunOS 5.8 sparc; java.vendor=Sun Microsystems Inc.)
## # Set-Cookie: trip.appServer=1111-0000-x-024;Domain=.trip.com;Path=/
## # Set-Cookie: JSESSIONID=fkumjm7nt1.JS24;Path=/trs
## # Set-Cookie2: JSESSIONID=fkumjm7nt1.JS24;Version=1;Discard;Path="/trs"
## # Title: TRIP.com Travel - FlightTRACKER
## # X-Meta-Description: Trip.com privacy policy
## # X-Meta-Keywords: privacy policy
## req = urllib2.Request(
## 'http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl')
## headers = []
## headers.append("Set-Cookie: trip.appServer=1111-0000-x-024;Domain=.trip.com;Path=/")
## headers.append("Set-Cookie: JSESSIONID=fkumjm7nt1.JS24;Path=/trs")
## headers.append('Set-Cookie2: JSESSIONID=fkumjm7nt1.JS24;Version=1;Discard;Path="/trs"')
## res = FakeResponse(
## headers,
## 'http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl')
## #print res
## c = CookieJar()
## c.extract_cookies(res, req)
## #print c
## print str(c)
## print """Set-Cookie3: trip.appServer="1111-0000-x-024"; path="/"; domain=".trip.com"; path_spec; discard; version=0
## Set-Cookie3: JSESSIONID="fkumjm7nt1.JS24"; path="/trs"; domain="www.trip.com"; path_spec; discard; version=1
## """
## assert c.as_lwp_str() == """Set-Cookie3: trip.appServer="1111-0000-x-024"; path="/"; domain=".trip.com"; path_spec; discard; version=0
## Set-Cookie3: JSESSIONID="fkumjm7nt1.JS24"; path="/trs"; domain="www.trip.com"; path_spec; discard; version=1
## """
def test_session_cookies(self):
from mechanize import CookieJar, Request
year_plus_one = localtime(time.time())[0] + 1
# Check session cookies are deleted properly by
# CookieJar.clear_session_cookies method
req = Request('http://www.perlmeister.com/scripts')
headers = []
headers.append("Set-Cookie: s1=session;Path=/scripts")
headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
"Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" %
year_plus_one)
headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, "
"02-Feb-%d 23:24:20 GMT" % year_plus_one)
headers.append("Set-Cookie: s2=session;Path=/scripts;"
"Domain=.perlmeister.com")
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
# How many session/permanent cookies do we have?
counter = {"session_after": 0,
"perm_after": 0,
"session_before": 0,
"perm_before": 0}
for cookie in c:
key = "%s_before" % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
# How many now?
for cookie in c:
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
assert not (
            # a permanent cookie got lost accidentally
counter["perm_after"] != counter["perm_before"] or
# a session cookie hasn't been cleared
counter["session_after"] != 0 or
# we didn't have session cookies in the first place
counter["session_before"] == 0)
if __name__ == "__main__":
import unittest
unittest.main()
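
A hedged aside: the session-cookie behaviour test_session_cookies drives above
can be reproduced in a few lines. Everything here sticks to the CookieJar /
Request API used throughout this file; FakeResponse is the stub defined in
this test module.

# Sketch: cookies with no Expires/Max-Age are session cookies and are
# dropped by clear_session_cookies(); permanent ones survive.
import time
from time import localtime
from mechanize import CookieJar, Request

jar = CookieJar()
req = Request("http://www.example.com/")
year = localtime(time.time())[0] + 1
res = FakeResponse(
    ["Set-Cookie: s=session; Path=/",
     "Set-Cookie: p=perm; Path=/; "
     "expires=Fri, 02-Feb-%d 23:24:20 GMT" % year],
    "http://www.example.com/")
jar.extract_cookies(res, req)
assert len(jar) == 2
jar.clear_session_cookies()
assert [c.name for c in jar] == ["p"]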
|
Almad/Mechanize
|
3741ede97c71387d07e0aa4e7dc8ec3baf9b3f66
|
Handle missing cookie max-age value. Previously, a warning was emitted in this case.
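
A hedged, standalone restatement of the guard this commit adds (the function
name here is invented for illustration; the real change is the hunk below in
_normalized_cookie_tuples): a max-age attribute with no value now rejects the
whole cookie instead of falling through to the numeric conversion.

def max_age_to_expires(v, now):
    # "max-age" present but valueless: reject the cookie outright
    if v is None:
        raise ValueError("missing value for max-age attribute")
    # RFC 2965 Max-Age is delta-seconds; convert to an absolute expiry time
    return now + int(v)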
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index e84a690..c8537ac 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -703,950 +703,954 @@ class DefaultCookiePolicy(CookiePolicy):
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
for n in ("version", "verifiability", "secure", "expires", "port",
"domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, get_type, has_header, get_header,
header_items and add_unredirected_header, as documented by urllib2, and
the port attribute (the port number). Actually,
RequestUpgradeProcessor will automatically upgrade your Request object
to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
self._policy._now = self._now = int(time.time())
req_host, erhn = eff_request_host(request)
strict_non_domain = (
self._policy.strict_ns_domain & self._policy.DomainStrictNonDomain)
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
+ if v is None:
+ debug(" missing value for max-age attribute")
+ bad_cookie = True
+ break
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
                    # request to discard the (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None: version = int(version)
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
            # An expiry date in the past is a request to delete the cookie.
            # This can't be done in DefaultCookiePolicy, because cookies can't
            # be deleted there.
try:
self.clear(domain, path, name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
cookies = []
for tup in cookie_tuples:
cookie = self._cookie_from_cookie_tuple(tup, request)
if cookie: cookies.append(cookie)
return cookies
def _process_rfc2109_cookies(self, cookies):
if self._policy.rfc2109_as_netscape is None:
rfc2109_as_netscape = not self._policy.rfc2965
else:
rfc2109_as_netscape = self._policy.rfc2109_as_netscape
for cookie in cookies:
if cookie.version == 1:
cookie.rfc2109 = True
if rfc2109_as_netscape:
# treat 2109 cookies as Netscape cookies rather than
# as RFC2965 cookies
cookie.version = 0
def make_cookies(self, response, request):
"""Return sequence of Cookie objects extracted from response object.
See extract_cookies.__doc__ for the interfaces required of the
response and request arguments.
"""
# get cookie-attributes for RFC 2965 and Netscape protocols
headers = response.info()
rfc2965_hdrs = headers.getheaders("Set-Cookie2")
ns_hdrs = headers.getheaders("Set-Cookie")
rfc2965 = self._policy.rfc2965
netscape = self._policy.netscape
if ((not rfc2965_hdrs and not ns_hdrs) or
(not ns_hdrs and not rfc2965) or
(not rfc2965_hdrs and not netscape) or
(not netscape and not rfc2965)):
return [] # no relevant cookie headers: quick exit
try:
cookies = self._cookies_from_attrs_set(
split_header_words(rfc2965_hdrs), request)
except:
reraise_unmasked_exceptions()
cookies = []
if ns_hdrs and netscape:
try:
# RFC 2109 and Netscape cookies
ns_cookies = self._cookies_from_attrs_set(
parse_ns_headers(ns_hdrs), request)
except:
reraise_unmasked_exceptions()
ns_cookies = []
self._process_rfc2109_cookies(ns_cookies)
# Look for Netscape cookies (from Set-Cookie headers) that match
# corresponding RFC 2965 cookies (from Set-Cookie2 headers).
# For each match, keep the RFC 2965 cookie and ignore the Netscape
# cookie (RFC 2965 section 9.1). Actually, RFC 2109 cookies are
# bundled in with the Netscape cookies for this purpose, which is
# reasonable behaviour.
if rfc2965:
lookup = {}
for cookie in cookies:
lookup[(cookie.domain, cookie.path, cookie.name)] = None
def no_matching_rfc2965(ns_cookie, lookup=lookup):
key = ns_cookie.domain, ns_cookie.path, ns_cookie.name
return not lookup.has_key(key)
ns_cookies = filter(no_matching_rfc2965, ns_cookies)
if ns_cookies:
cookies.extend(ns_cookies)
return cookies
def set_cookie_if_ok(self, cookie, request):
"""Set a cookie if policy says it's OK to do so.
cookie: mechanize.Cookie instance
request: see extract_cookies.__doc__ for the required interface
"""
self._policy._now = self._now = int(time.time())
if self._policy.set_ok(cookie, request):
self.set_cookie(cookie)
def set_cookie(self, cookie):
"""Set a cookie, without checking whether or not it should be set.
cookie: mechanize.Cookie instance
"""
c = self._cookies
if not c.has_key(cookie.domain): c[cookie.domain] = {}
c2 = c[cookie.domain]
if not c2.has_key(cookie.path): c2[cookie.path] = {}
c3 = c2[cookie.path]
c3[cookie.name] = cookie
def extract_cookies(self, response, request):
"""Extract cookies from response, where allowable given the request.
Look for allowable Set-Cookie: and Set-Cookie2: headers in the response
object passed as argument. Any of these headers that are found are
used to update the state of the object (subject to the policy.set_ok
method's approval).
        The response object (usually the result of a call to
mechanize.urlopen, or similar) should support an info method, which
returns a mimetools.Message object (in fact, the 'mimetools.Message
object' may be any object that provides a getallmatchingheaders
method).
The request object (usually a urllib2.Request instance) must support
the methods get_full_url and get_host, as documented by urllib2, and
the port attribute (the port number). The request is used to set
default values for cookie-attributes as well as for checking that the
cookie is OK to be set.
"""
debug("extract_cookies: %s", response.info())
self._policy._now = self._now = int(time.time())
for cookie in self.make_cookies(response, request):
if self._policy.set_ok(cookie, request):
debug(" setting cookie: %s", cookie)
self.set_cookie(cookie)
def clear(self, domain=None, path=None, name=None):
"""Clear some cookies.
Invoking this method without arguments will clear all cookies. If
given a single argument, only cookies belonging to that domain will be
removed. If given two arguments, cookies belonging to the specified
path within that domain are removed. If given three arguments, then
the cookie with the specified name, path and domain is removed.
Raises KeyError if no matching cookie exists.
"""
if name is not None:
if (domain is None) or (path is None):
raise ValueError(
"domain and path must be given to remove a cookie by name")
del self._cookies[domain][path][name]
elif path is not None:
if domain is None:
raise ValueError(
"domain must be given to remove cookies by path")
del self._cookies[domain][path]
elif domain is not None:
del self._cookies[domain]
else:
self._cookies = {}
def clear_session_cookies(self):
"""Discard all session cookies.
        Discards all cookies held by the object which had either no Max-Age or
Expires cookie-attribute or an explicit Discard cookie-attribute, or
which otherwise have ended up with a true discard attribute. For
interactive browsers, the end of a session usually corresponds to
closing the browser window.
Note that the save method won't save session cookies anyway, unless you
ask otherwise by passing a true ignore_discard argument.
"""
for cookie in self:
if cookie.discard:
self.clear(cookie.domain, cookie.path, cookie.name)
def clear_expired_cookies(self):
"""Discard all expired cookies.
You probably don't need to call this method: expired cookies are never
sent back to the server (provided you're using DefaultCookiePolicy),
this method is called by CookieJar itself every so often, and the save
method won't save expired cookies anyway (unless you ask otherwise by
passing a true ignore_expires argument).
"""
now = time.time()
for cookie in self:
if cookie.is_expired(now):
self.clear(cookie.domain, cookie.path, cookie.name)
def __getitem__(self, i):
if i == 0:
self._getitem_iterator = self.__iter__()
elif self._prev_getitem_index != i-1: raise IndexError(
"CookieJar.__getitem__ only supports sequential iteration")
self._prev_getitem_index = i
try:
return self._getitem_iterator.next()
except StopIteration:
raise IndexError()
def __iter__(self):
return MappingIterator(self._cookies)
def __len__(self):
"""Return number of contained cookies."""
i = 0
for cookie in self: i = i + 1
return i
def __repr__(self):
r = []
for cookie in self: r.append(repr(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
def __str__(self):
r = []
for cookie in self: r.append(str(cookie))
return "<%s[%s]>" % (self.__class__, ", ".join(r))
class LoadError(Exception): pass
class FileCookieJar(CookieJar):
"""CookieJar that can be loaded from and saved to a file.
Additional methods
save(filename=None, ignore_discard=False, ignore_expires=False)
load(filename=None, ignore_discard=False, ignore_expires=False)
revert(filename=None, ignore_discard=False, ignore_expires=False)
Additional public attributes
filename: filename for loading and saving cookies
Additional public readable attributes
delayload: request that cookies are lazily loaded from disk; this is only
a hint since this only affects performance, not behaviour (unless the
cookies on disk are changing); a CookieJar object may ignore it (in fact,
only MSIECookieJar lazily loads cookies at the moment)
"""
def __init__(self, filename=None, delayload=False, policy=None):
"""
See FileCookieJar.__doc__ for argument documentation.
Cookies are NOT loaded from the named file until either the load or
revert method is called.
"""
CookieJar.__init__(self, policy)
if filename is not None and not isstringlike(filename):
raise ValueError("filename must be string-like")
self.filename = filename
self.delayload = bool(delayload)
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Save cookies to a file.
filename: name of file in which to save cookies
ignore_discard: save even cookies set to be discarded
ignore_expires: save even cookies that have expired
The file is overwritten if it already exists, thus wiping all its
cookies. Saved cookies can be restored later using the load or revert
methods. If filename is not specified, self.filename is used; if
self.filename is None, ValueError is raised.
"""
raise NotImplementedError()
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file.
Old cookies are kept unless overwritten by newly loaded ones.
Arguments are as for .save().
If filename is not specified, self.filename is used; if self.filename
is None, ValueError is raised. The named file must be in the format
understood by the class, or LoadError will be raised. This format will
be identical to that written by the save method, unless the load format
is not sufficiently well understood (as is the case for MSIECookieJar).
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename)
try:
self._really_load(f, filename, ignore_discard, ignore_expires)
finally:
f.close()
def revert(self, filename=None,
ignore_discard=False, ignore_expires=False):
"""Clear all cookies and reload cookies from a saved file.
Raises LoadError (or IOError) if reversion is not successful; the
object's state will not be altered if this happens.
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
old_state = copy.deepcopy(self._cookies)
self._cookies = {}
try:
self.load(filename, ignore_discard, ignore_expires)
except (LoadError, IOError):
self._cookies = old_state
raise
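
The rollback in revert() is a plain snapshot-and-restore; a minimal
standalone sketch of the same pattern (names and the caught exception types
are illustrative, not mechanize's):

import copy

def load_atomically(jar, loader):
    # snapshot current state so a failed load leaves the jar unchanged
    old_state = copy.deepcopy(jar._cookies)
    jar._cookies = {}
    try:
        loader(jar)
    except (IOError, ValueError):
        jar._cookies = old_state  # restore on any load failure
        raise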
|
Almad/Mechanize
|
3ee760285adca5ec5441fd8fa8a1793098c4bad5
|
Stop tests from clobbering files that happen to be lying around in cwd (!)
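
The fix, in miniature (paths as they appear in the diff below):

# Before: a fixed name resolved against the current directory could clobber
# a real file lying around in cwd.
#     filename = os.path.abspath("cookies2.txt")
# After: an isolated temporary path per test run.
#     filename = tempfile.mktemp()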
|
diff --git a/test/test_cookies.py b/test/test_cookies.py
index 06692b0..dde6b71 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,633 +1,633 @@
"""Tests for _ClientCookie."""
import urllib2, re, os, StringIO, mimetools, time, tempfile, errno
from time import localtime
from unittest import TestCase
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
    def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
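# Typical use of the helpers above (hypothetical values). Note the return
# value is the Cookie header computed *before* the new Set-Cookie headers
# are extracted into the jar:
#
#     cj = CookieJar()
#     interact_netscape(cj, "http://www.acme.com/", "k=v; path=/")
#     interact_netscape(cj, "http://www.acme.com/")  # -> "k=v"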
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
    ## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
    ## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
    ## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. WE regard it as a missing VALUE.
- filename = os.path.abspath("cookies2.txt")
+ filename = tempfile.mktemp()
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
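# --- editorial aside, a sketch rather than original test code ---
# max-age wins over expires above because Netscape-style parsing
# normalises max-age to an absolute expiry time when the header is read,
# overwriting any expires attribute from the same header.
def max_age_to_expires(max_age, now):
    # hypothetical helper: max-age=0 yields a time that is already "past
    # or present" at the next check, so the cookie is discarded
    return now + int(max_age)
assert max_age_to_expires(0, 1000) == 1000
# --- end editorial aside ---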
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
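# --- editorial sketch, not part of the original test file ---
# An approximation of the escape_path behaviour the cases above exercise
# (the standard-library cookielib version has essentially this shape);
# `escape_path_sketch` and its regex are illustrative names only.
import re as _re, urllib as _urllib
_escaped_re = _re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def escape_path_sketch(path):
    if isinstance(path, unicode):
        path = path.encode("utf-8")          # unicode -> UTF-8 bytes first
    # quote unsafe characters; '%' is in the safe set so existing escapes
    # pass through untouched
    path = _urllib.quote(path, "%/;:@&=+$,!~*'()")
    # normalise the hex digits of existing escapes to upper case
    return _escaped_re.sub(lambda m: "%%%s" % m.group(1).upper(), path)
assert escape_path_sketch("/foo%2f/bar") == "/foo%2F/bar"
assert escape_path_sketch("/foo\031/bar") == "/foo%19/bar"
# --- end editorial sketch ---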
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this one is a valid Request object, so maybe we should
# remove test for no host in url in request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
interact_2965(c, "http://foo.net/", 'foo=bar; Version="1"')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["foo"].value == "bar"
assert interact_2965(c, "http://foo.net/") == "$Version=1; foo=bar"
# won't be returned to any other domain (because domain was implied)
assert interact_2965(c, "http://www.foo.net/") == ""
# unless domain is given explicitly, because then it must be
# rewritten to start with a dot: foo.net --> .foo.net, which does
# not domain-match foo.net
interact_2965(c, "http://foo.net/foo",
'spam=eggs; domain=foo.net; path=/foo; Version="1"')
assert len(c) == 1
assert interact_2965(c, "http://foo.net/foo") == "$Version=1; foo=bar"
# explicit foo.net from three-component domain www.foo.net *does* get
# set, because .foo.net domain-matches .foo.net
interact_2965(c, "http://www.foo.net/foo/",
'spam=eggs; domain=foo.net; Version="1"')
assert c._cookies[".foo.net"]["/foo/"]["spam"].value == "eggs"
assert len(c) == 2
assert interact_2965(c, "http://foo.net/foo/") == "$Version=1; foo=bar"
assert interact_2965(c, "http://www.foo.net/foo/") == \
'$Version=1; spam=eggs; $Domain="foo.net"'
# top-level domain is too general
interact_2965(c, "http://foo.net/",
'ni="ni"; domain=".net"; Version="1"')
assert len(c) == 2
# RFC 2965 doesn't require blocking this
interact_2965(c, "http://foo.co.uk/",
'nasty=trick; domain=.co.uk; Version="1"')
assert len(c) == 3
def test_domain_allow(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
c = CookieJar(policy=DefaultCookiePolicy(
blocked_domains=["acme.com"],
allowed_domains=["www.acme.com"]))
req = Request("http://acme.com/")
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
res = FakeResponse(headers, "http://acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with non-allowed domain...
req = Request("http://www.coyote.com/")
res = FakeResponse(headers, "http://www.coyote.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_domain_block(self):
from mechanize import CookieJar, DefaultCookiePolicy
from mechanize import Request
#import logging; logging.getLogger("mechanize").setLevel(logging.DEBUG)
pol = DefaultCookiePolicy(
rfc2965=True, blocked_domains=[".acme.com"])
c = CookieJar(policy=pol)
headers = ["Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/"]
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
assert len(c) == 0
pol.set_blocked_domains(["acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
c.clear()
req = Request("http://www.roadrunner.net/")
res = FakeResponse(headers, "http://www.roadrunner.net/")
c.extract_cookies(res, req)
assert len(c) == 1
req = Request("http://www.roadrunner.net/")
c.add_cookie_header(req)
assert (req.has_header("Cookie") and
req.has_header("Cookie2"))
c.clear()
pol.set_blocked_domains([".acme.com"])
c.extract_cookies(res, req)
assert len(c) == 1
# set a cookie with blocked domain...
req = Request("http://www.acme.com/")
res = FakeResponse(headers, "http://www.acme.com/")
cookies = c.make_cookies(res, req)
c.set_cookie(cookies[0])
assert len(c) == 2
# ... and check it doesn't get returned
c.add_cookie_header(req)
assert not req.has_header("Cookie")
def test_secure(self):
from mechanize import CookieJar, DefaultCookiePolicy
for ns in True, False:
@@ -753,813 +753,813 @@ class CookieTests(TestCase):
url = "http://foo.bar.com/"
interact_2965(c, url, 'spam=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
h = interact_2965(c, url)
assert h.find("Comment") == -1, \
"Comment or CommentURL cookie-attributes returned to server"
# just pondering security here -- this isn't really a test (yet)
## def test_hack(self):
## from mechanize import CookieJar
## c = CookieJar()
## interact_netscape(c, "http://victim.mall.com/",
## 'prefs="foo"')
## interact_netscape(c, "http://cracker.mall.com/",
## 'prefs="bar"; Domain=.mall.com')
## interact_netscape(c, "http://cracker.mall.com/",
## '$Version="1"; Domain=.mall.com')
## h = interact_netscape(c, "http://victim.mall.com/")
## print h
def test_Cookie_iterator(self):
from mechanize import CookieJar, Cookie, DefaultCookiePolicy
cs = CookieJar(DefaultCookiePolicy(rfc2965=True))
# add some random cookies
interact_2965(cs, "http://blah.spam.org/", 'foo=eggs; Version=1; '
'Comment="does anybody read these?"; '
'CommentURL="http://foo.bar.net/comment.html"')
interact_netscape(cs, "http://www.acme.com/blah/", "spam=bar; secure")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; secure; Version=1")
interact_2965(cs, "http://www.acme.com/blah/", "foo=bar; path=/; Version=1")
interact_2965(cs, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
versions = [1, 1, 1, 0, 1]
names = ["bang", "foo", "foo", "spam", "foo"]
domains = [".sol.no", "blah.spam.org", "www.acme.com",
"www.acme.com", "www.acme.com"]
paths = ["/", "/", "/", "/blah", "/blah/"]
# sequential iteration
for i in range(4):
i = 0
for c in cs:
assert isinstance(c, Cookie)
assert c.version == versions[i]
assert c.name == names[i]
assert c.domain == domains[i]
assert c.path == paths[i]
i = i + 1
self.assertRaises(IndexError, lambda cs=cs : cs[5])
# can't skip
cs[0]
cs[1]
self.assertRaises(IndexError, lambda cs=cs : cs[3])
# can't go backwards
cs[0]
cs[1]
cs[2]
self.assertRaises(IndexError, lambda cs=cs : cs[1])
def test_parse_ns_headers(self):
from mechanize._headersutil import parse_ns_headers
# missing domain value (invalid cookie)
assert parse_ns_headers(["foo=bar; path=/; domain"]) == [
[("foo", "bar"),
("path", "/"), ("domain", None), ("version", "0")]]
# invalid expires value
assert parse_ns_headers(
["foo=bar; expires=Foo Bar 12 33:22:11 2000"]) == \
[[("foo", "bar"), ("expires", None), ("version", "0")]]
# missing cookie name (valid cookie)
assert parse_ns_headers(["foo"]) == [[("foo", None), ("version", "0")]]
# shouldn't add version if header is empty
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
class LWPCookieTests(TestCase):
# Tests taken from libwww-perl, with a few modifications.
def test_netscape_example_1(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = localtime(time.time())[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "CUSTOMER=WILE_E_COYOTE" and
req.get_header("Cookie2") == '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
not h.find("SHIPPING=FEDEX") != -1)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
from mechanize import CookieJar, Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
assert re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie"))
def test_ietf_example_1(self):
from mechanize import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
assert not cookie
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
assert re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie)
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
assert (re.search(r'^\$Version="?1"?;', cookie) and
re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
'\s*\$Path="\/acme"', cookie) and
re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
assert (re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
cookie.find("WILE_E_COYOTE") != -1)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from mechanize import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
assert re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie)
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
assert (cookie.find("Rocket_Launcher_0001") != -1 and
not cookie.find("Riding_Rocket_0023") != -1)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
from mechanize import LWPCookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
assert not c
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
assert len(c) == 1
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
assert len(c) == 1
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
assert len(c) == 2
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
assert len(c) == 2
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 2
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 3
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
assert len(c) == 3
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
assert len(c) == 4
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
assert len(c) == 5
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
assert len(c) == 6
# save and restore
- filename = "lwp-cookies.txt"
+ filename = tempfile.mktemp()
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
assert old == repr(c)
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345",
'bar=baz; path="/foo/"; version=1');
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
assert (cookie.find("foo=bar") != -1 and
version_re.search(cookie))
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anew\345/\346\370\345")
assert not cookie
# unicode URL doesn't raise exception, as it used to!
cookie = interact_2965(c, u"http://www.acme.com/\xfc")
def test_mozilla(self):
# Save / load Mozilla/Netscape cookie file format.
from mechanize import MozillaCookieJar, DefaultCookiePolicy
year_plus_one = localtime(time.time())[0] + 1
- filename = "cookies.txt"
+ filename = tempfile.mktemp()
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(c, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(c, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def save_and_restore(cj, ignore_discard, filename=filename):
from mechanize import MozillaCookieJar, DefaultCookiePolicy
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
assert len(new_c) == 6 # none discarded
assert repr(new_c).find("name='foo1', value='bar'") != -1
new_c = save_and_restore(c, False)
assert len(new_c) == 4 # 2 of them discarded on save
assert repr(new_c).find("name='foo1', value='bar'") != -1
def test_mozilla_cookiejar_embedded_tab(self):
from mechanize import MozillaCookieJar
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
"a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
"a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
cj.revert(ignore_discard=True)
cookies = cj._cookies["a.com"]["/"]
self.assertEquals(cookies["name"].value, "val\tstillthevalue")
self.assertEquals(cookies["name2"].value, "value")
finally:
try:
os.remove(filename)
except OSError, exc:
if exc.errno != errno.EEXIST:
raise
def test_mozilla_cookiejar_initial_dot_violation(self):
from mechanize import MozillaCookieJar, LoadError
filename = tempfile.mktemp()
fh = open(filename, "w")
try:
fh.write(
MozillaCookieJar.header + "\n" +
".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
fh.close()
cj = MozillaCookieJar(filename)
self.assertRaises(LoadError, cj.revert, ignore_discard=True)
finally:
try:
os.remove(filename)
except OSError, exc:
if exc.errno != errno.EEXIST:
raise
def test_netscape_misc(self):
# Some additional Netscape cookies tests.
from mechanize import CookieJar, Request
c = CookieJar()
headers = []
req = Request("http://foo.bar.acme.com/foo")
# Netscape allows a host part that contains dots
headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
# and that the domain is the same as the host without adding a leading
# dot to the domain. Should not quote even if strange chars are used
# in the cookie value.
headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
req = Request("http://foo.bar.acme.com/foo")
c.add_cookie_header(req)
assert (
req.get_header("Cookie").find("PART_NUMBER=3,4") != -1 and
req.get_header("Cookie").find("Customer=WILE_E_COYOTE") != -1)
def test_intranet_domains_2965(self):
# Test handling of local intranet hostnames without a dot.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://example/",
"foo1=bar; PORT; Discard; Version=1;")
cookie = interact_2965(c, "http://example/",
'foo2=bar; domain=".local"; Version=1')
assert cookie.find("foo1=bar") >= 0
interact_2965(c, "http://example/", 'foo3=bar; Version=1')
cookie = interact_2965(c, "http://example/")
assert cookie.find("foo2=bar") >= 0 and len(c) == 3
def test_intranet_domains_ns(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965 = False))
interact_netscape(c, "http://example/", "foo1=bar")
cookie = interact_netscape(c, "http://example/",
'foo2=bar; domain=.local')
assert len(c) == 2
assert cookie.find("foo1=bar") >= 0
cookie = interact_netscape(c, "http://example/")
assert cookie.find("foo2=bar") >= 0 and len(c) == 2
def test_empty_path(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
# Test for empty path
# Broken web-server ORION/1.3.38 returns to the client response like
#
# Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=
#
# i.e. with Path set to nothing.
# In this case, extract_cookies() must set cookie to / (root)
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
headers = []
req = Request("http://www.ants.com/")
headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=")
res = FakeResponse(headers, "http://www.ants.com/")
c.extract_cookies(res, req)
req = Request("http://www.ants.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "JSESSIONID=ABCDERANDOM123" and
req.get_header("Cookie2") == '$Version="1"')
# missing path in the request URI
req = Request("http://www.ants.com:8080")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "JSESSIONID=ABCDERANDOM123" and
req.get_header("Cookie2") == '$Version="1"')
# The correctness of this test is undefined, in the absence of RFC 2965 errata.
## def test_netscape_rfc2965_interop(self):
## # Test mixing of Set-Cookie and Set-Cookie2 headers.
## from mechanize import CookieJar
## # Example from http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl
## # which gives up these headers:
## #
## # HTTP/1.1 200 OK
## # Connection: close
## # Date: Fri, 20 Jul 2001 19:54:58 GMT
## # Server: Apache/1.3.19 (Unix) ApacheJServ/1.1.2
## # Content-Type: text/html
## # Content-Type: text/html; charset=iso-8859-1
## # Link: </trip/stylesheet.css>; rel="stylesheet"; type="text/css"
## # Servlet-Engine: Tomcat Web Server/3.2.1 (JSP 1.1; Servlet 2.2; Java 1.3.0; SunOS 5.8 sparc; java.vendor=Sun Microsystems Inc.)
## # Set-Cookie: trip.appServer=1111-0000-x-024;Domain=.trip.com;Path=/
## # Set-Cookie: JSESSIONID=fkumjm7nt1.JS24;Path=/trs
## # Set-Cookie2: JSESSIONID=fkumjm7nt1.JS24;Version=1;Discard;Path="/trs"
## # Title: TRIP.com Travel - FlightTRACKER
## # X-Meta-Description: Trip.com privacy policy
## # X-Meta-Keywords: privacy policy
## req = urllib2.Request(
## 'http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl')
## headers = []
## headers.append("Set-Cookie: trip.appServer=1111-0000-x-024;Domain=.trip.com;Path=/")
## headers.append("Set-Cookie: JSESSIONID=fkumjm7nt1.JS24;Path=/trs")
## headers.append('Set-Cookie2: JSESSIONID=fkumjm7nt1.JS24;Version=1;Discard;Path="/trs"')
## res = FakeResponse(
## headers,
## 'http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl')
## #print res
## c = CookieJar()
## c.extract_cookies(res, req)
## #print c
## print str(c)
## print """Set-Cookie3: trip.appServer="1111-0000-x-024"; path="/"; domain=".trip.com"; path_spec; discard; version=0
## Set-Cookie3: JSESSIONID="fkumjm7nt1.JS24"; path="/trs"; domain="www.trip.com"; path_spec; discard; version=1
## """
## assert c.as_lwp_str() == """Set-Cookie3: trip.appServer="1111-0000-x-024"; path="/"; domain=".trip.com"; path_spec; discard; version=0
## Set-Cookie3: JSESSIONID="fkumjm7nt1.JS24"; path="/trs"; domain="www.trip.com"; path_spec; discard; version=1
## """
def test_session_cookies(self):
from mechanize import CookieJar, Request
year_plus_one = localtime(time.time())[0] + 1
# Check session cookies are deleted properly by
# CookieJar.clear_session_cookies method
req = Request('http://www.perlmeister.com/scripts')
headers = []
headers.append("Set-Cookie: s1=session;Path=/scripts")
headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
"Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" %
year_plus_one)
headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, "
"02-Feb-%d 23:24:20 GMT" % year_plus_one)
headers.append("Set-Cookie: s2=session;Path=/scripts;"
"Domain=.perlmeister.com")
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
# How many session/permanent cookies do we have?
counter = {"session_after": 0,
"perm_after": 0,
"session_before": 0,
"perm_before": 0}
for cookie in c:
key = "%s_before" % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
# How many now?
for cookie in c:
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
assert not (
# a permanent cookie got lost accidentally
counter["perm_after"] != counter["perm_before"] or
# a session cookie hasn't been cleared
counter["session_after"] != 0 or
# we didn't have session cookies in the first place
counter["session_before"] == 0)
if __name__ == "__main__":
import unittest
unittest.main()
|
Almad/Mechanize
|
8690d0e13f983368f25b6afa001764d4f0723b38
|
* Handle cookies containing embedded tabs in mozilla format files * Remove an assertion about mozilla format cookies file contents (raise LoadError instead)
|
diff --git a/mechanize/_mozillacookiejar.py b/mechanize/_mozillacookiejar.py
index 8dbdb20..51e81bb 100644
--- a/mechanize/_mozillacookiejar.py
+++ b/mechanize/_mozillacookiejar.py
@@ -1,159 +1,161 @@
"""Mozilla / Netscape cookie loading / saving.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re, time, logging
from _clientcookie import reraise_unmasked_exceptions, FileCookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("ClientCookie").debug
class MozillaCookieJar(FileCookieJar):
"""
WARNING: you may want to backup your browser's cookies file if you use
this class to save cookies. I *think* it works, but there have been
bugs in the past!
This class differs from CookieJar only in the format it uses to save and
load cookies to and from a file. This class uses the Mozilla/Netscape
`cookies.txt' format. lynx uses this file format, too.
Don't expect cookies saved while the browser is running to be noticed by
the browser (in fact, Mozilla on unix will overwrite your saved cookies if
you change them on disk while it's running; on Windows, you probably can't
save at all while the browser is running).
Note that the Mozilla/Netscape format will downgrade RFC2965 cookies to
Netscape cookies on saving.
In particular, the cookie version and port number information is lost,
together with information about whether or not Path, Port and Discard were
specified by the Set-Cookie2 (or Set-Cookie) header, and whether or not the
domain as set in the HTTP header started with a dot (yes, I'm aware some
domains in Netscape files start with a dot and some don't -- trust me, you
really don't want to know any more about this).
Note that though Mozilla and Netscape use the same format, they use
slightly different headers. The class saves cookies using the Netscape
header by default (Mozilla can cope with that).
"""
magic_re = "#( Netscape)? HTTP Cookie File"
header = """\
# Netscape HTTP Cookie File
# http://www.netscape.com/newsref/std/cookie_spec.html
# This is a generated file! Do not edit.
"""
def _really_load(self, f, filename, ignore_discard, ignore_expires):
now = time.time()
magic = f.readline()
if not re.search(self.magic_re, magic):
f.close()
raise LoadError(
"%s does not look like a Netscape format cookies file" %
filename)
try:
while 1:
line = f.readline()
if line == "": break
# last field may be absent, so keep any trailing tab
if line.endswith("\n"): line = line[:-1]
# skip comments and blank lines XXX what is $ for?
if (line.strip().startswith("#") or
line.strip().startswith("$") or
line.strip() == ""):
continue
domain, domain_specified, path, secure, expires, name, value = \
- line.split("\t")
+ line.split("\t", 6)
secure = (secure == "TRUE")
domain_specified = (domain_specified == "TRUE")
if name == "":
name = value
value = None
initial_dot = domain.startswith(".")
- assert domain_specified == initial_dot
+ if domain_specified != initial_dot:
+ raise LoadError("domain and domain specified flag don't "
+ "match in %s: %s" % (filename, line))
discard = False
if expires == "":
expires = None
discard = True
# assume path_specified is false
c = Cookie(0, name, value,
None, False,
domain, domain_specified, initial_dot,
path, False,
secure,
expires,
discard,
None,
None,
{})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
self.set_cookie(c)
except:
- reraise_unmasked_exceptions((IOError,))
+ reraise_unmasked_exceptions((IOError, LoadError))
raise LoadError("invalid Netscape format file %s: %s" %
- (filename, line))
+ (filename, line))
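# [editorial sketch, not part of the diff: why the hunk above switches to
# split("\t", 6) -- with maxsplit, a tab embedded in the cookie value
# stays inside the seventh field instead of breaking the unpack]
assert ("a.com\tFALSE\t/\tFALSE\t\tname\tval\tstill".split("\t", 6)[-1]
        == "val\tstill")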
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
f = open(filename, "w")
try:
debug("Saving Netscape cookies.txt file")
f.write(self.header)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
debug(" Not saving %s: marked for discard", cookie.name)
continue
if not ignore_expires and cookie.is_expired(now):
debug(" Not saving %s: expired", cookie.name)
continue
if cookie.secure: secure = "TRUE"
else: secure = "FALSE"
if cookie.domain.startswith("."): initial_dot = "TRUE"
else: initial_dot = "FALSE"
if cookie.expires is not None:
expires = str(cookie.expires)
else:
expires = ""
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas cookielib regards it as a
# cookie with no value.
name = ""
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
"\t".join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value])+
"\n")
finally:
f.close()
diff --git a/test/test_cookies.py b/test/test_cookies.py
index cbc16fd..06692b0 100644
--- a/test/test_cookies.py
+++ b/test/test_cookies.py
@@ -1,515 +1,515 @@
"""Tests for _ClientCookie."""
-import urllib2, re, os, StringIO, mimetools, time
+import urllib2, re, os, StringIO, mimetools, time, tempfile, errno
from time import localtime
from unittest import TestCase
class FakeResponse:
def __init__(self, headers=[], url=None):
"""
headers: list of RFC822-style 'Key: value' strings
"""
f = StringIO.StringIO("\n".join(headers))
self._headers = mimetools.Message(f)
self._url = url
def info(self): return self._headers
def url(self): return self._url
def interact_2965(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie2")
def interact_netscape(cookiejar, url, *set_cookie_hdrs):
return _interact(cookiejar, url, set_cookie_hdrs, "Set-Cookie")
def _interact(cookiejar, url, set_cookie_hdrs, hdr_name):
"""Perform a single request / response cycle, returning Cookie: header."""
from mechanize import Request
req = Request(url)
cookiejar.add_cookie_header(req)
cookie_hdr = req.get_header("Cookie", "")
headers = []
for hdr in set_cookie_hdrs:
headers.append("%s: %s" % (hdr_name, hdr))
res = FakeResponse(headers, url)
cookiejar.extract_cookies(res, req)
return cookie_hdr
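# [editorial usage note, a sketch: both interact_* helpers return the
# Cookie: header the jar sends *before* the new Set-Cookie headers are
# extracted, so the first interaction for a fresh jar returns ""]
from mechanize import CookieJar as _CookieJar
_jar = _CookieJar()
assert interact_netscape(_jar, "http://www.acme.com/", "spam=eggs") == ""
assert interact_netscape(_jar, "http://www.acme.com/") == "spam=eggs"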
class CookieTests(TestCase):
# XXX
# Get rid of string comparisons where not actually testing str / repr.
# .clear() etc.
# IP addresses like 50 (single number, no dot) and domain-matching
# functions (and is_HDN)? See draft RFC 2965 errata.
# Strictness switches
# is_third_party()
# unverifiability / third_party blocking
# Netscape cookies work the same as RFC 2965 with regard to port.
# Set-Cookie with negative max age.
# If turn RFC 2965 handling off, Set-Cookie2 cookies should not clobber
# Set-Cookie cookies.
# Cookie2 should be sent if *any* cookies are not V1 (ie. V0 OR V2 etc.).
# Cookies (V1 and V0) with no expiry date should be set to be discarded.
# RFC 2965 Quoting:
# Should accept unquoted cookie-attribute values? check errata draft.
# Which are required on the way in and out?
# Should always return quoted cookie-attribute values?
# Proper testing of when RFC 2965 clobbers Netscape (waiting for errata).
# Path-match on return (same for V0 and V1).
# RFC 2965 acceptance and returning rules
# Set-Cookie2 without version attribute is rejected.
# Netscape peculiarities list from Ronald Tschalar.
# The first two still need tests, the rest are covered.
## - Quoting: only quotes around the expires value are recognized as such
## (and yes, some folks quote the expires value); quotes around any other
## value are treated as part of the value.
## - White space: white space around names and values is ignored
## - Default path: if no path parameter is given, the path defaults to the
## path in the request-uri up to, but not including, the last '/'. Note
## that this is entirely different from what the spec says.
## - Commas and other delimiters: Netscape just parses until the next ';'.
## This means it will allow commas etc inside values (and yes, both
## commas and equals commonly appear in the cookie value). This also
## means that if you fold multiple Set-Cookie header fields into one,
## comma-separated list, it'll be a headache to parse (at least my head
## starts hurting every time I think of that code).
## - Expires: You'll get all sorts of date formats in the expires,
## including empty expires attributes ("expires="). Be as flexible as you
## can, and certainly don't expect the weekday to be there; if you can't
## parse it, just ignore it and pretend it's a session cookie.
## - Domain-matching: Netscape uses the 2-dot rule for _all_ domains, not
## just the 7 special TLD's listed in their spec. And folks rely on
## that...
def test_domain_return_ok(self):
# test optimization: .domain_return_ok() should filter out most
# domains in the CookieJar before we try to access them (because that
# may require disk access -- in particular, with MSIECookieJar)
# This is only a rough check for performance reasons, so it's not too
# critical as long as it's sufficiently liberal.
import mechanize
pol = mechanize.DefaultCookiePolicy()
for url, domain, ok in [
("http://foo.bar.com/", "blah.com", False),
("http://foo.bar.com/", "rhubarb.blah.com", False),
("http://foo.bar.com/", "rhubarb.foo.bar.com", False),
("http://foo.bar.com/", ".foo.bar.com", True),
("http://foo.bar.com/", "foo.bar.com", True),
("http://foo.bar.com/", ".bar.com", True),
("http://foo.bar.com/", "com", True),
("http://foo.com/", "rhubarb.foo.com", False),
("http://foo.com/", ".foo.com", True),
("http://foo.com/", "foo.com", True),
("http://foo.com/", "com", True),
("http://foo/", "rhubarb.foo", False),
("http://foo/", ".foo", True),
("http://foo/", "foo", True),
("http://foo/", "foo.local", True),
("http://foo/", ".local", True),
]:
request = mechanize.Request(url)
r = pol.domain_return_ok(domain, request)
if ok: self.assert_(r)
else: self.assert_(not r)
def test_missing_name(self):
from mechanize import MozillaCookieJar, lwp_cookie_str
# missing = sign in Cookie: header is regarded by Mozilla as a missing
# NAME. WE regard it as a missing VALUE.
filename = os.path.abspath("cookies2.txt")
c = MozillaCookieJar(filename)
interact_netscape(c, "http://www.acme.com/", 'eggs')
interact_netscape(c, "http://www.acme.com/", '"spam"; path=/foo/')
cookie = c._cookies["www.acme.com"]["/"]['eggs']
assert cookie.name == "eggs"
assert cookie.value is None
cookie = c._cookies["www.acme.com"]['/foo/']['"spam"']
assert cookie.name == '"spam"'
assert cookie.value is None
assert lwp_cookie_str(cookie) == (
r'"spam"; path="/foo/"; domain="www.acme.com"; '
'path_spec; discard; version=0')
old_str = repr(c)
c.save(ignore_expires=True, ignore_discard=True)
try:
c = MozillaCookieJar(filename)
c.revert(ignore_expires=True, ignore_discard=True)
finally:
os.unlink(c.filename)
# cookies unchanged apart from lost info re. whether path was specified
assert repr(c) == \
re.sub("path_specified=%s" % True, "path_specified=%s" % False,
old_str)
assert interact_netscape(c, "http://www.acme.com/foo/") == \
'"spam"; eggs'
def test_rfc2109_handling(self):
# 2109 cookies have rfc2109 attr set correctly, and are handled
# as 2965 or Netscape cookies depending on policy settings
from mechanize import CookieJar, DefaultCookiePolicy
for policy, version in [
(DefaultCookiePolicy(), 0),
(DefaultCookiePolicy(rfc2965=True), 1),
(DefaultCookiePolicy(rfc2109_as_netscape=True), 0),
(DefaultCookiePolicy(rfc2965=True, rfc2109_as_netscape=True), 0),
]:
c = CookieJar(policy)
interact_netscape(c, "http://www.example.com/", "ni=ni; Version=1")
cookie = c._cookies["www.example.com"]["/"]["ni"]
self.assert_(cookie.rfc2109)
self.assertEqual(cookie.version, version)
def test_ns_parser(self):
from mechanize import CookieJar
from mechanize._clientcookie import DEFAULT_HTTP_PORT
c = CookieJar()
interact_netscape(c, "http://www.acme.com/",
'spam=eggs; DoMain=.acme.com; port; blArgh="feep"')
interact_netscape(c, "http://www.acme.com/", 'ni=ni; port=80,8080')
interact_netscape(c, "http://www.acme.com:80/", 'nini=ni')
interact_netscape(c, "http://www.acme.com:80/", 'foo=bar; expires=')
interact_netscape(c, "http://www.acme.com:80/", 'spam=eggs; '
'expires="Foo Bar 25 33:22:11 3022"')
cookie = c._cookies[".acme.com"]["/"]["spam"]
assert cookie.domain == ".acme.com"
assert cookie.domain_specified
assert cookie.port == DEFAULT_HTTP_PORT
assert not cookie.port_specified
# case is preserved
assert (cookie.has_nonstandard_attr("blArgh") and
not cookie.has_nonstandard_attr("blargh"))
cookie = c._cookies["www.acme.com"]["/"]["ni"]
assert cookie.domain == "www.acme.com"
assert not cookie.domain_specified
assert cookie.port == "80,8080"
assert cookie.port_specified
cookie = c._cookies["www.acme.com"]["/"]["nini"]
assert cookie.port is None
assert not cookie.port_specified
# invalid expires should not cause cookie to be dropped
foo = c._cookies["www.acme.com"]["/"]["foo"]
spam = c._cookies["www.acme.com"]["/"]["spam"]
assert foo.expires is None
assert spam.expires is None
def test_ns_parser_special_names(self):
# names such as 'expires' are not special in first name=value pair
# of Set-Cookie: header
from mechanize import CookieJar
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'expires=eggs')
interact_netscape(c, "http://www.acme.com/", 'version=eggs; spam=eggs')
cookies = c._cookies["www.acme.com"]["/"]
self.assert_(cookies.has_key('expires'))
self.assert_(cookies.has_key('version'))
def test_expires(self):
from mechanize._util import time2netscape
from mechanize import CookieJar
# if expires is in future, keep cookie...
c = CookieJar()
future = time2netscape(time.time()+3600)
interact_netscape(c, "http://www.acme.com/", 'spam="bar"; expires=%s' %
future)
assert len(c) == 1
now = time2netscape(time.time()-1)
# ... and if in past or present, discard it
interact_netscape(c, "http://www.acme.com/", 'foo="eggs"; expires=%s' %
now)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
assert h.find('spam="bar"') != -1 and h.find("foo") == -1
# max-age takes precedence over expires, and zero max-age is request to
# delete both new cookie and any old matching cookie
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; expires=%s' %
future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; expires=%s' %
future)
assert len(c) == 3
interact_netscape(c, "http://www.acme.com/", 'eggs="bar"; '
'expires=%s; max-age=0' % future)
interact_netscape(c, "http://www.acme.com/", 'bar="bar"; '
'max-age=0; expires=%s' % future)
h = interact_netscape(c, "http://www.acme.com/")
assert len(c) == 1
# test expiry at end of session for cookies with no expires attribute
interact_netscape(c, "http://www.rhubarb.net/", 'whum="fizz"')
assert len(c) == 2
c.clear_session_cookies()
assert len(c) == 1
assert h.find('spam="bar"') != -1
# XXX RFC 2965 expiry rules (some apply to V0 too)
def test_default_path(self):
from mechanize import CookieJar, DefaultCookiePolicy
# RFC 2965
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/", 'spam="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah", 'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/")
c = CookieJar(pol)
interact_2965(c, "http://www.acme.com/blah/rhubarb/",
'eggs="bar"; Version="1"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb/")
# Netscape
c = CookieJar()
interact_netscape(c, "http://www.acme.com/", 'spam="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah")
c = CookieJar()
interact_netscape(c, "http://www.acme.com/blah/rhubarb/", 'eggs="bar"')
assert c._cookies["www.acme.com"].has_key("/blah/rhubarb")
def test_escape_path(self):
from mechanize._clientcookie import escape_path
cases = [
# quoted safe
("/foo%2f/bar", "/foo%2F/bar"),
("/foo%2F/bar", "/foo%2F/bar"),
# quoted %
("/foo%%/bar", "/foo%%/bar"),
# quoted unsafe
("/fo%19o/bar", "/fo%19o/bar"),
("/fo%7do/bar", "/fo%7Do/bar"),
# unquoted safe
("/foo/bar&", "/foo/bar&"),
("/foo//bar", "/foo//bar"),
("\176/foo/bar", "\176/foo/bar"),
# unquoted unsafe
("/foo\031/bar", "/foo%19/bar"),
("/\175foo/bar", "/%7Dfoo/bar"),
# unicode
(u"/foo/bar\uabcd", "/foo/bar%EA%AF%8D"), # UTF-8 encoded
]
for arg, result in cases:
self.assert_(escape_path(arg) == result)
def test_request_path(self):
from urllib2 import Request
from mechanize._clientcookie import request_path
# with parameters
req = Request("http://www.example.com/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum;"
"foo=bar;sing=song?apples=pears&spam=eggs#ni")
# without parameters
req = Request("http://www.example.com/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
self.assert_(request_path(req) == "/rheum/rhaponicum?"
"apples=pears&spam=eggs#ni")
# missing final slash
req = Request("http://www.example.com")
self.assert_(request_path(req) == "/")
def test_request_port(self):
from urllib2 import Request
from mechanize._clientcookie import request_port, DEFAULT_HTTP_PORT
req = Request("http://www.acme.com:1234/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == "1234"
req = Request("http://www.acme.com/",
headers={"Host": "www.acme.com:4321"})
assert request_port(req) == DEFAULT_HTTP_PORT
def test_request_host(self):
from mechanize import Request
from mechanize._clientcookie import request_host
# this request is illegal (RFC2616, 14.2.3)
req = Request("http://1.1.1.1/",
headers={"Host": "www.acme.com:80"})
# libwww-perl wants this response, but that seems wrong (RFC 2616,
# section 5.2, point 1., and RFC 2965 section 1, paragraph 3)
#assert request_host(req) == "www.acme.com"
assert request_host(req) == "1.1.1.1"
req = Request("http://www.acme.com/",
headers={"Host": "irrelevant.com"})
assert request_host(req) == "www.acme.com"
# not actually sure this is a valid Request object; maybe the test for
# no host in the URL should be removed from the request_host function?
req = Request("/resource.html",
headers={"Host": "www.acme.com"})
assert request_host(req) == "www.acme.com"
# port shouldn't be in request-host
req = Request("http://www.acme.com:2345/resource.html",
headers={"Host": "www.acme.com:5432"})
assert request_host(req) == "www.acme.com"
def test_is_HDN(self):
from mechanize._clientcookie import is_HDN
assert is_HDN("foo.bar.com")
assert is_HDN("1foo2.3bar4.5com")
assert not is_HDN("192.168.1.1")
assert not is_HDN("")
assert not is_HDN(".")
assert not is_HDN(".foo.bar.com")
assert not is_HDN("..foo")
assert not is_HDN("foo.")
def test_reach(self):
from mechanize._clientcookie import reach
assert reach("www.acme.com") == ".acme.com"
assert reach("acme.com") == "acme.com"
assert reach("acme.local") == ".local"
assert reach(".local") == ".local"
assert reach(".com") == ".com"
assert reach(".") == "."
assert reach("") == ""
assert reach("192.168.0.1") == "192.168.0.1"
def test_domain_match(self):
from mechanize._clientcookie import domain_match, user_domain_match
assert domain_match("192.168.1.1", "192.168.1.1")
assert not domain_match("192.168.1.1", ".168.1.1")
assert domain_match("x.y.com", "x.Y.com")
assert domain_match("x.y.com", ".Y.com")
assert not domain_match("x.y.com", "Y.com")
assert domain_match("a.b.c.com", ".c.com")
assert not domain_match(".c.com", "a.b.c.com")
assert domain_match("example.local", ".local")
assert not domain_match("blah.blah", "")
assert not domain_match("", ".rhubarb.rhubarb")
assert domain_match("", "")
assert user_domain_match("acme.com", "acme.com")
assert not user_domain_match("acme.com", ".acme.com")
assert user_domain_match("rhubarb.acme.com", ".acme.com")
assert user_domain_match("www.rhubarb.acme.com", ".acme.com")
assert user_domain_match("x.y.com", "x.Y.com")
assert user_domain_match("x.y.com", ".Y.com")
assert not user_domain_match("x.y.com", "Y.com")
assert user_domain_match("y.com", "Y.com")
assert not user_domain_match(".y.com", "Y.com")
assert user_domain_match(".y.com", ".Y.com")
assert user_domain_match("x.y.com", ".com")
assert not user_domain_match("x.y.com", "com")
assert not user_domain_match("x.y.com", "m")
assert not user_domain_match("x.y.com", ".m")
assert not user_domain_match("x.y.com", "")
assert not user_domain_match("x.y.com", ".")
assert user_domain_match("192.168.1.1", "192.168.1.1")
# not both HDNs, so must string-compare equal to match
assert not user_domain_match("192.168.1.1", ".168.1.1")
assert not user_domain_match("192.168.1.1", ".")
# empty string is a special case
assert not user_domain_match("192.168.1.1", "")
def test_wrong_domain(self):
"""Cookies whose ERH does not domain-match the domain are rejected.
ERH = effective request-host.
"""
# XXX far from complete
from mechanize import CookieJar
c = CookieJar()
interact_2965(c, "http://www.nasty.com/", 'foo=bar; domain=friendly.org; Version="1"')
assert len(c) == 0
def test_strict_domain(self):
# Cookies whose domain is a country-code tld like .co.uk should
# not be set if CookiePolicy.strict_domain is true.
from mechanize import CookieJar, DefaultCookiePolicy
cp = DefaultCookiePolicy(strict_domain=True)
cj = CookieJar(policy=cp)
interact_netscape(cj, "http://example.co.uk/", 'no=problemo')
interact_netscape(cj, "http://example.co.uk/",
'okey=dokey; Domain=.example.co.uk')
self.assertEquals(len(cj), 2)
for pseudo_tld in [".co.uk", ".org.za", ".tx.us", ".name.us"]:
interact_netscape(cj, "http://example.%s/" % pseudo_tld,
'spam=eggs; Domain=.co.uk')
self.assertEquals(len(cj), 2)
# XXXX This should be compared with the Konqueror (kcookiejar.cpp) and
# Mozilla implementations.
def test_two_component_domain_ns(self):
# Netscape: .www.bar.com, www.bar.com, .bar.com, bar.com, no domain should
# all get accepted, as should .acme.com, acme.com and no domain for
# 2-component domains like acme.com.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar()
# two-component V0 domain is OK
interact_netscape(c, "http://foo.net/", 'ns=bar')
assert len(c) == 1
assert c._cookies["foo.net"]["/"]["ns"].value == "bar"
assert interact_netscape(c, "http://foo.net/") == "ns=bar"
# *will* be returned to any other domain (unlike RFC 2965)...
assert interact_netscape(c, "http://www.foo.net/") == "ns=bar"
# ...unless requested otherwise
pol = DefaultCookiePolicy(
strict_ns_domain=DefaultCookiePolicy.DomainStrictNonDomain)
c.set_policy(pol)
assert interact_netscape(c, "http://www.foo.net/") == ""
# unlike RFC 2965, even explicit two-component domain is OK,
# because .foo.net matches foo.net
interact_netscape(c, "http://foo.net/foo/",
'spam1=eggs; domain=foo.net')
# even if starts with a dot -- in NS rules, .foo.net matches foo.net!
interact_netscape(c, "http://foo.net/foo/bar/",
'spam2=eggs; domain=.foo.net')
assert len(c) == 3
assert c._cookies[".foo.net"]["/foo"]["spam1"].value == "eggs"
assert c._cookies[".foo.net"]["/foo/bar"]["spam2"].value == "eggs"
assert interact_netscape(c, "http://foo.net/foo/bar/") == \
"spam2=eggs; spam1=eggs; ns=bar"
# top-level domain is too general
interact_netscape(c, "http://foo.net/", 'nini="ni"; domain=.net')
assert len(c) == 3
## # Netscape protocol doesn't allow non-special top level domains (such
## # as co.uk) in the domain attribute unless there are at least three
## # dots in it.
# Oh yes it does! Real implementations don't check this, and real
# cookies (of course) rely on that behaviour.
interact_netscape(c, "http://foo.co.uk", 'nasty=trick; domain=.co.uk')
## assert len(c) == 2
assert len(c) == 4
def test_two_component_domain_rfc2965(self):
from mechanize import CookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = CookieJar(pol)
# two-component V1 domain is OK
@@ -835,691 +835,731 @@ class CookieTests(TestCase):
assert parse_ns_headers([""]) == []
def test_bad_cookie_header(self):
def cookiejar_from_cookie_headers(headers):
from mechanize import CookieJar, Request
c = CookieJar()
req = Request("http://www.example.com/")
r = FakeResponse(headers, "http://www.example.com/")
c.extract_cookies(r, req)
return c
# none of these bad headers should cause an exception to be raised
for headers in [
["Set-Cookie: "], # actually, nothing wrong with this
["Set-Cookie2: "], # ditto
# missing domain value
["Set-Cookie2: a=foo; path=/; Version=1; domain"],
# bad max-age
["Set-Cookie: b=foo; max-age=oops"],
]:
c = cookiejar_from_cookie_headers(headers)
# these bad cookies shouldn't be set
assert len(c) == 0
# cookie with invalid expires is treated as session cookie
headers = ["Set-Cookie: c=foo; expires=Foo Bar 12 33:22:11 2000"]
c = cookiejar_from_cookie_headers(headers)
cookie = c._cookies["www.example.com"]["/"]["c"]
assert cookie.expires is None
class LWPCookieTests(TestCase):
# Tests taken from libwww-perl, with a few modifications.
def test_netscape_example_1(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
#-------------------------------------------------------------------
# First we check that it works for the original example at
# http://www.netscape.com/newsref/std/cookie_spec.html
# Client requests a document, and receives in the response:
#
# Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE
#
# Client requests a document, and receives in the response:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: SHIPPING=FEDEX; path=/fo
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# When client requests a URL in path "/foo" on this server, it sends:
#
# Cookie: CUSTOMER=WILE_E_COYOTE; PART_NUMBER=ROCKET_LAUNCHER_0001; SHIPPING=FEDEX
#
# The last Cookie is buggy, because both specifications say that the
# most specific cookie must be sent first. SHIPPING=FEDEX is the
# most specific and should thus be first.
year_plus_one = localtime(time.time())[0] + 1
headers = []
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
#req = Request("http://1.1.1.1/",
# headers={"Host": "www.acme.com:80"})
req = Request("http://www.acme.com:80/",
headers={"Host": "www.acme.com:80"})
headers.append(
"Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/ ; "
"expires=Wednesday, 09-Nov-%d 23:12:40 GMT" % year_plus_one)
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "CUSTOMER=WILE_E_COYOTE" and
req.get_header("Cookie2") == '$Version="1"')
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/foo/bar")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1)
headers.append('Set-Cookie: SHIPPING=FEDEX; path=/foo')
res = FakeResponse(headers, "http://www.acme.com")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
not h.find("SHIPPING=FEDEX") != -1)
req = Request("http://www.acme.com/foo/")
c.add_cookie_header(req)
h = req.get_header("Cookie")
assert (h.find("PART_NUMBER=ROCKET_LAUNCHER_0001") != -1 and
h.find("CUSTOMER=WILE_E_COYOTE") != -1 and
h.startswith("SHIPPING=FEDEX;"))
def test_netscape_example_2(self):
from mechanize import CookieJar, Request
# Second Example transaction sequence:
#
# Assume all mappings from above have been cleared.
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/
#
# When client requests a URL in path "/" on this server, it sends:
#
# Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001
#
# Client receives:
#
# Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo
#
# When client requests a URL in path "/ammo" on this server, it sends:
#
# Cookie: PART_NUMBER=RIDING_ROCKET_0023; PART_NUMBER=ROCKET_LAUNCHER_0001
#
# NOTE: There are two name/value pairs named "PART_NUMBER" due to
# the inheritance of the "/" mapping in addition to the "/ammo" mapping.
c = CookieJar()
headers = []
req = Request("http://www.acme.com/")
headers.append("Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "PART_NUMBER=ROCKET_LAUNCHER_0001")
headers.append(
"Set-Cookie: PART_NUMBER=RIDING_ROCKET_0023; path=/ammo")
res = FakeResponse(headers, "http://www.acme.com/")
c.extract_cookies(res, req)
req = Request("http://www.acme.com/ammo")
c.add_cookie_header(req)
assert re.search(r"PART_NUMBER=RIDING_ROCKET_0023;\s*"
"PART_NUMBER=ROCKET_LAUNCHER_0001",
req.get_header("Cookie"))
def test_ietf_example_1(self):
from mechanize import CookieJar, DefaultCookiePolicy
#-------------------------------------------------------------------
# Then we test with the examples from draft-ietf-http-state-man-mec-03.txt
#
# 5. EXAMPLES
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
#
# 5.1 Example 1
#
# Most detail of request and response headers has been omitted. Assume
# the user agent has no stored cookies.
#
# 1. User Agent -> Server
#
# POST /acme/login HTTP/1.1
# [form data]
#
# User identifies self via a form.
#
# 2. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"
#
# Cookie reflects user's identity.
cookie = interact_2965(
c, 'http://www.acme.com/acme/login',
'Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
assert not cookie
#
# 3. User Agent -> Server
#
# POST /acme/pickitem HTTP/1.1
# Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"
# [form data]
#
# User selects an item for ``shopping basket.''
#
# 4. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# Shopping basket contains an item.
cookie = interact_2965(c, 'http://www.acme.com/acme/pickitem',
'Part_Number="Rocket_Launcher_0001"; '
'Version="1"; Path="/acme"');
assert re.search(
r'^\$Version="?1"?; Customer="?WILE_E_COYOTE"?; \$Path="/acme"$',
cookie)
#
# 5. User Agent -> Server
#
# POST /acme/shipping HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
# [form data]
#
# User selects shipping method from form.
#
# 6. Server -> User Agent
#
# HTTP/1.1 200 OK
# Set-Cookie2: Shipping="FedEx"; Version="1"; Path="/acme"
#
# New cookie reflects shipping method.
cookie = interact_2965(c, "http://www.acme.com/acme/shipping",
'Shipping="FedEx"; Version="1"; Path="/acme"')
assert (re.search(r'^\$Version="?1"?;', cookie) and
re.search(r'Part_Number="?Rocket_Launcher_0001"?;'
'\s*\$Path="\/acme"', cookie) and
re.search(r'Customer="?WILE_E_COYOTE"?;\s*\$Path="\/acme"',
cookie))
#
# 7. User Agent -> Server
#
# POST /acme/process HTTP/1.1
# Cookie: $Version="1";
# Customer="WILE_E_COYOTE"; $Path="/acme";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme";
# Shipping="FedEx"; $Path="/acme"
# [form data]
#
# User chooses to process order.
#
# 8. Server -> User Agent
#
# HTTP/1.1 200 OK
#
# Transaction is complete.
cookie = interact_2965(c, "http://www.acme.com/acme/process")
assert (re.search(r'Shipping="?FedEx"?;\s*\$Path="\/acme"', cookie) and
cookie.find("WILE_E_COYOTE") != -1)
#
# The user agent makes a series of requests on the origin server, after
# each of which it receives a new cookie. All the cookies have the same
# Path attribute and (default) domain. Because the request URLs all have
# /acme as a prefix, and that matches the Path attribute, each request
# contains all the cookies received so far.
def test_ietf_example_2(self):
from mechanize import CookieJar, DefaultCookiePolicy
# 5.2 Example 2
#
# This example illustrates the effect of the Path attribute. All detail
# of request and response headers has been omitted. Assume the user agent
# has no stored cookies.
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
# Imagine the user agent has received, in response to earlier requests,
# the response headers
#
# Set-Cookie2: Part_Number="Rocket_Launcher_0001"; Version="1";
# Path="/acme"
#
# and
#
# Set-Cookie2: Part_Number="Riding_Rocket_0023"; Version="1";
# Path="/acme/ammo"
interact_2965(
c, "http://www.acme.com/acme/ammo/specific",
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"',
'Part_Number="Riding_Rocket_0023"; Version="1"; Path="/acme/ammo"')
# A subsequent request by the user agent to the (same) server for URLs of
# the form /acme/ammo/... would include the following request header:
#
# Cookie: $Version="1";
# Part_Number="Riding_Rocket_0023"; $Path="/acme/ammo";
# Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Note that the NAME=VALUE pair for the cookie with the more specific Path
# attribute, /acme/ammo, comes before the one with the less specific Path
# attribute, /acme. Further note that the same cookie name appears more
# than once.
cookie = interact_2965(c, "http://www.acme.com/acme/ammo/...")
assert re.search(r"Riding_Rocket_0023.*Rocket_Launcher_0001", cookie)
# A subsequent request by the user agent to the (same) server for a URL of
# the form /acme/parts/ would include the following request header:
#
# Cookie: $Version="1"; Part_Number="Rocket_Launcher_0001"; $Path="/acme"
#
# Here, the second cookie's Path attribute /acme/ammo is not a prefix of
# the request URL, /acme/parts/, so the cookie does not get forwarded to
# the server.
cookie = interact_2965(c, "http://www.acme.com/acme/parts/")
assert (cookie.find("Rocket_Launcher_0001") != -1 and
not cookie.find("Riding_Rocket_0023") != -1)
def test_rejection(self):
# Test rejection of Set-Cookie2 responses based on domain, path, port.
from mechanize import LWPCookieJar, DefaultCookiePolicy
pol = DefaultCookiePolicy(rfc2965=True)
c = LWPCookieJar(policy=pol)
max_age = "max-age=3600"
# illegal domain (no embedded dots)
cookie = interact_2965(c, "http://www.acme.com",
'foo=bar; domain=".com"; version=1')
assert not c
# legal domain
cookie = interact_2965(c, "http://www.acme.com",
'ping=pong; domain="acme.com"; version=1')
assert len(c) == 1
# illegal domain (host prefix "www.a" contains a dot)
cookie = interact_2965(c, "http://www.a.acme.com",
'whiz=bang; domain="acme.com"; version=1')
assert len(c) == 1
# legal domain
cookie = interact_2965(c, "http://www.a.acme.com",
'wow=flutter; domain=".a.acme.com"; version=1')
assert len(c) == 2
# can't partially match an IP-address
cookie = interact_2965(c, "http://125.125.125.125",
'zzzz=ping; domain="125.125.125"; version=1')
assert len(c) == 2
# illegal path (must be prefix of request path)
cookie = interact_2965(c, "http://www.sol.no",
'blah=rhubarb; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 2
# legal path
cookie = interact_2965(c, "http://www.sol.no/foo/bar",
'bing=bong; domain=".sol.no"; path="/foo"; '
'version=1')
assert len(c) == 3
# illegal port (request-port not in list)
cookie = interact_2965(c, "http://www.sol.no",
'whiz=ffft; domain=".sol.no"; port="90,100"; '
'version=1')
assert len(c) == 3
# legal port
cookie = interact_2965(
c, "http://www.sol.no",
r'bang=wallop; version=1; domain=".sol.no"; '
r'port="90,100, 80,8080"; '
r'max-age=100; Comment = "Just kidding! (\"|\\\\) "')
assert len(c) == 4
# port attribute without any value (current port)
cookie = interact_2965(c, "http://www.sol.no",
'foo9=bar; version=1; domain=".sol.no"; port; '
'max-age=100;')
assert len(c) == 5
# encoded path
# LWP has this test, but unescaping allowed path characters seems
# like a bad idea, so I think this should fail:
## cookie = interact_2965(c, "http://www.sol.no/foo/",
## r'foo8=bar; version=1; path="/%66oo"')
# but this is OK, because '<' is not an allowed HTTP URL path
# character:
cookie = interact_2965(c, "http://www.sol.no/<oo/",
r'foo8=bar; version=1; path="/%3coo"')
assert len(c) == 6
# save and restore
filename = "lwp-cookies.txt"
try:
c.save(filename, ignore_discard=True)
old = repr(c)
c = LWPCookieJar(policy=pol)
c.load(filename, ignore_discard=True)
finally:
try: os.unlink(filename)
except OSError: pass
assert old == repr(c)
def test_url_encoding(self):
# Try some URL encodings of the PATHs.
# (the behaviour here has changed from libwww-perl)
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/foo%2f%25/%3c%3c%0Anew%E5/%E5",
"foo = bar; version = 1")
cookie = interact_2965(
c, "http://www.acme.com/foo%2f%25/<<%0anew\345/\346\370\345",
'bar=baz; path="/foo/"; version=1');
version_re = re.compile(r'^\$version=\"?1\"?', re.I)
assert (cookie.find("foo=bar") != -1 and
version_re.search(cookie))
cookie = interact_2965(
c, "http://www.acme.com/foo/%25/<<%0anew\345/\346\370\345")
assert not cookie
# unicode URL doesn't raise exception, as it used to!
cookie = interact_2965(c, u"http://www.acme.com/\xfc")
def test_mozilla(self):
# Save / load Mozilla/Netscape cookie file format.
from mechanize import MozillaCookieJar, DefaultCookiePolicy
year_plus_one = localtime(time.time())[0] + 1
filename = "cookies.txt"
c = MozillaCookieJar(filename,
policy=DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://www.acme.com/",
"foo1=bar; max-age=100; Version=1")
interact_2965(c, "http://www.acme.com/",
'foo2=bar; port="80"; max-age=100; Discard; Version=1')
interact_2965(c, "http://www.acme.com/", "foo3=bar; secure; Version=1")
expires = "expires=09-Nov-%d 23:12:40 GMT" % (year_plus_one,)
interact_netscape(c, "http://www.foo.com/",
"fooa=bar; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"foob=bar; Domain=.foo.com; %s" % expires)
interact_netscape(c, "http://www.foo.com/",
"fooc=bar; Domain=www.foo.com; %s" % expires)
def save_and_restore(cj, ignore_discard, filename=filename):
from mechanize import MozillaCookieJar, DefaultCookiePolicy
try:
cj.save(ignore_discard=ignore_discard)
new_c = MozillaCookieJar(filename,
DefaultCookiePolicy(rfc2965=True))
new_c.load(ignore_discard=ignore_discard)
finally:
try: os.unlink(filename)
except OSError: pass
return new_c
new_c = save_and_restore(c, True)
assert len(new_c) == 6 # none discarded
assert repr(new_c).find("name='foo1', value='bar'") != -1
new_c = save_and_restore(c, False)
assert len(new_c) == 4 # 2 of them discarded on save
assert repr(new_c).find("name='foo1', value='bar'") != -1
+ def test_mozilla_cookiejar_embedded_tab(self):
+ from mechanize import MozillaCookieJar
+ filename = tempfile.mktemp()
+ fh = open(filename, "w")
+ try:
+ fh.write(
+ MozillaCookieJar.header + "\n" +
+ "a.com\tFALSE\t/\tFALSE\t\tname\tval\tstillthevalue\n"
+ "a.com\tFALSE\t/\tFALSE\t\tname2\tvalue\n")
+ fh.close()
+ cj = MozillaCookieJar(filename)
+ cj.revert(ignore_discard=True)
+ cookies = cj._cookies["a.com"]["/"]
+ self.assertEquals(cookies["name"].value, "val\tstillthevalue")
+ self.assertEquals(cookies["name2"].value, "value")
+ finally:
+ try:
+ os.remove(filename)
+ except OSError, exc:
+ if exc.errno != errno.EEXIST:
+ raise
+
+ def test_mozilla_cookiejar_initial_dot_violation(self):
+ from mechanize import MozillaCookieJar, LoadError
+ filename = tempfile.mktemp()
+ fh = open(filename, "w")
+ try:
+ fh.write(
+ MozillaCookieJar.header + "\n" +
+ ".a.com\tFALSE\t/\tFALSE\t\tname\tvalue\n")
+ fh.close()
+ cj = MozillaCookieJar(filename)
+ self.assertRaises(LoadError, cj.revert, ignore_discard=True)
+ finally:
+ try:
+ os.remove(filename)
+ except OSError, exc:
+ if exc.errno != errno.EEXIST:
+ raise
+
def test_netscape_misc(self):
# Some additional Netscape cookies tests.
from mechanize import CookieJar, Request
c = CookieJar()
headers = []
req = Request("http://foo.bar.acme.com/foo")
# Netscape allows a host part that contains dots
headers.append("Set-Cookie: Customer=WILE_E_COYOTE; domain=.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
# Netscape also allows the domain to be the same as the host, without a
# leading dot.  Should not quote even if strange chars are used
# in the cookie value.
headers.append("Set-Cookie: PART_NUMBER=3,4; domain=foo.bar.acme.com")
res = FakeResponse(headers, "http://www.acme.com/foo")
c.extract_cookies(res, req)
req = Request("http://foo.bar.acme.com/foo")
c.add_cookie_header(req)
assert (
req.get_header("Cookie").find("PART_NUMBER=3,4") != -1 and
req.get_header("Cookie").find("Customer=WILE_E_COYOTE") != -1)
def test_intranet_domains_2965(self):
# Test handling of local intranet hostnames without a dot.
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965=True))
interact_2965(c, "http://example/",
"foo1=bar; PORT; Discard; Version=1;")
cookie = interact_2965(c, "http://example/",
'foo2=bar; domain=".local"; Version=1')
assert cookie.find("foo1=bar") >= 0
interact_2965(c, "http://example/", 'foo3=bar; Version=1')
cookie = interact_2965(c, "http://example/")
assert cookie.find("foo2=bar") >= 0 and len(c) == 3
def test_intranet_domains_ns(self):
from mechanize import CookieJar, DefaultCookiePolicy
c = CookieJar(DefaultCookiePolicy(rfc2965 = False))
interact_netscape(c, "http://example/", "foo1=bar")
cookie = interact_netscape(c, "http://example/",
'foo2=bar; domain=.local')
assert len(c) == 2
assert cookie.find("foo1=bar") >= 0
cookie = interact_netscape(c, "http://example/")
assert cookie.find("foo2=bar") >= 0 and len(c) == 2
def test_empty_path(self):
from mechanize import CookieJar, Request, DefaultCookiePolicy
# Test for empty path
# Broken web-server ORION/1.3.38 returns to the client a response like
#
# Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=
#
# i.e. with Path set to nothing.
# In this case, extract_cookies() must set the cookie path to / (root)
c = CookieJar(DefaultCookiePolicy(rfc2965 = True))
headers = []
req = Request("http://www.ants.com/")
headers.append("Set-Cookie: JSESSIONID=ABCDERANDOM123; Path=")
res = FakeResponse(headers, "http://www.ants.com/")
c.extract_cookies(res, req)
req = Request("http://www.ants.com/")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "JSESSIONID=ABCDERANDOM123" and
req.get_header("Cookie2") == '$Version="1"')
# missing path in the request URI
req = Request("http://www.ants.com:8080")
c.add_cookie_header(req)
assert (req.get_header("Cookie") == "JSESSIONID=ABCDERANDOM123" and
req.get_header("Cookie2") == '$Version="1"')
# The correctness of this test is undefined, in the absence of RFC 2965 errata.
## def test_netscape_rfc2965_interop(self):
## # Test mixing of Set-Cookie and Set-Cookie2 headers.
## from mechanize import CookieJar
## # Example from http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl
## # which gives up these headers:
## #
## # HTTP/1.1 200 OK
## # Connection: close
## # Date: Fri, 20 Jul 2001 19:54:58 GMT
## # Server: Apache/1.3.19 (Unix) ApacheJServ/1.1.2
## # Content-Type: text/html
## # Content-Type: text/html; charset=iso-8859-1
## # Link: </trip/stylesheet.css>; rel="stylesheet"; type="text/css"
## # Servlet-Engine: Tomcat Web Server/3.2.1 (JSP 1.1; Servlet 2.2; Java 1.3.0; SunOS 5.8 sparc; java.vendor=Sun Microsystems Inc.)
## # Set-Cookie: trip.appServer=1111-0000-x-024;Domain=.trip.com;Path=/
## # Set-Cookie: JSESSIONID=fkumjm7nt1.JS24;Path=/trs
## # Set-Cookie2: JSESSIONID=fkumjm7nt1.JS24;Version=1;Discard;Path="/trs"
## # Title: TRIP.com Travel - FlightTRACKER
## # X-Meta-Description: Trip.com privacy policy
## # X-Meta-Keywords: privacy policy
## req = urllib2.Request(
## 'http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl')
## headers = []
## headers.append("Set-Cookie: trip.appServer=1111-0000-x-024;Domain=.trip.com;Path=/")
## headers.append("Set-Cookie: JSESSIONID=fkumjm7nt1.JS24;Path=/trs")
## headers.append('Set-Cookie2: JSESSIONID=fkumjm7nt1.JS24;Version=1;Discard;Path="/trs"')
## res = FakeResponse(
## headers,
## 'http://www.trip.com/trs/trip/flighttracker/flight_tracker_home.xsl')
## #print res
## c = CookieJar()
## c.extract_cookies(res, req)
## #print c
## print str(c)
## print """Set-Cookie3: trip.appServer="1111-0000-x-024"; path="/"; domain=".trip.com"; path_spec; discard; version=0
## Set-Cookie3: JSESSIONID="fkumjm7nt1.JS24"; path="/trs"; domain="www.trip.com"; path_spec; discard; version=1
## """
## assert c.as_lwp_str() == """Set-Cookie3: trip.appServer="1111-0000-x-024"; path="/"; domain=".trip.com"; path_spec; discard; version=0
## Set-Cookie3: JSESSIONID="fkumjm7nt1.JS24"; path="/trs"; domain="www.trip.com"; path_spec; discard; version=1
## """
def test_session_cookies(self):
from mechanize import CookieJar, Request
year_plus_one = localtime(time.time())[0] + 1
# Check session cookies are deleted properly by
# CookieJar.clear_session_cookies method
req = Request('http://www.perlmeister.com/scripts')
headers = []
headers.append("Set-Cookie: s1=session;Path=/scripts")
headers.append("Set-Cookie: p1=perm; Domain=.perlmeister.com;"
"Path=/;expires=Fri, 02-Feb-%d 23:24:20 GMT" %
year_plus_one)
headers.append("Set-Cookie: p2=perm;Path=/;expires=Fri, "
"02-Feb-%d 23:24:20 GMT" % year_plus_one)
headers.append("Set-Cookie: s2=session;Path=/scripts;"
"Domain=.perlmeister.com")
headers.append('Set-Cookie2: s3=session;Version=1;Discard;Path="/"')
res = FakeResponse(headers, 'http://www.perlmeister.com/scripts')
c = CookieJar()
c.extract_cookies(res, req)
# How many session/permanent cookies do we have?
counter = {"session_after": 0,
"perm_after": 0,
"session_before": 0,
"perm_before": 0}
for cookie in c:
key = "%s_before" % cookie.value
counter[key] = counter[key] + 1
c.clear_session_cookies()
# How many now?
for cookie in c:
key = "%s_after" % cookie.value
counter[key] = counter[key] + 1
assert not (
# a permanent cookie got lost accidentally
counter["perm_after"] != counter["perm_before"] or
# a session cookie hasn't been cleared
counter["session_after"] != 0 or
# we didn't have session cookies in the first place
counter["session_before"] == 0)
if __name__ == "__main__":
import unittest
unittest.main()
|
Almad/Mechanize
|
4cd674839fba5d345d0e12cc1f5330a6edccfb03
|
Avoid running doctest files that need special context by naming convention rather than special-purpose code
|
diff --git a/test.py b/test.py
index 31d9363..fa5d0f6 100755
--- a/test.py
+++ b/test.py
@@ -1,159 +1,146 @@
#!/usr/bin/env python
"""Test runner.
For further help, enter this at a command prompt:
python test.py --help
"""
# Modules containing tests to run -- a test is anything named *Tests, which
# should be classes deriving from unittest.TestCase.
MODULE_NAMES = ["test_date", "test_browser", "test_response", "test_cookies",
"test_headers", "test_urllib2", "test_pullparser",
"test_useragent", "test_html", "test_opener",
]
import sys, os, logging, glob
#level = logging.DEBUG
#level = logging.INFO
#level = logging.WARNING
#level = logging.NOTSET
#logging.getLogger("mechanize").setLevel(level)
#logging.getLogger("mechanize").addHandler(logging.StreamHandler(sys.stdout))
if __name__ == "__main__":
# XXX
# temporary stop-gap to run doctests &c.
# should switch to nose or something
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# XXXX coverage output seems incorrect ATM
run_coverage = "-c" in sys.argv
if run_coverage:
sys.argv.remove("-c")
use_cgitb = "-t" in sys.argv
if use_cgitb:
sys.argv.remove("-t")
run_doctests = "-d" not in sys.argv
if not run_doctests:
sys.argv.remove("-d")
run_unittests = "-u" not in sys.argv
if not run_unittests:
sys.argv.remove("-u")
# import local copy of Python 2.5 doctest
assert os.path.isdir("test")
sys.path.insert(0, "test")
# needed for recent doctest / linecache -- this is only for testing
# purposes, these don't get installed
# doctest.py revision 45701 and linecache.py revision 45940. Since
# linecache is used by Python itself, linecache.py is renamed
# linecache_copy.py, and this copy of doctest is modified (only) to use
# that renamed module.
sys.path.insert(0, "test-tools")
import doctest
import testprogram
if run_coverage:
import coverage
print 'running coverage'
coverage.erase()
coverage.start()
import mechanize
class DefaultResult:
def wasSuccessful(self):
return True
result = DefaultResult()
if run_doctests:
# run .doctest files needing special support
common_globs = {"mechanize": mechanize}
pm_doctest_filename = os.path.join(
- "test", "test_password_manager.doctest")
+ "test", "test_password_manager.special_doctest")
for globs in [
{"mgr_class": mechanize.HTTPPasswordMgr},
{"mgr_class": mechanize.HTTPProxyPasswordMgr},
]:
globs.update(common_globs)
- doctest.testfile(
- pm_doctest_filename,
- #os.path.join("test", "test_scratch.doctest"),
- globs=globs,
- )
+ doctest.testfile(pm_doctest_filename, globs=globs)
try:
import robotparser
except ImportError:
pass
else:
- doctest.testfile(os.path.join("test",
- "test_robotfileparser.doctest"))
+ doctest.testfile(os.path.join(
+ "test", "test_robotfileparser.special_doctest"))
# run .doctest files
- special_doctests = [pm_doctest_filename,
- os.path.join("test", "test_scratch.doctest"),
- os.path.join("test",
- "test_robotfileparser.doctest"),
- ]
doctest_files = glob.glob(os.path.join("test", "*.doctest"))
-
- for dt in special_doctests:
- if dt in doctest_files:
- doctest_files.remove(dt)
for df in doctest_files:
doctest.testfile(df)
# run doctests in docstrings
from mechanize import _headersutil, _auth, _clientcookie, _pullparser, \
_http, _rfc3986, _useragent
doctest.testmod(_headersutil)
doctest.testmod(_rfc3986)
doctest.testmod(_auth)
doctest.testmod(_clientcookie)
doctest.testmod(_pullparser)
doctest.testmod(_http)
doctest.testmod(_useragent)
if run_unittests:
# run vanilla unittest tests
import unittest
test_path = os.path.join(os.path.dirname(sys.argv[0]), "test")
sys.path.insert(0, test_path)
test_runner = None
if use_cgitb:
test_runner = testprogram.CgitbTextTestRunner()
prog = testprogram.TestProgram(
MODULE_NAMES,
testRunner=test_runner,
localServerProcess=testprogram.TwistedServerProcess(),
)
result = prog.runTests()
if run_coverage:
# HTML coverage report
import colorize
try:
os.mkdir("coverage")
except OSError:
pass
private_modules = glob.glob("mechanize/_*.py")
private_modules.remove("mechanize/__init__.py")
for module_filename in private_modules:
module_name = module_filename.replace("/", ".")[:-3]
print module_name
module = sys.modules[module_name]
f, s, m, mf = coverage.analysis(module)
fo = open(os.path.join('coverage', os.path.basename(f)+'.html'), 'wb')
colorize.colorize_file(f, outstream=fo, not_covered=mf)
fo.close()
coverage.report(module)
#print coverage.analysis(module)
# XXX exit status is wrong -- does not take account of doctests
sys.exit(not result.wasSuccessful())
diff --git a/test/test_password_manager.doctest b/test/test_password_manager.special_doctest
similarity index 100%
rename from test/test_password_manager.doctest
rename to test/test_password_manager.special_doctest
diff --git a/test/test_robotfileparser.doctest b/test/test_robotfileparser.special_doctest
similarity index 100%
rename from test/test_robotfileparser.doctest
rename to test/test_robotfileparser.special_doctest
|
Almad/Mechanize
|
e82a08010a9a9cd98f82f9be44ab75b20531f436
|
Minor doc fixes
|
diff --git a/doc.html.in b/doc.html.in
index 607e86d..3af51c6 100644
--- a/doc.html.in
+++ b/doc.html.in
@@ -135,814 +135,814 @@ means), you don't need to call <code>.extract_cookies()</code> or
<code>.add_cookie_header()</code> yourself</strong>. If, on the other hand,
you don't want to use <code>urllib2</code>, you will need to use this pair of
methods. You can make your own <code>request</code> and <code>response</code>
objects, which must support the interfaces described in the docstrings of
<code>.extract_cookies()</code> and <code>.add_cookie_header()</code>.
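<p>For example, a minimal sketch of driving this pair of methods by hand
(using mechanize's own request and response objects; the URLs are just
placeholders):
@{colorize(r"""
import mechanize
cj = mechanize.CookieJar()
request = mechanize.Request("http://example.com/")
response = mechanize.urlopen(request)
cj.extract_cookies(response, request)  # remember cookies from the response
request2 = mechanize.Request("http://example.com/spam")
cj.add_cookie_header(request2)  # adds a Cookie header if any cookies match
""")}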
<p>There are also some <code>CookieJar</code> subclasses which can store
cookies in files and databases. <code>FileCookieJar</code> is the abstract
class for <code>CookieJar</code>s that can store cookies in disk files.
<code>LWPCookieJar</code> saves cookies in a format compatible with the
libwww-perl library. This class is convenient if you want to store cookies in
a human-readable file:
@{colorize(r"""
import mechanize
cj = mechanize.LWPCookieJar()
cj.revert("cookie3.txt")
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
r = opener.open("http://foobar.com/")
cj.save("cookie3.txt")
""")}
<p>The <code>.revert()</code> method discards all existing cookies held by the
<code>CookieJar</code> (it won't lose any existing cookies if the load fails).
The <code>.load()</code> method, on the other hand, adds the loaded cookies to
existing cookies held in the <code>CookieJar</code> (old cookies are kept
unless overwritten by newly loaded ones).
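<p>In other words (a sketch; the file names are arbitrary):
@{colorize(r"""
import mechanize
cj = mechanize.LWPCookieJar()
cj.load("first.txt")  # adds to whatever the jar already holds
cj.revert("second.txt")  # clears the jar first, then loads
""")}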
<p><code>MozillaCookieJar</code> can load and save to the
Mozilla/Netscape/lynx-compatible <code>'cookies.txt'</code> format. This
format loses some information (unusual and nonstandard cookie attributes such
as comment, and also information specific to RFC 2965 cookies). The subclass
<code>MSIECookieJar</code> can load (but not save, yet) from Microsoft Internet
Explorer's cookie files (on Windows). <code>BSDDBCookieJar</code> (NOT FULLY
TESTED!) saves to a BSDDB database using the standard library's
<code>bsddb</code> module. There's an unfinished <code>MSIEDBCookieJar</code>,
which uses (reads and writes) the Windows MSIE cookie database directly, rather
than storing copies of cookies as <code>MSIECookieJar</code> does.
<h2>Important note</h2>
<p>Only use names you can import directly from the <code>mechanize</code>
package, and that don't start with a single underscore. Everything else is
subject to change or disappearance without notice.
<a name="browsers"></a>
<h2>Cooperating with Mozilla/Netscape, lynx and Internet Explorer</h2>
<p>The subclass <code>MozillaCookieJar</code> differs from
<code>CookieJar</code> only in storing cookies using a different,
Mozilla/Netscape-compatible, file format. The lynx browser also uses this
format. This file format can't store RFC 2965 cookies, so they are downgraded
to Netscape cookies on saving. <code>LWPCookieJar</code> itself uses a
libwww-perl specific format (`Set-Cookie3') - see the example above. Python
and your browser should be able to share a cookies file (note that the file
location here will differ on non-unix OSes):
<p><strong>WARNING:</strong> you may want to backup your browser's cookies file
if you use <code>MozillaCookieJar</code> to save cookies. I <em>think</em> it
works, but there have been bugs in the past!
@{colorize(r"""
import os, mechanize
cookies = mechanize.MozillaCookieJar()
cookies.load(os.path.join(os.environ["HOME"], ".netscape/cookies.txt"))
# see also the save and revert methods
""")}
<p>Note that cookies saved while Mozilla is running will get clobbered by
Mozilla - see <code>MozillaCookieJar.__doc__</code>.
<p><code>MSIECookieJar</code> does the same for Microsoft Internet Explorer
(MSIE) 5.x and 6.x on Windows, but does not allow saving cookies in this
format. In future, the Windows API calls might be used to load and save
(though the index has to be read directly, since there is no API for that,
AFAIK; there's also an unfinished <code>MSIEDBCookieJar</code>, which uses
(reads and writes) the Windows MSIE cookie database directly, rather than
storing copies of cookies as <code>MSIECookieJar</code> does).
@{colorize(r"""
import mechanize
cj = mechanize.MSIECookieJar(delayload=True)
cj.load_from_registry() # finds cookie index file from registry
""")}
<p>A true <code>delayload</code> argument speeds things up.
<p>On Windows 9x (win 95, win 98, win ME), you need to supply a username to the
<code>.load_from_registry()</code> method:
@{colorize(r"""
cj.load_from_registry(username="jbloggs")
""")}
<p>Konqueror/Safari and Opera use different file formats, which aren't yet
supported.
<a name="file"></a>
<h2>Saving cookies in a file</h2>
<p>If you have no need to co-operate with a browser, the most convenient way to
save cookies on disk between sessions in human-readable form is to use
<code>LWPCookieJar</code>. This class uses a libwww-perl specific format
(`Set-Cookie3'). Unlike <code>MozillaCookieJar</code>, this file format
doesn't lose information.
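<p>A minimal sketch (the file name is arbitrary):
@{colorize(r"""
import mechanize
cj = mechanize.LWPCookieJar("cookies.lwp")
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
r = opener.open("http://example.com/")
cj.save(ignore_discard=True)  # later, cj.load() restores them
""")}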
<a name="cookiejar"></a>
<h2>Using your own CookieJar instance</h2>
<p>You might want to do this to <a href="./doc.html#browsers">use your
browser's cookies</a>, to customize <code>CookieJar</code>'s behaviour by
passing constructor arguments, or to be able to get at the cookies it will hold
(for example, for saving cookies between sessions and for debugging).
<p>If you're using the higher-level <code>urllib2</code>-like interface
(<code>urlopen()</code>, etc), you'll have to let it know what
<code>CookieJar</code> it should use:
@{colorize(r"""
import mechanize
cookies = mechanize.CookieJar()
# build_opener() adds standard handlers (such as HTTPHandler and
# HTTPCookieProcessor) by default. The cookie processor we supply
# will replace the default one.
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cookies))
r = opener.open("http://acme.com/") # GET
r = opener.open("http://acme.com/", data) # POST
""")}
<p>The <code>urlopen()</code> function uses a global
<code>OpenerDirector</code> instance to do its work, so if you want to use
<code>urlopen()</code> with your own <code>CookieJar</code>, install the
<code>OpenerDirector</code> you built with <code>build_opener()</code> using
the <code>mechanize.install_opener()</code> function, then proceed as usual:
@{colorize(r"""
mechanize.install_opener(opener)
r = mechanize.urlopen("http://www.acme.com/")
""")}
<p>Of course, everyone using <code>urlopen</code> is using the same global
<code>CookieJar</code> instance!
<a name="policy"></a>
<p>You can set a policy object (must satisfy the interface defined by
<code>mechanize.CookiePolicy</code>), which determines which cookies are
allowed to be set and returned. Use the policy argument to the
<code>CookieJar</code> constructor, or use the .set_policy() method. The
default implementation has some useful switches:
@{colorize(r"""
from mechanize import CookieJar, DefaultCookiePolicy as Policy
cookies = CookieJar()
# turn on RFC 2965 cookies, be more strict about domains when setting and
# returning Netscape cookies, and block some domains from setting cookies
# or having them returned (read the DefaultCookiePolicy docstring for the
# domain matching rules here)
policy = Policy(rfc2965=True, strict_ns_domain=Policy.DomainStrict,
blocked_domains=["ads.net", ".ads.net"])
cookies.set_policy(policy)
""")}
<a name="extras"></a>
<h2>Optional extras: robots.txt, HTTP-EQUIV, Refresh, Referer</h2>
<p>These are implemented as processor classes. Processors are an extension of
<code>urllib2</code>'s handlers (now a standard part of urllib2 in Python 2.4):
you just pass them to <code>build_opener()</code> (example code below).
<dl>
<dt><code>HTTPRobotRulesProcessor</code>
<dd><p>WWW Robots (also called wanderers or spiders) are programs that traverse
many pages in the World Wide Web by recursively retrieving linked pages. This
kind of program can place significant loads on web servers, so there is a <a
href="http://www.robotstxt.org/wc/norobots.html">standard</a> for a <code>
robots.txt</code> file by which web site operators can request robots to keep
out of their site, or out of particular areas of it. This processor uses the
standard Python library's <code>robotparser</code> module. It raises
<code>mechanize.RobotExclusionError</code> (subclass of
<code>urllib2.HTTPError</code>) if an attempt is made to open a URL prohibited
by <code>robots.txt</code>. XXX ATM, this makes use of code in the
<code>robotparser</code> module that uses <code>urllib</code> - this will
likely change in future to use <code>urllib2</code>.
<dt><code>HTTPEquivProcessor</code>
<dd><p>The <code><META HTTP-EQUIV></code> tag is a way of including data
in HTML to be treated as if it were part of the HTTP headers. mechanize can
automatically read these tags and add the <code>HTTP-EQUIV</code> headers to
the response object's real HTTP headers. The HTML is left unchanged.
<dt><code>HTTPRefreshProcessor</code>
<dd><p>The <code>Refresh</code> HTTP header is a non-standard header which is
widely used. It requests that the user-agent follow a URL after a specified
time delay. mechanize can treat these headers (which may have been set in
<code><META HTTP-EQUIV></code> tags) as if they were 302 redirections.
Exactly when and how <code>Refresh</code> headers are handled is configurable
using the constructor arguments.
<dt><code>HTTPRefererProcessor</code>
<dd><p>The <code>Referer</code> HTTP header lets the server know which URL
you've just visited. Some servers use this header as state information, and
don't like it if this is not present. It's a chore to add this header by hand
every time you make a request. This adds it automatically.
<strong>NOTE</strong>: this only makes sense if you use each processor for a
single chain of HTTP requests (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single page,
<strong>this will break</strong>). <a
href="../mechanize/">mechanize.Browser</a> does this properly.</p>
</dl>
@{colorize(r"""
import mechanize
cookies = mechanize.CookieJar()
opener = mechanize.build_opener(mechanize.HTTPRefererProcessor,
mechanize.HTTPEquivProcessor,
mechanize.HTTPRefreshProcessor,
)
opener.open("http://www.rhubarb.com/")
""")}
<a name="seekable"></a>
<h2>Seekable responses</h2>
<p>Response objects returned from (or raised as exceptions by)
<code>mechanize.SeekableResponseOpener</code>, <code>mechanize.UserAgent</code>
(if <code>.set_seekable_responses(True)</code> has been called) and
<code>mechanize.Browser()</code> have <code>.seek()</code>,
<code>.get_data()</code> and <code>.set_data()</code> methods:
@{colorize(r"""
import mechanize
opener = mechanize.OpenerFactory(mechanize.SeekableResponseOpener).build_opener()
response = opener.open("http://example.com/")
# same return value as .read(), but without affecting seek position
total_nr_bytes = len(response.get_data())
assert len(response.read()) == total_nr_bytes
assert len(response.read()) == 0 # we've already read the data
response.seek(0)
assert len(response.read()) == total_nr_bytes
response.set_data("blah\n")
assert response.get_data() == "blah\n"
...
""")}
<p>This caching behaviour can be avoided by using
<code>mechanize.OpenerDirector</code> (as long as
<code>SeekableProcessor</code>, <code>HTTPEquivProcessor</code> and
<code>HTTPResponseDebugProcessor</code> are not used). It can also be avoided
with <code>mechanize.UserAgent</code>:
@{colorize(r"""
import mechanize
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
ua.set_debug_responses(False)
""")}
<p>Note that if you turn on features that use seekable responses (currently:
HTTP-EQUIV handling and response body debug printing), returned responses
<em>may</em> be seekable as a side-effect of these features. However, this is
not guaranteed (currently, in these cases, returned response objects are
seekable, but raised response objects — <code>mechanize.HTTPError</code>
instances — are not seekable). This applies regardless of whether you
use <code>mechanize.UserAgent</code> or <code>mechanize.OpenerDirector</code>.
If you explicitly request seekable responses by calling
<code>.set_seekable_responses(True)</code> on a
<code>mechanize.UserAgent</code> instance, or by using
<code>mechanize.Browser</code> or
<code>mechanize.SeekableResponseOpener</code>, which always return seekable
responses, then both returned and raised responses are guaranteed to be
seekable.
<p>Handlers should call <code>response =
mechanize.seek_wrapped_response(response)</code> if they require the
<code>.seek()</code>, <code>.get_data()</code> or <code>.set_data()</code>
methods.
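<p>For example, a hypothetical processor that needs to peek at the response
body (a sketch, not one of mechanize's own handlers):
@{colorize(r"""
import mechanize

class PeekProcessor(mechanize.BaseHandler):
    def http_response(self, request, response):
        response = mechanize.seek_wrapped_response(response)
        data = response.get_data()  # read the body...
        response.seek(0)  # ...then rewind for later consumers
        return response
    https_response = http_response
""")}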
<p>Note that <code>SeekableProcessor</code> (and
<code>ResponseUpgradeProcessor</code>) are deprecated since mechanize 0.1.6b.
The reason for the deprecation is that these were really abuses of the response
processing chain (the <code>.process_response()</code> support documented by
urllib2). The response processing chain is sensibly used only for processing
response headers and data, not for processing response <em>objects</em>,
because the same data may occur as different Python objects (this can occur for
example when <code>HTTPError</code> is raised by
<code>HTTPDefaultErrorHandler</code>), but should only get processed once
(during <code>.open()</code>).
<a name="requests"></a>
<h2>Confusing fact about headers and Requests</h2>
<p>mechanize automatically upgrades <code>urllib2.Request</code> objects to
<code>mechanize.Request</code>, as a backwards-compatibility hack. This
means that you won't see any headers that are added to Request objects by
handlers unless you use <code>mechanize.Request</code> in the first place.
Sorry about that.
<p>Note also that handlers may create new <code>Request</code> instances (for
example when performing redirects) rather than adding headers to existing
<code>Request</code> objects.
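<p>So, to see the headers that handlers add, construct the request yourself
(a sketch; <code>header_items()</code> is the standard <code>Request</code>
method from Python 2.4):
@{colorize(r"""
import mechanize
request = mechanize.Request("http://example.com/")
response = mechanize.urlopen(request)
print request.header_items()  # now includes headers added by handlers
""")}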
<a name="headers"></a>
<h2>Adding headers</h2>
<p>Adding headers is done like so:
@{colorize(r"""
import mechanize, urllib2
req = urllib2.Request("http://foobar.com/")
req.add_header("Referer", "http://wwwsearch.sourceforge.net/mechanize/")
r = mechanize.urlopen(req)
""")}
<p>You can also use the headers argument to the <code>urllib2.Request</code>
constructor.
<p><code>urllib2</code> (in fact, mechanize takes over this task from
<code>urllib2</code>) adds some headers to <code>Request</code> objects
automatically - see the next section for details.
<h2>Changing the automatically-added headers (User-Agent)</h2>
<p><code>OpenerDirector</code> automatically adds a <code>User-Agent</code>
header to every <code>Request</code>.
<p>To change this and/or add similar headers, use your own
<code>OpenerDirector</code>:
@{colorize(r"""
import mechanize
cookies = mechanize.CookieJar()
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cookies))
opener.addheaders = [("User-agent", "Mozilla/5.0 (compatible; MyProgram/0.1)"),
("From", "[email protected]")]
""")}
<p>Again, to use <code>urlopen()</code>, install your
<code>OpenerDirector</code> globally:
@{colorize(r"""
mechanize.install_opener(opener)
r = mechanize.urlopen("http://acme.com/")
""")}
<p>Also, a few standard headers (<code>Content-Length</code>,
<code>Content-Type</code> and <code>Host</code>) are added when the
<code>Request</code> is passed to <code>urlopen()</code> (or
<code>OpenerDirector.open()</code>). mechanize explicitly adds these (and
<code>User-Agent</code>) to the <code>Request</code> object, unlike versions of
<code>urllib2</code> before Python 2.4 (but <strong>note</strong> that
Content-Length is an exception to this rule: it is sent, but not explicitly
added to the <code>Request</code>'s headers; this is due to a bug in
<code>httplib</code> in Python 2.3 and earlier). You shouldn't need to change
these headers, but since this is done by <code>AbstractHTTPHandler</code>, you
can change the way it works by passing a subclass of that handler to
<code>build_opener()</code> (or, as always, by constructing an opener yourself
and calling .add_handler()).
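<p>A skeleton of that approach (a hypothetical subclass, assuming the
urllib2-style <code>http_request</code> preprocessing hook):
@{colorize(r"""
import mechanize

class MyHTTPHandler(mechanize.HTTPHandler):
    def http_request(self, request):
        # adjust the automatically-added headers here
        return mechanize.HTTPHandler.http_request(self, request)

opener = mechanize.build_opener(MyHTTPHandler)
""")}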
<a name="unverifiable"></a>
<h2>Initiating unverifiable transactions</h2>
<p>This section is only of interest for correct handling of third-party HTTP
cookies. See <a href="./doc.html#standards">below</a> for an explanation of
'third-party'.
<p>First, some terminology.
<p>An <em>unverifiable request</em> (defined fully by RFC 2965) is one whose
URL the user did not have the option to approve. For example, a transaction is
unverifiable if the request is for an image in an HTML document, and the user
had no option to approve the fetching of the image from a particular URL.
<p>The <em>request-host of the origin transaction</em> (defined fully by RFC
2965) is the host name or IP address of the original request that was initiated
by the user. For example, if the request is for an image in an HTML document,
this is the request-host of the request for the page containing the image.
<p><strong>mechanize knows that redirected transactions are unverifiable,
and will handle that on its own (ie. you don't need to think about the origin
request-host or verifiability yourself).</strong>
<p>If you want to initiate an unverifiable transaction yourself (which you
should if, for example, you're downloading the images from a page, and 'the
user' hasn't explicitly OKed those URLs):
<ol>
<li>If you're using a <code>urllib2.Request</code> from Python 2.3 or
earlier, set the <code>unverifiable</code> and <code>origin_req_host</code>
attributes on your <code>Request</code> instance:
@{colorize(r"""
request.unverifiable = True
request.origin_req_host = "www.example.com"
""")}
<li>If you're using a <code>urllib2.Request</code> from Python 2.4 or later,
or you're using a <code>mechanize.Request</code>, use the
<code>unverifiable</code> and <code>origin_req_host</code> arguments to the
constructor:
@{colorize(r"""
request = Request(origin_req_host="www.example.com", unverifiable=True)
""")}
</ol>
<a name="rfc2965"></a>
<h2>RFC 2965 handling</h2>
<p>RFC 2965 handling is switched off by default, because few browsers implement
it, so the RFC 2965 protocol is essentially never seen on the internet. To
switch it on, see <a href="./doc.html#policy">here</a>.
<a name="debugging"></a>
<h2>Debugging</h2>
<!--XXX move as much as poss. to General page-->
<p>First, a few common problems. The most frequent mistake people seem to make
is to use <code>mechanize.urlopen()</code>, <em>and</em> the
<code>.extract_cookies()</code> and <code>.add_cookie_header()</code> methods
on a cookie object themselves. If you use <code>mechanize.urlopen()</code>
(or <code>OpenerDirector.open()</code>), the module handles extraction and
adding of cookies by itself, so you should not call
<code>.extract_cookies()</code> or <code>.add_cookie_header()</code>.
<p>Are you sure the server is sending you any cookies in the first place?
Maybe the server is keeping track of state in some other way
(<code>HIDDEN</code> HTML form entries (possibly in a separate page referenced
by a frame), URL-encoded session keys, IP address, HTTP <code>Referer</code>
headers)? Perhaps some embedded script in the HTML is setting cookies (see
below)? Maybe you messed up your request, and the server is sending you some
standard failure page (even if the page doesn't appear to indicate any
failure). Sometimes, a server wants particular headers set to the values it
expects, or it won't play nicely. The most frequent offenders here are the
<code>Referer</code> [<em>sic</em>] and / or <code>User-Agent</code> HTTP
headers (<a href="./doc.html#headers">see above</a> for how to set these). The
<code>User-Agent</code> header may need to be set to a value like that of a
popular browser. The <code>Referer</code> header may need to be set to the URL
that the server expects you to have followed a link from. Occasionally, it may
even be that operators deliberately configure a server to insist on precisely
the headers that the popular browsers (MS Internet Explorer, Mozilla/Netscape,
Opera, Konqueror/Safari) generate, but remember that incompetence (possibly on
your part) is more probable than deliberate sabotage (and if a site owner is
that keen to stop robots, you probably shouldn't be scraping it anyway).
<p>When you <code>.save()</code> to or
<code>.load()</code>/<code>.revert()</code> from a file, single-session cookies
will expire unless you explicitly request otherwise with the
<code>ignore_discard</code> argument. This may be your problem if you find
cookies are going away after saving and loading.
@{colorize(r"""
import mechanize
cj = mechanize.LWPCookieJar()
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
mechanize.install_opener(opener)
r = mechanize.urlopen("http://foobar.com/")
cj.save("/some/file", ignore_discard=True, ignore_expires=True)
""")}
<p>If none of the advice above solves your problem quickly, try comparing the
headers and data that you are sending out with those that a browser emits.
Often this will give you the clue you need. Of course, you'll want to check
that the browser is able to do manually what you're trying to achieve
programmatically before minutely examining the headers. Make sure that what you
do manually is <em>exactly</em> the same as what you're trying to do from
Python - you may simply be hitting a server bug that only gets revealed if you
view pages in a particular order, for example. In order to see what your
browser is sending to the server (even if HTTPS is in use), see <a
href="../clientx.html">the General FAQ page</a>. If nothing is obviously wrong
with the requests your program is sending and you're out of ideas, you can try
the last resort of good old brute force binary-search debugging. Temporarily
switch to sending HTTP headers (with <code>httplib</code>). Start by copying
Netscape/Mozilla or IE slavishly (apart from session IDs, etc., of course),
then begin the tedious process of mutating your headers and data until they
match what your higher-level code was sending. This will at least reliably
find your problem.
<p>You can turn on display of HTTP headers:
@{colorize(r"""
import mechanize
hh = mechanize.HTTPHandler() # you might want HTTPSHandler, too
hh.set_http_debuglevel(1)
opener = mechanize.build_opener(hh)
response = opener.open(url)
""")}
<p>Alternatively, you can examine your individual request and response
objects to see what's going on. Note, though, that mechanize upgrades
-urllib2.Request objects to mechanize.Request, so you won't see any
-headers that are added to requests by handlers unless you use
-mechanize.Request in the first place. In addition, requests may
-involve "sub-requests" in cases such as redirection, in which case you
-will also not see everything that's going on just by examining the
-original request and final response. mechanize's responses can be
-made to have <code>.seek()</code> and <code>.get_data()</code>
-methods. It's often useful to use the <code>.get_data()</code> method
-during debugging (see <a href="./doc.html#seekable">above</a>).
+<code>urllib2.Request</code> objects to <code>mechanize.Request</code>, so you
+won't see any headers that are added to requests by handlers unless you use
+<code>mechanize.Request</code> in the first place. In addition, requests may
+involve "sub-requests" in cases such as redirection, in which case you will
+also not see everything that's going on just by examining the original request
+and final response. mechanize's responses can be made to
+have <code>.seek()</code> and <code>.get_data()</code> methods. It's often
+useful to use the <code>.get_data()</code> method during debugging
+(see <a href="./doc.html#seekable">above</a>).
<p>Also, note <code>HTTPRedirectDebugProcessor</code> (which prints information
about redirections) and <code>HTTPResponseDebugProcessor</code> (which prints
out all response bodies, including those that are read during redirections).
<strong>NOTE</strong>: as well as having these processors in your
<code>OpenerDirector</code> (for example, by passing them to
<code>build_opener()</code>) you have to turn on logging at the
<code>INFO</code> level or lower in order to see any output.
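<p>For example (and remembering the logging requirement just mentioned), a
sketch of an opener with both debug processors installed:
@{colorize(r"""
import mechanize
opener = mechanize.build_opener(
    mechanize.HTTPRedirectDebugProcessor(),
    mechanize.HTTPResponseDebugProcessor())
""")}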
<p>If you would like to see what is going on in mechanize's tiny mind, do
this:
@{colorize(r"""
import sys, logging
# logging.DEBUG covers masses of debugging information,
# logging.INFO just shows the output from HTTPRedirectDebugProcessor,
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.DEBUG)
""")}
<p>The <code>DEBUG</code> level (as opposed to the <code>INFO</code> level) can
actually be quite useful, as it explains why particular cookies are accepted or
rejected and why they are or are not returned.
<p>One final thing to note is that there are some catch-all bare
<code>except:</code> statements in the module, which are there to handle
unexpected bad input without crashing your program. If this happens, it's a
bug in mechanize, so please mail me the warning text.
<a name="script"></a>
<h2>Embedded script that sets cookies</h2>
<p>It is possible to embed script in HTML pages (sandwiched between
<code>&lt;SCRIPT&gt;here&lt;/SCRIPT&gt;</code> tags, and in
<code>javascript:</code> URLs) - JavaScript / ECMAScript, VBScript, or even
Python - that causes cookies to be set in a browser. See the <a
href="../bits/clientx.html">General FAQs</a> page for what to do about this.
<a name="dates"></a>
<h2>Parsing HTTP date strings</h2>
<p>A function named <code>str2time</code> is provided by the package,
which may be useful for parsing dates in HTTP headers.
<code>str2time</code> is intended to be liberal, since HTTP date/time
formats are poorly standardised in practice. There is no need to use this
function in normal operations: <code>CookieJar</code> instances keep track
of cookie lifetimes automatically. This function will stay around in some
form, though the supported date/time formats may change.
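<p>For example (a sketch; <code>str2time</code> returns seconds since the
epoch, or <code>None</code> if the date cannot be parsed):
@{colorize(r"""
import mechanize
t = mechanize.str2time("Wed, 09 Feb 1994 22:23:32 GMT")
""")}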
<a name="badhtml"></a>
<h2>Dealing with bad HTML</h2>
<p>XXX Intro
<p>XXX Test me
@{colorize("""\
import copy
import re
import mechanize
class CommentCleanProcessor(mechanize.BaseProcessor):
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = mechanize.response_seek_wrapper(response)
response.seek(0)
new_response = copy.copy(response)
new_response.set_data(
re.sub("<!-([^-]*)->", "<!--\\1-->", response.read()))
return new_response
https_response = http_response
""")}
<p>XXX TidyProcessor: mxTidy? tidylib? tidy?
<a name="standards"></a>
<h2>Note about cookie standards</h2>
<p>The various cookie standards and their history form a case study of the
terrible things that can happen to a protocol. The long-suffering David
Kristol has written a <a
href="http://arxiv.org/abs/cs.SE/0105018">paper</a> about it, if you
want to know the gory details.
<p>Here is a summary.
<p>The <a href="http://www.netscape.com/newsref/std/cookie_spec.html">Netscape
protocol</a> (cookie_spec.html) is still the only standard supported by most
browsers (including Internet Explorer and Netscape). Be aware that
cookie_spec.html is not, and never was, actually followed to the letter (or
anything close) by anyone (including Netscape, IE and mechanize): the
Netscape protocol standard is really defined by the behaviour of Netscape (and
now IE). Netscape cookies are also known as V0 cookies, to distinguish them
from RFC 2109 or RFC 2965 cookies, which have a version cookie-attribute with a
value of 1.
<p><a href="http://www.ietf.org/rfcs/rfc2109.txt">RFC 2109</a> was introduced
to fix some problems identified with the Netscape protocol, while still keeping
the same HTTP headers (<code>Cookie</code> and <code>Set-Cookie</code>). The
most prominent of these problems is the 'third-party' cookie issue, which was
an accidental feature of the Netscape protocol. When one visits www.bland.org,
one doesn't expect to get a cookie from www.lurid.com, a site one has never
visited. Depending on browser configuration, this can still happen, because
the unreconstructed Netscape protocol is happy to accept cookies from, say, an
image in a webpage (www.bland.org) that's included by linking to an
advertiser's server (www.lurid.com). This kind of event, where your browser
talks to a server that you haven't explicitly okayed by some means, is what the
RFCs call an 'unverifiable transaction'. In addition to the potential for
embarrassment caused by the presence of lurid.com's cookies on one's machine,
this may also be used to track your movements on the web, because advertising
agencies like doubleclick.net place ads on many sites. RFC 2109 tried to
change this by requiring cookies to be turned off during unverifiable
transactions with third-party servers - unless the user explicitly asks them to
be turned on. This clashed with the business model of advertisers like
doubleclick.net, who had started to take advantage of the third-party cookies
'bug'. Since the browser vendors were more interested in the advertisers'
concerns than those of the browser users, this arguably doomed both RFC 2109
and its successor, RFC 2965, from the start. Other problems than the
third-party cookie issue were also fixed by 2109. However, even ignoring the
advertising issue, 2109 was stillborn, because Internet Explorer and Netscape
behaved differently in response to its extended <code>Set-Cookie</code>
headers. This was not really RFC 2109's fault: it worked the way it did to
keep compatibility with the Netscape protocol as implemented by Netscape.
Microsoft Internet Explorer (MSIE) was very new when the standard was designed,
but was starting to be very popular when the standard was finalised. XXX P3P,
and MSIE & Mozilla options
<p>XXX Apparently MSIE implements bits of RFC 2109 - but not very compliant
(surprise). Presumably other browsers do too, as a result. mechanize
already does allow Netscape cookies to have <code>max-age</code> and
<code>port</code> cookie-attributes, and as far as I know that's the extent of
the support present in MSIE. I haven't tested, though!
<p><a href="http://www.ietf.org/rfcs/rfc2965.txt">RFC 2965</a> attempted to fix
the compatibility problem by introducing two new headers,
<code>Set-Cookie2</code> and <code>Cookie2</code>. Unlike the
<code>Cookie</code> header, <code>Cookie2</code> does <em>not</em> carry
cookies to the server - rather, it simply advertises to the server that RFC
2965 is understood. <code>Set-Cookie2</code> <em>does</em> carry cookies, from
server to client: the new header means that both IE and Netscape completely
ignore these cookies. This prevents breakage, but introduces a chicken-egg
problem that means 2965 may never be widely adopted, especially since Microsoft
shows no interest in it. XXX Rumour has it that the European Union is unhappy
with P3P, and might introduce legislation that requires something better,
forming a gap that RFC 2965 might fill - any truth in this? Opera is the only
browser I know of that supports the standard. On the server side, Apache's
<code>mod_usertrack</code> supports it. One confusing point to note about RFC
2965 is that it uses the same value (1) of the Version attribute in HTTP
headers as does RFC 2109.
<p>Most recently, it was discovered that RFC 2965 does not fully take account
of issues arising when 2965 and Netscape cookies coexist, and errata were
discussed on the W3C http-state mailing list, but the list traffic died and it
seems RFC 2965 is dead as an internet protocol (but still a useful basis for
implementing the de-facto standards, and perhaps as an intranet protocol).
<p>Because Netscape cookies are so poorly specified, the general philosophy
of the module's Netscape cookie implementation is to start with RFC 2965
and open holes where required for Netscape protocol-compatibility. RFC
2965 cookies are <em>always</em> treated as RFC 2965 requires, of course!
<a name="faq_pre"></a>
<h2>FAQs - pre install</h2>
<ul>
<li>Doesn't the standard Python library module, <code>Cookie</code>, do
this?
<p>No: Cookie.py does the server end of the job. It doesn't know when to
accept cookies from a server or when to pass them back.
<li>Is urllib2.py required?
<p>No. You probably want it, though.
<li>Where can I find out more about the HTTP cookie protocol?
<p>There is more than one protocol, in fact (see the <a href="./doc.html">docs</a>
for a brief explanation of the history):
<ul>
<li>The original <a href="http://www.netscape.com/newsref/std/cookie_spec.html">
Netscape cookie protocol</a> - the standard still in use today, in
theory (in reality, the protocol implemented by all the major browsers
only bears a passing resemblance to the protocol sketched out in this
document).
<li><a href="http://www.ietf.org/rfcs/rfc2109.txt">RFC 2109</a> - obsoleted
by RFC 2965.
<li><a href="http://www.ietf.org/rfcs/rfc2965.txt">RFC 2965</a> - the
Netscape protocol with the bugs fixed (not widely used - the Netscape
protocol still dominates, and seems likely to remain dominant
indefinitely, at least on the Internet).
<a href="http://www.ietf.org/rfcs/rfc2964.txt">RFC 2964</a> discusses use
of the protocol.
<a href="http://kristol.org/cookie/errata.html">Errata</a> to RFC 2965
are currently being discussed on the
<a href="http://lists.bell-labs.com/mailman/listinfo/http-state">
http-state mailing list</a> (update: list traffic died months ago and
hasn't revived).
<li>A <a href="http://doi.acm.org/10.1145/502152.502153">paper</a> by David
Kristol setting out the history of the cookie standards in exhausting
detail.
<li>HTTP cookies <a href="http://www.cookiecentral.com/">FAQ</a>.
</ul>
- <li>Which protocols does ClientCookie support?
+ <li>Which protocols does mechanize support?
<p>Netscape and RFC 2965. RFC 2965 handling is switched off by default.
<li>What about RFC 2109?
<p>RFC 2109 cookies are currently parsed as Netscape cookies, and treated
by default as RFC 2965 cookies thereafter if RFC 2965 handling is enabled,
or as Netscape cookies otherwise. RFC 2109 is officially obsoleted by RFC
2965. Browsers do use a few RFC 2109 features in their Netscape cookie
implementations (<code>port</code> and <code>max-age</code>), and
- ClientCookie knows about that, too.
+ mechanize knows about that, too.
</ul>
<a name="faq_use"></a>
<h2>FAQs - usage</h2>
<ul>
<li>Why don't I have any cookies?
<p>Read the <a href="./doc.html#debugging">debugging section</a> of this page.
<li>My response claims to be empty, but I know it's not!
<p>Did you call <code>response.read()</code> (eg., in a debug statement),
then forget that all the data has already been read? In that case, you
may want to use <code>mechanize.response_seek_wrapper</code> (there is a
short sketch after this list).
<li>How do I download only part of a response body?
<p>Just call <code>.read()</code> or <code>.readline()</code> methods on your
response object as many times as you need. The <code>.seek()</code>
method (which is not always present, see <a
href="./doc.html#seekable">above</a>) still works, because mechanize
caches read data.
<li>What's the difference between the <code>.load()</code> and
<code>.revert()</code> methods of <code>CookieJar</code>?
<p><code>.load()</code> <em>appends</em> cookies from a file.
<code>.revert()</code> discards all existing cookies held by the
<code>CookieJar</code> first (but it won't lose any existing cookies if
the loading fails).
<li>Is it threadsafe?
<p>No. <em>Tested</em> patches welcome. Clarification: As far as I know,
it's perfectly possible to use mechanize in threaded code, but it
provides no synchronisation: you have to provide that yourself.
<li>How do I do &lt;X&gt;?
<p>The module docstrings are worth reading if you want to do something
unusual.
<li>What's this "processor" business about? I knew
<code>urllib2</code> used "handlers", but not these
"processors".
<p>This Python library <a href="http://www.python.org/sf/852995">patch</a>
contains an explanation. Processors are now a standard part of urllib2
in Python 2.4.
<li>How do I use it without urllib2.py?
@{colorize(r"""
from mechanize import CookieJar
print CookieJar.extract_cookies.__doc__
print CookieJar.add_cookie_header.__doc__
""")}
</ul>
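<p>As promised above, a minimal sketch of <code>response_seek_wrapper</code>
(the URL is hypothetical):
@{colorize(r"""
import mechanize
response = mechanize.urlopen("http://example.com/")
if not hasattr(response, "seek"):
    response = mechanize.response_seek_wrapper(response)
data = response.read()  # this drains the response ...
response.seek(0)        # ... but the wrapper caches data, so we can rewind
assert response.read() == data
""")}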
<p>I prefer questions and comments to be sent to the <a
href="http://lists.sourceforge.net/lists/listinfo/wwwsearch-general">
mailing list</a> rather than direct to me.
<p><a href="mailto:jjl@@pobox.com">John J. Lee</a>,
@(time.strftime("%B %Y", last_modified)).
<hr>
</div>
<div id="Menu">
@(release.navbar('ccdocs'))
<br>
<a href="./doc.html#examples">Examples</a><br>
<a href="./doc.html#browsers">Mozilla & MSIE</a><br>
<a href="./doc.html#file">Cookies in a file</a><br>
<a href="./doc.html#cookiejar">Using a <code>CookieJar</code></a><br>
<a href="./doc.html#extras">Processors</a><br>
<a href="./doc.html#seekable">Seekable responses</a><br>
<a href="./doc.html#requests">Request confusion</a><br>
<a href="./doc.html#headers">Adding headers</a><br>
<a href="./doc.html#unverifiable">Verifiability</a><br>
<a href="./doc.html#rfc2965">RFC 2965</a><br>
<a href="./doc.html#debugging">Debugging</a><br>
<a href="./doc.html#script">Embedded scripts</a><br>
<a href="./doc.html#dates">HTTP date parsing</a><br>
<a href="./doc.html#standards">Standards</a><br>
<a href="./doc.html#faq_use">FAQs - usage</a><br>
</div>
</body>
</html>
|
Almad/Mechanize
|
cf7c46d4fdc8e218ddab9d28fc635006895cf558
|
* Fix thoroughly broken digest auth (still need functional test!) ([email protected]) * Fix issue where more tags after <title> caused default parser to raise an exception * Fix MechanizeRobotFileParser.set_opener() * Fix a _gzip.py NameError (gzip support is experimental) * Remove unused imports revealed by running pyflakes * Run _useragent.py doctests
|
diff --git a/mechanize/_auth.py b/mechanize/_auth.py
index 9bb5873..7626eba 100644
--- a/mechanize/_auth.py
+++ b/mechanize/_auth.py
@@ -1,500 +1,500 @@
"""HTTP Authentication and Proxy support.
All but HTTPProxyPasswordMgr come from Python 2.5.
Copyright 2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
-import re, base64, urlparse, posixpath, md5, sha, sys, copy
+import re, base64, urlparse, posixpath, md5, os, random, sha, time, copy
-from urllib2 import BaseHandler
+from urllib2 import BaseHandler, HTTPError, parse_keqv_list, parse_http_list
from urllib import getproxies, unquote, splittype, splituser, splitpasswd, \
splitport
def _parse_proxy(proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:[email protected]')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:[email protected]:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:[email protected]/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:[email protected]:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:[email protected]/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:[email protected]')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
# We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.2
# and 3.3.), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
def __init__(self, proxies=None):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.encodestring(user_pass).strip()
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type:
# let other handlers take care of it
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
# e.g. if we have a constructor arg proxies like so:
# {'http': 'ftp://proxy.example.com'}, we may end up turning
# a request for http://acme.example.com/a into one for
# ftp://proxy.example.com/a
return self.parent.open(req)
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, basestring):
uri = [uri]
if not realm in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
[self.reduce_uri(u, default_port) for u in uri])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.iteritems():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return None, None
def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlparse.urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
rx = re.compile('[ \t]*([^ \t]+)[ \t]+realm="([^"]*)"', re.I)
# XXX there can actually be multiple auth-schemes in a
# www-authenticate header. should probably be a lot more careful
# in parsing them to extract multiple alternatives
def __init__(self, password_mgr=None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
# XXX could be multiple headers
authreq = headers.get(authreq, None)
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, realm = mo.groups()
if scheme.lower() == 'basic':
return self.retry_http_basic_auth(host, req, realm)
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = "%s:%s" % (user, pw)
auth = 'Basic %s' % base64.encodestring(raw).strip()
if req.headers.get(self.auth_header, None) == auth:
return None
newreq = copy.copy(req)
newreq.add_header(self.auth_header, auth)
newreq.visit = False
return self.parent.open(newreq)
else:
return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
return self.http_error_auth_reqed('www-authenticate',
url, req, headers)
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
# http_error_auth_reqed requires that there is no userinfo component in
# authority. Assume there isn't one, since urllib2 does not (and
# should not, RFC 3986 s. 3.2.1) support requests for URLs containing
# userinfo.
authority = req.get_host()
return self.http_error_auth_reqed('proxy-authenticate',
authority, req, headers)
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
class AbstractDigestAuthHandler:
# Digest authentication is specified in RFC 2617.
# XXX The client does not inspect the Authentication-Info header
# in a successful response.
# XXX It should be possible to test this implementation against
# a mock server that just generates a static set of challenges.
# XXX qop="auth-int" supports is shaky
def __init__(self, passwd=None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
# Don't fail endlessly - if we failed once, we'll probably
# fail a second time. Hm. Unless the Password Manager is
# prompting for the information. Crap. This isn't great
# but it's better than the current 'repeat until recursion
# depth exceeded' approach <wink>
raise HTTPError(req.get_full_url(), 401, "digest auth failed",
headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return None
newreq = copy.copy(req)
newreq.add_unredirected_header(self.auth_header, auth_val)
newreq.visit = False
return self.parent.open(newreq)
def get_cnonce(self, nonce):
# The cnonce-value is an opaque
# quoted string value provided by the client and used by both client
# and server to avoid chosen plaintext attacks, to provide mutual
# authentication, and to provide some message integrity protection.
# This isn't a fabulous effort, but it's probably Good Enough.
dig = sha.new("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
randombytes(8))).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
# mod_digest doesn't send an opaque, even though it isn't
# supposed to be optional
opaque = chal.get('opaque', None)
except KeyError:
return None
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return None
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
if user is None:
return None
# XXX not implemented yet
if req.has_data():
entdig = self.get_entity_digest(req.get_data(), chal)
else:
entdig = None
A1 = "%s:%s:%s" % (user, realm, pw)
A2 = "%s:%s" % (req.get_method(),
# XXX selector: what about proxies and full urls
req.get_selector())
if qop == 'auth':
self.nonce_count += 1
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
pass
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (user, realm, nonce, req.get_selector(),
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
def get_algorithm_impls(self, algorithm):
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: md5.new(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: sha.new(x).hexdigest()
# XXX MD5-sess
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
def get_entity_digest(self, data, chal):
# XXX not implemented yet
return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
"""An authentication protocol defined by RFC 2069
Digest authentication improves on basic authentication because it
does not transmit passwords in the clear.
"""
auth_header = 'Authorization'
handler_order = 490
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse.urlparse(req.get_full_url())[1]
retry = self.http_error_auth_reqed('www-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490
def http_error_407(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('proxy-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
# XXX ugly implementation, should probably not bother deriving
class HTTPProxyPasswordMgr(HTTPPasswordMgr):
# has default realm and host/port
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if uri is None or isinstance(uri, basestring):
uris = [uri]
else:
uris = uri
passwd_by_domain = self.passwd.setdefault(realm, {})
for uri in uris:
for default_port in True, False:
reduced_uri = self.reduce_uri(uri, default_port)
passwd_by_domain[reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
attempts = [(realm, authuri), (None, authuri)]
# bleh, want default realm to take precedence over default
# URI/authority, hence this outer loop
for default_uri in False, True:
for realm, authuri in attempts:
authinfo_by_domain = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uri, authinfo in authinfo_by_domain.iteritems():
if uri is None and not default_uri:
continue
if self.is_suburi(uri, reduced_authuri):
return authinfo
user, password = None, None
if user is not None:
break
return user, password
def reduce_uri(self, uri, default_port=True):
if uri is None:
return None
return HTTPPasswordMgr.reduce_uri(self, uri, default_port)
def is_suburi(self, base, test):
if base is None:
# default to the proxy's host/port
hostport, path = test
base = (hostport, "/")
return HTTPPasswordMgr.is_suburi(self, base, test)
class HTTPSClientCertMgr(HTTPPasswordMgr):
# implementation inheritance: this is not a proper subclass
def add_key_cert(self, uri, key_file, cert_file):
self.add_password(None, uri, key_file, cert_file)
def find_key_cert(self, authuri):
return HTTPPasswordMgr.find_user_password(self, None, authuri)
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index ea9e453..e84a690 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -1,547 +1,547 @@
"""HTTP cookie handling for web clients.
This module originally developed from my port of Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
Comments to John J Lee <[email protected]>.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
Copyright 2002-2003 Johnny Lee (original MSIE Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
-import sys, re, copy, time, struct, urllib, types, logging
+import sys, re, copy, time, urllib, types, logging
try:
import threading
_threading = threading; del threading
except ImportError:
import dummy_threading
_threading = dummy_threading; del dummy_threading
import httplib # only for the default HTTP port
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
from _headersutil import split_header_words, parse_ns_headers
from _util import isstringlike
import _rfc3986
debug = logging.getLogger("mechanize.cookies").debug
def reraise_unmasked_exceptions(unmasked=()):
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways.
# This function re-raises some exceptions we don't want to trap.
import mechanize, warnings
if not mechanize.USE_BARE_EXCEPT:
raise
unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError)
etype = sys.exc_info()[0]
if issubclass(etype, unmasked):
raise
# swallowed an exception
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("mechanize bug!\n%s" % msg, stacklevel=2)
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
# at other uses of IPV4_RE also, if change this.
return not (IPV4_RE.search(text) or
text == "" or
text[0] == "." or text[-1] == ".")
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
has_form_nb = not (i == -1 or i == 0)
return (
has_form_nb and
B.startswith(".") and
is_HDN(B[1:])
)
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
return not IPV4_RE.search(text)
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = _rfc3986.urlsplit(url)[1]
if host is None:
host = request.get_header("Host", "")
# remove port, if present
host = cut_port_re.sub("", host, 1)
return host.lower()
def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
def request_path(request):
"""request-URI, as defined by RFC 2965."""
url = request.get_full_url()
path, query, frag = _rfc3986.urlsplit(url)[2:]
path = escape_path(path)
req_path = _rfc3986.urlunsplit((None, None, path, query, frag))
if not req_path.startswith("/"):
req_path = "/"+req_path
return req_path
def request_port(request):
host = request.get_host()
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
if isinstance(path, types.UnicodeType):
path = path.encode("utf-8")
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
req_host = request_host(request)
# the origin request's request-host was stuffed into request by
# _urllib2_support.AbstractHTTPHandler
return not domain_match(req_host, reach(request.origin_req_host))
class Cookie:
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
version: integer;
name: string;
value: string (may be None);
port: string; None indicates no attribute was supplied (eg. "Port", rather
than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list
string (eg. "80,8080")
port_specified: boolean; true if a value was supplied with the Port
cookie-attribute
domain: string;
domain_specified: boolean; true if Domain was explicitly set
domain_initial_dot: boolean; true if Domain as set in HTTP header by server
started with a dot (yes, this really is necessary!)
path: string;
path_specified: boolean; true if Path was explicitly set
secure: boolean; true if should only be returned over secure connection
expires: integer; seconds since epoch (RFC 2965 cookies should calculate
this value from the Max-Age attribute)
discard: boolean, true if this is a session cookie; (if no expires value,
this should be true)
comment: string;
comment_url: string;
rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not
Set-Cookie2:) header, but had a version cookie-attribute of 1
rest: mapping of other cookie-attributes
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return self._rest.has_key(name)
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def nonstandard_attr_keys(self):
return self._rest.keys()
def is_expired(self, now=None):
if now is None: now = time.time()
return (self.expires is not None) and (self.expires <= now)
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
def __repr__(self):
args = []
for name in ["version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
]:
attr = getattr(self, name)
args.append("%s=%s" % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
As well as implementing set_ok and return_ok, implementations of this
interface must also supply the following attributes, indicating which
protocols should be used, and how. These can be read and set at any time,
though whether that makes complete sense from the protocol point of view is
doubtful.
Public attributes:
netscape: implement netscape protocol
rfc2965: implement RFC 2965 protocol
rfc2109_as_netscape:
WARNING: This argument will change or go away if it is not accepted into
the Python standard library in this form!
If true, treat RFC 2109 cookies as though they were Netscape cookies. The
default is for this attribute to be None, which means treat 2109 cookies
as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is,
by default), and as Netscape cookies otherwise.
hide_cookie2: don't add Cookie2 header to requests (the presence of
this header indicates to the server that we understand RFC 2965
cookies)
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.extract_cookies.__doc__
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.add_cookie_header.__doc__
"""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
This is here as an optimization, to remove the need for checking every
cookie with a particular domain (which may involve reading many files).
The default implementations of domain_return_ok and path_return_ok
(return True) leave all the work to return_ok.
If domain_return_ok returns true for the cookie domain, path_return_ok
is called for the cookie path. Otherwise, path_return_ok and return_ok
are never called for that cookie domain. If path_return_ok returns
true, return_ok is called with the Cookie object itself for a full
check. Otherwise, return_ok is never called for that cookie path.
Note that domain_return_ok is called for every *cookie* domain, not
just for the *request* domain. For example, the function might be
called with both ".acme.com" and "www.acme.com" if the request domain
is "www.acme.com". The same goes for path_return_ok.
For argument documentation, see the docstring for return_ok.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
See the docstring for domain_return_ok.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies.
Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is
switched off by default.
The easiest way to provide your own policy is to override this class and
call its methods in your overridden implementations before adding your own
additional checks.
import mechanize
class MyCookiePolicy(mechanize.DefaultCookiePolicy):
def set_ok(self, cookie, request):
if not mechanize.DefaultCookiePolicy.set_ok(
self, cookie, request):
return False
if i_dont_want_to_store_this_cookie():
return False
return True
In addition to the features required to implement the CookiePolicy
interface, this class allows you to block and allow domains from setting
and receiving cookies. There are also some strictness switches that allow
you to tighten up the rather loose Netscape protocol rules a little bit (at
the cost of blocking some benign cookies).
A domain blacklist and whitelist is provided (both off by default). Only
domains not in the blacklist and present in the whitelist (if the whitelist
is active) participate in cookie setting and returning. Use the
blocked_domains constructor argument, and blocked_domains and
set_blocked_domains methods (and the corresponding argument and methods for
allowed_domains). If you set a whitelist, you can turn it off again by
setting it to None.
Domains in block or allow lists that do not start with a dot must
string-compare equal. For example, "acme.com" matches a blacklist entry of
"acme.com", but "www.acme.com" does not. Domains that do start with a dot
are matched by more specific domains too. For example, both "www.acme.com"
and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does
not). IP addresses are an exception, and must match exactly. For example,
if blocked_domains contains "192.168.1.2" and ".168.1.2", 192.168.1.2 is
blocked, but 193.168.1.2 is not.
Additional Public Attributes:
General strictness switches
strict_domain: don't allow sites to set two-component domains with
country-code top-level domains like .co.uk, .gov.uk, .co.nz, etc.
This is far from perfect and isn't guaranteed to work!
RFC 2965 protocol strictness switches
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
diff --git a/mechanize/_gzip.py b/mechanize/_gzip.py
index 46a98a3..26c2743 100644
--- a/mechanize/_gzip.py
+++ b/mechanize/_gzip.py
@@ -1,103 +1,103 @@
import urllib2
from cStringIO import StringIO
import _response
# GzipConsumer was taken from Fredrik Lundh's effbot.org-0.1-20041009 library
class GzipConsumer:
def __init__(self, consumer):
self.__consumer = consumer
self.__decoder = None
self.__data = ""
def __getattr__(self, key):
return getattr(self.__consumer, key)
def feed(self, data):
if self.__decoder is None:
# check if we have a full gzip header
data = self.__data + data
try:
i = 10
flag = ord(data[3])
if flag & 4: # extra
x = ord(data[i]) + 256*ord(data[i+1])
i = i + 2 + x
if flag & 8: # filename
while ord(data[i]):
i = i + 1
i = i + 1
if flag & 16: # comment
while ord(data[i]):
i = i + 1
i = i + 1
if flag & 2: # crc
i = i + 2
if len(data) < i:
raise IndexError("not enough data")
if data[:3] != "\x1f\x8b\x08":
raise IOError("invalid gzip data")
data = data[i:]
except IndexError:
self.__data = data
return # need more data
import zlib
self.__data = ""
self.__decoder = zlib.decompressobj(-zlib.MAX_WBITS)
data = self.__decoder.decompress(data)
if data:
self.__consumer.feed(data)
def close(self):
if self.__decoder:
data = self.__decoder.flush()
if data:
self.__consumer.feed(data)
self.__consumer.close()
# --------------------------------------------------------------------
# the rest of this module is John Lee's stupid code, not
# Fredrik's nice code :-)
class stupid_gzip_consumer:
def __init__(self): self.data = []
def feed(self, data): self.data.append(data)
class stupid_gzip_wrapper(_response.closeable_response):
def __init__(self, response):
self._response = response
c = stupid_gzip_consumer()
gzc = GzipConsumer(c)
gzc.feed(response.read())
self.__data = StringIO("".join(c.data))
def read(self, size=-1):
return self.__data.read(size)
def readline(self, size=-1):
return self.__data.readline(size)
def readlines(self, sizehint=-1):
- return self.__data.readlines(size)
+ return self.__data.readlines(sizehint)
def __getattr__(self, name):
# delegate unknown methods/attributes
return getattr(self._response, name)
class HTTPGzipProcessor(urllib2.BaseHandler):
handler_order = 200 # response processing before HTTPEquivProcessor
def http_request(self, request):
request.add_header("Accept-Encoding", "gzip")
return request
def http_response(self, request, response):
# post-process response
enc_hdrs = response.info().getheaders("Content-encoding")
for enc_hdr in enc_hdrs:
if ("gzip" in enc_hdr) or ("compress" in enc_hdr):
return stupid_gzip_wrapper(response)
return response
https_response = http_response
diff --git a/mechanize/_html.py b/mechanize/_html.py
index 9e7521b..5da0815 100644
--- a/mechanize/_html.py
+++ b/mechanize/_html.py
@@ -1,634 +1,631 @@
"""HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import re, copy, htmlentitydefs
-import sgmllib, HTMLParser, ClientForm
+import sgmllib, ClientForm
import _request
from _headersutil import split_header_words, is_html as _is_html
import _rfc3986
DEFAULT_ENCODING = "latin-1"
COMPRESS_RE = re.compile(r"\s+")
# the base class is purely for backwards compatibility
class ParseError(ClientForm.ParseError): pass
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
return v
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
# idea for this argument-processing trick is from Peter Otten
class Args:
def __init__(self, args_map):
self.dictionary = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
name = attrs.get("name")
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for eg.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying ClientForm.HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ClientForm.ParseResponse
argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
import ClientForm
self.select_default = select_default
if form_parser_class is None:
form_parser_class = ClientForm.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
import ClientForm
encoding = self.encoding
try:
forms = ClientForm.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
except ClientForm.ParseError, exc:
raise ParseError(exc)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def _get_title_text(self, parser):
+ import _pullparser
text = []
tok = None
while 1:
try:
tok = parser.get_token()
- except NoMoreTokensError:
+ except _pullparser.NoMoreTokensError:
break
if tok.type == "data":
text.append(str(tok))
elif tok.type == "entityref":
t = unescape("&%s;" % tok.data,
parser._entitydefs, parser.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, parser.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type == "endtag" and tag_name == "title":
break
text.append(str(tok))
return COMPRESS_RE.sub(" ", "".join(text).strip())
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise ParseError(exc)
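# Title text is normalised with COMPRESS_RE (defined near the top of
# this module), which collapses each run of whitespace to one space.
# An illustrative check:
def _demo_compress_re():
    raw = "A  Title\n\twith   gaps"
    assert COMPRESS_RE.sub(" ", raw.strip()) == "A Title with gaps"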
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
        name, base = name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
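# Hedged examples of the two helpers above: unescape() resolves named
# entities against an entity-name-to-codepoint map, and
# unescape_charref() decodes decimal and hexadecimal character
# references.
def _demo_unescape():
    entities = htmlentitydefs.name2codepoint
    assert unescape("a &amp; b", entities, "latin-1") == "a & b"
    assert unescape_charref("65", "ascii") == "A"   # decimal
    assert unescape_charref("x41", "ascii") == "A"  # hexadecimal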
# bizarre import gymnastics for bundled BeautifulSoup
import _beautifulsoup
import ClientForm
RobustFormParser, NestingRobustFormParser = ClientForm._create_bs_classes(
_beautifulsoup.BeautifulSoup, _beautifulsoup.ICantBelieveItsBeautifulSoup
)
# monkeypatch sgmllib to fix http://www.python.org/sf/803422 :-(
-import sgmllib
sgmllib.charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
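# The patched pattern recognises hexadecimal character references such
# as &#x6a;, which the stock sgmllib pattern does not.  An illustrative
# check:
def _demo_patched_charref():
    m = sgmllib.charref.match("&#x6a;")
    assert m is not None and m.group(1) == "x6a"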
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
- import _beautifulsoup
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
import _beautifulsoup
bs = self._bs
base_url = self._base_url
encoding = self._encoding
        gen = bs.recursiveChildGenerator()
        for ch in gen:
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
- import ClientForm
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
import _beautifulsoup
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
inner_html = "".join([str(node) for node in title.contents])
return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
        encoding_finder: object with an .encoding(response) method, e.g.
         EncodingFinder(default_encoding); its default encoding is used if
         the encoding cannot be determined (or guessed) from the response.
         You should turn on HTTP-EQUIV handling if you want the best chance
         of getting this right without resorting to that default.  The
         default encoding (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set urllib2.Request class.
ClientForm.HTMLForm instances returned by .forms() will return
instances of this class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by urllib2.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over ClientForm.HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
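# Factory's lazy attributes rely on the standard __getattr__ caching
# idiom sketched below (stand-alone illustration, not mechanize API):
# the first access computes the value and stores it in the instance
# __dict__, so __getattr__ is bypassed on later accesses until
# set_response() delattr()s the cached names again.
def _demo_lazy_attribute_idiom():
    class Lazy:
        def __getattr__(self, name):
            if name != "expensive":
                raise AttributeError(name)
            self.expensive = 42  # cache in the instance __dict__
            return self.expensive
    obj = Lazy()
    assert obj.expensive == 42
    assert "expensive" in obj.__dict__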
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
- import _beautifulsoup
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
diff --git a/mechanize/_http.py b/mechanize/_http.py
index 39060bc..96fd405 100644
--- a/mechanize/_http.py
+++ b/mechanize/_http.py
@@ -1,734 +1,733 @@
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
-import copy, time, tempfile, htmlentitydefs, re, logging, socket, \
+import time, htmlentitydefs, logging, socket, \
urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _request import Request
-from _util import isstringlike
from _response import closeable_response, response_seek_wrapper
from _html import unescape, unescape_charref
from _headersutil import is_html
-from _clientcookie import CookieJar, request_host
+from _clientcookie import CookieJar
import _rfc3986
debug = logging.getLogger("mechanize").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
# Implementation notes:
# To avoid the server sending us into an infinite loop, the request
# object needs to track what URLs we have already seen. Do this by
# adding a handler-specific attribute to the Request object. The value
# of the dict is used to count the number of times the same URL has
# been visited. This is needed because visiting the same URL twice
# does not necessarily imply a loop, thanks to state introduced by
# cookies.
# Always unhandled redirection codes:
# 300 Multiple Choices: should not handle this here.
# 304 Not Modified: no need to handle here: only of interest to caches
# that do conditional GETs
# 305 Use Proxy: probably not worth dealing with here
# 306 Unused: what was this for in the previous versions of protocol??
def redirect_request(self, newurl, req, fp, code, msg, headers):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a redirection
response is received. If a redirection should take place, return a
new Request to allow http_error_30x to perform the redirect;
otherwise, return None to indicate that an HTTPError should be
raised.
"""
if code in (301, 302, 303, "refresh") or \
(code == 307 and not req.has_data()):
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# XXX really refresh redirections should be visiting; tricky to
# fix, so this will wait until post-stable release
new = Request(newurl,
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True,
visit=False,
)
new._origin_req = getattr(req, "_origin_req", req)
return new
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if headers.has_key('location'):
newurl = headers.getheaders('location')[0]
elif headers.has_key('uri'):
newurl = headers.getheaders('uri')[0]
else:
return
newurl = _rfc3986.clean_url(newurl, "latin-1")
newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(newurl, req, fp, code, msg, headers)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new)
http_error_301 = http_error_303 = http_error_307 = http_error_302
http_error_refresh = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
escaped_attrs[key] = self.unescape_attr(val)
return escaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
try:
import HTMLParser
except ImportError:
pass
else:
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
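# A hedged example of parse_head() with the sgmllib-based HeadParser
# defined above (the HTML string is illustrative): META HTTP-EQUIV
# headers found in the document HEAD come back as (name, value) pairs.
def _demo_parse_head():
    html = ('<html><head>'
            '<meta http-equiv="refresh" content="5; url=/next">'
            '</head><body>ignored</body></html>')
    assert parse_head(StringIO(html), HeadParser()) == \
        [("refresh", "5; url=/next")]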
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response,
self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
class HTTPCookieProcessor(BaseHandler):
"""Handle HTTP cookies.
Public attributes:
cookiejar: CookieJar instance
"""
def __init__(self, cookiejar=None):
if cookiejar is None:
cookiejar = CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
try:
import robotparser
except ImportError:
pass
else:
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
- import _opener
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
def set_opener(self, opener=None):
+ import _opener
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
robotparser._debug("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
robotparser._debug("disallow all")
elif status >= 400:
self.allow_all = True
robotparser._debug("allow all")
elif status == 200 and lines:
robotparser._debug("parse lines")
self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
def __init__(self, request, *args):
apply(urllib2.HTTPError.__init__, (self,)+args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
except:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
This only makes sense if you use each RefererProcessor for a single
chain of requests only (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
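# clean_refresh_url() strips one pair of surrounding quotes (some
# browsers, e.g. Firefox 1.5, send them) before handing the URL to
# _rfc3986.clean_url.  An illustrative check:
def _demo_clean_refresh_url():
    assert clean_refresh_url("'http://example.com/'") == "http://example.com/"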
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
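# A hedged usage sketch (assumes the mechanize package is installed and
# exposes build_opener and HTTPRefreshProcessor at top level, as its
# docs describe; the URL is illustrative): follow Refresh headers that
# request pauses of up to 10 seconds, but skip the actual sleep.
def _demo_refresh_processor():
    import mechanize
    opener = mechanize.build_opener(
        mechanize.HTTPRefreshProcessor(max_time=10, honor_time=False))
    return opener.open("http://example.com/")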
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses.
    The purpose of this handler is to allow other response processors a
look-in by removing the call to parent.error() from
AbstractHTTPHandler.
For non-200 error codes, this just passes the job on to the
Handler.<proto>_error_<code> methods, via the OpenerDirector.error
method. Eventually, urllib2.HTTPDefaultErrorHandler will raise an
HTTPError if no other handler handles the error.
"""
handler_order = 1000 # after all other processors
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code != 200:
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
# why these error methods took the code, msg, headers args in the first
# place rather than a response object, I don't know, but to avoid
# multiple wrapping, we're discarding them
if isinstance(fp, urllib2.HTTPError):
response = fp
else:
response = urllib2.HTTPError(
req.get_full_url(), code, msg, hdrs, fp)
assert code == response.code
assert msg == response.msg
assert hdrs == response.hdrs
raise response
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
scheme, sel = urllib.splittype(request.get_selector())
sel_host, sel_path = urllib.splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host or host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
h = http_class(host) # will parse host:port
h.set_debuglevel(self._debuglevel)
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
[(name.title(), val) for name, val in headers.items()])
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
r = h.getresponse()
except socket.error, err: # XXX what error?
raise URLError(err)
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r)
resp = closeable_response(fp, r.msg, req.get_full_url(),
r.status, r.reason)
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSConnectionFactory:
def __init__(self, key_file, cert_file):
self._key_file = key_file
self._cert_file = cert_file
def __call__(self, hostport):
return httplib.HTTPSConnection(
hostport,
key_file=self._key_file, cert_file=self._cert_file)
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, client_cert_manager=None):
AbstractHTTPHandler.__init__(self)
self.client_cert_manager = client_cert_manager
def https_open(self, req):
if self.client_cert_manager is not None:
key_file, cert_file = self.client_cert_manager.find_key_cert(
req.get_full_url())
conn_factory = HTTPSConnectionFactory(key_file, cert_file)
else:
conn_factory = httplib.HTTPSConnection
return self.do_open(conn_factory, req)
https_request = AbstractHTTPHandler.do_request_
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index b7f559a..6e51f90 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,524 +1,524 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
-import urllib2, sys, copy, re, os, urllib
+import urllib2, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
    complete the requested operation - e.g., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
        request_class: Request class to use.  Defaults to mechanize.Request
         for Pythons older than 2.4, urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
Browser state (including request, response, history, forms and links)
is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
        Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants
of any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
r"""Return title, or None if there is no title element in the document.
        Treatment of any tag children of <title> attempts to follow Firefox and IE
(currently, tags are preserved).
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
        first).  Note that form 0 is the first form matching all the other
        arguments (if supplied); it is not necessarily the first form in the
        document.  The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
diff --git a/mechanize/_opener.py b/mechanize/_opener.py
index 145350f..ae50f93 100644
--- a/mechanize/_opener.py
+++ b/mechanize/_opener.py
@@ -1,421 +1,421 @@
"""Integration with Python standard library module urllib2: OpenerDirector
class.
Copyright 2004-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
-import os, urllib2, bisect, urllib, httplib, types, tempfile
+import os, urllib2, bisect, httplib, types, tempfile
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
set
except NameError:
import sets
set = sets.Set
import _http
import _upgrade
import _rfc3986
import _response
from _util import isstringlike
from _request import Request
class ContentTooShortError(urllib2.URLError):
def __init__(self, reason, result):
urllib2.URLError.__init__(self, reason)
self.result = result
class OpenerDirector(urllib2.OpenerDirector):
def __init__(self):
urllib2.OpenerDirector.__init__(self)
# really none of these are (sanely) public -- the lack of initial
# underscore on some is just due to following urllib2
self.process_response = {}
self.process_request = {}
self._any_request = {}
self._any_response = {}
self._handler_index_valid = True
self._tempfiles = []
def add_handler(self, handler):
if handler in self.handlers:
return
# XXX why does self.handlers need to be sorted?
bisect.insort(self.handlers, handler)
handler.add_parent(self)
self._handler_index_valid = False
def _maybe_reindex_handlers(self):
if self._handler_index_valid:
return
handle_error = {}
handle_open = {}
process_request = {}
process_response = {}
any_request = set()
any_response = set()
unwanted = []
for handler in self.handlers:
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
if meth == "any_request":
any_request.add(handler)
added = True
continue
elif meth == "any_response":
any_response.add(handler)
added = True
continue
ii = meth.find("_")
scheme = meth[:ii]
condition = meth[ii+1:]
if condition.startswith("error"):
jj = meth[ii+1:].find("_") + ii + 1
kind = meth[jj+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = handle_error.setdefault(scheme, {})
elif condition == "open":
kind = scheme
lookup = handle_open
elif condition == "request":
kind = scheme
lookup = process_request
elif condition == "response":
kind = scheme
lookup = process_response
else:
continue
lookup.setdefault(kind, set()).add(handler)
added = True
if not added:
unwanted.append(handler)
for handler in unwanted:
self.handlers.remove(handler)
# sort indexed methods
# XXX could be cleaned up
        for lookup in [process_request, process_response]:
            for scheme, handlers in lookup.iteritems():
                handlers = list(handlers)
                handlers.sort()
                lookup[scheme] = handlers
for scheme, lookup in handle_error.iteritems():
for code, handlers in lookup.iteritems():
handlers = list(handlers)
handlers.sort()
lookup[code] = handlers
for scheme, handlers in handle_open.iteritems():
handlers = list(handlers)
handlers.sort()
handle_open[scheme] = handlers
# cache the indexes
self.handle_error = handle_error
self.handle_open = handle_open
self.process_request = process_request
self.process_response = process_response
self._any_request = any_request
self._any_response = any_response
def _request(self, url_or_req, data, visit):
if isstringlike(url_or_req):
req = Request(url_or_req, data, visit=visit)
else:
# already a urllib2.Request or mechanize.Request instance
req = url_or_req
if data is not None:
req.add_data(data)
# XXX yuck, give request a .visit attribute if it doesn't have one
try:
req.visit
except AttributeError:
req.visit = None
if visit is not None:
req.visit = visit
return req
def open(self, fullurl, data=None):
req = self._request(fullurl, data, None)
req_scheme = req.get_type()
self._maybe_reindex_handlers()
# pre-process request
# XXX should we allow a Processor to change the URL scheme
# of the request?
request_processors = set(self.process_request.get(req_scheme, []))
request_processors.update(self._any_request)
request_processors = list(request_processors)
request_processors.sort()
for processor in request_processors:
for meth_name in ["any_request", req_scheme+"_request"]:
meth = getattr(processor, meth_name, None)
if meth:
req = meth(req)
# In Python >= 2.4, .open() supports processors already, so we must
# call ._open() instead.
urlopen = getattr(urllib2.OpenerDirector, "_open",
urllib2.OpenerDirector.open)
response = urlopen(self, req, data)
# post-process response
response_processors = set(self.process_response.get(req_scheme, []))
response_processors.update(self._any_response)
response_processors = list(response_processors)
response_processors.sort()
for processor in response_processors:
for meth_name in ["any_response", req_scheme+"_response"]:
meth = getattr(processor, meth_name, None)
if meth:
response = meth(req, response)
return response
def error(self, proto, *args):
if proto in ['http', 'https']:
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is not different than http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = apply(self._call_chain, args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return apply(self._call_chain, args)
BLOCK_SIZE = 1024*8
def retrieve(self, fullurl, filename=None, reporthook=None, data=None):
"""Returns (filename, headers).
For remote objects, the default filename will refer to a temporary
file. Temporary files are removed when the OpenerDirector.close()
method is called.
For file: URLs, at present the returned filename is None. This may
change in future.
If the actual number of bytes read is less than indicated by the
Content-Length header, raises ContentTooShortError (a URLError
subclass). The exception's .result attribute contains the (filename,
headers) that would have been returned.
"""
req = self._request(fullurl, data, False)
scheme = req.get_type()
fp = self.open(req)
headers = fp.info()
if filename is None and scheme == 'file':
# XXX req.get_selector() seems broken here, return None,
# pending sanity :-/
return None, headers
#return urllib.url2pathname(req.get_selector()), headers
if filename:
tfp = open(filename, 'wb')
else:
path = _rfc3986.urlsplit(fullurl)[2]
suffix = os.path.splitext(path)[1]
fd, filename = tempfile.mkstemp(suffix)
self._tempfiles.append(filename)
tfp = os.fdopen(fd, 'wb')
result = filename, headers
bs = self.BLOCK_SIZE
size = -1
read = 0
blocknum = 0
if reporthook:
if "content-length" in headers:
size = int(headers["Content-Length"])
reporthook(blocknum, bs, size)
while 1:
block = fp.read(bs)
if block == "":
break
read += len(block)
tfp.write(block)
blocknum += 1
if reporthook:
reporthook(blocknum, bs, size)
fp.close()
tfp.close()
del fp
del tfp
# raise exception if actual size does not match content-length header
if size >= 0 and read < size:
raise ContentTooShortError(
"retrieval incomplete: "
"got only %i out of %i bytes" % (read, size),
result
)
return result
def close(self):
urllib2.OpenerDirector.close(self)
# make it very obvious this object is no longer supposed to be used
self.open = self.error = self.retrieve = self.add_handler = None
if self._tempfiles:
for filename in self._tempfiles:
try:
os.unlink(filename)
except OSError:
pass
del self._tempfiles[:]
def wrapped_open(urlopen, process_response_object, fullurl, data=None):
success = True
try:
response = urlopen(fullurl, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
if response is not None:
response = process_response_object(response)
if not success:
raise response
return response
class ResponseProcessingOpener(OpenerDirector):
def open(self, fullurl, data=None):
def bound_open(fullurl, data=None):
return OpenerDirector.open(self, fullurl, data)
return wrapped_open(
bound_open, self.process_response_object, fullurl, data)
def process_response_object(self, response):
return response
class SeekableResponseOpener(ResponseProcessingOpener):
def process_response_object(self, response):
return _response.seek_wrapped_response(response)
class OpenerFactory:
"""This class's interface is quite likely to change."""
default_classes = [
# handlers
urllib2.ProxyHandler,
urllib2.UnknownHandler,
_http.HTTPHandler, # derived from new AbstractHTTPHandler
_http.HTTPDefaultErrorHandler,
_http.HTTPRedirectHandler, # bugfixed
urllib2.FTPHandler,
urllib2.FileHandler,
# processors
_upgrade.HTTPRequestUpgradeProcessor,
_http.HTTPCookieProcessor,
_http.HTTPErrorProcessor,
]
if hasattr(httplib, 'HTTPS'):
default_classes.append(_http.HTTPSHandler)
handlers = []
replacement_handlers = []
def __init__(self, klass=OpenerDirector):
self.klass = klass
def build_opener(self, *handlers):
"""Create an opener object from a list of handlers and processors.
The opener will use several default handlers and processors, including
support for HTTP and FTP.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
opener = self.klass()
default_classes = list(self.default_classes)
skip = []
for klass in default_classes:
for check in handlers:
if type(check) == types.ClassType:
if issubclass(check, klass):
skip.append(klass)
elif type(check) == types.InstanceType:
if isinstance(check, klass):
skip.append(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if type(h) == types.ClassType:
h = h()
opener.add_handler(h)
return opener
build_opener = OpenerFactory().build_opener
_opener = None
urlopen_lock = _threading.Lock()
def urlopen(url, data=None):
global _opener
if _opener is None:
urlopen_lock.acquire()
try:
if _opener is None:
_opener = build_opener()
finally:
urlopen_lock.release()
return _opener.open(url, data)
def urlretrieve(url, filename=None, reporthook=None, data=None):
global _opener
if _opener is None:
urlopen_lock.acquire()
try:
if _opener is None:
_opener = build_opener()
finally:
urlopen_lock.release()
return _opener.retrieve(url, filename, reporthook, data)
def install_opener(opener):
global _opener
_opener = opener
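# Usage sketch (based only on names defined above): install a
# seekable-response opener so the module-level urlopen() returns
# responses that support seek().
def _example_install_seekable_opener():
    opener = OpenerFactory(SeekableResponseOpener).build_opener()
    install_opener(opener)
    response = urlopen("http://example.com/")
    data = response.read()
    response.seek(0)  # rewind works because responses are seek-wrapped
    return data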
diff --git a/mechanize/_rfc3986.py b/mechanize/_rfc3986.py
index 15a46a5..1bb5021 100644
--- a/mechanize/_rfc3986.py
+++ b/mechanize/_rfc3986.py
@@ -1,240 +1,241 @@
"""RFC 3986 URI parsing and relative reference resolution / absolutization.
(aka splitting and joining)
Copyright 2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
# XXX Wow, this is ugly. Overly-direct translation of the RFC ATM.
-import sys, re, posixpath, urllib
+import re, urllib
## def chr_range(a, b):
## return "".join(map(chr, range(ord(a), ord(b)+1)))
## UNRESERVED_URI_CHARS = ("ABCDEFGHIJKLMNOPQRSTUVWXYZ"
## "abcdefghijklmnopqrstuvwxyz"
## "0123456789"
## "-_.~")
## RESERVED_URI_CHARS = "!*'();:@&=+$,/?#[]"
## URI_CHARS = RESERVED_URI_CHARS+UNRESERVED_URI_CHARS+'%'
# this re matches any character that's not in URI_CHARS
BAD_URI_CHARS_RE = re.compile("[^A-Za-z0-9\-_.~!*'();:@&=+$,/?%#[\]]")
def clean_url(url, encoding):
# percent-encode illegal URI characters
# Trying to come up with test cases for this gave me a headache, revisit
# when we do switch to unicode.
# Somebody else's comments (lost the attribution):
## - IE will return you the url in the encoding you send it
## - Mozilla/Firefox will send you latin-1 if there's no non latin-1
## characters in your link. It will send you utf-8 however if there are...
if type(url) == type(""):
url = url.decode(encoding, "replace")
url = url.strip()
# for second param to urllib.quote(), we want URI_CHARS, minus the
# 'always_safe' characters that urllib.quote() never percent-encodes
return urllib.quote(url.encode(encoding), "!*'();:@&=+$,/?%#[]~")
def is_clean_uri(uri):
"""
>>> is_clean_uri("ABC!")
True
>>> is_clean_uri(u"ABC!")
True
>>> is_clean_uri("ABC|")
False
>>> is_clean_uri(u"ABC|")
False
>>> is_clean_uri("http://example.com/0")
True
>>> is_clean_uri(u"http://example.com/0")
True
"""
# note module re treats bytestrings as though they were decoded as latin-1
# so this function accepts both unicode and bytestrings
return not bool(BAD_URI_CHARS_RE.search(uri))
SPLIT_MATCH = re.compile(
r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?").match
def urlsplit(absolute_uri):
"""Return scheme, authority, path, query, fragment."""
match = SPLIT_MATCH(absolute_uri)
if match:
g = match.groups()
return g[1], g[3], g[4], g[6], g[8]
def urlunsplit(parts):
scheme, authority, path, query, fragment = parts
r = []
append = r.append
if scheme is not None:
append(scheme)
append(":")
if authority is not None:
append("//")
append(authority)
append(path)
if query is not None:
append("?")
append(query)
if fragment is not None:
append("#")
append(fragment)
return "".join(r)
def urljoin(base_uri, uri_reference):
return urlunsplit(urljoin_parts(urlsplit(base_uri),
urlsplit(uri_reference)))
# oops, this doesn't do the same thing as the literal translation
# from the RFC below
+## import posixpath
## def urljoin_parts(base_parts, reference_parts):
## scheme, authority, path, query, fragment = base_parts
## rscheme, rauthority, rpath, rquery, rfragment = reference_parts
## # compute target URI path
## if rpath == "":
## tpath = path
## else:
## tpath = rpath
## if not tpath.startswith("/"):
## tpath = merge(authority, path, tpath)
## tpath = posixpath.normpath(tpath)
## if rscheme is not None:
## return (rscheme, rauthority, tpath, rquery, rfragment)
## elif rauthority is not None:
## return (scheme, rauthority, tpath, rquery, rfragment)
## elif rpath == "":
## if rquery is not None:
## tquery = rquery
## else:
## tquery = query
## return (scheme, authority, tpath, tquery, rfragment)
## else:
## return (scheme, authority, tpath, rquery, rfragment)
def urljoin_parts(base_parts, reference_parts):
scheme, authority, path, query, fragment = base_parts
rscheme, rauthority, rpath, rquery, rfragment = reference_parts
if rscheme == scheme:
rscheme = None
if rscheme is not None:
tscheme, tauthority, tpath, tquery = (
rscheme, rauthority, remove_dot_segments(rpath), rquery)
else:
if rauthority is not None:
tauthority, tpath, tquery = (
rauthority, remove_dot_segments(rpath), rquery)
else:
if rpath == "":
tpath = path
if rquery is not None:
tquery = rquery
else:
tquery = query
else:
if rpath.startswith("/"):
tpath = remove_dot_segments(rpath)
else:
tpath = merge(authority, path, rpath)
tpath = remove_dot_segments(tpath)
tquery = rquery
tauthority = authority
tscheme = scheme
tfragment = rfragment
return (tscheme, tauthority, tpath, tquery, tfragment)
# um, something *vaguely* like this is what I want, but I have to generate
# lots of test cases first, if only to understand what it is that
# remove_dot_segments really does...
## def remove_dot_segments(path):
## if path == '':
## return ''
## comps = path.split('/')
## new_comps = []
## for comp in comps:
## if comp in ['.', '']:
## if not new_comps or new_comps[-1]:
## new_comps.append('')
## continue
## if comp != '..':
## new_comps.append(comp)
## elif new_comps:
## new_comps.pop()
## return '/'.join(new_comps)
def remove_dot_segments(path):
r = []
while path:
# A
if path.startswith("../"):
path = path[3:]
continue
if path.startswith("./"):
path = path[2:]
continue
# B
if path.startswith("/./"):
path = path[2:]
continue
if path == "/.":
path = "/"
continue
# C
if path.startswith("/../"):
path = path[3:]
if r:
r.pop()
continue
if path == "/..":
path = "/"
if r:
r.pop()
continue
# D
if path == ".":
path = path[1:]
continue
if path == "..":
path = path[2:]
continue
# E
start = 0
if path.startswith("/"):
start = 1
ii = path.find("/", start)
if ii < 0:
ii = None
r.append(path[:ii])
if ii is None:
break
path = path[ii:]
return "".join(r)
def merge(base_authority, base_path, ref_path):
# XXXX Oddly, the sample Perl implementation of this by Roy Fielding
# doesn't even take base_authority as a parameter, despite the wording in
# the RFC suggesting otherwise. Perhaps I'm missing some obvious identity.
#if base_authority is not None and base_path == "":
if base_path == "":
return "/" + ref_path
ii = base_path.rfind("/")
if ii >= 0:
return base_path[:ii+1] + ref_path
return ref_path
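# Two of the RFC 3986 section 5.4 reference-resolution examples, as a
# sketch of what urljoin() above computes:
# >>> urljoin("http://a/b/c/d;p?q", "../g")
# 'http://a/b/g'
# >>> urljoin("http://a/b/c/d;p?q", "g;x?y#s")
# 'http://a/b/c/g;x?y#s'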
if __name__ == "__main__":
import doctest
doctest.testmod()
diff --git a/mechanize/_useragent.py b/mechanize/_useragent.py
index 766ac35..1e4c084 100644
--- a/mechanize/_useragent.py
+++ b/mechanize/_useragent.py
@@ -1,348 +1,348 @@
"""Convenient HTTP UserAgent class.
This is a subclass of urllib2.OpenerDirector.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
-import sys, warnings, urllib2
+import warnings
import _opener
import _urllib2
import _auth
import _gzip
import _response
class UserAgentBase(_opener.OpenerDirector):
"""Convenient user-agent class.
Do not use .add_handler() to add a handler for something already dealt with
by this code.
The only reason at present for the distinction between UserAgent and
UserAgentBase is so that classes that depend on .seek()able responses
(e.g. mechanize.Browser) can inherit from UserAgentBase. The subclass
UserAgent exposes a .set_seekable_responses() method that allows switching
off the adding of a .seek() method to responses.
Public attributes:
addheaders: list of (name, value) pairs specifying headers to send with
every request, unless they are overridden in the Request instance.
>>> ua = UserAgentBase()
>>> ua.addheaders = [
... ("User-agent", "Mozilla/5.0 (compatible)"),
... ("From", "[email protected]")]
"""
handler_classes = {
# scheme handlers
"http": _urllib2.HTTPHandler,
# CacheFTPHandler is buggy, at least in 2.3, so we don't use it
"ftp": _urllib2.FTPHandler,
"file": _urllib2.FileHandler,
# other handlers
"_unknown": _urllib2.UnknownHandler,
# HTTP{S,}Handler depend on HTTPErrorProcessor too
"_http_error": _urllib2.HTTPErrorProcessor,
"_http_request_upgrade": _urllib2.HTTPRequestUpgradeProcessor,
"_http_default_error": _urllib2.HTTPDefaultErrorHandler,
# feature handlers
"_basicauth": _urllib2.HTTPBasicAuthHandler,
"_digestauth": _urllib2.HTTPDigestAuthHandler,
"_redirect": _urllib2.HTTPRedirectHandler,
"_cookies": _urllib2.HTTPCookieProcessor,
"_refresh": _urllib2.HTTPRefreshProcessor,
"_equiv": _urllib2.HTTPEquivProcessor,
"_proxy": _urllib2.ProxyHandler,
"_proxy_basicauth": _urllib2.ProxyBasicAuthHandler,
"_proxy_digestauth": _urllib2.ProxyDigestAuthHandler,
"_robots": _urllib2.HTTPRobotRulesProcessor,
"_gzip": _gzip.HTTPGzipProcessor, # experimental!
# debug handlers
"_debug_redirect": _urllib2.HTTPRedirectDebugProcessor,
"_debug_response_body": _urllib2.HTTPResponseDebugProcessor,
}
default_schemes = ["http", "ftp", "file"]
default_others = ["_unknown", "_http_error", "_http_request_upgrade",
"_http_default_error",
]
default_features = ["_redirect", "_cookies",
"_refresh", "_equiv",
"_basicauth", "_digestauth",
"_proxy", "_proxy_basicauth", "_proxy_digestauth",
"_robots",
]
if hasattr(_urllib2, 'HTTPSHandler'):
handler_classes["https"] = _urllib2.HTTPSHandler
default_schemes.append("https")
def __init__(self):
_opener.OpenerDirector.__init__(self)
ua_handlers = self._ua_handlers = {}
for scheme in (self.default_schemes+
self.default_others+
self.default_features):
klass = self.handler_classes[scheme]
ua_handlers[scheme] = klass()
for handler in ua_handlers.itervalues():
self.add_handler(handler)
# Yuck.
# Ensure correct default constructor args were passed to
# HTTPRefreshProcessor and HTTPEquivProcessor.
if "_refresh" in ua_handlers:
self.set_handle_refresh(True)
if "_equiv" in ua_handlers:
self.set_handle_equiv(True)
# Ensure default password managers are installed.
pm = ppm = None
if "_basicauth" in ua_handlers or "_digestauth" in ua_handlers:
pm = _urllib2.HTTPPasswordMgrWithDefaultRealm()
if ("_proxy_basicauth" in ua_handlers or
"_proxy_digestauth" in ua_handlers):
ppm = _auth.HTTPProxyPasswordMgr()
self.set_password_manager(pm)
self.set_proxy_password_manager(ppm)
# set default certificate manager
if "https" in ua_handlers:
cm = _urllib2.HTTPSClientCertMgr()
self.set_client_cert_manager(cm)
def close(self):
_opener.OpenerDirector.close(self)
self._ua_handlers = None
# XXX
## def set_timeout(self, timeout):
## self._timeout = timeout
## def set_http_connection_cache(self, conn_cache):
## self._http_conn_cache = conn_cache
## def set_ftp_connection_cache(self, conn_cache):
## # XXX ATM, FTP has cache as part of handler; should it be separate?
## self._ftp_conn_cache = conn_cache
def set_handled_schemes(self, schemes):
"""Set sequence of URL scheme (protocol) strings.
For example: ua.set_handled_schemes(["http", "ftp"])
If this fails (with ValueError) because you've passed an unknown
scheme, the set of handled schemes will not be changed.
"""
want = {}
for scheme in schemes:
if scheme.startswith("_"):
raise ValueError("not a scheme '%s'" % scheme)
if scheme not in self.handler_classes:
raise ValueError("unknown scheme '%s'")
want[scheme] = None
# get rid of scheme handlers we don't want
for scheme, oldhandler in self._ua_handlers.items():
if scheme.startswith("_"): continue # not a scheme handler
if scheme not in want:
self._replace_handler(scheme, None)
else:
del want[scheme] # already got it
# add the scheme handlers that are missing
for scheme in want.keys():
self._set_handler(scheme, True)
def set_cookiejar(self, cookiejar):
"""Set a mechanize.CookieJar, or None."""
self._set_handler("_cookies", obj=cookiejar)
# XXX could use Greg Stein's httpx for some of this instead?
# or httplib2??
def set_proxies(self, proxies):
"""Set a dictionary mapping URL scheme to proxy specification, or None.
e.g. {"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com"}
"""
self._set_handler("_proxy", obj=proxies)
def add_password(self, url, user, password, realm=None):
self._password_manager.add_password(realm, url, user, password)
def add_proxy_password(self, user, password, hostport=None, realm=None):
self._proxy_password_manager.add_password(
realm, hostport, user, password)
def add_client_certificate(self, url, key_file, cert_file):
"""Add an SSL client certificate, for HTTPS client auth.
key_file and cert_file must be filenames of the key and certificate
files, in PEM format. You can use e.g. OpenSSL to convert a p12 (PKCS
12) file to PEM format:
openssl pkcs12 -clcerts -nokeys -in cert.p12 -out cert.pem
openssl pkcs12 -nocerts -in cert.p12 -out key.pem
Note that client certificate password input is very inflexible ATM. At
the moment this seems to be console only, which is presumably the
default behaviour of libopenssl. In future mechanize may support
third-party libraries that (I assume) allow more options here.
"""
self._client_cert_manager.add_key_cert(url, key_file, cert_file)
# the following are rarely useful -- use add_password / add_proxy_password
# instead
def set_password_manager(self, password_manager):
"""Set a mechanize.HTTPPasswordMgrWithDefaultRealm, or None."""
self._password_manager = password_manager
self._set_handler("_basicauth", obj=password_manager)
self._set_handler("_digestauth", obj=password_manager)
def set_proxy_password_manager(self, password_manager):
"""Set a mechanize.HTTPProxyPasswordMgr, or None."""
self._proxy_password_manager = password_manager
self._set_handler("_proxy_basicauth", obj=password_manager)
self._set_handler("_proxy_digestauth", obj=password_manager)
def set_client_cert_manager(self, cert_manager):
"""Set a mechanize.HTTPClientCertMgr, or None."""
self._client_cert_manager = cert_manager
handler = self._ua_handlers["https"]
handler.client_cert_manager = cert_manager
# these methods all take a boolean parameter
def set_handle_robots(self, handle):
"""Set whether to observe rules from robots.txt."""
self._set_handler("_robots", handle)
def set_handle_redirect(self, handle):
"""Set whether to handle HTTP 30x redirections."""
self._set_handler("_redirect", handle)
def set_handle_refresh(self, handle, max_time=30.0, honor_time=False):
"""Set whether to handle HTTP Refresh headers."""
self._set_handler("_refresh", handle, constructor_kwds=
{"max_time": max_time, "honor_time": honor_time})
def set_handle_equiv(self, handle, head_parser_class=None):
"""Set whether to treat HTML http-equiv headers like HTTP headers.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
if head_parser_class is not None:
constructor_kwds = {"head_parser_class": head_parser_class}
else:
constructor_kwds={}
self._set_handler("_equiv", handle, constructor_kwds=constructor_kwds)
def set_handle_gzip(self, handle):
"""Handle gzip transfer encoding.
"""
if handle:
warnings.warn(
"gzip transfer encoding is experimental!", stacklevel=2)
self._set_handler("_gzip", handle)
def set_debug_redirects(self, handle):
"""Log information about HTTP redirects (including refreshes).
Logging is performed using module logging. The logger name is
"mechanize.http_redirects". To actually print some debug output,
eg:
import sys, logging
logger = logging.getLogger("mechanize.http_redirects")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
Other logger names relevant to this module:
"mechanize.http_responses"
"mechanize.cookies" (or "cookielib" if running Python 2.4)
To turn on everything:
import sys, logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
"""
self._set_handler("_debug_redirect", handle)
def set_debug_responses(self, handle):
"""Log HTTP response bodies.
See docstring for .set_debug_redirects() for details of logging.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
self._set_handler("_debug_response_body", handle)
def set_debug_http(self, handle):
"""Print HTTP headers to sys.stdout."""
level = int(bool(handle))
for scheme in "http", "https":
h = self._ua_handlers.get(scheme)
if h is not None:
h.set_http_debuglevel(level)
def _set_handler(self, name, handle=None, obj=None,
constructor_args=(), constructor_kwds={}):
if handle is None:
handle = obj is not None
if handle:
handler_class = self.handler_classes[name]
if obj is not None:
newhandler = handler_class(obj)
else:
newhandler = handler_class(
*constructor_args, **constructor_kwds)
else:
newhandler = None
self._replace_handler(name, newhandler)
def _replace_handler(self, name, newhandler=None):
# first, if handler was previously added, remove it
if name is not None:
handler = self._ua_handlers.get(name)
if handler:
try:
self.handlers.remove(handler)
except ValueError:
pass
# then add the replacement, if any
if newhandler is not None:
self.add_handler(newhandler)
self._ua_handlers[name] = newhandler
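# Configuration sketch (methods are those defined above; the URL and
# credentials are made up, and "https" is only available when
# HTTPSHandler exists):
def _example_configure(ua):
    ua.set_handled_schemes(["http", "https"])
    ua.set_handle_robots(False)  # don't fetch/observe robots.txt
    ua.add_password("http://example.com/admin/", "joe", "secret")
    ua.set_debug_http(True)  # dump HTTP headers to sys.stdout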
class UserAgent(UserAgentBase):
def __init__(self):
UserAgentBase.__init__(self)
self._seekable = False
def set_seekable_responses(self, handle):
"""Make response objects .seek()able."""
self._seekable = bool(handle)
def open(self, fullurl, data=None):
if self._seekable:
def bound_open(fullurl, data=None):
return UserAgentBase.open(self, fullurl, data)
response = _opener.wrapped_open(
bound_open, _response.seek_wrapped_response, fullurl, data)
else:
response = UserAgentBase.open(self, fullurl, data)
return response
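# Sketch of the seekable-response switch this subclass adds; the URL is
# made up:
def _example_seekable(url="http://example.com/"):
    ua = UserAgent()
    ua.set_seekable_responses(True)
    response = ua.open(url)
    first = response.read(512)
    response.seek(0)  # rewind works because the response was seek-wrapped
    return first == response.read(512)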
diff --git a/mechanize/_util.py b/mechanize/_util.py
index 3516e82..ef34af2 100644
--- a/mechanize/_util.py
+++ b/mechanize/_util.py
@@ -1,280 +1,280 @@
"""Utility functions and date/time routines.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
-import re, string, time, warnings
+import re, time, warnings
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def hide_deprecations():
warnings.filterwarnings('ignore', category=DeprecationWarning)
def reset_deprecations():
warnings.filterwarnings('default', category=DeprecationWarning)
def isstringlike(x):
try: x+""
except: return False
else: return True
## def caller():
## try:
## raise SyntaxError
## except:
## import sys
## return sys.exc_traceback.tb_frame.f_back.f_back.f_code.co_name
from calendar import timegm
# Date/time conversion routines for formats used by the HTTP protocol.
EPOCH = 1970
def my_timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
months_lower = []
for month in months: months_lower.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
days[wday], mday, months[mon-1], year, hour, min, sec)
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
timezone_re = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
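# Offset examples for the parser above (doctest-style):
# >>> offset_from_tz_string("GMT")
# 0
# >>> offset_from_tz_string("-0800")
# -28800
# >>> offset_from_tz_string("+01:30")
# 5400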
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = months_lower.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = my_timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
wkday_re = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
loose_http_re = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = strict_re.search(text)
if m:
g = m.groups()
mon = months_lower.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return my_timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = wkday_re.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = loose_http_re.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
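# Epoch-seconds sketch for http2time() (the strict fast path parses the
# seconds field with float(), hence the int() in the first example):
# >>> int(http2time("Wed, 09 Feb 1994 22:23:32 GMT"))
# 760832612
# >>> http2time("09 Feb 1994 22:23:32 GMT")  # same instant, no weekday
# 760832612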
iso_re = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = iso_re.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
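# Sketch for iso2time(): 14:15:29 at offset -0100 is 15:15:29 UTC:
# >>> iso2time("1994-02-03 14:15:29 -0100")
# 760288529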
diff --git a/test.py b/test.py
index a23254b..31d9363 100755
--- a/test.py
+++ b/test.py
@@ -1,149 +1,159 @@
#!/usr/bin/env python
"""Test runner.
For further help, enter this at a command prompt:
python test.py --help
"""
# Modules containing tests to run -- a test is anything named *Tests, which
# should be classes deriving from unittest.TestCase.
MODULE_NAMES = ["test_date", "test_browser", "test_response", "test_cookies",
"test_headers", "test_urllib2", "test_pullparser",
"test_useragent", "test_html", "test_opener",
]
import sys, os, logging, glob
#level = logging.DEBUG
#level = logging.INFO
#level = logging.WARNING
#level = logging.NOTSET
#logging.getLogger("mechanize").setLevel(level)
#logging.getLogger("mechanize").addHandler(logging.StreamHandler(sys.stdout))
if __name__ == "__main__":
# XXX
# temporary stop-gap to run doctests &c.
# should switch to nose or something
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# XXXX coverage output seems incorrect ATM
run_coverage = "-c" in sys.argv
if run_coverage:
sys.argv.remove("-c")
use_cgitb = "-t" in sys.argv
if use_cgitb:
sys.argv.remove("-t")
run_doctests = "-d" not in sys.argv
if not run_doctests:
sys.argv.remove("-d")
run_unittests = "-u" not in sys.argv
if not run_unittests:
sys.argv.remove("-u")
# import local copy of Python 2.5 doctest
assert os.path.isdir("test")
sys.path.insert(0, "test")
# needed for recent doctest / linecache -- this is only for testing
# purposes, these don't get installed
# doctest.py revision 45701 and linecache.py revision 45940. Since
# linecache is used by Python itself, linecache.py is renamed
# linecache_copy.py, and this copy of doctest is modified (only) to use
# that renamed module.
sys.path.insert(0, "test-tools")
import doctest
import testprogram
if run_coverage:
import coverage
print 'running coverage'
coverage.erase()
coverage.start()
import mechanize
class DefaultResult:
def wasSuccessful(self):
return True
result = DefaultResult()
if run_doctests:
# run .doctest files needing special support
common_globs = {"mechanize": mechanize}
pm_doctest_filename = os.path.join(
"test", "test_password_manager.doctest")
for globs in [
{"mgr_class": mechanize.HTTPPasswordMgr},
{"mgr_class": mechanize.HTTPProxyPasswordMgr},
]:
globs.update(common_globs)
doctest.testfile(
pm_doctest_filename,
#os.path.join("test", "test_scratch.doctest"),
globs=globs,
)
+ try:
+ import robotparser
+ except ImportError:
+ pass
+ else:
+ doctest.testfile(os.path.join("test",
+ "test_robotfileparser.doctest"))
# run .doctest files
special_doctests = [pm_doctest_filename,
os.path.join("test", "test_scratch.doctest"),
+ os.path.join("test",
+ "test_robotfileparser.doctest"),
]
doctest_files = glob.glob(os.path.join("test", "*.doctest"))
for dt in special_doctests:
if dt in doctest_files:
doctest_files.remove(dt)
for df in doctest_files:
doctest.testfile(df)
# run doctests in docstrings
from mechanize import _headersutil, _auth, _clientcookie, _pullparser, \
- _http, _rfc3986
+ _http, _rfc3986, _useragent
doctest.testmod(_headersutil)
doctest.testmod(_rfc3986)
doctest.testmod(_auth)
doctest.testmod(_clientcookie)
doctest.testmod(_pullparser)
doctest.testmod(_http)
+ doctest.testmod(_useragent)
if run_unittests:
# run vanilla unittest tests
import unittest
test_path = os.path.join(os.path.dirname(sys.argv[0]), "test")
sys.path.insert(0, test_path)
test_runner = None
if use_cgitb:
test_runner = testprogram.CgitbTextTestRunner()
prog = testprogram.TestProgram(
MODULE_NAMES,
testRunner=test_runner,
localServerProcess=testprogram.TwistedServerProcess(),
)
result = prog.runTests()
if run_coverage:
# HTML coverage report
import colorize
try:
os.mkdir("coverage")
except OSError:
pass
private_modules = glob.glob("mechanize/_*.py")
private_modules.remove("mechanize/__init__.py")
for module_filename in private_modules:
module_name = module_filename.replace("/", ".")[:-3]
print module_name
module = sys.modules[module_name]
f, s, m, mf = coverage.analysis(module)
fo = open(os.path.join('coverage', os.path.basename(f)+'.html'), 'wb')
colorize.colorize_file(f, outstream=fo, not_covered=mf)
fo.close()
coverage.report(module)
#print coverage.analysis(module)
# XXX exit status is wrong -- does not take account of doctests
sys.exit(not result.wasSuccessful())
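# Flag summary, as parsed above: -c enables coverage, -t uses the cgitb
# test runner, -d skips the doctests, -u skips the unittests. For
# example, "python test.py -d" runs only the unittest suites.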
diff --git a/test/test_html.doctest b/test/test_html.doctest
index 4355c10..476951b 100644
--- a/test/test_html.doctest
+++ b/test/test_html.doctest
@@ -1,253 +1,262 @@
>>> import mechanize
>>> from mechanize._response import test_html_response
>>> from mechanize._html import LinksFactory, FormsFactory, TitleFactory, \
... MechanizeBs, \
... RobustLinksFactory, RobustFormsFactory, RobustTitleFactory
mechanize.ParseError should be raised on parsing erroneous HTML.
For backwards compatibility, mechanize.ParseError derives from
exception classes that mechanize used to raise, prior to version
0.1.6.
>>> import sgmllib
>>> import HTMLParser
>>> import ClientForm
>>> issubclass(mechanize.ParseError, sgmllib.SGMLParseError)
True
>>> issubclass(mechanize.ParseError, HTMLParser.HTMLParseError)
True
>>> issubclass(mechanize.ParseError, ClientForm.ParseError)
True
>>> def create_response(error=True):
... extra = ""
... if error:
... extra = "<!!!>"
... html = """\
... <html>
... <head>
... <title>Title</title>
... %s
... </head>
... <body>
... <p>Hello world
... </body>
... </html>
... """ % extra
... return test_html_response(html)
>>> f = LinksFactory()
>>> f.set_response(create_response(), "http://example.com", "latin-1")
>>> list(f.links()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> f = FormsFactory()
>>> f.set_response(create_response(), "latin-1")
>>> list(f.forms()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> f = TitleFactory()
>>> f.set_response(create_response(), "latin-1")
>>> f.title() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
Accessing attributes on Factory may also raise ParseError
>>> def factory_getattr(attr_name):
... fact = mechanize.DefaultFactory()
... fact.set_response(create_response())
... getattr(fact, attr_name)
>>> factory_getattr("title") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> factory_getattr("global_form") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
BeautifulSoup ParseErrors:
XXX If I could come up with examples that break links and forms
parsing, I'd uncomment these!
>>> def create_soup(html):
... r = test_html_response(html)
... return MechanizeBs("latin-1", r.read())
#>>> f = RobustLinksFactory()
#>>> html = """\
#... <a href="a">
#... <frame src="b">
#... <a href="c">
#... <iframe src="d">
#... </a>
#... </area>
#... </frame>
#... """
#>>> f.set_soup(create_soup(html), "http://example.com", "latin-1")
#>>> list(f.links()) # doctest: +IGNORE_EXCEPTION_DETAIL
#Traceback (most recent call last):
#ParseError:
>>> html = """\
... <table>
... <tr><td>
... <input name='broken'>
... </td>
... </form>
... </tr>
... </form>
... """
>>> f = RobustFormsFactory()
>>> f.set_response(create_response(), "latin-1")
>>> list(f.forms()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
#>>> f = RobustTitleFactory()
#>>> f.set_soup(create_soup(""), "latin-1")
#>>> f.title() # doctest: +IGNORE_EXCEPTION_DETAIL
#Traceback (most recent call last):
#ParseError:
Utility class for caching forms etc.
>>> from mechanize._html import CachingGeneratorFunction
>>> i = [1]
>>> func = CachingGeneratorFunction(i)
>>> list(func())
[1]
>>> list(func())
[1]
>>> i = [1, 2, 3]
>>> func = CachingGeneratorFunction(i)
>>> list(func())
[1, 2, 3]
>>> i = func()
>>> i.next()
1
>>> i.next()
2
>>> i.next()
3
>>> i = func()
>>> j = func()
>>> i.next()
1
>>> j.next()
1
>>> i.next()
2
>>> j.next()
2
>>> j.next()
3
>>> i.next()
3
>>> i.next()
Traceback (most recent call last):
...
StopIteration
>>> j.next()
Traceback (most recent call last):
...
StopIteration
Link text parsing
>>> def get_first_link_text_bs(html):
... factory = RobustLinksFactory()
... soup = MechanizeBs("utf-8", html)
... factory.set_soup(soup, "http://example.com/", "utf-8")
... return list(factory.links())[0].text
>>> def get_first_link_text_sgmllib(html):
... factory = LinksFactory()
... response = test_html_response(html)
... factory.set_response(response, "http://example.com/", "utf-8")
... return list(factory.links())[0].text
Whitespace gets compressed down to single spaces. Tags are removed.
>>> html = ("""\
... <html><head><title>Title</title></head><body>
... <p><a href="http://example.com/">The quick\tbrown fox jumps
... over the <i><b>lazy</b></i> dog </a>
... </body></html>
... """)
>>> get_first_link_text_bs(html)
'The quick brown fox jumps over the lazy dog'
>>> get_first_link_text_sgmllib(html)
'The quick brown fox jumps over the lazy dog'
Empty <a> links have empty link text
>>> html = ("""\
... <html><head><title>Title</title></head><body>
... <p><a href="http://example.com/"></a>
... </body></html>
... """)
>>> get_first_link_text_bs(html)
''
>>> get_first_link_text_sgmllib(html)
''
But for backwards-compatibility, empty non-<a> links have None link text
>>> html = ("""\
... <html><head><title>Title</title></head><body>
... <p><frame src="http://example.com/"></frame>
... </body></html>
... """)
>>> print get_first_link_text_bs(html)
None
>>> print get_first_link_text_sgmllib(html)
None
Title parsing. We follow Firefox's behaviour with regard to child
elements (haven't tested IE).
>>> def get_title_bs(html):
... factory = RobustTitleFactory()
... soup = MechanizeBs("utf-8", html)
... factory.set_soup(soup, "utf-8")
... return factory.title()
>>> def get_title_sgmllib(html):
... factory = TitleFactory()
... response = test_html_response(html)
... factory.set_response(response, "utf-8")
... return factory.title()
>>> html = ("""\
... <html><head>
... <title>Title</title>
... </head><body><p>Blah.<p></body></html>
... """)
>>> get_title_bs(html)
'Title'
>>> get_title_sgmllib(html)
'Title'
>>> html = ("""\
... <html><head>
... <title> Ti<script type="text/strange">alert("this is valid HTML -- yuck!")</script>
... tle &&
... </title>
... </head><body><p>Blah.<p></body></html>
... """)
>>> get_title_bs(html)
'Ti<script type="text/strange">alert("this is valid HTML -- yuck!")</script> tle &&'
>>> get_title_sgmllib(html)
'Ti<script type="text/strange">alert("this is valid HTML -- yuck!")</script> tle &&'
+
+
+HTML truncated right after an opening <title> tag used to cause an exception
+
+>>> html = ("""\
+... <html><head>
+... <title>""")
+>>> get_title_sgmllib(html)
+''
diff --git a/test/test_robotfileparser.doctest b/test/test_robotfileparser.doctest
new file mode 100644
index 0000000..0939900
--- /dev/null
+++ b/test/test_robotfileparser.doctest
@@ -0,0 +1,8 @@
+>>> from mechanize._http import MechanizeRobotFileParser
+
+Calling .set_opener() without args sets a default opener.
+
+>>> rfp = MechanizeRobotFileParser()
+>>> rfp.set_opener()
+>>> rfp._opener # doctest: +ELLIPSIS
+<mechanize._opener.OpenerDirector instance at ...>
|
Almad/Mechanize
|
f679d9867e81162749c0a15d602ad74bccc5d029
|
Fix some too-long lines.
|
diff --git a/mechanize/_clientcookie.py b/mechanize/_clientcookie.py
index e8f0f67..ea9e453 100644
--- a/mechanize/_clientcookie.py
+++ b/mechanize/_clientcookie.py
@@ -1,1333 +1,1334 @@
"""HTTP cookie handling for web clients.
This module originally developed from my port of Gisle Aas' Perl module
HTTP::Cookies, from the libwww-perl library.
Docstrings, comments and debug strings in this code refer to the
attributes of the HTTP cookie system as cookie-attributes, to distinguish
them clearly from Python attributes.
CookieJar____
/ \ \
FileCookieJar \ \
/ | \ \ \
MozillaCookieJar | LWPCookieJar \ \
| | \
| ---MSIEBase | \
| / | | \
| / MSIEDBCookieJar BSDDBCookieJar
|/
MSIECookieJar
Comments to John J Lee <[email protected]>.
Copyright 2002-2006 John J Lee <[email protected]>
Copyright 1997-1999 Gisle Aas (original libwww-perl code)
Copyright 2002-2003 Johnny Lee (original MSIE Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import sys, re, copy, time, struct, urllib, types, logging
try:
import threading
_threading = threading; del threading
except ImportError:
import dummy_threading
_threading = dummy_threading; del dummy_threading
import httplib # only for the default HTTP port
MISSING_FILENAME_TEXT = ("a filename was not supplied (nor was the CookieJar "
"instance initialised with one)")
DEFAULT_HTTP_PORT = str(httplib.HTTP_PORT)
from _headersutil import split_header_words, parse_ns_headers
from _util import isstringlike
import _rfc3986
debug = logging.getLogger("mechanize.cookies").debug
def reraise_unmasked_exceptions(unmasked=()):
# There are a few catch-all except: statements in this module, for
# catching input that's bad in unexpected ways.
# This function re-raises some exceptions we don't want to trap.
import mechanize, warnings
if not mechanize.USE_BARE_EXCEPT:
raise
unmasked = unmasked + (KeyboardInterrupt, SystemExit, MemoryError)
etype = sys.exc_info()[0]
if issubclass(etype, unmasked):
raise
# swallowed an exception
import traceback, StringIO
f = StringIO.StringIO()
traceback.print_exc(None, f)
msg = f.getvalue()
warnings.warn("mechanize bug!\n%s" % msg, stacklevel=2)
IPV4_RE = re.compile(r"\.\d+$")
def is_HDN(text):
"""Return True if text is a host domain name."""
# XXX
# This may well be wrong. Which RFC is HDN defined in, if any (for
# the purposes of RFC 2965)?
# For the current implementation, what about IPv6? Remember to look
# at other uses of IPV4_RE also, if you change this.
return not (IPV4_RE.search(text) or
text == "" or
text[0] == "." or text[-1] == ".")
def domain_match(A, B):
"""Return True if domain A domain-matches domain B, according to RFC 2965.
A and B may be host domain names or IP addresses.
RFC 2965, section 1:
Host names can be specified either as an IP address or a HDN string.
Sometimes we compare one host name with another. (Such comparisons SHALL
be case-insensitive.) Host A's name domain-matches host B's if
* their host name strings string-compare equal; or
* A is a HDN string and has the form NB, where N is a non-empty
name string, B has the form .B', and B' is a HDN string. (So,
x.y.com domain-matches .Y.com but not Y.com.)
Note that domain-match is not a commutative operation: a.b.c.com
domain-matches .c.com, but not the reverse.
"""
# Note that, if A or B are IP addresses, the only relevant part of the
# definition of the domain-match algorithm is the direct string-compare.
A = A.lower()
B = B.lower()
if A == B:
return True
if not is_HDN(A):
return False
i = A.rfind(B)
has_form_nb = not (i == -1 or i == 0)
return (
has_form_nb and
B.startswith(".") and
is_HDN(B[1:])
)
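# Doctest-style sketch of the asymmetry described above:
# >>> domain_match("x.y.com", ".y.com")
# True
# >>> domain_match("y.com", ".y.com")
# False
# >>> domain_match("x.y.com", "y.com")  # no leading dot: exact match only
# False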
def liberal_is_HDN(text):
"""Return True if text is a sort-of-like a host domain name.
For accepting/blocking domains.
"""
return not IPV4_RE.search(text)
def user_domain_match(A, B):
"""For blocking/accepting domains.
A and B may be host domain names or IP addresses.
"""
A = A.lower()
B = B.lower()
if not (liberal_is_HDN(A) and liberal_is_HDN(B)):
if A == B:
# equal IP addresses
return True
return False
initial_dot = B.startswith(".")
if initial_dot and A.endswith(B):
return True
if not initial_dot and A == B:
return True
return False
cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = _rfc3986.urlsplit(url)[1]
if host is None:
host = request.get_header("Host", "")
# remove port, if present
host = cut_port_re.sub("", host, 1)
return host.lower()
def eff_request_host(request):
"""Return a tuple (request-host, effective request-host name).
As defined by RFC 2965, except both are lowercased.
"""
erhn = req_host = request_host(request)
if req_host.find(".") == -1 and not IPV4_RE.search(req_host):
erhn = req_host + ".local"
return req_host, erhn
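# Sketch of the ".local" rule above, assuming a mechanize Request (which
# supplies the get_full_url/get_header interface used here):
# >>> from mechanize import Request
# >>> eff_request_host(Request("http://www.acme.com/"))
# ('www.acme.com', 'www.acme.com')
# >>> eff_request_host(Request("http://foo/"))
# ('foo', 'foo.local')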
def request_path(request):
"""request-URI, as defined by RFC 2965."""
url = request.get_full_url()
path, query, frag = _rfc3986.urlsplit(url)[2:]
path = escape_path(path)
req_path = _rfc3986.urlunsplit((None, None, path, query, frag))
if not req_path.startswith("/"):
req_path = "/"+req_path
return req_path
def request_port(request):
host = request.get_host()
i = host.find(':')
if i >= 0:
port = host[i+1:]
try:
int(port)
except ValueError:
debug("nonnumeric port: '%s'", port)
return None
else:
port = DEFAULT_HTTP_PORT
return port
# Characters in addition to A-Z, a-z, 0-9, '_', '.', and '-' that don't
# need to be escaped to form a valid HTTP URL (RFCs 2396 and 1738).
HTTP_PATH_SAFE = "%/;:@&=+$,!~*'()"
ESCAPED_CHAR_RE = re.compile(r"%([0-9a-fA-F][0-9a-fA-F])")
def uppercase_escaped_char(match):
return "%%%s" % match.group(1).upper()
def escape_path(path):
"""Escape any invalid characters in HTTP URL, and uppercase all escapes."""
# There's no knowing what character encoding was used to create URLs
# containing %-escapes, but since we have to pick one to escape invalid
# path characters, we pick UTF-8, as recommended in the HTML 4.0
# specification:
# http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.2.1
# And here, kind of: draft-fielding-uri-rfc2396bis-03
# (And in draft IRI specification: draft-duerst-iri-05)
# (And here, for new URI schemes: RFC 2718)
if isinstance(path, types.UnicodeType):
path = path.encode("utf-8")
path = urllib.quote(path, HTTP_PATH_SAFE)
path = ESCAPED_CHAR_RE.sub(uppercase_escaped_char, path)
return path
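# Escaping examples (doctest-style):
# >>> escape_path("/a b")
# '/a%20b'
# >>> escape_path("/%7ejoe/")  # existing escapes are uppercased, not re-quoted
# '/%7Ejoe/'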
def reach(h):
"""Return reach of host h, as defined by RFC 2965, section 1.
The reach R of a host name H is defined as follows:
* If
- H is the host domain name of a host; and,
- H has the form A.B; and
- A has no embedded (that is, interior) dots; and
- B has at least one embedded dot, or B is the string "local".
then the reach of H is .B.
* Otherwise, the reach of H is H.
>>> reach("www.acme.com")
'.acme.com'
>>> reach("acme.com")
'acme.com'
>>> reach("acme.local")
'.local'
"""
i = h.find(".")
if i >= 0:
#a = h[:i] # this line is only here to show what a is
b = h[i+1:]
i = b.find(".")
if is_HDN(h) and (i >= 0 or b == "local"):
return "."+b
return h
def is_third_party(request):
"""
RFC 2965, section 3.3.6:
An unverifiable transaction is to a third-party host if its request-
host U does not domain-match the reach R of the request-host O in the
origin transaction.
"""
req_host = request_host(request)
# the origin request's request-host was stuffed into request by
# _urllib2_support.AbstractHTTPHandler
return not domain_match(req_host, reach(request.origin_req_host))
class Cookie:
"""HTTP Cookie.
This class represents both Netscape and RFC 2965 cookies.
This is deliberately a very simple class. It just holds attributes. It's
possible to construct Cookie instances that don't comply with the cookie
standards. CookieJar.make_cookies is the factory function for Cookie
objects -- it deals with cookie parsing, supplying defaults, and
normalising to the representation used in this class. CookiePolicy is
responsible for checking them to see whether they should be accepted from
and returned to the server.
version: integer;
name: string;
value: string (may be None);
port: string; None indicates no attribute was supplied (eg. "Port", rather
than eg. "Port=80"); otherwise, a port string (eg. "80") or a port list
string (eg. "80,8080")
port_specified: boolean; true if a value was supplied with the Port
cookie-attribute
domain: string;
domain_specified: boolean; true if Domain was explicitly set
domain_initial_dot: boolean; true if Domain as set in HTTP header by server
started with a dot (yes, this really is necessary!)
path: string;
path_specified: boolean; true if Path was explicitly set
secure: boolean; true if should only be returned over secure connection
expires: integer; seconds since epoch (RFC 2965 cookies should calculate
this value from the Max-Age attribute)
discard: boolean, true if this is a session cookie; (if no expires value,
this should be true)
comment: string;
comment_url: string;
rfc2109: boolean; true if cookie arrived in a Set-Cookie: (not
Set-Cookie2:) header, but had a version cookie-attribute of 1
rest: mapping of other cookie-attributes
Note that the port may be present in the headers, but unspecified ("Port"
rather than"Port=80", for example); if this is the case, port is None.
"""
def __init__(self, version, name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest,
rfc2109=False,
):
if version is not None: version = int(version)
if expires is not None: expires = int(expires)
if port is None and port_specified is True:
raise ValueError("if port is None, port_specified must be false")
self.version = version
self.name = name
self.value = value
self.port = port
self.port_specified = port_specified
# normalise case, as per RFC 2965 section 3.3.3
self.domain = domain.lower()
self.domain_specified = domain_specified
# Sigh. We need to know whether the domain given in the
# cookie-attribute had an initial dot, in order to follow RFC 2965
# (as clarified in draft errata). Needed for the returned $Domain
# value.
self.domain_initial_dot = domain_initial_dot
self.path = path
self.path_specified = path_specified
self.secure = secure
self.expires = expires
self.discard = discard
self.comment = comment
self.comment_url = comment_url
self.rfc2109 = rfc2109
self._rest = copy.copy(rest)
def has_nonstandard_attr(self, name):
return self._rest.has_key(name)
def get_nonstandard_attr(self, name, default=None):
return self._rest.get(name, default)
def set_nonstandard_attr(self, name, value):
self._rest[name] = value
def nonstandard_attr_keys(self):
return self._rest.keys()
def is_expired(self, now=None):
if now is None: now = time.time()
return (self.expires is not None) and (self.expires <= now)
def __str__(self):
if self.port is None: p = ""
else: p = ":"+self.port
limit = self.domain + p + self.path
if self.value is not None:
namevalue = "%s=%s" % (self.name, self.value)
else:
namevalue = self.name
return "<Cookie %s for %s>" % (namevalue, limit)
def __repr__(self):
args = []
for name in ["version", "name", "value",
"port", "port_specified",
"domain", "domain_specified", "domain_initial_dot",
"path", "path_specified",
"secure", "expires", "discard", "comment", "comment_url",
]:
attr = getattr(self, name)
args.append("%s=%s" % (name, repr(attr)))
args.append("rest=%s" % repr(self._rest))
args.append("rfc2109=%s" % repr(self.rfc2109))
return "Cookie(%s)" % ", ".join(args)
class CookiePolicy:
"""Defines which cookies get accepted from and returned to server.
May also modify cookies.
The subclass DefaultCookiePolicy defines the standard rules for Netscape
and RFC 2965 cookies -- override that if you want a customised policy.
As well as implementing set_ok and return_ok, implementations of this
interface must also supply the following attributes, indicating which
protocols should be used, and how. These can be read and set at any time,
though whether that makes complete sense from the protocol point of view is
doubtful.
Public attributes:
netscape: implement netscape protocol
rfc2965: implement RFC 2965 protocol
rfc2109_as_netscape:
WARNING: This argument will change or go away if it is not accepted into
the Python standard library in this form!
If true, treat RFC 2109 cookies as though they were Netscape cookies. The
default is for this attribute to be None, which means treat 2109 cookies
as RFC 2965 cookies unless RFC 2965 handling is switched off (which it is,
by default), and as Netscape cookies otherwise.
hide_cookie2: don't add Cookie2 header to requests (the presence of
this header indicates to the server that we understand RFC 2965
cookies)
"""
def set_ok(self, cookie, request):
"""Return true if (and only if) cookie should be accepted from server.
Currently, pre-expired cookies never get this far -- the CookieJar
class deletes such cookies itself.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.extract_cookies.__doc__
"""
raise NotImplementedError()
def return_ok(self, cookie, request):
"""Return true if (and only if) cookie should be returned to server.
cookie: mechanize.Cookie object
request: object implementing the interface defined by
CookieJar.add_cookie_header.__doc__
"""
raise NotImplementedError()
def domain_return_ok(self, domain, request):
"""Return false if cookies should not be returned, given cookie domain.
This is here as an optimization, to remove the need for checking every
cookie with a particular domain (which may involve reading many files).
The default implementations of domain_return_ok and path_return_ok
(return True) leave all the work to return_ok.
If domain_return_ok returns true for the cookie domain, path_return_ok
is called for the cookie path. Otherwise, path_return_ok and return_ok
are never called for that cookie domain. If path_return_ok returns
true, return_ok is called with the Cookie object itself for a full
check. Otherwise, return_ok is never called for that cookie path.
Note that domain_return_ok is called for every *cookie* domain, not
just for the *request* domain. For example, the function might be
- called with both ".acme.com" and "www.acme.com" if the request domain is
- "www.acme.com". The same goes for path_return_ok.
+ called with both ".acme.com" and "www.acme.com" if the request domain
+ is "www.acme.com". The same goes for path_return_ok.
For argument documentation, see the docstring for return_ok.
"""
return True
def path_return_ok(self, path, request):
"""Return false if cookies should not be returned, given cookie path.
See the docstring for domain_return_ok.
"""
return True
class DefaultCookiePolicy(CookiePolicy):
"""Implements the standard rules for accepting and returning cookies.
Both RFC 2965 and Netscape cookies are covered. RFC 2965 handling is
switched off by default.
The easiest way to provide your own policy is to override this class and
call its methods in your overridden implementations before adding your own
additional checks.
import mechanize
class MyCookiePolicy(mechanize.DefaultCookiePolicy):
def set_ok(self, cookie, request):
if not mechanize.DefaultCookiePolicy.set_ok(
self, cookie, request):
return False
if i_dont_want_to_store_this_cookie():
return False
return True
In addition to the features required to implement the CookiePolicy
interface, this class allows you to block and allow domains from setting
and receiving cookies. There are also some strictness switches that allow
you to tighten up the rather loose Netscape protocol rules a little bit (at
the cost of blocking some benign cookies).
A domain blacklist and a whitelist are provided (both off by default). Only
domains not in the blacklist and present in the whitelist (if the whitelist
is active) participate in cookie setting and returning. Use the
blocked_domains constructor argument, and blocked_domains and
set_blocked_domains methods (and the corresponding argument and methods for
allowed_domains). If you set a whitelist, you can turn it off again by
setting it to None.
Domains in block or allow lists that do not start with a dot must
string-compare equal. For example, "acme.com" matches a blacklist entry of
"acme.com", but "www.acme.com" does not. Domains that do start with a dot
are matched by more specific domains too. For example, both "www.acme.com"
and "www.munitions.acme.com" match ".acme.com" (but "acme.com" itself does
not). IP addresses are an exception, and must match exactly. For example,
if blocked_domains contains "192.168.1.2" and ".168.1.2", 192.168.1.2 is
blocked, but 193.168.1.2 is not.
Additional Public Attributes:
General strictness switches
strict_domain: don't allow sites to set two-component domains with
country-code top-level domains like .co.uk, .gov.uk, .co.nz. etc.
This is far from perfect and isn't guaranteed to work!
RFC 2965 protocol strictness switches
strict_rfc2965_unverifiable: follow RFC 2965 rules on unverifiable
transactions (usually, an unverifiable transaction is one resulting from
a redirect or an image hosted on another site); if this is false, cookies
are NEVER blocked on the basis of verifiability
Netscape protocol strictness switches
strict_ns_unverifiable: apply RFC 2965 rules on unverifiable transactions
even to Netscape cookies
strict_ns_domain: flags indicating how strict to be with domain-matching
rules for Netscape cookies:
DomainStrictNoDots: when setting cookies, host prefix must not contain a
dot (eg. www.foo.bar.com can't set a cookie for .bar.com, because
www.foo contains a dot)
DomainStrictNonDomain: cookies that did not explicitly specify a Domain
cookie-attribute can only be returned to a domain that string-compares
equal to the domain that set the cookie (eg. rockets.acme.com won't
be returned cookies from acme.com that had no Domain cookie-attribute)
DomainRFC2965Match: when setting cookies, require a full RFC 2965
domain-match
DomainLiberal and DomainStrict are the most useful combinations of the
above flags, for convenience
strict_ns_set_initial_dollar: ignore cookies in Set-Cookie: headers that
have names starting with '$'
strict_ns_set_path: don't allow setting cookies whose path doesn't
path-match request URI
"""
DomainStrictNoDots = 1
DomainStrictNonDomain = 2
DomainRFC2965Match = 4
DomainLiberal = 0
DomainStrict = DomainStrictNoDots|DomainStrictNonDomain
def __init__(self,
blocked_domains=None, allowed_domains=None,
netscape=True, rfc2965=False,
# WARNING: this argument will change or go away if it is not
# accepted into the Python standard library in this form!
# default, ie. treat 2109 as netscape iff not rfc2965
rfc2109_as_netscape=None,
hide_cookie2=False,
strict_domain=False,
strict_rfc2965_unverifiable=True,
strict_ns_unverifiable=False,
strict_ns_domain=DomainLiberal,
strict_ns_set_initial_dollar=False,
strict_ns_set_path=False,
):
"""
Constructor arguments should be used as keyword arguments only.
blocked_domains: sequence of domain names that we never accept cookies
from, nor return cookies to
allowed_domains: if not None, this is a sequence of the only domains
for which we accept and return cookies
For other arguments, see CookiePolicy.__doc__ and
DefaultCookiePolicy.__doc__.
"""
self.netscape = netscape
self.rfc2965 = rfc2965
self.rfc2109_as_netscape = rfc2109_as_netscape
self.hide_cookie2 = hide_cookie2
self.strict_domain = strict_domain
self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
self.strict_ns_unverifiable = strict_ns_unverifiable
self.strict_ns_domain = strict_ns_domain
self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
self.strict_ns_set_path = strict_ns_set_path
if blocked_domains is not None:
self._blocked_domains = tuple(blocked_domains)
else:
self._blocked_domains = ()
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def blocked_domains(self):
"""Return the sequence of blocked domains (as a tuple)."""
return self._blocked_domains
def set_blocked_domains(self, blocked_domains):
"""Set the sequence of blocked domains."""
self._blocked_domains = tuple(blocked_domains)
def is_blocked(self, domain):
for blocked_domain in self._blocked_domains:
if user_domain_match(domain, blocked_domain):
return True
return False
def allowed_domains(self):
"""Return None, or the sequence of allowed domains (as a tuple)."""
return self._allowed_domains
def set_allowed_domains(self, allowed_domains):
"""Set the sequence of allowed domains, or None."""
if allowed_domains is not None:
allowed_domains = tuple(allowed_domains)
self._allowed_domains = allowed_domains
def is_not_allowed(self, domain):
if self._allowed_domains is None:
return False
for allowed_domain in self._allowed_domains:
if user_domain_match(domain, allowed_domain):
return False
return True
def set_ok(self, cookie, request):
"""
If you override set_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to accept).
"""
debug(" - checking cookie %s", cookie)
assert cookie.name is not None
for n in "version", "verifiability", "name", "path", "domain", "port":
fn_name = "set_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def set_ok_version(self, cookie, request):
if cookie.version is None:
# Version is always set to 0 by parse_ns_headers if it's a Netscape
# cookie, so this must be an invalid RFC 2965 cookie.
debug(" Set-Cookie2 without version attribute (%s)", cookie)
return False
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def set_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during "
"unverifiable transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during "
"unverifiable transaction")
return False
return True
def set_ok_name(self, cookie, request):
# Try and stop servers setting V0 cookies designed to hack other
# servers that know both V0 and V1 protocols.
if (cookie.version == 0 and self.strict_ns_set_initial_dollar and
cookie.name.startswith("$")):
debug(" illegal name (starts with '$'): '%s'", cookie.name)
return False
return True
def set_ok_path(self, cookie, request):
if cookie.path_specified:
req_path = request_path(request)
if ((cookie.version > 0 or
(cookie.version == 0 and self.strict_ns_set_path)) and
not req_path.startswith(cookie.path)):
debug(" path attribute %s is not a prefix of request "
"path %s", cookie.path, req_path)
return False
return True
def set_ok_countrycode_domain(self, cookie, request):
"""Return False if explicit cookie domain is not acceptable.
Called by set_ok_domain, for convenience of overriding by
subclasses.
"""
if cookie.domain_specified and self.strict_domain:
domain = cookie.domain
# since domain was specified, we know that:
assert domain.startswith(".")
if domain.count(".") == 2:
# domain like .foo.bar
i = domain.rfind(".")
tld = domain[i+1:]
sld = domain[1:i]
if (sld.lower() in [
"co", "ac",
"com", "edu", "org", "net", "gov", "mil", "int",
"aero", "biz", "cat", "coop", "info", "jobs", "mobi",
"museum", "name", "pro", "travel",
] and
len(tld) == 2):
# domain like .co.uk
return False
return True
def set_ok_domain(self, cookie, request):
if self.is_blocked(cookie.domain):
debug(" domain %s is in user block-list", cookie.domain)
return False
if self.is_not_allowed(cookie.domain):
debug(" domain %s is not in user allow-list", cookie.domain)
return False
if not self.set_ok_countrycode_domain(cookie, request):
debug(" country-code second level domain %s", cookie.domain)
return False
if cookie.domain_specified:
req_host, erhn = eff_request_host(request)
domain = cookie.domain
if domain.startswith("."):
undotted_domain = domain[1:]
else:
undotted_domain = domain
embedded_dots = (undotted_domain.find(".") >= 0)
if not embedded_dots and domain != ".local":
debug(" non-local domain %s contains no embedded dot",
domain)
return False
if cookie.version == 0:
if (not erhn.endswith(domain) and
(not erhn.startswith(".") and
not ("."+erhn).endswith(domain))):
debug(" effective request-host %s (even with added "
"initial dot) does not end end with %s",
erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainRFC2965Match)):
if not domain_match(erhn, domain):
debug(" effective request-host %s does not domain-match "
"%s", erhn, domain)
return False
if (cookie.version > 0 or
(self.strict_ns_domain & self.DomainStrictNoDots)):
host_prefix = req_host[:-len(domain)]
if (host_prefix.find(".") >= 0 and
not IPV4_RE.search(req_host)):
debug(" host prefix %s for domain %s contains a dot",
host_prefix, domain)
return False
return True
def set_ok_port(self, cookie, request):
if cookie.port_specified:
req_port = request_port(request)
if req_port is None:
req_port = "80"
else:
req_port = str(req_port)
for p in cookie.port.split(","):
try:
int(p)
except ValueError:
debug(" bad port %s (not numeric)", p)
return False
if p == req_port:
break
else:
debug(" request port (%s) not found in %s",
req_port, cookie.port)
return False
return True
def return_ok(self, cookie, request):
"""
If you override return_ok, be sure to call this method. If it returns
false, so should your subclass (assuming your subclass wants to be more
strict about which cookies to return).
"""
# Path has already been checked by path_return_ok, and domain blocking
# done by domain_return_ok.
debug(" - checking cookie %s", cookie)
- for n in "version", "verifiability", "secure", "expires", "port", "domain":
+ for n in ("version", "verifiability", "secure", "expires", "port",
+ "domain"):
fn_name = "return_ok_"+n
fn = getattr(self, fn_name)
if not fn(cookie, request):
return False
return True
def return_ok_version(self, cookie, request):
if cookie.version > 0 and not self.rfc2965:
debug(" RFC 2965 cookies are switched off")
return False
elif cookie.version == 0 and not self.netscape:
debug(" Netscape cookies are switched off")
return False
return True
def return_ok_verifiability(self, cookie, request):
if request.unverifiable and is_third_party(request):
if cookie.version > 0 and self.strict_rfc2965_unverifiable:
debug(" third-party RFC 2965 cookie during unverifiable "
"transaction")
return False
elif cookie.version == 0 and self.strict_ns_unverifiable:
debug(" third-party Netscape cookie during unverifiable "
"transaction")
return False
return True
def return_ok_secure(self, cookie, request):
if cookie.secure and request.get_type() != "https":
debug(" secure cookie with non-secure request")
return False
return True
def return_ok_expires(self, cookie, request):
if cookie.is_expired(self._now):
debug(" cookie expired")
return False
return True
def return_ok_port(self, cookie, request):
if cookie.port:
req_port = request_port(request)
if req_port is None:
req_port = "80"
for p in cookie.port.split(","):
if p == req_port:
break
else:
debug(" request port %s does not match cookie port %s",
req_port, cookie.port)
return False
return True
def return_ok_domain(self, cookie, request):
req_host, erhn = eff_request_host(request)
domain = cookie.domain
# strict check of non-domain cookies: Mozilla does this, MSIE5 doesn't
if (cookie.version == 0 and
(self.strict_ns_domain & self.DomainStrictNonDomain) and
not cookie.domain_specified and domain != erhn):
debug(" cookie with unspecified domain does not string-compare "
"equal to request domain")
return False
if cookie.version > 0 and not domain_match(erhn, domain):
debug(" effective request-host name %s does not domain-match "
"RFC 2965 cookie domain %s", erhn, domain)
return False
if cookie.version == 0 and not ("."+erhn).endswith(domain):
debug(" request-host %s does not match Netscape cookie domain "
"%s", req_host, domain)
return False
return True
def domain_return_ok(self, domain, request):
# Liberal check of domain. This is here as an optimization to avoid
# having to load lots of MSIE cookie files unless necessary.
# Munge req_host and erhn to always start with a dot, so as to err on
# the side of letting cookies through.
dotted_req_host, dotted_erhn = eff_request_host(request)
if not dotted_req_host.startswith("."):
dotted_req_host = "."+dotted_req_host
if not dotted_erhn.startswith("."):
dotted_erhn = "."+dotted_erhn
if not (dotted_req_host.endswith(domain) or
dotted_erhn.endswith(domain)):
#debug(" request domain %s does not match cookie domain %s",
# req_host, domain)
return False
if self.is_blocked(domain):
debug(" domain %s is in user block-list", domain)
return False
if self.is_not_allowed(domain):
debug(" domain %s is not in user allow-list", domain)
return False
return True
def path_return_ok(self, path, request):
debug("- checking cookie path=%s", path)
req_path = request_path(request)
if not req_path.startswith(path):
debug(" %s does not path-match %s", req_path, path)
return False
return True
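# Illustrative sketch, not part of the original source: how the block-list
# matching described in the class docstring behaves.  The domain names are
# invented for illustration.
def _example_blocked_domains():
    policy = DefaultCookiePolicy(
        blocked_domains=[".acme.com", "192.168.1.2"])
    assert policy.is_blocked("www.acme.com")   # ".acme.com" matches subdomains
    assert not policy.is_blocked("acme.com")   # ...but not "acme.com" itself
    assert policy.is_blocked("192.168.1.2")    # IP addresses match exactly
    assert not policy.is_blocked("193.168.1.2")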
def vals_sorted_by_key(adict):
keys = adict.keys()
keys.sort()
return map(adict.get, keys)
class MappingIterator:
"""Iterates over nested mapping, depth-first, in sorted order by key."""
def __init__(self, mapping):
self._s = [(vals_sorted_by_key(mapping), 0, None)] # LIFO stack
def __iter__(self): return self
def next(self):
# this is hairy because of lack of generators
while 1:
try:
vals, i, prev_item = self._s.pop()
except IndexError:
raise StopIteration()
if i < len(vals):
item = vals[i]
i = i + 1
self._s.append((vals, i, prev_item))
try:
item.items
except AttributeError:
# non-mapping
break
else:
# mapping
self._s.append((vals_sorted_by_key(item), 0, item))
continue
return item
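# Illustrative sketch, not part of the original source: MappingIterator
# yields the non-mapping leaf values of a nested mapping depth-first, in
# sorted order by key.
def _example_mapping_iterator():
    nested = {"b": 2, "a": {"y": 1, "x": 0}}
    # descends into "a" first (sorted keys), then yields "b"'s value
    assert list(MappingIterator(nested)) == [0, 1, 2]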
# Used as second parameter to dict.get method, to distinguish absent
# dict key from one with a None value.
class Absent: pass
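# Illustrative sketch, not part of the original source: using Absent as
# dict.get()'s default tells a key stored with value None apart from a key
# that is missing altogether.
def _example_absent_sentinel():
    standard = {"port": None}
    assert standard.get("port", Absent) is None      # present, value None
    assert standard.get("domain", Absent) is Absent  # key absent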
class CookieJar:
"""Collection of HTTP cookies.
You may not need to know about this class: try mechanize.urlopen().
The major methods are extract_cookies and add_cookie_header; these are all
you are likely to need.
CookieJar supports the iterator protocol:
for cookie in cookiejar:
# do something with cookie
Methods:
add_cookie_header(request)
extract_cookies(response, request)
make_cookies(response, request)
set_cookie_if_ok(cookie, request)
set_cookie(cookie)
clear_session_cookies()
clear_expired_cookies()
clear(domain=None, path=None, name=None)
Public attributes
policy: CookiePolicy object
"""
non_word_re = re.compile(r"\W")
quote_re = re.compile(r"([\"\\])")
strict_domain_re = re.compile(r"\.?[^.]*")
domain_re = re.compile(r"[^.]*")
dots_re = re.compile(r"^\.+")
def __init__(self, policy=None):
"""
See CookieJar.__doc__ for argument documentation.
"""
if policy is None:
policy = DefaultCookiePolicy()
self._policy = policy
self._cookies = {}
# for __getitem__ iteration in pre-2.2 Pythons
self._prev_getitem_index = 0
def set_policy(self, policy):
self._policy = policy
def _cookies_for_domain(self, domain, request):
cookies = []
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
cookies_by_path = self._cookies[domain]
for path in cookies_by_path.keys():
if not self._policy.path_return_ok(path, request):
continue
cookies_by_name = cookies_by_path[path]
for cookie in cookies_by_name.values():
if not self._policy.return_ok(cookie, request):
debug(" not returning cookie")
continue
debug(" it's a match")
cookies.append(cookie)
return cookies
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
cookies = []
for domain in self._cookies.keys():
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookie_attrs(self, cookies):
"""Return a list of cookie-attributes to be returned to server.
like ['foo="bar"; $Path="/"', ...]
The $Version attribute is also added when appropriate (currently only
once per request).
"""
# add cookies in order of most specific (ie. longest) path first
def decreasing_size(a, b): return cmp(len(b.path), len(a.path))
cookies.sort(decreasing_size)
version_set = False
attrs = []
for cookie in cookies:
# set version of Cookie header
# XXX
# What should it be if multiple matching Set-Cookie headers have
# different versions themselves?
# Answer: there is no answer; was supposed to be settled by
# RFC 2965 errata, but that may never appear...
version = cookie.version
if not version_set:
version_set = True
if version > 0:
attrs.append("$Version=%s" % version)
# quote cookie value if necessary
# (not for Netscape protocol, which already has any quotes
# intact, due to the poorly-specified Netscape Cookie: syntax)
if ((cookie.value is not None) and
self.non_word_re.search(cookie.value) and version > 0):
value = self.quote_re.sub(r"\\\1", cookie.value)
else:
value = cookie.value
# add cookie-attributes to be returned in Cookie header
if cookie.value is None:
attrs.append(cookie.name)
else:
attrs.append("%s=%s" % (cookie.name, value))
if version > 0:
if cookie.path_specified:
attrs.append('$Path="%s"' % cookie.path)
if cookie.domain.startswith("."):
domain = cookie.domain
if (not cookie.domain_initial_dot and
domain.startswith(".")):
domain = domain[1:]
attrs.append('$Domain="%s"' % domain)
if cookie.port is not None:
p = "$Port"
if cookie.port_specified:
p = p + ('="%s"' % cookie.port)
attrs.append(p)
return attrs
def add_cookie_header(self, request):
"""Add correct Cookie: header to request (urllib2.Request object).
The Cookie2 header is also added unless policy.hide_cookie2 is true.
The request object (usually a urllib2.Request instance) must support
the methods get_full_url, get_host, get_type, has_header, get_header,
header_items and add_unredirected_header, as documented by urllib2, and
the port attribute (the port number). Actually,
RequestUpgradeProcessor will automatically upgrade your Request object
to one with has_header, get_header, header_items and
add_unredirected_header, if it lacks those methods, for compatibility
with pre-2.4 versions of urllib2.
"""
debug("add_cookie_header")
self._policy._now = self._now = int(time.time())
req_host, erhn = eff_request_host(request)
strict_non_domain = (
self._policy.strict_ns_domain & self._policy.DomainStrictNonDomain)
cookies = self._cookies_for_request(request)
attrs = self._cookie_attrs(cookies)
if attrs:
if not request.has_header("Cookie"):
request.add_unredirected_header("Cookie", "; ".join(attrs))
# if necessary, advertise that we know RFC 2965
if self._policy.rfc2965 and not self._policy.hide_cookie2:
for cookie in cookies:
if cookie.version != 1 and not request.has_header("Cookie2"):
request.add_unredirected_header("Cookie2", '$Version="1"')
break
self.clear_expired_cookies()
def _normalized_cookie_tuples(self, attrs_set):
"""Return list of tuples containing normalised cookie information.
attrs_set is the list of lists of key,value pairs extracted from
the Set-Cookie or Set-Cookie2 headers.
Tuples are name, value, standard, rest, where name and value are the
cookie name and value, standard is a dictionary containing the standard
cookie-attributes (discard, secure, version, expires or max-age,
domain, path and port) and rest is a dictionary containing the rest of
the cookie-attributes.
"""
cookie_tuples = []
boolean_attrs = "discard", "secure"
value_attrs = ("version",
"expires", "max-age",
"domain", "path", "port",
"comment", "commenturl")
for cookie_attrs in attrs_set:
name, value = cookie_attrs[0]
# Build dictionary of standard cookie-attributes (standard) and
# dictionary of other cookie-attributes (rest).
# Note: expiry time is normalised to seconds since epoch. V0
# cookies should have the Expires cookie-attribute, and V1 cookies
# should have Max-Age, but since V1 includes RFC 2109 cookies (and
# since V0 cookies may be a mish-mash of Netscape and RFC 2109), we
# accept either (but prefer Max-Age).
max_age_set = False
bad_cookie = False
standard = {}
rest = {}
for k, v in cookie_attrs[1:]:
lc = k.lower()
# don't lose case distinction for unknown fields
if lc in value_attrs or lc in boolean_attrs:
k = lc
if k in boolean_attrs and v is None:
# boolean cookie-attribute is present, but has no value
# (like "discard", rather than "port=80")
v = True
if standard.has_key(k):
# only first value is significant
continue
if k == "domain":
if v is None:
debug(" missing value for domain attribute")
bad_cookie = True
break
# RFC 2965 section 3.3.3
v = v.lower()
if k == "expires":
if max_age_set:
# Prefer max-age to expires (like Mozilla)
continue
if v is None:
debug(" missing or invalid value for expires "
"attribute: treating as session cookie")
continue
if k == "max-age":
max_age_set = True
try:
v = int(v)
except ValueError:
debug(" missing or invalid (non-numeric) value for "
"max-age attribute")
bad_cookie = True
break
# convert RFC 2965 Max-Age to seconds since epoch
# XXX Strictly you're supposed to follow RFC 2616
# age-calculation rules. Remember that zero Max-Age is a
# request to discard (old and new) cookie, though.
k = "expires"
v = self._now + v
if (k in value_attrs) or (k in boolean_attrs):
if (v is None and
k not in ["port", "comment", "commenturl"]):
debug(" missing value for %s attribute" % k)
bad_cookie = True
break
standard[k] = v
else:
rest[k] = v
if bad_cookie:
continue
cookie_tuples.append((name, value, standard, rest))
return cookie_tuples
def _cookie_from_cookie_tuple(self, tup, request):
# standard is dict of standard cookie-attributes, rest is dict of the
# rest of them
name, value, standard, rest = tup
domain = standard.get("domain", Absent)
path = standard.get("path", Absent)
port = standard.get("port", Absent)
expires = standard.get("expires", Absent)
# set the easy defaults
version = standard.get("version", None)
if version is not None: version = int(version)
secure = standard.get("secure", False)
# (discard is also set if expires is Absent)
discard = standard.get("discard", False)
comment = standard.get("comment", None)
comment_url = standard.get("commenturl", None)
# set default path
if path is not Absent and path != "":
path_specified = True
path = escape_path(path)
else:
path_specified = False
path = request_path(request)
i = path.rfind("/")
if i != -1:
if version == 0:
# Netscape spec parts company from reality here
path = path[:i]
else:
path = path[:i+1]
if len(path) == 0: path = "/"
# set default domain
domain_specified = domain is not Absent
# but first we have to remember whether it starts with a dot
domain_initial_dot = False
if domain_specified:
domain_initial_dot = bool(domain.startswith("."))
if domain is Absent:
req_host, erhn = eff_request_host(request)
domain = erhn
elif not domain.startswith("."):
domain = "."+domain
# set default port
port_specified = False
if port is not Absent:
if port is None:
# Port attr present, but has no value: default to request port.
# Cookie should then only be sent back on that port.
port = request_port(request)
else:
port_specified = True
port = re.sub(r"\s+", "", port)
else:
# No port attr present. Cookie can be sent back on any port.
port = None
# set default expires and discard
if expires is Absent:
expires = None
discard = True
elif expires <= self._now:
# Expiry date in past is request to delete cookie. This can't be
# in DefaultCookiePolicy, because can't delete cookies there.
try:
self.clear(domain, path, name)
except KeyError:
pass
debug("Expiring cookie, domain='%s', path='%s', name='%s'",
domain, path, name)
return None
return Cookie(version,
name, value,
port, port_specified,
domain, domain_specified, domain_initial_dot,
path, path_specified,
secure,
expires,
discard,
comment,
comment_url,
rest)
def _cookies_from_attrs_set(self, attrs_set, request):
cookie_tuples = self._normalized_cookie_tuples(attrs_set)
diff --git a/mechanize/_http.py b/mechanize/_http.py
index 5075da4..39060bc 100644
--- a/mechanize/_http.py
+++ b/mechanize/_http.py
@@ -1,733 +1,734 @@
"""HTTP related handlers.
Note that some other HTTP handlers live in more specific modules: _auth.py,
_gzip.py, etc.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import copy, time, tempfile, htmlentitydefs, re, logging, socket, \
urllib2, urllib, httplib, sgmllib
from urllib2 import URLError, HTTPError, BaseHandler
from cStringIO import StringIO
from _request import Request
from _util import isstringlike
from _response import closeable_response, response_seek_wrapper
from _html import unescape, unescape_charref
from _headersutil import is_html
from _clientcookie import CookieJar, request_host
import _rfc3986
debug = logging.getLogger("mechanize").debug
# monkeypatch urllib2.HTTPError to show URL
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
# Implementation notes:
# To avoid the server sending us into an infinite loop, the request
# object needs to track what URLs we have already seen. Do this by
# adding a handler-specific attribute to the Request object. The value
# of the dict is used to count the number of times the same URL has
# been visited. This is needed because visiting the same URL twice
# does not necessarily imply a loop, thanks to state introduced by
# cookies.
# Always unhandled redirection codes:
# 300 Multiple Choices: should not handle this here.
# 304 Not Modified: no need to handle here: only of interest to caches
# that do conditional GETs
# 305 Use Proxy: probably not worth dealing with here
# 306 Unused: what was this for in the previous versions of protocol??
def redirect_request(self, newurl, req, fp, code, msg, headers):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a redirection
response is received. If a redirection should take place, return a
new Request to allow http_error_30x to perform the redirect;
otherwise, return None to indicate that an HTTPError should be
raised.
"""
if code in (301, 302, 303, "refresh") or \
(code == 307 and not req.has_data()):
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# XXX really refresh redirections should be visiting; tricky to
# fix, so this will wait until post-stable release
new = Request(newurl,
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True,
visit=False,
)
new._origin_req = getattr(req, "_origin_req", req)
return new
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if headers.has_key('location'):
newurl = headers.getheaders('location')[0]
elif headers.has_key('uri'):
newurl = headers.getheaders('uri')[0]
else:
return
newurl = _rfc3986.clean_url(newurl, "latin-1")
newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(newurl, req, fp, code, msg, headers)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new)
http_error_301 = http_error_303 = http_error_307 = http_error_302
http_error_refresh = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
escaped_attrs[key] = self.unescape_attr(val)
return escaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
try:
import HTMLParser
except ImportError:
pass
else:
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
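# Illustrative sketch, not part of the original source: parsing HTTP-EQUIV
# headers out of a small in-memory document with the HeadParser class
# defined above (StringIO is imported at the top of this module).
def _example_parse_head():
    html = ('<html><head>'
            '<meta http-equiv="Refresh" content="5; url=/next">'
            '</head><body></body></html>')
    assert (parse_head(StringIO(html), HeadParser()) ==
            [("Refresh", "5; url=/next")])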
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
- html_headers = parse_head(response, self.head_parser_class())
+ html_headers = parse_head(response,
+ self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
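# Illustrative sketch, not part of the original source: with the processor
# installed, META HTTP-EQUIV values appear as ordinary response headers.
# The URL is invented.
def _example_http_equiv_opener(url="http://example.com/"):
    opener = urllib2.build_opener(HTTPEquivProcessor())
    response = opener.open(url)
    return response.info().getheaders("refresh")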
class HTTPCookieProcessor(BaseHandler):
"""Handle HTTP cookies.
Public attributes:
cookiejar: CookieJar instance
"""
def __init__(self, cookiejar=None):
if cookiejar is None:
cookiejar = CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
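# Illustrative sketch, not part of the original source: passing in an
# explicit CookieJar keeps the extracted cookies inspectable after the
# request.  The URL is invented.
def _example_cookie_opener(url="http://example.com/"):
    jar = CookieJar()
    opener = urllib2.build_opener(HTTPCookieProcessor(jar))
    opener.open(url)                        # Set-Cookie headers land in jar
    return [cookie.name for cookie in jar]  # CookieJar is iterable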
try:
import robotparser
except ImportError:
pass
else:
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
import _opener
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
def set_opener(self, opener=None):
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
robotparser._debug("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
robotparser._debug("disallow all")
elif status >= 400:
self.allow_all = True
robotparser._debug("allow all")
elif status == 200 and lines:
robotparser._debug("parse lines")
self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
def __init__(self, request, *args):
apply(urllib2.HTTPError.__init__, (self,)+args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
except:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
This only makes sense if you use each RefererProcessor for a single
chain of requests (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
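# Illustrative sketch, not part of the original source: each opened URL
# becomes the Referer for the next request, which is why one instance must
# not be shared across unrelated request chains.  The URLs are invented.
def _example_referer_chain():
    opener = urllib2.build_opener(HTTPRefererProcessor())
    opener.open("http://example.com/")       # no Referer header yet
    opener.open("http://example.com/page2")  # Referer: http://example.com/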
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
else:
debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
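# Illustrative sketch, not part of the original source: accept Refresh
# headers with pauses of up to ten seconds, redirecting immediately instead
# of sleeping.
def _example_refresh_opener():
    return urllib2.build_opener(
        HTTPRefreshProcessor(max_time=10, honor_time=False))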
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses.
The purpose of this handler is to allow other response processors a
look-in by removing the call to parent.error() from
AbstractHTTPHandler.
For non-200 error codes, this just passes the job on to the
Handler.<proto>_error_<code> methods, via the OpenerDirector.error
method. Eventually, urllib2.HTTPDefaultErrorHandler will raise an
HTTPError if no other handler handles the error.
"""
handler_order = 1000 # after all other processors
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code != 200:
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
# why these error methods took the code, msg, headers args in the first
# place rather than a response object, I don't know, but to avoid
# multiple wrapping, we're discarding them
if isinstance(fp, urllib2.HTTPError):
response = fp
else:
response = urllib2.HTTPError(
req.get_full_url(), code, msg, hdrs, fp)
assert code == response.code
assert msg == response.msg
assert hdrs == response.hdrs
raise response
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
scheme, sel = urllib.splittype(request.get_selector())
sel_host, sel_path = urllib.splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host or host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
h = http_class(host) # will parse host:port
h.set_debuglevel(self._debuglevel)
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
[(name.title(), val) for name, val in headers.items()])
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
r = h.getresponse()
except socket.error, err: # XXX what error?
raise URLError(err)
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r)
resp = closeable_response(fp, r.msg, req.get_full_url(),
r.status, r.reason)
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSConnectionFactory:
def __init__(self, key_file, cert_file):
self._key_file = key_file
self._cert_file = cert_file
def __call__(self, hostport):
return httplib.HTTPSConnection(
hostport,
key_file=self._key_file, cert_file=self._cert_file)
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, client_cert_manager=None):
AbstractHTTPHandler.__init__(self)
self.client_cert_manager = client_cert_manager
def https_open(self, req):
if self.client_cert_manager is not None:
key_file, cert_file = self.client_cert_manager.find_key_cert(
req.get_full_url())
conn_factory = HTTPSConnectionFactory(key_file, cert_file)
else:
conn_factory = httplib.HTTPSConnection
return self.do_open(conn_factory, req)
https_request = AbstractHTTPHandler.do_request_
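# Illustrative sketch, not part of the original source: client_cert_manager
# only needs a find_key_cert(url) method returning a (key_file, cert_file)
# pair; this trivial manager hands out one pair for every URL.  The file
# names are invented.
class _SingleKeyCertManager:
    def __init__(self, key_file="client.key", cert_file="client.crt"):
        self._pair = (key_file, cert_file)
    def find_key_cert(self, url):
        return self._pair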
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index 5fc8ff8..b7f559a 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,672 +1,672 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import urllib2, sys, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
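# Illustrative sketch, not part of the original source: this is how
# Browser.open_local_file() below turns a filename into a file: URL.
def _example_file_url(filename="/tmp/page.html"):
    return "file://" + sanepathname2url(os.path.abspath(filename))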
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - eg., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
request_class: Request class to use. Defaults to mechanize.Request
for Pythons older than 2.4, urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
Browser state (including request, response, history, forms and links)
is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
- The "global" form object contains all controls that are not descendants of
- any FORM element.
+ The "global" form object contains all controls that are not descendants
+ of any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
r"""Return title, or None if there is no title element in the document.
Treatment of any tag children of the title element attempts to follow
Firefox and IE
(currently, tags are preserved).
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
first). Note that form 0 is the first form matching all the other
arguments (if supplied); it is not necessarily the first form in the
document. The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
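    # A minimal usage sketch for select_form (the URL, form name and field
    # names below are hypothetical, not part of mechanize):
    #
    #   import mechanize
    #   br = mechanize.Browser()
    #   br.open("http://example.com/login")
    #   br.select_form(name="login")    # or nr=0, or predicate=lambda f: ...
    #   br["user"] = "joe"              # attribute pass-through to HTMLForm
    #   br["password"] = "secret"
    #   response = br.submit()          # submit the selected form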
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for ClientForm.HTMLForm.click().
Return value is same as for Browser.open().
"""
return self.open(self.click(*args, **kwds))
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: eg. <a href="blah">this bit</a> (as
returned by pullparser.get_compressed_text(), ie. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
        text_regex: link text between tags (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, eg. "a"
        predicate: a function taking a Link object as its single argument,
        returning a boolean result, indicating whether the link matched
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
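    # A minimal sketch of find_link/follow_link (page URL and link text
    # are hypothetical; any page with a matching link would do):
    #
    #   import re, mechanize
    #   br = mechanize.Browser()
    #   br.open("http://example.com/")
    #   link = br.find_link(text_regex=re.compile("download", re.I))
    #   print link.url, link.text
    #   response = br.follow_link(link)   # fetches link.absolute_url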
def __getattr__(self, name):
# pass through ClientForm / DOMForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
found_links = []
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
diff --git a/mechanize/_msiecookiejar.py b/mechanize/_msiecookiejar.py
index f590a84..3a6602a 100644
--- a/mechanize/_msiecookiejar.py
+++ b/mechanize/_msiecookiejar.py
@@ -1,387 +1,388 @@
"""Microsoft Internet Explorer cookie loading on Windows.
Copyright 2002-2003 Johnny Lee <[email protected]> (MSIE Perl code)
Copyright 2002-2006 John J Lee <[email protected]> (The Python port)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX names and comments are not great here
import os, re, time, struct, logging
if os.name == "nt":
import _winreg
from _clientcookie import FileCookieJar, CookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("mechanize").debug
def regload(path, leaf):
key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
_winreg.KEY_ALL_ACCESS)
try:
value = _winreg.QueryValueEx(key, leaf)[0]
except WindowsError:
value = None
return value
WIN32_EPOCH = 0x019db1ded53e8000L # 1970 Jan 01 00:00:00 in Win32 FILETIME
def epoch_time_offset_from_win32_filetime(filetime):
"""Convert from win32 filetime to seconds-since-epoch value.
MSIE stores create and expire times as Win32 FILETIME, which is 64
bits of 100 nanosecond intervals since Jan 01 1601.
mechanize expects time in 32-bit value expressed in seconds since the
epoch (Jan 01 1970).
"""
if filetime < WIN32_EPOCH:
raise ValueError("filetime (%d) is before epoch (%d)" %
(filetime, WIN32_EPOCH))
return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
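# For example, one day past the Unix epoch expressed as a Win32 FILETIME
# (100ns ticks since 1601-01-01) converts to exactly 86400 seconds:
#
#   one_day_in_ticks = 86400 * 10000000L
#   assert epoch_time_offset_from_win32_filetime(
#       WIN32_EPOCH + one_day_in_ticks) == 86400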
def binary_to_char(c): return "%02X" % ord(c)
def binary_to_str(d): return "".join(map(binary_to_char, list(d)))
class MSIEBase:
magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
padding = "\x0d\xf0\xad\x0b"
msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
"(.+\@[\x21-\xFF]+\.txt)")
# path under HKEY_CURRENT_USER from which to get location of index.dat
reg_path = r"software\microsoft\windows" \
r"\currentversion\explorer\shell folders"
reg_key = "Cookies"
def __init__(self):
self._delayload_domains = {}
def _delayload_domain(self, domain):
# if necessary, lazily load cookies for this domain
delayload_info = self._delayload_domains.get(domain)
if delayload_info is not None:
cookie_file, ignore_discard, ignore_expires = delayload_info
try:
self.load_cookie_data(cookie_file,
ignore_discard, ignore_expires)
except (LoadError, IOError):
debug("error reading cookie file, skipping: %s", cookie_file)
else:
del self._delayload_domains[domain]
def _load_cookies_from_file(self, filename):
debug("Loading MSIE cookies file: %s", filename)
cookies = []
cookies_fh = open(filename)
try:
while 1:
key = cookies_fh.readline()
if key == "": break
rl = cookies_fh.readline
def getlong(rl=rl): return long(rl().rstrip())
def getstr(rl=rl): return rl().rstrip()
key = key.rstrip()
value = getstr()
domain_path = getstr()
flags = getlong() # 0x2000 bit is for secure I think
lo_expire = getlong()
hi_expire = getlong()
lo_create = getlong()
hi_create = getlong()
sep = getstr()
if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
hi_create, lo_create, sep) or (sep != "*"):
break
m = self.msie_domain_re.search(domain_path)
if m:
domain = m.group(1)
path = m.group(2)
- cookies.append({"KEY": key, "VALUE": value, "DOMAIN": domain,
- "PATH": path, "FLAGS": flags, "HIXP": hi_expire,
+ cookies.append({"KEY": key, "VALUE": value,
+ "DOMAIN": domain, "PATH": path,
+ "FLAGS": flags, "HIXP": hi_expire,
"LOXP": lo_expire, "HICREATE": hi_create,
"LOCREATE": lo_create})
finally:
cookies_fh.close()
return cookies
def load_cookie_data(self, filename,
ignore_discard=False, ignore_expires=False):
"""Load cookies from file containing actual cookie data.
Old cookies are kept unless overwritten by newly loaded ones.
You should not call this method if the delayload attribute is set.
I think each of these files contain all cookies for one user, domain,
and path.
filename: file containing cookies -- usually found in a file like
C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt
"""
now = int(time.time())
cookie_data = self._load_cookies_from_file(filename)
for cookie in cookie_data:
flags = cookie["FLAGS"]
secure = ((flags & 0x2000) != 0)
filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
expires = epoch_time_offset_from_win32_filetime(filetime)
if expires < now:
discard = True
else:
discard = False
domain = cookie["DOMAIN"]
initial_dot = domain.startswith(".")
if initial_dot:
domain_specified = True
else:
# MSIE 5 does not record whether the domain cookie-attribute
# was specified.
# Assuming it wasn't is conservative, because with strict
# domain matching this will match less frequently; with regular
# Netscape tail-matching, this will match at exactly the same
# times that domain_specified = True would. It also means we
# don't have to prepend a dot to achieve consistency with our
# own & Mozilla's domain-munging scheme.
domain_specified = False
# assume path_specified is false
# XXX is there other stuff in here? -- eg. comment, commentURL?
c = Cookie(0,
cookie["KEY"], cookie["VALUE"],
None, False,
domain, domain_specified, initial_dot,
cookie["PATH"], False,
secure,
expires,
discard,
None,
None,
{"flags": flags})
if not ignore_discard and c.discard:
continue
if not ignore_expires and c.is_expired(now):
continue
CookieJar.set_cookie(self, c)
def load_from_registry(self, ignore_discard=False, ignore_expires=False,
username=None):
"""
username: only required on win9x
"""
cookies_dir = regload(self.reg_path, self.reg_key)
filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
self.load(filename, ignore_discard, ignore_expires, username)
def _really_load(self, index, filename, ignore_discard, ignore_expires,
username):
now = int(time.time())
if username is None:
username = os.environ['USERNAME'].lower()
cookie_dir = os.path.dirname(filename)
data = index.read(256)
if len(data) != 256:
raise LoadError("%s file is too short" % filename)
# Cookies' index.dat file starts with 32 bytes of signature
# followed by an offset to the first record, stored as a little-
# endian DWORD.
sig, size, data = data[:32], data[32:36], data[36:]
size = struct.unpack("<L", size)[0]
# check that sig is valid
if not self.magic_re.match(sig) or size != 0x4000:
raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
(str(filename), sig, size))
# skip to start of first record
index.seek(size, 0)
sector = 128 # size of sector in bytes
while 1:
data = ""
# Cookies are usually in two contiguous sectors, so read in two
# sectors and adjust if not a Cookie.
to_read = 2 * sector
d = index.read(to_read)
if len(d) != to_read:
break
data = data + d
# Each record starts with a 4-byte signature and a count
# (little-endian DWORD) of sectors for the record.
sig, size, data = data[:4], data[4:8], data[8:]
size = struct.unpack("<L", size)[0]
to_read = (size - 2) * sector
## from urllib import quote
## print "data", quote(data)
## print "sig", quote(sig)
## print "size in sectors", size
## print "size in bytes", size*sector
## print "size in units of 16 bytes", (size*sector) / 16
## print "size to read in bytes", to_read
## print
if sig != "URL ":
                # note: the message must be the second operand of assert;
                # wrapping (expr, msg) in parens makes a tuple, which is
                # always true
                assert sig in ("HASH", "LEAK",
                               self.padding, "\x00\x00\x00\x00"), \
                    "unrecognized MSIE index.dat record: %s" % \
                    binary_to_str(sig)
if sig == "\x00\x00\x00\x00":
# assume we've got all the cookies, and stop
break
if sig == self.padding:
continue
# skip the rest of this record
assert to_read >= 0
if size != 2:
assert to_read != 0
index.seek(to_read, 1)
continue
# read in rest of record if necessary
if size > 2:
more_data = index.read(to_read)
if len(more_data) != to_read: break
data = data + more_data
cookie_re = ("Cookie\:%s\@([\x21-\xFF]+).*?" % username +
"(%s\@[\x21-\xFF]+\.txt)" % username)
m = re.search(cookie_re, data, re.I)
if m:
cookie_file = os.path.join(cookie_dir, m.group(2))
if not self.delayload:
try:
self.load_cookie_data(cookie_file,
ignore_discard, ignore_expires)
except (LoadError, IOError):
debug("error reading cookie file, skipping: %s",
cookie_file)
else:
domain = m.group(1)
i = domain.find("/")
if i != -1:
domain = domain[:i]
self._delayload_domains[domain] = (
cookie_file, ignore_discard, ignore_expires)
class MSIECookieJar(MSIEBase, FileCookieJar):
"""FileCookieJar that reads from the Windows MSIE cookies database.
MSIECookieJar can read the cookie files of Microsoft Internet Explorer
(MSIE) for Windows version 5 on Windows NT and version 6 on Windows XP and
Windows 98. Other configurations may also work, but are untested. Saving
cookies in MSIE format is NOT supported. If you save cookies, they'll be
in the usual Set-Cookie3 format, which you can read back in using an
instance of the plain old CookieJar class. Don't save using the same
filename that you loaded cookies from, because you may succeed in
clobbering your MSIE cookies index file!
    You should be able to have mechanize share Internet Explorer's cookies like
this (note you need to supply a username to load_from_registry if you're on
Windows 9x or Windows ME):
cj = MSIECookieJar(delayload=1)
# find cookies index file in registry and load cookies from it
cj.load_from_registry()
opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
response = opener.open("http://example.com/")
Iterating over a delayloaded MSIECookieJar instance will not cause any
cookies to be read from disk. To force reading of all cookies from disk,
call read_all_cookies. Note that the following methods iterate over self:
clear_temporary_cookies, clear_expired_cookies, __len__, __repr__, __str__
and as_string.
Additional methods:
load_from_registry(ignore_discard=False, ignore_expires=False,
username=None)
load_cookie_data(filename, ignore_discard=False, ignore_expires=False)
read_all_cookies()
"""
def __init__(self, filename=None, delayload=False, policy=None):
MSIEBase.__init__(self)
FileCookieJar.__init__(self, filename, delayload, policy)
def set_cookie(self, cookie):
if self.delayload:
self._delayload_domain(cookie.domain)
CookieJar.set_cookie(self, cookie)
def _cookies_for_request(self, request):
"""Return a list of cookies to be returned to server."""
domains = self._cookies.copy()
domains.update(self._delayload_domains)
domains = domains.keys()
cookies = []
for domain in domains:
cookies.extend(self._cookies_for_domain(domain, request))
return cookies
def _cookies_for_domain(self, domain, request):
if not self._policy.domain_return_ok(domain, request):
return []
debug("Checking %s for cookies to return", domain)
if self.delayload:
self._delayload_domain(domain)
return CookieJar._cookies_for_domain(self, domain, request)
def read_all_cookies(self):
"""Eagerly read in all cookies."""
if self.delayload:
for domain in self._delayload_domains.keys():
self._delayload_domain(domain)
def load(self, filename, ignore_discard=False, ignore_expires=False,
username=None):
"""Load cookies from an MSIE 'index.dat' cookies index file.
filename: full path to cookie index file
username: only required on win9x
"""
if filename is None:
if self.filename is not None: filename = self.filename
else: raise ValueError(MISSING_FILENAME_TEXT)
index = open(filename, "rb")
try:
self._really_load(index, filename, ignore_discard, ignore_expires,
username)
finally:
index.close()
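if __name__ == "__main__":
    # Smoke-test sketch; requires a Windows machine with an MSIE cookie
    # store (domains and cookie names will vary per machine).
    if os.name == "nt":
        cj = MSIECookieJar(delayload=True)
        cj.load_from_registry()   # pass username=... on Win9x/ME
        cj.read_all_cookies()     # force delayloaded domains off disk
        for cookie in cj:
            print cookie.domain, cookie.name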
diff --git a/mechanize/_pullparser.py b/mechanize/_pullparser.py
index 4c1820c..4d8d9d3 100644
--- a/mechanize/_pullparser.py
+++ b/mechanize/_pullparser.py
@@ -1,389 +1,390 @@
"""A simple "pull API" for HTML parsing, after Perl's HTML::TokeParser.
Examples
This program extracts all links from a document. It will print one
line for each link, containing the URL and the textual description
between the <A>...</A> tags:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
for token in p.tags("a"):
if token.type == "endtag": continue
url = dict(token.attrs).get("href", "-")
text = p.get_compressed_text(endat=("endtag", "a"))
print "%s\t%s" % (url, text)
This program extracts the <TITLE> from the document:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
if p.get_tag("title"):
title = p.get_compressed_text()
print "Title: %s" % title
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 1998-2001 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses.
"""
import re, htmlentitydefs
import sgmllib, HTMLParser
from xml.sax import saxutils
from _html import unescape, unescape_charref
class NoMoreTokensError(Exception): pass
class Token:
"""Represents an HTML tag, declaration, processing instruction etc.
Behaves as both a tuple-like object (ie. iterable) and has attributes
.type, .data and .attrs.
>>> t = Token("starttag", "a", [("href", "http://www.python.org/")])
>>> t == ("starttag", "a", [("href", "http://www.python.org/")])
True
>>> (t.type, t.data) == ("starttag", "a")
True
>>> t.attrs == [("href", "http://www.python.org/")]
True
Public attributes
type: one of "starttag", "endtag", "startendtag", "charref", "entityref",
"data", "comment", "decl", "pi", after the corresponding methods of
HTMLParser.HTMLParser
data: For a tag, the tag name; otherwise, the relevant data carried by the
tag, as a string
attrs: list of (name, value) pairs representing HTML attributes
(or None if token does not represent an opening tag)
"""
def __init__(self, type, data, attrs=None):
self.type = type
self.data = data
self.attrs = attrs
def __iter__(self):
return iter((self.type, self.data, self.attrs))
def __eq__(self, other):
type, data, attrs = other
if (self.type == type and
self.data == data and
self.attrs == attrs):
return True
else:
return False
def __ne__(self, other): return not self.__eq__(other)
def __repr__(self):
args = ", ".join(map(repr, [self.type, self.data, self.attrs]))
return self.__class__.__name__+"(%s)" % args
def __str__(self):
"""
>>> print Token("starttag", "br")
<br>
>>> print Token("starttag", "a",
... [("href", "http://www.python.org/"), ("alt", '"foo"')])
<a href="http://www.python.org/" alt='"foo"'>
>>> print Token("startendtag", "br")
<br />
>>> print Token("startendtag", "br", [("spam", "eggs")])
<br spam="eggs" />
>>> print Token("endtag", "p")
</p>
>>> print Token("charref", "38")
        &#38;
>>> print Token("entityref", "amp")
        &amp;
>>> print Token("data", "foo\\nbar")
foo
bar
>>> print Token("comment", "Life is a bowl\\nof cherries.")
<!--Life is a bowl
of cherries.-->
>>> print Token("decl", "decl")
<!decl>
>>> print Token("pi", "pi")
<?pi>
"""
if self.attrs is not None:
attrs = "".join([" %s=%s" % (k, saxutils.quoteattr(v)) for
k, v in self.attrs])
else:
attrs = ""
if self.type == "starttag":
return "<%s%s>" % (self.data, attrs)
elif self.type == "startendtag":
return "<%s%s />" % (self.data, attrs)
elif self.type == "endtag":
return "</%s>" % self.data
elif self.type == "charref":
return "&#%s;" % self.data
elif self.type == "entityref":
return "&%s;" % self.data
elif self.type == "data":
return self.data
elif self.type == "comment":
return "<!--%s-->" % self.data
elif self.type == "decl":
return "<!%s>" % self.data
elif self.type == "pi":
return "<?%s>" % self.data
assert False
def iter_until_exception(fn, exception, *args, **kwds):
while 1:
try:
yield fn(*args, **kwds)
except exception:
raise StopIteration
class _AbstractParser:
chunk = 1024
compress_re = re.compile(r"\s+")
def __init__(self, fh, textify={"img": "alt", "applet": "alt"},
encoding="ascii", entitydefs=None):
"""
fh: file-like object (only a .read() method is required) from which to
read HTML to be parsed
textify: mapping used by .get_text() and .get_compressed_text() methods
to represent opening tags as text
encoding: encoding used to encode numeric character references by
.get_text() and .get_compressed_text() ("ascii" by default)
entitydefs: mapping like {"amp": "&", ...} containing HTML entity
definitions (a sensible default is used). This is used to unescape
entities in .get_text() (and .get_compressed_text()) and attribute
values. If the encoding can not represent the character, the entity
reference is left unescaped. Note that entity references (both
        numeric - e.g. &#123; or &#xabc; - and non-numeric - e.g. &amp;) are
unescaped in attribute values and the return value of .get_text(), but
not in data outside of tags. Instead, entity references outside of
tags are represented as tokens. This is a bit odd, it's true :-/
If the element name of an opening tag matches a key in the textify
mapping then that tag is converted to text. The corresponding value is
used to specify which tag attribute to obtain the text from. textify
maps from element names to either:
- an HTML attribute name, in which case the HTML attribute value is
used as its text value along with the element name in square
brackets (eg."alt text goes here[IMG]", or, if the alt attribute
were missing, just "[IMG]")
- a callable object (eg. a function) which takes a Token and returns
the string to be used as its text value
If textify has no key for an element name, nothing is substituted for
the opening tag.
Public attributes:
encoding and textify: see above
"""
self._fh = fh
self._tokenstack = [] # FIFO
self.textify = textify
self.encoding = encoding
if entitydefs is None:
entitydefs = htmlentitydefs.name2codepoint
self._entitydefs = entitydefs
def __iter__(self): return self
def tags(self, *names):
return iter_until_exception(self.get_tag, NoMoreTokensError, *names)
def tokens(self, *tokentypes):
- return iter_until_exception(self.get_token, NoMoreTokensError, *tokentypes)
+ return iter_until_exception(self.get_token, NoMoreTokensError,
+ *tokentypes)
def next(self):
try:
return self.get_token()
except NoMoreTokensError:
raise StopIteration()
def get_token(self, *tokentypes):
"""Pop the next Token object from the stack of parsed tokens.
If arguments are given, they are taken to be token types in which the
caller is interested: tokens representing other elements will be
skipped. Element names must be given in lower case.
Raises NoMoreTokensError.
"""
while 1:
while self._tokenstack:
token = self._tokenstack.pop(0)
if tokentypes:
if token.type in tokentypes:
return token
else:
return token
data = self._fh.read(self.chunk)
if not data:
raise NoMoreTokensError()
self.feed(data)
def unget_token(self, token):
"""Push a Token back onto the stack."""
self._tokenstack.insert(0, token)
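    # A small sketch of get_token/unget_token round-tripping (StringIO
    # stands in for any object with a .read() method):
    #
    #   from StringIO import StringIO
    #   p = PullParser(StringIO("<p>Hi</p>"))
    #   tok = p.get_token()           # Token("starttag", "p", [])
    #   p.unget_token(tok)            # push it back onto the stack...
    #   assert p.get_token() == tok   # ...so the next read returns it again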
def get_tag(self, *names):
"""Return the next Token that represents an opening or closing tag.
If arguments are given, they are taken to be element names in which the
caller is interested: tags representing other elements will be skipped.
Element names must be given in lower case.
Raises NoMoreTokensError.
"""
while 1:
tok = self.get_token()
if tok.type not in ["starttag", "endtag", "startendtag"]:
continue
if names:
if tok.data in names:
return tok
else:
return tok
def get_text(self, endat=None):
"""Get some text.
endat: stop reading text at this tag (the tag is included in the
        returned text); endat is a tuple (type, name) where type is
"starttag", "endtag" or "startendtag", and name is the element name of
the tag (element names must be given in lower case)
If endat is not given, .get_text() will stop at the next opening or
closing tag, or when there are no more tokens (no exception is raised).
Note that .get_text() includes the text representation (if any) of the
opening tag, but pushes the opening tag back onto the stack. As a
result, if you want to call .get_text() again, you need to call
.get_tag() first (unless you want an empty string returned when you
next call .get_text()).
Entity references are translated using the value of the entitydefs
constructor argument (a mapping from names to characters like that
provided by the standard module htmlentitydefs). Named entity
references that are not in this mapping are left unchanged.
The textify attribute is used to translate opening tags into text: see
the class docstring.
"""
text = []
tok = None
while 1:
try:
tok = self.get_token()
except NoMoreTokensError:
# unget last token (not the one we just failed to get)
if tok: self.unget_token(tok)
break
if tok.type == "data":
text.append(tok.data)
elif tok.type == "entityref":
t = unescape("&%s;"%tok.data, self._entitydefs, self.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, self.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type in ["starttag", "startendtag"]:
alt = self.textify.get(tag_name)
if alt is not None:
if callable(alt):
text.append(alt(tok))
elif tok.attrs is not None:
for k, v in tok.attrs:
if k == alt:
text.append(v)
text.append("[%s]" % tag_name.upper())
if endat is None or endat == (tok.type, tag_name):
self.unget_token(tok)
break
return "".join(text)
def get_compressed_text(self, *args, **kwds):
"""
As .get_text(), but collapses each group of contiguous whitespace to a
single space character, and removes all initial and trailing
whitespace.
"""
text = self.get_text(*args, **kwds)
text = text.strip()
return self.compress_re.sub(" ", text)
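    # A sketch combining get_tag, get_compressed_text and the textify
    # treatment of img tags (the input HTML is made up):
    #
    #   from StringIO import StringIO
    #   p = PullParser(StringIO('<title>A  title</title><img alt="logo">'))
    #   p.get_tag("title")            # consume the opening tag
    #   p.get_compressed_text(endat=("endtag", "title"))   # -> 'A title'
    #   p.get_tag()                   # pop the pushed-back end tag
    #   p.get_text()                  # -> 'logo[IMG]'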
def handle_startendtag(self, tag, attrs):
self._tokenstack.append(Token("startendtag", tag, attrs))
def handle_starttag(self, tag, attrs):
self._tokenstack.append(Token("starttag", tag, attrs))
def handle_endtag(self, tag):
self._tokenstack.append(Token("endtag", tag))
def handle_charref(self, name):
self._tokenstack.append(Token("charref", name))
def handle_entityref(self, name):
self._tokenstack.append(Token("entityref", name))
def handle_data(self, data):
self._tokenstack.append(Token("data", data))
def handle_comment(self, data):
self._tokenstack.append(Token("comment", data))
def handle_decl(self, decl):
self._tokenstack.append(Token("decl", decl))
def unknown_decl(self, data):
# XXX should this call self.error instead?
#self.error("unknown declaration: " + `data`)
self._tokenstack.append(Token("decl", data))
def handle_pi(self, data):
self._tokenstack.append(Token("pi", data))
def unescape_attr(self, name):
return unescape(name, self._entitydefs, self.encoding)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
escaped_attrs.append((key, self.unescape_attr(val)))
return escaped_attrs
class PullParser(_AbstractParser, HTMLParser.HTMLParser):
def __init__(self, *args, **kwds):
HTMLParser.HTMLParser.__init__(self)
_AbstractParser.__init__(self, *args, **kwds)
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
class TolerantPullParser(_AbstractParser, sgmllib.SGMLParser):
def __init__(self, *args, **kwds):
sgmllib.SGMLParser.__init__(self)
_AbstractParser.__init__(self, *args, **kwds)
def unknown_starttag(self, tag, attrs):
attrs = self.unescape_attrs(attrs)
self._tokenstack.append(Token("starttag", tag, attrs))
def unknown_endtag(self, tag):
self._tokenstack.append(Token("endtag", tag))
def _test():
import doctest, _pullparser
return doctest.testmod(_pullparser)
if __name__ == "__main__":
_test()
diff --git a/mechanize/_useragent.py b/mechanize/_useragent.py
index 0bec126..766ac35 100644
--- a/mechanize/_useragent.py
+++ b/mechanize/_useragent.py
@@ -1,347 +1,348 @@
"""Convenient HTTP UserAgent class.
This is a subclass of urllib2.OpenerDirector.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import sys, warnings, urllib2
import _opener
import _urllib2
import _auth
import _gzip
import _response
class UserAgentBase(_opener.OpenerDirector):
"""Convenient user-agent class.
Do not use .add_handler() to add a handler for something already dealt with
by this code.
The only reason at present for the distinction between UserAgent and
UserAgentBase is so that classes that depend on .seek()able responses
(e.g. mechanize.Browser) can inherit from UserAgentBase. The subclass
UserAgent exposes a .set_seekable_responses() method that allows switching
off the adding of a .seek() method to responses.
Public attributes:
addheaders: list of (name, value) pairs specifying headers to send with
every request, unless they are overridden in the Request instance.
>>> ua = UserAgentBase()
>>> ua.addheaders = [
... ("User-agent", "Mozilla/5.0 (compatible)"),
... ("From", "[email protected]")]
"""
handler_classes = {
# scheme handlers
"http": _urllib2.HTTPHandler,
# CacheFTPHandler is buggy, at least in 2.3, so we don't use it
"ftp": _urllib2.FTPHandler,
"file": _urllib2.FileHandler,
# other handlers
"_unknown": _urllib2.UnknownHandler,
# HTTP{S,}Handler depend on HTTPErrorProcessor too
"_http_error": _urllib2.HTTPErrorProcessor,
"_http_request_upgrade": _urllib2.HTTPRequestUpgradeProcessor,
"_http_default_error": _urllib2.HTTPDefaultErrorHandler,
# feature handlers
"_basicauth": _urllib2.HTTPBasicAuthHandler,
"_digestauth": _urllib2.HTTPDigestAuthHandler,
"_redirect": _urllib2.HTTPRedirectHandler,
"_cookies": _urllib2.HTTPCookieProcessor,
"_refresh": _urllib2.HTTPRefreshProcessor,
"_equiv": _urllib2.HTTPEquivProcessor,
"_proxy": _urllib2.ProxyHandler,
"_proxy_basicauth": _urllib2.ProxyBasicAuthHandler,
"_proxy_digestauth": _urllib2.ProxyDigestAuthHandler,
"_robots": _urllib2.HTTPRobotRulesProcessor,
"_gzip": _gzip.HTTPGzipProcessor, # experimental!
# debug handlers
"_debug_redirect": _urllib2.HTTPRedirectDebugProcessor,
"_debug_response_body": _urllib2.HTTPResponseDebugProcessor,
}
default_schemes = ["http", "ftp", "file"]
default_others = ["_unknown", "_http_error", "_http_request_upgrade",
"_http_default_error",
]
default_features = ["_redirect", "_cookies",
"_refresh", "_equiv",
"_basicauth", "_digestauth",
"_proxy", "_proxy_basicauth", "_proxy_digestauth",
"_robots",
]
if hasattr(_urllib2, 'HTTPSHandler'):
handler_classes["https"] = _urllib2.HTTPSHandler
default_schemes.append("https")
def __init__(self):
_opener.OpenerDirector.__init__(self)
ua_handlers = self._ua_handlers = {}
for scheme in (self.default_schemes+
self.default_others+
self.default_features):
klass = self.handler_classes[scheme]
ua_handlers[scheme] = klass()
for handler in ua_handlers.itervalues():
self.add_handler(handler)
# Yuck.
# Ensure correct default constructor args were passed to
# HTTPRefreshProcessor and HTTPEquivProcessor.
if "_refresh" in ua_handlers:
self.set_handle_refresh(True)
if "_equiv" in ua_handlers:
self.set_handle_equiv(True)
# Ensure default password managers are installed.
pm = ppm = None
if "_basicauth" in ua_handlers or "_digestauth" in ua_handlers:
pm = _urllib2.HTTPPasswordMgrWithDefaultRealm()
if ("_proxy_basicauth" in ua_handlers or
"_proxy_digestauth" in ua_handlers):
ppm = _auth.HTTPProxyPasswordMgr()
self.set_password_manager(pm)
self.set_proxy_password_manager(ppm)
# set default certificate manager
if "https" in ua_handlers:
cm = _urllib2.HTTPSClientCertMgr()
self.set_client_cert_manager(cm)
def close(self):
_opener.OpenerDirector.close(self)
self._ua_handlers = None
# XXX
## def set_timeout(self, timeout):
## self._timeout = timeout
## def set_http_connection_cache(self, conn_cache):
## self._http_conn_cache = conn_cache
## def set_ftp_connection_cache(self, conn_cache):
## # XXX ATM, FTP has cache as part of handler; should it be separate?
## self._ftp_conn_cache = conn_cache
def set_handled_schemes(self, schemes):
"""Set sequence of URL scheme (protocol) strings.
For example: ua.set_handled_schemes(["http", "ftp"])
If this fails (with ValueError) because you've passed an unknown
scheme, the set of handled schemes will not be changed.
"""
want = {}
for scheme in schemes:
if scheme.startswith("_"):
raise ValueError("not a scheme '%s'" % scheme)
if scheme not in self.handler_classes:
raise ValueError("unknown scheme '%s'")
want[scheme] = None
# get rid of scheme handlers we don't want
for scheme, oldhandler in self._ua_handlers.items():
if scheme.startswith("_"): continue # not a scheme handler
if scheme not in want:
self._replace_handler(scheme, None)
else:
del want[scheme] # already got it
# add the scheme handlers that are missing
for scheme in want.keys():
self._set_handler(scheme, True)
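    # For example, to restrict a user-agent to HTTP(S) only ("https" is
    # available only when Python was built with SSL support):
    #
    #   ua = UserAgentBase()
    #   ua.set_handled_schemes(["http", "https"])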
def set_cookiejar(self, cookiejar):
"""Set a mechanize.CookieJar, or None."""
self._set_handler("_cookies", obj=cookiejar)
# XXX could use Greg Stein's httpx for some of this instead?
# or httplib2??
def set_proxies(self, proxies):
"""Set a dictionary mapping URL scheme to proxy specification, or None.
e.g. {"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com"}
"""
self._set_handler("_proxy", obj=proxies)
def add_password(self, url, user, password, realm=None):
self._password_manager.add_password(realm, url, user, password)
def add_proxy_password(self, user, password, hostport=None, realm=None):
self._proxy_password_manager.add_password(
realm, hostport, user, password)
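    # A sketch combining proxy and site credentials (the hosts, users and
    # passwords below are made up):
    #
    #   ua = UserAgentBase()
    #   ua.set_proxies({"http": "joe:[email protected]:3128"})
    #   ua.add_proxy_password("joe", "password")
    #   ua.add_password("http://example.com/admin/", "admin", "s3cret")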
def add_client_certificate(self, url, key_file, cert_file):
"""Add an SSL client certificate, for HTTPS client auth.
key_file and cert_file must be filenames of the key and certificate
files, in PEM format. You can use e.g. OpenSSL to convert a p12 (PKCS
12) file to PEM format:
openssl pkcs12 -clcerts -nokeys -in cert.p12 -out cert.pem
openssl pkcs12 -nocerts -in cert.p12 -out key.pem
Note that client certificate password input is very inflexible ATM. At
the moment this seems to be console only, which is presumably the
default behaviour of libopenssl. In future mechanize may support
third-party libraries that (I assume) allow more options here.
"""
self._client_cert_manager.add_key_cert(url, key_file, cert_file)
# the following are rarely useful -- use add_password / add_proxy_password
# instead
def set_password_manager(self, password_manager):
"""Set a mechanize.HTTPPasswordMgrWithDefaultRealm, or None."""
self._password_manager = password_manager
self._set_handler("_basicauth", obj=password_manager)
self._set_handler("_digestauth", obj=password_manager)
def set_proxy_password_manager(self, password_manager):
"""Set a mechanize.HTTPProxyPasswordMgr, or None."""
self._proxy_password_manager = password_manager
self._set_handler("_proxy_basicauth", obj=password_manager)
self._set_handler("_proxy_digestauth", obj=password_manager)
def set_client_cert_manager(self, cert_manager):
"""Set a mechanize.HTTPClientCertMgr, or None."""
self._client_cert_manager = cert_manager
handler = self._ua_handlers["https"]
handler.client_cert_manager = cert_manager
# these methods all take a boolean parameter
def set_handle_robots(self, handle):
"""Set whether to observe rules from robots.txt."""
self._set_handler("_robots", handle)
def set_handle_redirect(self, handle):
"""Set whether to handle HTTP 30x redirections."""
self._set_handler("_redirect", handle)
def set_handle_refresh(self, handle, max_time=30.0, honor_time=False):
"""Set whether to handle HTTP Refresh headers."""
self._set_handler("_refresh", handle, constructor_kwds=
{"max_time": max_time, "honor_time": honor_time})
def set_handle_equiv(self, handle, head_parser_class=None):
"""Set whether to treat HTML http-equiv headers like HTTP headers.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
if head_parser_class is not None:
constructor_kwds = {"head_parser_class": head_parser_class}
else:
constructor_kwds={}
self._set_handler("_equiv", handle, constructor_kwds=constructor_kwds)
def set_handle_gzip(self, handle):
"""Handle gzip transfer encoding.
"""
if handle:
warnings.warn(
"gzip transfer encoding is experimental!", stacklevel=2)
self._set_handler("_gzip", handle)
def set_debug_redirects(self, handle):
"""Log information about HTTP redirects (including refreshes).
Logging is performed using module logging. The logger name is
"mechanize.http_redirects". To actually print some debug output,
eg:
import sys, logging
logger = logging.getLogger("mechanize.http_redirects")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
Other logger names relevant to this module:
"mechanize.http_responses"
"mechanize.cookies" (or "cookielib" if running Python 2.4)
To turn on everything:
import sys, logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
"""
self._set_handler("_debug_redirect", handle)
def set_debug_responses(self, handle):
"""Log HTTP response bodies.
See docstring for .set_debug_redirects() for details of logging.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
self._set_handler("_debug_response_body", handle)
def set_debug_http(self, handle):
"""Print HTTP headers to sys.stdout."""
level = int(bool(handle))
for scheme in "http", "https":
h = self._ua_handlers.get(scheme)
if h is not None:
h.set_http_debuglevel(level)
def _set_handler(self, name, handle=None, obj=None,
constructor_args=(), constructor_kwds={}):
if handle is None:
handle = obj is not None
if handle:
handler_class = self.handler_classes[name]
if obj is not None:
newhandler = handler_class(obj)
else:
- newhandler = handler_class(*constructor_args, **constructor_kwds)
+ newhandler = handler_class(
+ *constructor_args, **constructor_kwds)
else:
newhandler = None
self._replace_handler(name, newhandler)
def _replace_handler(self, name, newhandler=None):
# first, if handler was previously added, remove it
if name is not None:
handler = self._ua_handlers.get(name)
if handler:
try:
self.handlers.remove(handler)
except ValueError:
pass
# then add the replacement, if any
if newhandler is not None:
self.add_handler(newhandler)
self._ua_handlers[name] = newhandler
class UserAgent(UserAgentBase):
def __init__(self):
UserAgentBase.__init__(self)
self._seekable = False
def set_seekable_responses(self, handle):
"""Make response objects .seek()able."""
self._seekable = bool(handle)
def open(self, fullurl, data=None):
if self._seekable:
def bound_open(fullurl, data=None):
return UserAgentBase.open(self, fullurl, data)
response = _opener.wrapped_open(
bound_open, _response.seek_wrapped_response, fullurl, data)
else:
response = UserAgentBase.open(self, fullurl, data)
return response
diff --git a/mechanize/_util.py b/mechanize/_util.py
index e2c9d3a..3516e82 100644
--- a/mechanize/_util.py
+++ b/mechanize/_util.py
@@ -1,279 +1,280 @@
"""Utility functions and date/time routines.
Copyright 2002-2006 John J Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
import re, string, time, warnings
def deprecation(message):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def hide_deprecations():
warnings.filterwarnings('ignore', category=DeprecationWarning)
def reset_deprecations():
warnings.filterwarnings('default', category=DeprecationWarning)
def isstringlike(x):
try: x+""
except: return False
else: return True
## def caller():
## try:
## raise SyntaxError
## except:
## import sys
## return sys.exc_traceback.tb_frame.f_back.f_back.f_code.co_name
from calendar import timegm
# Date/time conversion routines for formats used by the HTTP protocol.
EPOCH = 1970
def my_timegm(tt):
year, month, mday, hour, min, sec = tt[:6]
if ((year >= EPOCH) and (1 <= month <= 12) and (1 <= mday <= 31) and
(0 <= hour <= 24) and (0 <= min <= 59) and (0 <= sec <= 61)):
return timegm(tt)
else:
return None
days = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
months = ["Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
months_lower = []
for month in months: months_lower.append(month.lower())
def time2isoz(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like "YYYY-MM-DD hh:mm:ssZ",
representing Universal Time (UTC, aka GMT). An example of this format is:
1994-11-24 08:49:37Z
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec = time.gmtime(t)[:6]
return "%04d-%02d-%02d %02d:%02d:%02dZ" % (
year, mon, mday, hour, min, sec)
def time2netscape(t=None):
"""Return a string representing time in seconds since epoch, t.
If the function is called without an argument, it will use the current
time.
The format of the returned string is like this:
Wed, DD-Mon-YYYY HH:MM:SS GMT
"""
if t is None: t = time.time()
year, mon, mday, hour, min, sec, wday = time.gmtime(t)[:7]
return "%s %02d-%s-%04d %02d:%02d:%02d GMT" % (
days[wday], mday, months[mon-1], year, hour, min, sec)
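# For example, both formatters applied to the epoch itself:
#
#   time2isoz(0)       # -> '1970-01-01 00:00:00Z'
#   time2netscape(0)   # -> 'Thu 01-Jan-1970 00:00:00 GMT'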
UTC_ZONES = {"GMT": None, "UTC": None, "UT": None, "Z": None}
timezone_re = re.compile(r"^([-+])?(\d\d?):?(\d\d)?$")
def offset_from_tz_string(tz):
offset = None
if UTC_ZONES.has_key(tz):
offset = 0
else:
m = timezone_re.search(tz)
if m:
offset = 3600 * int(m.group(2))
if m.group(3):
offset = offset + 60 * int(m.group(3))
if m.group(1) == '-':
offset = -offset
return offset
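# For example (offsets are in seconds east of UTC; unrecognized strings
# give None):
#
#   offset_from_tz_string("UTC")     # -> 0
#   offset_from_tz_string("+0100")   # -> 3600
#   offset_from_tz_string("-0530")   # -> -19800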
def _str2time(day, mon, yr, hr, min, sec, tz):
# translate month name to number
# month numbers start with 1 (January)
try:
mon = months_lower.index(mon.lower())+1
except ValueError:
# maybe it's already a number
try:
imon = int(mon)
except ValueError:
return None
if 1 <= imon <= 12:
mon = imon
else:
return None
# make sure clock elements are defined
if hr is None: hr = 0
if min is None: min = 0
if sec is None: sec = 0
yr = int(yr)
day = int(day)
hr = int(hr)
min = int(min)
sec = int(sec)
if yr < 1000:
# find "obvious" year
cur_yr = time.localtime(time.time())[0]
m = cur_yr % 100
tmp = yr
yr = yr + cur_yr - m
m = m - tmp
if abs(m) > 50:
if m > 0: yr = yr + 100
else: yr = yr - 100
# convert UTC time tuple to seconds since epoch (not timezone-adjusted)
t = my_timegm((yr, mon, day, hr, min, sec, tz))
if t is not None:
# adjust time using timezone string, to get absolute time since epoch
if tz is None:
tz = "UTC"
tz = tz.upper()
offset = offset_from_tz_string(tz)
if offset is None:
return None
t = t - offset
return t
-strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) (\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
+strict_re = re.compile(r"^[SMTWF][a-z][a-z], (\d\d) ([JFMASOND][a-z][a-z]) "
+ r"(\d\d\d\d) (\d\d):(\d\d):(\d\d) GMT$")
wkday_re = re.compile(
r"^(?:Sun|Mon|Tue|Wed|Thu|Fri|Sat)[a-z]*,?\s*", re.I)
loose_http_re = re.compile(
r"""^
(\d\d?) # day
(?:\s+|[-\/])
(\w+) # month
(?:\s+|[-\/])
(\d+) # year
(?:
(?:\s+|:) # separator before clock
(\d\d?):(\d\d) # hour:min
(?::(\d\d))? # optional seconds
)? # optional clock
\s*
([-+]?\d{2,4}|(?![APap][Mm]\b)[A-Za-z]+)? # timezone
\s*
(?:\(\w+\))? # ASCII representation of timezone in parens.
\s*$""", re.X)
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = strict_re.search(text)
if m:
g = m.groups()
mon = months_lower.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return my_timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = wkday_re.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = loose_http_re.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
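# For example, the same instant written in two of the supported formats
# parses to the same epoch value, and garbage gives None:
#
#   t1 = http2time("Wed, 09 Feb 1994 22:23:32 GMT")   # strict HTTP format
#   t2 = http2time("09-Feb-1994 22:23:32 GMT")        # broken rfc850
#   assert t1 == t2
#   assert http2time("not a date") is None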
iso_re = re.compile(
"""^
(\d{4}) # year
[-\/]?
(\d\d?) # numerical month
[-\/]?
(\d\d?) # day
(?:
(?:\s+|[-:Tt]) # separator before clock
(\d\d?):?(\d\d) # hour:min
(?::?(\d\d(?:\.\d*)?))? # optional seconds (and fractional)
)? # optional clock
\s*
([-+]?\d\d?:?(:?\d\d)?
|Z|z)? # timezone (Z is "zero meridian", i.e. GMT)
\s*$""", re.X)
def iso2time(text):
"""
As for http2time, but parses the ISO 8601 formats:
1994-02-03 14:15:29 -0100 -- ISO 8601 format
1994-02-03 14:15:29 -- zone is optional
1994-02-03 -- only date
1994-02-03T14:15:29 -- Use T as separator
19940203T141529Z -- ISO 8601 compact format
19940203 -- only date
"""
# clean up
text = text.lstrip()
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = iso_re.search(text)
if m is not None:
# XXX there's an extra bit of the timezone I'm ignoring here: is
# this the right thing to do?
yr, mon, day, hr, min, sec, tz, _ = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
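if __name__ == "__main__":
    # Quick smoke test of the parsers, using instants taken from the
    # docstring examples above:
    assert iso2time("1994-02-03 14:15:29") == iso2time("19940203T141529Z")
    assert iso2time("1994-02-03T14:15:29Z") == \
        http2time("03 Feb 1994 14:15:29 GMT")
    print time2isoz(iso2time("1994-02-03T14:15:29Z"))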
|
Almad/Mechanize
|
79879fb63daf5e9409ec42afebfdcd1f1b7cf23b
|
Docstring grammar fix.
|
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index 7c163e8..8ba52c4 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,672 +1,672 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import urllib2, sys, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - eg., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
        request_class: Request class to use. Defaults to mechanize.Request
        for Pythons older than 2.4, urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
- The browser state (including .request, .response(), history, forms and
- links) are all left unchanged by calling this function.
+ Browser state (including request, response, history, forms and links)
+ is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
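    # Navigation sketch: open()/follow_link() push history entries and
    # back() pops them (URLs and link text here are hypothetical):
    #
    #   import re, mechanize
    #   br = mechanize.Browser()
    #   br.open("http://example.com/")
    #   br.follow_link(text_regex=re.compile("news"))
    #   print br.geturl()   # the news page
    #   br.back()           # pop one history entry
    #   print br.geturl()   # http://example.com/ again
    #   br.reload()         # re-fetch the current document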
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants of
any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
"""Return title, or None if there is no title element in the document.
Tags are stripped or textified as described in docs for
PullParser.get_text() method of pullparser module.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
first). Note that form 0 is the first form matching all the other
arguments (if supplied); it is not necessarily the first form in the
document. The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for ClientForm.HTMLForm.click().
Return value is same as for Browser.open().
"""
return self.open(self.click(*args, **kwds))
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: eg. <a href="blah">this bit</a> (as
returned by pullparser.get_compressed_text(), ie. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
text_regex: link text between tags (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, eg. "a"
predicate: a function taking a Link object as its single argument,
returning a boolean result indicating whether the link matched
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
def __getattr__(self, name):
# pass through ClientForm / DOMForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
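# A minimal usage sketch of the Browser API documented above, assuming a
# reachable page at the hypothetical URL below with a form named "search"
# and a text control named "q" (none of these names come from mechanize):
def _browser_form_example():
    br = Browser()
    br.open("http://example.com/")          # hypothetical URL
    br.select_form(name="search")           # give the form "input focus"
    br.set_value("mechanize", name="q")     # passed through to the HTMLForm
    response = br.submit()                  # same return value as .open()
    br.back()                               # pop history, restore previous page
    return response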
|
Almad/Mechanize
|
2f7cd0a47ceb5c9b8a70adea2dfda9270efdeb3c
|
Remove some out-of-date docstring text.
|
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index b5ac64d..7c163e8 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,678 +1,672 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import urllib2, sys, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
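# A minimal sketch of the LIFO behaviour documented above; the strings are
# hypothetical stand-ins for request/response objects, which History stores
# without inspecting.
def _history_example():
    h = History()
    h.add("req1", "resp1")
    h.add("req2", "resp2")
    assert h.back(1, "current") == ("req2", "resp2")
    assert h.back(1, None) == ("req1", "resp1")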
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - eg., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
request_class: Request class to use. Defaults to mechanize.Request
for Pythons older than 2.4, and to urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
- """Set whether to add Referer header to each request.
-
- This base class does not implement this feature (so don't turn this on
- if you're using this base class directly), but the subclass
- mechanize.Browser does.
-
- """
+ """Set whether to add Referer header to each request."""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
The browser state (including .request, .response(), history, forms and
links) are all left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants of
any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
"""Return title, or None if there is no title element in the document.
Tags are stripped or textified as described in docs for
PullParser.get_text() method of pullparser module.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
first). Note that form 0 is the first form matching all the other
arguments (if supplied); it is not necessarily the first form in the
document. The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for ClientForm.HTMLForm.click().
Return value is same as for Browser.open().
"""
return self.open(self.click(*args, **kwds))
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: eg. <a href="blah">this bit</a> (as
returned by pullparser.get_compressed_text(), ie. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
text_regex: link text between tags (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, eg. "a"
predicate: a function taking a Link object as its single argument,
returning a boolean result indicating whether the link matched
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
def __getattr__(self, name):
# pass through ClientForm / DOMForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
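# A sketch of the cookie and no-visit helpers documented above. The URLs
# and cookie value are hypothetical; set_cookie() requires that an HTTP(S)
# response is already being viewed.
def _cookie_example():
    br = Browser()
    br.open("http://example.com/")               # hypothetical URL
    br.set_cookie("sid=abcdef")                  # as if sent with the current response
    img = br.open_novisit("http://example.com/logo.png")  # leaves history/forms untouched
    return img.read()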
|
Almad/Mechanize
|
843e56fd796f470bd3c94f6dfc5d51a6ab7c94bc
|
Make title parsing follow Firefox behaviour wrt child elements (previously the behaviour differed between Factory and RobustFactory).
|
diff --git a/mechanize/_html.py b/mechanize/_html.py
index 90f9ded..9e7521b 100644
--- a/mechanize/_html.py
+++ b/mechanize/_html.py
@@ -1,607 +1,634 @@
"""HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import re, copy, htmlentitydefs
import sgmllib, HTMLParser, ClientForm
import _request
from _headersutil import split_header_words, is_html as _is_html
import _rfc3986
DEFAULT_ENCODING = "latin-1"
+COMPRESS_RE = re.compile(r"\s+")
+
# the base class is purely for backwards compatibility
class ParseError(ClientForm.ParseError): pass
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
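# A sketch of the caching behaviour: the wrapped iterable is consumed at
# most once, and each call replays the cached items before pulling new ones.
def _caching_generator_example():
    cached = CachingGeneratorFunction(iter([1, 2, 3]))
    assert list(cached()) == [1, 2, 3]   # consumes the iterator, filling the cache
    assert list(cached()) == [1, 2, 3]   # replayed entirely from the cache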
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
return v
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
# idea for this argument-processing trick is from Peter Otten
class Args:
def __init__(self, args_map):
self.dictionary = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
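# A sketch of the argument-processing trick noted above: Args captures the
# keyword arguments (via locals()) and serves them back as attributes.
def _args_example():
    args = form_parser_args(select_default=True)
    assert args.select_default is True
    assert args.request_class is None    # the captured default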
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
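# A sketch of Link construction, assuming _rfc3986.urljoin implements
# standard RFC 3986 reference resolution: absolute_url is resolved against
# base_url up front, while .url keeps the original (possibly relative) form.
def _link_example():
    link = Link("http://example.com/a/", "../b.html", "B", "a",
                [("href", "../b.html")])
    assert link.absolute_url == "http://example.com/b.html"
    assert link.url == "../b.html"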
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
name = attrs.get("name")
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for eg.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying ClientForm.HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ClientForm.ParseResponse
argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
import ClientForm
self.select_default = select_default
if form_parser_class is None:
form_parser_class = ClientForm.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
import ClientForm
encoding = self.encoding
try:
forms = ClientForm.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
except ClientForm.ParseError, exc:
raise ParseError(exc)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
+ def _get_title_text(self, parser):
+ import _pullparser
+ text = []
+ tok = None
+ while 1:
+ try:
+ tok = parser.get_token()
+ except _pullparser.NoMoreTokensError:
+ break
+ if tok.type == "data":
+ text.append(str(tok))
+ elif tok.type == "entityref":
+ t = unescape("&%s;" % tok.data,
+ parser._entitydefs, parser.encoding)
+ text.append(t)
+ elif tok.type == "charref":
+ t = unescape_charref(tok.data, parser.encoding)
+ text.append(t)
+ elif tok.type in ["starttag", "endtag", "startendtag"]:
+ tag_name = tok.data
+ if tok.type == "endtag" and tag_name == "title":
+ break
+ text.append(str(tok))
+ return COMPRESS_RE.sub(" ", "".join(text).strip())
+
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
- return p.get_text()
+ return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
name, base = name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
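# A sketch of the two escape helpers above: named and numeric (decimal or
# hex) references are decoded, and anything the target encoding cannot
# represent is left escaped.
def _unescape_example():
    import htmlentitydefs
    assert unescape_charref("38", "ascii") == "&"
    assert unescape_charref("x26", "ascii") == "&"
    assert unescape("&amp; &#169;", htmlentitydefs.name2codepoint,
                    "latin-1") == "& \xa9"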
# bizarre import gymnastics for bundled BeautifulSoup
import _beautifulsoup
import ClientForm
RobustFormParser, NestingRobustFormParser = ClientForm._create_bs_classes(
_beautifulsoup.BeautifulSoup, _beautifulsoup.ICantBelieveItsBeautifulSoup
)
# monkeypatch sgmllib to fix http://www.python.org/sf/803422 :-(
import sgmllib
sgmllib.charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
- compress_re = re.compile(r"\s+")
+ compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _beautifulsoup
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
import _beautifulsoup
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
import ClientForm
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
import _beautifulsoup
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
- return title.firstText(lambda t: True)
+ inner_html = "".join([str(node) for node in title.contents])
+ return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
encoding_finder: an EncodingFinder, used to determine the character
encoding of responses; its default encoding is used if the encoding
cannot be determined (or guessed) from the response. You should turn on
HTTP-EQUIV handling if you want the best chance of getting this right
without resorting to that default. The fallback encoding (currently
latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set urllib2.Request class.
ClientForm.HTMLForm instances returned by .forms() will return
instances of this class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by urllib2.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over ClientForm.HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
import _beautifulsoup
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
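# A sketch of the Factory interface described above; `response` stands for
# a hypothetical urllib2-style response object. The lazy attributes and the
# forms()/links() iterables may raise mechanize.ParseError.
def _factory_example(response):
    factory = DefaultFactory()
    factory.set_response(response)
    if factory.is_html:
        return factory.title, list(factory.forms()), list(factory.links())
    return None, [], []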
diff --git a/mechanize/_pullparser.py b/mechanize/_pullparser.py
index 746295d..4c1820c 100644
--- a/mechanize/_pullparser.py
+++ b/mechanize/_pullparser.py
@@ -1,334 +1,389 @@
"""A simple "pull API" for HTML parsing, after Perl's HTML::TokeParser.
Examples
This program extracts all links from a document. It will print one
line for each link, containing the URL and the textual description
between the <A>...</A> tags:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
for token in p.tags("a"):
if token.type == "endtag": continue
url = dict(token.attrs).get("href", "-")
text = p.get_compressed_text(endat=("endtag", "a"))
print "%s\t%s" % (url, text)
This program extracts the <TITLE> from the document:
import pullparser, sys
f = file(sys.argv[1])
p = pullparser.PullParser(f)
if p.get_tag("title"):
title = p.get_compressed_text()
print "Title: %s" % title
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 1998-2001 Gisle Aas (original libwww-perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses.
"""
import re, htmlentitydefs
import sgmllib, HTMLParser
+from xml.sax import saxutils
from _html import unescape, unescape_charref
class NoMoreTokensError(Exception): pass
class Token:
"""Represents an HTML tag, declaration, processing instruction etc.
Behaves as both a tuple-like object (ie. iterable) and has attributes
.type, .data and .attrs.
>>> t = Token("starttag", "a", [("href", "http://www.python.org/")])
>>> t == ("starttag", "a", [("href", "http://www.python.org/")])
True
>>> (t.type, t.data) == ("starttag", "a")
True
>>> t.attrs == [("href", "http://www.python.org/")]
True
Public attributes
type: one of "starttag", "endtag", "startendtag", "charref", "entityref",
"data", "comment", "decl", "pi", after the corresponding methods of
HTMLParser.HTMLParser
data: For a tag, the tag name; otherwise, the relevant data carried by the
tag, as a string
attrs: list of (name, value) pairs representing HTML attributes
(or None if token does not represent an opening tag)
"""
def __init__(self, type, data, attrs=None):
self.type = type
self.data = data
self.attrs = attrs
def __iter__(self):
return iter((self.type, self.data, self.attrs))
def __eq__(self, other):
type, data, attrs = other
if (self.type == type and
self.data == data and
self.attrs == attrs):
return True
else:
return False
def __ne__(self, other): return not self.__eq__(other)
def __repr__(self):
args = ", ".join(map(repr, [self.type, self.data, self.attrs]))
return self.__class__.__name__+"(%s)" % args
+ def __str__(self):
+ """
+ >>> print Token("starttag", "br")
+ <br>
+ >>> print Token("starttag", "a",
+ ... [("href", "http://www.python.org/"), ("alt", '"foo"')])
+ <a href="http://www.python.org/" alt='"foo"'>
+ >>> print Token("startendtag", "br")
+ <br />
+ >>> print Token("startendtag", "br", [("spam", "eggs")])
+ <br spam="eggs" />
+ >>> print Token("endtag", "p")
+ </p>
+ >>> print Token("charref", "38")
+ &#38;
+ >>> print Token("entityref", "amp")
+ &amp;
+ >>> print Token("data", "foo\\nbar")
+ foo
+ bar
+ >>> print Token("comment", "Life is a bowl\\nof cherries.")
+ <!--Life is a bowl
+ of cherries.-->
+ >>> print Token("decl", "decl")
+ <!decl>
+ >>> print Token("pi", "pi")
+ <?pi>
+ """
+ if self.attrs is not None:
+ attrs = "".join([" %s=%s" % (k, saxutils.quoteattr(v)) for
+ k, v in self.attrs])
+ else:
+ attrs = ""
+ if self.type == "starttag":
+ return "<%s%s>" % (self.data, attrs)
+ elif self.type == "startendtag":
+ return "<%s%s />" % (self.data, attrs)
+ elif self.type == "endtag":
+ return "</%s>" % self.data
+ elif self.type == "charref":
+ return "&#%s;" % self.data
+ elif self.type == "entityref":
+ return "&%s;" % self.data
+ elif self.type == "data":
+ return self.data
+ elif self.type == "comment":
+ return "<!--%s-->" % self.data
+ elif self.type == "decl":
+ return "<!%s>" % self.data
+ elif self.type == "pi":
+ return "<?%s>" % self.data
+ assert False
+
+
def iter_until_exception(fn, exception, *args, **kwds):
while 1:
try:
yield fn(*args, **kwds)
except exception:
raise StopIteration
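# A sketch of iter_until_exception: fn is called repeatedly, and the named
# exception terminates the resulting iterator.
def _iter_until_exception_example():
    chars = iter("ab")
    assert list(iter_until_exception(chars.next, StopIteration)) == ["a", "b"]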
class _AbstractParser:
chunk = 1024
compress_re = re.compile(r"\s+")
def __init__(self, fh, textify={"img": "alt", "applet": "alt"},
encoding="ascii", entitydefs=None):
"""
fh: file-like object (only a .read() method is required) from which to
read HTML to be parsed
textify: mapping used by .get_text() and .get_compressed_text() methods
to represent opening tags as text
encoding: encoding used to encode numeric character references by
.get_text() and .get_compressed_text() ("ascii" by default)
entitydefs: mapping like {"amp": "&", ...} containing HTML entity
definitions (a sensible default is used). This is used to unescape
entities in .get_text() (and .get_compressed_text()) and attribute
values. If the encoding can not represent the character, the entity
reference is left unescaped. Note that entity references (both
numeric - e.g. &#123; or &#xabc; - and non-numeric - e.g. &amp;) are
unescaped in attribute values and the return value of .get_text(), but
not in data outside of tags. Instead, entity references outside of
tags are represented as tokens. This is a bit odd, it's true :-/
If the element name of an opening tag matches a key in the textify
mapping then that tag is converted to text. The corresponding value is
used to specify which tag attribute to obtain the text from. textify
maps from element names to either:
- an HTML attribute name, in which case the HTML attribute value is
used as its text value along with the element name in square
brackets (eg. "alt text goes here[IMG]", or, if the alt attribute
were missing, just "[IMG]")
- a callable object (eg. a function) which takes a Token and returns
the string to be used as its text value
If textify has no key for an element name, nothing is substituted for
the opening tag.
Public attributes:
encoding and textify: see above
"""
self._fh = fh
self._tokenstack = [] # FIFO
self.textify = textify
self.encoding = encoding
if entitydefs is None:
entitydefs = htmlentitydefs.name2codepoint
self._entitydefs = entitydefs
def __iter__(self): return self
def tags(self, *names):
return iter_until_exception(self.get_tag, NoMoreTokensError, *names)
def tokens(self, *tokentypes):
return iter_until_exception(self.get_token, NoMoreTokensError, *tokentypes)
def next(self):
try:
return self.get_token()
except NoMoreTokensError:
raise StopIteration()
def get_token(self, *tokentypes):
"""Pop the next Token object from the stack of parsed tokens.
If arguments are given, they are taken to be token types in which the
caller is interested: tokens representing other elements will be
skipped. Element names must be given in lower case.
Raises NoMoreTokensError.
"""
while 1:
while self._tokenstack:
token = self._tokenstack.pop(0)
if tokentypes:
if token.type in tokentypes:
return token
else:
return token
data = self._fh.read(self.chunk)
if not data:
raise NoMoreTokensError()
self.feed(data)
def unget_token(self, token):
"""Push a Token back onto the stack."""
self._tokenstack.insert(0, token)
def get_tag(self, *names):
"""Return the next Token that represents an opening or closing tag.
If arguments are given, they are taken to be element names in which the
caller is interested: tags representing other elements will be skipped.
Element names must be given in lower case.
Raises NoMoreTokensError.
"""
while 1:
tok = self.get_token()
if tok.type not in ["starttag", "endtag", "startendtag"]:
continue
if names:
if tok.data in names:
return tok
else:
return tok
def get_text(self, endat=None):
"""Get some text.
endat: stop reading text at this tag (the tag is included in the
returned text); endat is a tuple (type, name) where type is
"starttag", "endtag" or "startendtag", and name is the element name of
the tag (element names must be given in lower case)
If endat is not given, .get_text() will stop at the next opening or
closing tag, or when there are no more tokens (no exception is raised).
Note that .get_text() includes the text representation (if any) of the
opening tag, but pushes the opening tag back onto the stack. As a
result, if you want to call .get_text() again, you need to call
.get_tag() first (unless you want an empty string returned when you
next call .get_text()).
Entity references are translated using the value of the entitydefs
constructor argument (a mapping from names to characters like that
provided by the standard module htmlentitydefs). Named entity
references that are not in this mapping are left unchanged.
The textify attribute is used to translate opening tags into text: see
the class docstring.
"""
text = []
tok = None
while 1:
try:
tok = self.get_token()
except NoMoreTokensError:
# unget last token (not the one we just failed to get)
if tok: self.unget_token(tok)
break
if tok.type == "data":
text.append(tok.data)
elif tok.type == "entityref":
t = unescape("&%s;"%tok.data, self._entitydefs, self.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, self.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type in ["starttag", "startendtag"]:
alt = self.textify.get(tag_name)
if alt is not None:
if callable(alt):
text.append(alt(tok))
elif tok.attrs is not None:
for k, v in tok.attrs:
if k == alt:
text.append(v)
text.append("[%s]" % tag_name.upper())
if endat is None or endat == (tok.type, tag_name):
self.unget_token(tok)
break
return "".join(text)
def get_compressed_text(self, *args, **kwds):
"""
As .get_text(), but collapses each group of contiguous whitespace to a
single space character, and removes all initial and trailing
whitespace.
"""
text = self.get_text(*args, **kwds)
text = text.strip()
return self.compress_re.sub(" ", text)
def handle_startendtag(self, tag, attrs):
self._tokenstack.append(Token("startendtag", tag, attrs))
def handle_starttag(self, tag, attrs):
self._tokenstack.append(Token("starttag", tag, attrs))
def handle_endtag(self, tag):
self._tokenstack.append(Token("endtag", tag))
def handle_charref(self, name):
self._tokenstack.append(Token("charref", name))
def handle_entityref(self, name):
self._tokenstack.append(Token("entityref", name))
def handle_data(self, data):
self._tokenstack.append(Token("data", data))
def handle_comment(self, data):
self._tokenstack.append(Token("comment", data))
def handle_decl(self, decl):
self._tokenstack.append(Token("decl", decl))
def unknown_decl(self, data):
# XXX should this call self.error instead?
#self.error("unknown declaration: " + `data`)
self._tokenstack.append(Token("decl", data))
def handle_pi(self, data):
self._tokenstack.append(Token("pi", data))
def unescape_attr(self, name):
return unescape(name, self._entitydefs, self.encoding)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
escaped_attrs.append((key, self.unescape_attr(val)))
return escaped_attrs
class PullParser(_AbstractParser, HTMLParser.HTMLParser):
def __init__(self, *args, **kwds):
HTMLParser.HTMLParser.__init__(self)
_AbstractParser.__init__(self, *args, **kwds)
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
class TolerantPullParser(_AbstractParser, sgmllib.SGMLParser):
def __init__(self, *args, **kwds):
sgmllib.SGMLParser.__init__(self)
_AbstractParser.__init__(self, *args, **kwds)
def unknown_starttag(self, tag, attrs):
attrs = self.unescape_attrs(attrs)
self._tokenstack.append(Token("starttag", tag, attrs))
def unknown_endtag(self, tag):
self._tokenstack.append(Token("endtag", tag))
def _test():
import doctest, _pullparser
return doctest.testmod(_pullparser)
if __name__ == "__main__":
_test()
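# A hedged usage sketch (not part of the original module) of the pull-parser
# API documented in the get_tag(), get_text() and get_compressed_text()
# docstrings above.  The HTML string and the expected result are
# illustrative assumptions, not repository code.
def _example_link_text():
    import StringIO
    fh = StringIO.StringIO('<html><a href="/1">one </a><a href="/2"> two</a></html>')
    p = PullParser(fh)
    texts = []
    while 1:
        try:
            tag = p.get_tag("a")  # skip tokens until an <a> start or end tag
        except NoMoreTokensError:
            break
        if tag.type == "starttag":
            # reads text up to the matching end tag, which is pushed back
            texts.append(p.get_compressed_text(endat=("endtag", "a")))
    return texts  # -> ['one', 'two']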
diff --git a/test/test_html.doctest b/test/test_html.doctest
index 9efa7fb..4355c10 100644
--- a/test/test_html.doctest
+++ b/test/test_html.doctest
@@ -1,215 +1,253 @@
>>> import mechanize
>>> from mechanize._response import test_html_response
>>> from mechanize._html import LinksFactory, FormsFactory, TitleFactory, \
... MechanizeBs, \
... RobustLinksFactory, RobustFormsFactory, RobustTitleFactory
mechanize.ParseError should be raised on parsing erroneous HTML.
For backwards compatibility, mechanize.ParseError derives from
exception classes that mechanize used to raise, prior to version
0.1.6.
>>> import sgmllib
>>> import HTMLParser
>>> import ClientForm
>>> issubclass(mechanize.ParseError, sgmllib.SGMLParseError)
True
>>> issubclass(mechanize.ParseError, HTMLParser.HTMLParseError)
True
>>> issubclass(mechanize.ParseError, ClientForm.ParseError)
True
>>> def create_response(error=True):
... extra = ""
... if error:
... extra = "<!!!>"
... html = """\
... <html>
... <head>
... <title>Title</title>
... %s
... </head>
... <body>
... <p>Hello world
... </body>
... </html>
... """ % extra
... return test_html_response(html)
>>> f = LinksFactory()
>>> f.set_response(create_response(), "http://example.com", "latin-1")
>>> list(f.links()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> f = FormsFactory()
>>> f.set_response(create_response(), "latin-1")
>>> list(f.forms()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> f = TitleFactory()
>>> f.set_response(create_response(), "latin-1")
>>> f.title() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
Accessing attributes on Factory may also raise ParseError
>>> def factory_getattr(attr_name):
... fact = mechanize.DefaultFactory()
... fact.set_response(create_response())
... getattr(fact, attr_name)
>>> factory_getattr("title") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> factory_getattr("global_form") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
BeautifulSoup ParseErrors:
XXX If I could come up with examples that break links and forms
parsing, I'd uncomment these!
>>> def create_soup(html):
... r = test_html_response(html)
... return MechanizeBs("latin-1", r.read())
#>>> f = RobustLinksFactory()
#>>> html = """\
#... <a href="a">
#... <frame src="b">
#... <a href="c">
#... <iframe src="d">
#... </a>
#... </area>
#... </frame>
#... """
#>>> f.set_soup(create_soup(html), "http://example.com", "latin-1")
#>>> list(f.links()) # doctest: +IGNORE_EXCEPTION_DETAIL
#Traceback (most recent call last):
#ParseError:
>>> html = """\
... <table>
... <tr><td>
... <input name='broken'>
... </td>
... </form>
... </tr>
... </form>
... """
>>> f = RobustFormsFactory()
>>> f.set_response(create_response(), "latin-1")
>>> list(f.forms()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
#>>> f = RobustTitleFactory()
#>>> f.set_soup(create_soup(""), "latin-1")
#>>> f.title() # doctest: +IGNORE_EXCEPTION_DETAIL
#Traceback (most recent call last):
#ParseError:
Utility class for caching forms etc.
>>> from mechanize._html import CachingGeneratorFunction
>>> i = [1]
>>> func = CachingGeneratorFunction(i)
>>> list(func())
[1]
>>> list(func())
[1]
>>> i = [1, 2, 3]
>>> func = CachingGeneratorFunction(i)
>>> list(func())
[1, 2, 3]
>>> i = func()
>>> i.next()
1
>>> i.next()
2
>>> i.next()
3
>>> i = func()
>>> j = func()
>>> i.next()
1
>>> j.next()
1
>>> i.next()
2
>>> j.next()
2
>>> j.next()
3
>>> i.next()
3
>>> i.next()
Traceback (most recent call last):
...
StopIteration
>>> j.next()
Traceback (most recent call last):
...
StopIteration
Link text parsing
>>> def get_first_link_text_bs(html):
... factory = RobustLinksFactory()
... soup = MechanizeBs("utf-8", html)
... factory.set_soup(soup, "http://example.com/", "utf-8")
... return list(factory.links())[0].text
>>> def get_first_link_text_sgmllib(html):
... factory = LinksFactory()
... response = test_html_response(html)
... factory.set_response(response, "http://example.com/", "utf-8")
... return list(factory.links())[0].text
Whitespace gets compressed down to single spaces. Tags are removed.
>>> html = ("""\
... <html><head><title>Title</title></head><body>
... <p><a href="http://example.com/">The quick\tbrown fox jumps
... over the <i><b>lazy</b></i> dog </a>
... </body></html>
... """)
>>> get_first_link_text_bs(html)
'The quick brown fox jumps over the lazy dog'
>>> get_first_link_text_sgmllib(html)
'The quick brown fox jumps over the lazy dog'
Empty <a> links have empty link text
>>> html = ("""\
... <html><head><title>Title</title></head><body>
... <p><a href="http://example.com/"></a>
... </body></html>
... """)
>>> get_first_link_text_bs(html)
''
>>> get_first_link_text_sgmllib(html)
''
But for backwards-compatibility, empty non-<a> links have None link text
>>> html = ("""\
... <html><head><title>Title</title></head><body>
... <p><frame src="http://example.com/"></frame>
... </body></html>
... """)
>>> print get_first_link_text_bs(html)
None
>>> print get_first_link_text_sgmllib(html)
None
+
+
+Title parsing. We follow Firefox's behaviour with regard to child
+elements (haven't tested IE).
+
+>>> def get_title_bs(html):
+... factory = RobustTitleFactory()
+... soup = MechanizeBs("utf-8", html)
+... factory.set_soup(soup, "utf-8")
+... return factory.title()
+
+>>> def get_title_sgmllib(html):
+... factory = TitleFactory()
+... response = test_html_response(html)
+... factory.set_response(response, "utf-8")
+... return factory.title()
+
+>>> html = ("""\
+... <html><head>
+... <title>Title</title>
+... </head><body><p>Blah.<p></body></html>
+... """)
+>>> get_title_bs(html)
+'Title'
+>>> get_title_sgmllib(html)
+'Title'
+
+>>> html = ("""\
+... <html><head>
+... <title> Ti<script type="text/strange">alert("this is valid HTML -- yuck!")</script>
+... tle &&
+... </title>
+... </head><body><p>Blah.<p></body></html>
+... """)
+>>> get_title_bs(html)
+'Ti<script type="text/strange">alert("this is valid HTML -- yuck!")</script> tle &&'
+>>> get_title_sgmllib(html)
+'Ti<script type="text/strange">alert("this is valid HTML -- yuck!")</script> tle &&'
|
Almad/Mechanize
|
3ff5659eff909311a1abf0bafa91d764b4710abd
|
Make test_browser.BrowserTests.test_empty() run with all factory classes
|
diff --git a/test/test_browser.py b/test/test_browser.py
index 1bc318f..ce136d4 100644
--- a/test/test_browser.py
+++ b/test/test_browser.py
@@ -1,770 +1,774 @@
#!/usr/bin/env python
"""Tests for mechanize.Browser."""
import sys, os, random
from unittest import TestCase
import StringIO, re, urllib2
import mechanize
from mechanize._response import test_html_response
FACTORY_CLASSES = [mechanize.DefaultFactory, mechanize.RobustFactory]
# XXX these 'mock' classes are badly in need of simplification / removal
# (note this stuff is also used by test_useragent.py and test_browser.doctest)
class MockMethod:
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return apply(self.handle, (self.meth_name, self.action)+args)
class MockHeaders(dict):
def getheaders(self, name):
name = name.lower()
return [v for k, v in self.iteritems() if name == k.lower()]
class MockResponse:
closeable_response = None
def __init__(self, url="http://example.com/", data=None, info=None):
self.url = url
self.fp = StringIO.StringIO(data)
if info is None: info = {}
self._info = MockHeaders(info)
def info(self): return self._info
def geturl(self): return self.url
def read(self, size=-1): return self.fp.read(size)
def seek(self, whence):
assert whence == 0
self.fp.seek(0)
def close(self): pass
def get_data(self): pass
def make_mock_handler(response_class=MockResponse):
class MockHandler:
processor_order = 500
handler_order = -1
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for name, action in methods:
if name.endswith("_open"):
meth = MockMethod(name, action, self.handle)
else:
meth = MockMethod(name, action, self.process)
setattr(self.__class__, name, meth)
def handle(self, fn_name, response, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if response:
if isinstance(response, urllib2.HTTPError):
raise response
r = response
r.seek(0)
else:
r = response_class()
req = args[0]
r.url = req.get_full_url()
return r
def process(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if fn_name.endswith("_request"):
return args[0]
else:
return args[1]
def close(self): pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
return MockHandler
class TestBrowser(mechanize.Browser):
default_features = []
default_others = []
default_schemes = []
class TestBrowser2(mechanize.Browser):
# XXX better name!
    # Like TestBrowser, this is neutered so it doesn't know about protocol handling,
# but still knows what to do with unknown schemes, etc., because
# UserAgent's default_others list is left intact, including classes like
# UnknownHandler
default_features = []
default_schemes = []
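# A hedged sketch (not one of the original tests) tying the mocks above
# together: wrapping a canned MockResponse in make_mock_handler() lets
# Browser.open() run entirely offline.  The URL and body are illustrative
# assumptions.
def _example_offline_open():
    url = "http://example.com/"
    r = MockResponse(url,
                     "<html><head><title>T</title></head><body></body></html>",
                     {"content-type": "text/html"})
    b = TestBrowser()
    b.add_handler(make_mock_handler()([("http_open", r)]))
    return b.open(url)  # served by the mock handler; no network access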
class BrowserTests(TestCase):
def test_referer(self):
b = TestBrowser()
url = "http://www.example.com/"
r = MockResponse(url,
"""<html>
<head><title>Title</title></head>
<body>
<form name="form1">
<input type="hidden" name="foo" value="bar"></input>
<input type="submit"></input>
</form>
<a href="http://example.com/foo/bar.html" name="apples"></a>
<a href="https://example.com/spam/eggs.html" name="secure"></a>
<a href="blah://example.com/" name="pears"></a>
</body>
</html>
""", {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
# Referer not added by .open()...
req = mechanize.Request(url)
b.open(req)
self.assert_(req.get_header("Referer") is None)
# ...even if we're visiting a document
b.open(req)
self.assert_(req.get_header("Referer") is None)
# Referer added by .click_link() and .click()
b.select_form("form1")
req2 = b.click()
self.assertEqual(req2.get_header("Referer"), url)
r2 = b.open(req2)
req3 = b.click_link(name="apples")
self.assertEqual(req3.get_header("Referer"), url+"?foo=bar")
# Referer not added when going from https to http URL
b.add_handler(make_mock_handler()([("https_open", r)]))
r3 = b.open(req3)
req4 = b.click_link(name="secure")
self.assertEqual(req4.get_header("Referer"),
"http://example.com/foo/bar.html")
r4 = b.open(req4)
req5 = b.click_link(name="apples")
self.assert_(not req5.has_header("Referer"))
# Referer not added for non-http, non-https requests
b.add_handler(make_mock_handler()([("blah_open", r)]))
req6 = b.click_link(name="pears")
self.assert_(not req6.has_header("Referer"))
# Referer not added when going from non-http, non-https URL
r4 = b.open(req6)
req7 = b.click_link(name="apples")
self.assert_(not req7.has_header("Referer"))
# XXX Referer added for redirect
def test_encoding(self):
import mechanize
from StringIO import StringIO
import urllib, mimetools
# always take first encoding, since that's the one from the real HTTP
# headers, rather than from HTTP-EQUIV
b = mechanize.Browser()
for s, ct in [("", mechanize._html.DEFAULT_ENCODING),
("Foo: Bar\r\n\r\n", mechanize._html.DEFAULT_ENCODING),
("Content-Type: text/html; charset=UTF-8\r\n\r\n",
"UTF-8"),
("Content-Type: text/html; charset=UTF-8\r\n"
"Content-Type: text/html; charset=KOI8-R\r\n\r\n",
"UTF-8"),
]:
msg = mimetools.Message(StringIO(s))
r = urllib.addinfourl(StringIO(""), msg, "http://www.example.com/")
b.set_response(r)
self.assertEqual(b.encoding(), ct)
def test_history(self):
import mechanize
from mechanize import _response
def same_response(ra, rb):
return ra.wrapped is rb.wrapped
class Handler(mechanize.BaseHandler):
def http_open(self, request):
r = _response.test_response(url=request.get_full_url())
# these tests aren't interested in auto-.reload() behaviour of
# .back(), so read the response to prevent that happening
r.get_data()
return r
b = TestBrowser2()
b.add_handler(Handler())
self.assertRaises(mechanize.BrowserStateError, b.back)
r1 = b.open("http://example.com/")
self.assertRaises(mechanize.BrowserStateError, b.back)
r2 = b.open("http://example.com/foo")
self.assert_(same_response(b.back(), r1))
r3 = b.open("http://example.com/bar")
r4 = b.open("http://example.com/spam")
self.assert_(same_response(b.back(), r3))
self.assert_(same_response(b.back(), r1))
self.assertEquals(b.geturl(), "http://example.com/")
self.assertRaises(mechanize.BrowserStateError, b.back)
# reloading does a real HTTP fetch rather than using history cache
r5 = b.reload()
self.assert_(not same_response(r5, r1))
# .geturl() gets fed through to b.response
self.assertEquals(b.geturl(), "http://example.com/")
# can go back n times
r6 = b.open("spam")
self.assertEquals(b.geturl(), "http://example.com/spam")
r7 = b.open("/spam")
self.assert_(same_response(b.response(), r7))
self.assertEquals(b.geturl(), "http://example.com/spam")
self.assert_(same_response(b.back(2), r5))
self.assertEquals(b.geturl(), "http://example.com/")
self.assertRaises(mechanize.BrowserStateError, b.back, 2)
r8 = b.open("/spam")
# even if we get an HTTPError, history, .response() and .request should
# still get updated
class Handler2(mechanize.BaseHandler):
def https_open(self, request):
r = urllib2.HTTPError(
"https://example.com/bad", 503, "Oops",
MockHeaders(), StringIO.StringIO())
return r
b.add_handler(Handler2())
self.assertRaises(urllib2.HTTPError, b.open, "https://example.com/badreq")
self.assertEqual(b.response().geturl(), "https://example.com/bad")
self.assertEqual(b.request.get_full_url(), "https://example.com/badreq")
self.assert_(same_response(b.back(), r8))
        # .close() should make subsequent use of Browser methods and
        # attributes complain noisily, since they should not be called
        # after .close()
b.form = "blah"
b.close()
for attr in ("form open error retrieve add_handler "
"request response set_response geturl reload back "
"clear_history set_cookie links forms viewing_html "
"encoding title select_form click submit click_link "
"follow_link find_link".split()
):
self.assert_(getattr(b, attr) is None)
def test_reload_read_incomplete(self):
import mechanize
from mechanize._response import test_response
class Browser(TestBrowser):
def __init__(self):
TestBrowser.__init__(self)
self.reloaded = False
def reload(self):
self.reloaded = True
TestBrowser.reload(self)
br = Browser()
data = "<html><head><title></title></head><body>%s</body></html>"
data = data % ("The quick brown fox jumps over the lazy dog."*100)
class Handler(mechanize.BaseHandler):
            def http_open(self, request):
return test_response(data, [("content-type", "text/html")])
br.add_handler(Handler())
# .reload() on .back() if the whole response hasn't already been read
# (.read_incomplete is True)
r = br.open("http://example.com")
r.read(10)
br.open('http://www.example.com/blah')
self.failIf(br.reloaded)
br.back()
self.assert_(br.reloaded)
# don't reload if already read
br.reloaded = False
br.response().read()
br.open('http://www.example.com/blah')
br.back()
self.failIf(br.reloaded)
def test_viewing_html(self):
# XXX not testing multiple Content-Type headers
import mechanize
url = "http://example.com/"
for allow_xhtml in False, True:
for ct, expect in [
(None, False),
("text/plain", False),
("text/html", True),
# don't try to handle XML until we can do it right!
("text/xhtml", allow_xhtml),
("text/xml", allow_xhtml),
("application/xml", allow_xhtml),
("application/xhtml+xml", allow_xhtml),
("text/html; charset=blah", True),
(" text/html ; charset=ook ", True),
]:
b = TestBrowser(mechanize.DefaultFactory(
i_want_broken_xhtml_support=allow_xhtml))
hdrs = {}
if ct is not None:
hdrs["Content-Type"] = ct
b.add_handler(make_mock_handler()([("http_open",
MockResponse(url, "", hdrs))]))
r = b.open(url)
self.assertEqual(b.viewing_html(), expect)
for allow_xhtml in False, True:
for ext, expect in [
(".htm", True),
(".html", True),
# don't try to handle XML until we can do it right!
(".xhtml", allow_xhtml),
(".html?foo=bar&a=b;whelk#kool", True),
(".txt", False),
(".xml", False),
("", False),
]:
b = TestBrowser(mechanize.DefaultFactory(
i_want_broken_xhtml_support=allow_xhtml))
url = "http://example.com/foo"+ext
b.add_handler(make_mock_handler()(
[("http_open", MockResponse(url, "", {}))]))
r = b.open(url)
self.assertEqual(b.viewing_html(), expect)
def test_empty(self):
+ for factory_class in FACTORY_CLASSES:
+ self._test_empty(factory_class())
+
+ def _test_empty(self, factory):
import mechanize
url = "http://example.com/"
- b = TestBrowser()
+ b = TestBrowser(factory=factory)
self.assert_(b.response() is None)
# To open a relative reference (often called a "relative URL"), you
# have to have already opened a URL for it "to be relative to".
self.assertRaises(mechanize.BrowserStateError, b.open, "relative_ref")
# we can still clear the history even if we've not visited any URL
b.clear_history()
# most methods raise BrowserStateError...
def test_state_error(method_names):
for attr in method_names:
method = getattr(b, attr)
#print attr
self.assertRaises(mechanize.BrowserStateError, method)
self.assertRaises(mechanize.BrowserStateError, b.select_form,
name="blah")
self.assertRaises(mechanize.BrowserStateError, b.find_link,
name="blah")
# ...if not visiting a URL...
test_state_error(("geturl reload back viewing_html encoding "
"click links forms title select_form".split()))
self.assertRaises(mechanize.BrowserStateError, b.set_cookie, "foo=bar")
self.assertRaises(mechanize.BrowserStateError, b.submit, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.click_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.follow_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.find_link, nr=0)
# ...and lots do so if visiting a non-HTML URL
b.add_handler(make_mock_handler()(
[("http_open", MockResponse(url, "", {}))]))
r = b.open(url)
self.assert_(not b.viewing_html())
test_state_error("click links forms title select_form".split())
self.assertRaises(mechanize.BrowserStateError, b.submit, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.click_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.follow_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.find_link, nr=0)
b = TestBrowser()
r = MockResponse(url,
"""<html>
<head><title>Title</title></head>
<body>
</body>
</html>
""", {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
self.assertEqual(b.title(), "Title")
self.assertEqual(len(list(b.links())), 0)
self.assertEqual(len(list(b.forms())), 0)
self.assertRaises(ValueError, b.select_form)
self.assertRaises(mechanize.FormNotFoundError, b.select_form,
name="blah")
self.assertRaises(mechanize.FormNotFoundError, b.select_form,
predicate=lambda form: form is not b.global_form())
self.assertRaises(mechanize.LinkNotFoundError, b.find_link,
name="blah")
self.assertRaises(mechanize.LinkNotFoundError, b.find_link,
predicate=lambda x: True)
def test_forms(self):
for factory_class in FACTORY_CLASSES:
self._test_forms(factory_class())
def _test_forms(self, factory):
import mechanize
url = "http://example.com"
b = TestBrowser(factory=factory)
r = test_html_response(
url=url,
headers=[("content-type", "text/html")],
data="""\
<html>
<head><title>Title</title></head>
<body>
<form name="form1">
<input type="text"></input>
<input type="checkbox" name="cheeses" value="cheddar"></input>
<input type="checkbox" name="cheeses" value="edam"></input>
<input type="submit" name="one"></input>
</form>
<a href="http://example.com/foo/bar.html" name="apples">
<form name="form2">
<input type="submit" name="two">
</form>
</body>
</html>
"""
)
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
forms = list(b.forms())
self.assertEqual(len(forms), 2)
for got, expect in zip([f.name for f in forms], [
"form1", "form2"]):
self.assertEqual(got, expect)
self.assertRaises(mechanize.FormNotFoundError, b.select_form, "foo")
# no form is set yet
self.assertRaises(AttributeError, getattr, b, "possible_items")
b.select_form("form1")
# now unknown methods are fed through to selected ClientForm.HTMLForm
self.assertEqual(
[i.name for i in b.find_control("cheeses").items],
["cheddar", "edam"])
b["cheeses"] = ["cheddar", "edam"]
self.assertEqual(b.click_pairs(), [
("cheeses", "cheddar"), ("cheeses", "edam"), ("one", "")])
b.select_form(nr=1)
self.assertEqual(b.name, "form2")
self.assertEqual(b.click_pairs(), [("two", "")])
def test_link_encoding(self):
for factory_class in FACTORY_CLASSES:
self._test_link_encoding(factory_class())
def _test_link_encoding(self, factory):
import urllib
import mechanize
from mechanize._rfc3986 import clean_url
url = "http://example.com/"
for encoding in ["UTF-8", "latin-1"]:
encoding_decl = "; charset=%s" % encoding
b = TestBrowser(factory=factory)
r = MockResponse(url, """\
<a href="http://example.com/foo/bar——.html"
name="name0——">blah——</a>
""", #"
{"content-type": "text/html%s" % encoding_decl})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
Link = mechanize.Link
try:
mdashx2 = u"\u2014".encode(encoding)*2
except UnicodeError:
                mdashx2 = '&mdash;&mdash;'
qmdashx2 = clean_url(mdashx2, encoding)
# base_url, url, text, tag, attrs
exp = Link(url, "http://example.com/foo/bar%s.html" % qmdashx2,
"blah"+mdashx2, "a",
[("href", "http://example.com/foo/bar%s.html" % mdashx2),
("name", "name0%s" % mdashx2)])
# nr
link = b.find_link()
## print
## print exp
## print link
self.assertEqual(link, exp)
def test_link_whitespace(self):
from mechanize import Link
for factory_class in FACTORY_CLASSES:
base_url = "http://example.com/"
url = " http://example.com/foo.html%20+ "
stripped_url = url.strip()
html = '<a href="%s"></a>' % url
b = TestBrowser(factory=factory_class())
r = MockResponse(base_url, html, {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(base_url)
link = b.find_link(nr=0)
self.assertEqual(
link,
Link(base_url, stripped_url, "", "a", [("href", url)])
)
def test_links(self):
for factory_class in FACTORY_CLASSES:
self._test_links(factory_class())
def _test_links(self, factory):
import mechanize
from mechanize import Link
url = "http://example.com/"
b = TestBrowser(factory=factory)
r = MockResponse(url,
"""<html>
<head><title>Title</title></head>
<body>
<a href="http://example.com/foo/bar.html" name="apples"></a>
<a name="pears"></a>
<a href="spam" name="pears"></a>
<area href="blah" name="foo"></area>
<form name="form2">
<input type="submit" name="two">
</form>
<frame name="name" href="href" src="src"></frame>
<iframe name="name2" href="href" src="src"></iframe>
<a name="name3" href="one">yada yada</a>
<a name="pears" href="two" weird="stuff">rhubarb</a>
<a></a>
<iframe src="foo"></iframe>
</body>
</html>
""", {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
exp_links = [
# base_url, url, text, tag, attrs
Link(url, "http://example.com/foo/bar.html", "", "a",
[("href", "http://example.com/foo/bar.html"),
("name", "apples")]),
Link(url, "spam", "", "a", [("href", "spam"), ("name", "pears")]),
Link(url, "blah", None, "area",
[("href", "blah"), ("name", "foo")]),
Link(url, "src", None, "frame",
[("name", "name"), ("href", "href"), ("src", "src")]),
Link(url, "src", None, "iframe",
[("name", "name2"), ("href", "href"), ("src", "src")]),
Link(url, "one", "yada yada", "a",
[("name", "name3"), ("href", "one")]),
Link(url, "two", "rhubarb", "a",
[("name", "pears"), ("href", "two"), ("weird", "stuff")]),
Link(url, "foo", None, "iframe",
[("src", "foo")]),
]
links = list(b.links())
self.assertEqual(len(links), len(exp_links))
for got, expect in zip(links, exp_links):
self.assertEqual(got, expect)
# nr
l = b.find_link()
self.assertEqual(l.url, "http://example.com/foo/bar.html")
l = b.find_link(nr=1)
self.assertEqual(l.url, "spam")
# text
l = b.find_link(text="yada yada")
self.assertEqual(l.url, "one")
self.assertRaises(mechanize.LinkNotFoundError,
b.find_link, text="da ya")
l = b.find_link(text_regex=re.compile("da ya"))
self.assertEqual(l.url, "one")
l = b.find_link(text_regex="da ya")
self.assertEqual(l.url, "one")
# name
l = b.find_link(name="name3")
self.assertEqual(l.url, "one")
l = b.find_link(name_regex=re.compile("oo"))
self.assertEqual(l.url, "blah")
l = b.find_link(name_regex="oo")
self.assertEqual(l.url, "blah")
# url
l = b.find_link(url="spam")
self.assertEqual(l.url, "spam")
l = b.find_link(url_regex=re.compile("pam"))
self.assertEqual(l.url, "spam")
l = b.find_link(url_regex="pam")
self.assertEqual(l.url, "spam")
# tag
l = b.find_link(tag="area")
self.assertEqual(l.url, "blah")
# predicate
l = b.find_link(predicate=
lambda l: dict(l.attrs).get("weird") == "stuff")
self.assertEqual(l.url, "two")
# combinations
l = b.find_link(name="pears", nr=1)
self.assertEqual(l.text, "rhubarb")
l = b.find_link(url="src", nr=0, name="name2")
self.assertEqual(l.tag, "iframe")
self.assertEqual(l.url, "src")
self.assertRaises(mechanize.LinkNotFoundError, b.find_link,
url="src", nr=1, name="name2")
l = b.find_link(tag="a", predicate=
lambda l: dict(l.attrs).get("weird") == "stuff")
self.assertEqual(l.url, "two")
# .links()
self.assertEqual(list(b.links(url="src")), [
Link(url, url="src", text=None, tag="frame",
attrs=[("name", "name"), ("href", "href"), ("src", "src")]),
Link(url, url="src", text=None, tag="iframe",
attrs=[("name", "name2"), ("href", "href"), ("src", "src")]),
])
def test_base_uri(self):
import mechanize
url = "http://example.com/"
for html, urls in [
(
"""<base href="http://www.python.org/foo/">
<a href="bar/baz.html"></a>
<a href="/bar/baz.html"></a>
<a href="http://example.com/bar %2f%2Fblah;/baz@~._-.html"></a>
""",
[
"http://www.python.org/foo/bar/baz.html",
"http://www.python.org/bar/baz.html",
"http://example.com/bar%20%2f%2Fblah;/baz@~._-.html",
]),
(
"""<a href="bar/baz.html"></a>
<a href="/bar/baz.html"></a>
<a href="http://example.com/bar/baz.html"></a>
""",
[
"http://example.com/bar/baz.html",
"http://example.com/bar/baz.html",
"http://example.com/bar/baz.html",
]
),
]:
b = TestBrowser()
r = MockResponse(url, html, {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
self.assertEqual([link.absolute_url for link in b.links()], urls)
def test_set_cookie(self):
class CookieTestBrowser(TestBrowser):
default_features = list(TestBrowser.default_features)+["_cookies"]
# have to be visiting HTTP/HTTPS URL
url = "ftp://example.com/"
br = CookieTestBrowser()
r = mechanize.make_response(
"<html><head><title>Title</title></head><body></body></html>",
[("content-type", "text/html")],
url,
200, "OK",
)
br.add_handler(make_mock_handler()([("http_open", r)]))
handler = br._ua_handlers["_cookies"]
cj = handler.cookiejar
self.assertRaises(mechanize.BrowserStateError,
br.set_cookie, "foo=bar")
self.assertEqual(len(cj), 0)
url = "http://example.com/"
br = CookieTestBrowser()
r = mechanize.make_response(
"<html><head><title>Title</title></head><body></body></html>",
[("content-type", "text/html")],
url,
200, "OK",
)
br.add_handler(make_mock_handler()([("http_open", r)]))
handler = br._ua_handlers["_cookies"]
cj = handler.cookiejar
# have to be visiting a URL
self.assertRaises(mechanize.BrowserStateError,
br.set_cookie, "foo=bar")
self.assertEqual(len(cj), 0)
# normal case
br.open(url)
br.set_cookie("foo=bar")
self.assertEqual(len(cj), 1)
self.assertEqual(cj._cookies["example.com"]["/"]["foo"].value, "bar")
class ResponseTests(TestCase):
def test_set_response(self):
import copy
from mechanize import response_seek_wrapper
br = TestBrowser()
url = "http://example.com/"
html = """<html><body><a href="spam">click me</a></body></html>"""
headers = {"content-type": "text/html"}
r = response_seek_wrapper(MockResponse(url, html, headers))
br.add_handler(make_mock_handler()([("http_open", r)]))
r = br.open(url)
self.assertEqual(r.read(), html)
r.seek(0)
self.assertEqual(copy.copy(r).read(), html)
self.assertEqual(list(br.links())[0].url, "spam")
newhtml = """<html><body><a href="eggs">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, "spam")
r.seek(0)
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "eggs")
def test_str(self):
import mimetools
from mechanize import _response
br = TestBrowser()
self.assertEqual(
str(br),
"<TestBrowser (not visiting a URL)>"
)
fp = StringIO.StringIO('<html><form name="f"><input /></form></html>')
headers = mimetools.Message(
StringIO.StringIO("Content-type: text/html"))
response = _response.response_seek_wrapper(
_response.closeable_response(
fp, headers, "http://example.com/", 200, "OK"))
br.set_response(response)
self.assertEqual(
str(br),
"<TestBrowser visiting http://example.com/>"
)
br.select_form(nr=0)
self.assertEqual(
str(br),
"""\
<TestBrowser visiting http://example.com/
selected form:
<f GET http://example.com/ application/x-www-form-urlencoded
<TextControl(<None>=)>>
>""")
if __name__ == "__main__":
import unittest
unittest.main()
|
Almad/Mechanize
|
678db8989df3213efe9b39cbd2da88c04e3e533f
|
* Add a functional test for Refresh
* Update docstrings / comments re local functional testing server
|
diff --git a/functional_tests.py b/functional_tests.py
index 93e02f9..483dc88 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,421 +1,433 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
-import os, sys
+import os, sys, urllib
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
-# XXX
-# document twisted.web2 install (I forgot how I did it -- reinstall!)
-# implement remaining stuff used by functional_tests.py
-# in twisted-localserver.py:
-# - 302 followed by 404 response
-# - helper cgi script for cookies &c.
-
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
+ def test_refresh(self):
+ def refresh_request(seconds):
+ uri = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
+ val = urllib.quote_plus('%d; url="%s"' % (seconds, self.uri))
+ return uri + ("?refresh=%s" % val)
+ r = self.browser.open(refresh_request(5))
+ self.assertEqual(r.geturl(), self.uri)
+ # Refresh with pause > 30 seconds is ignored by default (these long
+ # refreshes tend to be there only because the website owner wants you
+ # to see the latest news, or whatever -- they're not essential to the
+ # operation of the site, and not really useful or appropriate when
+ # scraping).
+ refresh_uri = refresh_request(60)
+ r = self.browser.open(refresh_uri)
+ self.assertEqual(r.geturl(), refresh_uri)
+ # allow long refreshes (note we don't actually wait 60 seconds by default)
+ self.browser.set_handle_refresh(True, max_time=None)
+ r = self.browser.open(refresh_request(60))
+ self.assertEqual(r.geturl(), self.uri)
+
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_local_file(self):
# Since the file: URL scheme is not well standardised, Browser has a
# special method to open files by name, for convenience:
br = mechanize.Browser()
response = br.open_local_file("mechanize/_mechanize.py")
self.assert_("def open_local_file(self, filename):" in
response.get_data())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = urljoin(self.uri, "bits/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
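# A hedged retrieve() sketch (URL and filename are assumptions) mirroring
# test_urlretrieve() above; the reporthook is called with (block_nr,
# block_size, total_size) once per block, which is what CallbackVerifier
# asserts on:
#
# opener = mechanize.build_opener()
# filename, headers = opener.retrieve("http://example.com/", "out.html",
#                                     lambda nr, size, total: None)
# opener.close()  # only a retrieve() *without* an explicit filename writes
#                 # to a temporary file, which close() then removes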
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
- Note not all the functional tests use the local server yet
- -- some currently always access the internet regardless of
- this option and the --uri option.
+ If this option doesn't work on Windows/Mac, somebody please
+ tell me about it, or I'll never find out...
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/test-tools/cookietest.cgi b/test-tools/cookietest.cgi
index b66d20c..c171904 100755
--- a/test-tools/cookietest.cgi
+++ b/test-tools/cookietest.cgi
@@ -1,42 +1,54 @@
#!/usr/bin/python
# -*-python-*-
# This is used by functional_tests.py
+#import cgitb; cgitb.enable()
+
print "Content-Type: text/html"
print "Set-Cookie: foo=bar\n"
-import sys, os, string, cgi, Cookie
+import sys, os, string, cgi, Cookie, urllib
+from xml.sax import saxutils
from types import ListType
print "<html><head><title>Cookies and form submission parameters</title>"
cookie = Cookie.SimpleCookie()
cookieHdr = os.environ.get("HTTP_COOKIE", "")
cookie.load(cookieHdr)
-if not cookie.has_key("foo"):
+form = cgi.FieldStorage()
+refresh_value = None
+if form.has_key("refresh"):
+ refresh = form["refresh"]
+ if not isinstance(refresh, ListType):
+ refresh_value = refresh.value
+if refresh_value is not None:
+ print '<meta http-equiv="refresh" content=%s>' % (
+ saxutils.quoteattr(urllib.unquote_plus(refresh_value)))
+elif not cookie.has_key("foo"):
print '<meta http-equiv="refresh" content="5">'
+
print "</head>"
print "<p>Received cookies:</p>"
print "<pre>"
print cgi.escape(os.environ.get("HTTP_COOKIE", ""))
print "</pre>"
if cookie.has_key("foo"):
print "Your browser supports cookies!"
print "<p>Referer:</p>"
print "<pre>"
print cgi.escape(os.environ.get("HTTP_REFERER", ""))
print "</pre>"
-form = cgi.FieldStorage()
print "<p>Received parameters:</p>"
print "<pre>"
for k in form.keys():
v = form[k]
if isinstance(v, ListType):
vs = []
for item in v:
vs.append(item.value)
text = string.join(vs, ", ")
else:
text = v.value
print "%s: %s" % (cgi.escape(k), cgi.escape(text))
print "</pre></html>"
diff --git a/test-tools/testprogram.py b/test-tools/testprogram.py
index ee5ef75..fa12ff8 100644
--- a/test-tools/testprogram.py
+++ b/test-tools/testprogram.py
@@ -1,314 +1,314 @@
"""Local server and cgitb support."""
import cgitb
#cgitb.enable(format="text")
import sys, os, traceback, logging, glob, time
from unittest import defaultTestLoader, TextTestRunner, TestSuite, TestCase, \
_TextTestResult
class ServerProcess:
def __init__(self, filename, name=None):
if filename is None:
raise ValueError('filename arg must be a string')
if name is None:
name = filename
self.name = os.path.basename(name)
self.port = None
self.report_hook = lambda msg: None
self._filename = filename
def _get_args(self):
"""Return list of command line arguments.
Override me.
"""
return []
def start(self):
self.report_hook("starting (%s)" % (
[sys.executable, self._filename]+self._get_args()))
self._pid = os.spawnv(
os.P_NOWAIT,
sys.executable,
[sys.executable, self._filename]+self._get_args())
self.report_hook("waiting for startup")
self._wait_for_startup()
self.report_hook("running")
def _wait_for_startup(self):
import socket
def connect():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(1.0)
try:
sock.connect(('127.0.0.1', self.port))
finally:
sock.close()
backoff(connect, (socket.error,))
def stop(self):
"""Kill process (forcefully if necessary)."""
if os.name == 'nt':
kill_windows(self._pid, self.report_hook)
else:
kill_posix(self._pid, self.report_hook)
def backoff(func, errors,
initial_timeout=1., hard_timeout=60., factor=1.2):
starttime = time.time()
timeout = initial_timeout
while time.time() < starttime + hard_timeout - 0.01:
try:
func()
except errors, exc:
time.sleep(timeout)
timeout *= factor
hard_limit = hard_timeout - (time.time() - starttime)
timeout = min(timeout, hard_limit)
else:
break
else:
raise
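# A hedged sketch (not in the original file) of reusing backoff() beyond
# _wait_for_startup() above: retry a flaky callable until it stops raising
# or the hard timeout expires.  The URL and port are illustrative
# assumptions.
#
# import urllib2
# def _poll():
#     urllib2.urlopen("http://127.0.0.1:8000/").close()
# backoff(_poll, (urllib2.URLError,), initial_timeout=0.5, hard_timeout=10.)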
def kill_windows(handle, report_hook):
try:
import win32api
except ImportError:
import ctypes
ctypes.windll.kernel32.TerminateProcess(int(handle), -1)
else:
win32api.TerminateProcess(int(handle), -1)
def kill_posix(pid, report_hook):
    import signal, errno
os.kill(pid, signal.SIGTERM)
timeout = 10.
starttime = time.time()
report_hook("waiting for exit")
def do_nothing(*args):
pass
old_handler = signal.signal(signal.SIGCHLD, do_nothing)
try:
while time.time() < starttime + timeout - 0.01:
            exited_pid, sts = os.waitpid(pid, os.WNOHANG)
            if exited_pid != 0:
# exited, or error
break
newtimeout = timeout - (time.time() - starttime) - 1.
time.sleep(newtimeout) # wait for signal
else:
report_hook("forcefully killing")
try:
os.kill(pid, signal.SIGKILL)
except OSError, exc:
if exc.errno != errno.ECHILD:
raise
finally:
signal.signal(signal.SIGCHLD, old_handler)
class TwistedServerProcess(ServerProcess):
def __init__(self, name=None):
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
path = os.path.join(top_level_dir, "test-tools/twisted-localserver.py")
ServerProcess.__init__(self, path, name)
def _get_args(self):
return [str(self.port)]
class CgitbTextResult(_TextTestResult):
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
if exctype is test.failureException:
# Skip assert*() traceback levels
length = self._count_relevant_tb_levels(tb)
return cgitb.text((exctype, value, tb))
return cgitb.text((exctype, value, tb))
class CgitbTextTestRunner(TextTestRunner):
def _makeResult(self):
return CgitbTextResult(self.stream, self.descriptions, self.verbosity)
def add_uri_attribute_to_test_cases(suite, uri):
for test in suite._tests:
if isinstance(test, TestCase):
test.uri = uri
else:
try:
add_uri_attribute_to_test_cases(test, uri)
except AttributeError:
pass
class TestProgram:
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = """\
Usage: %(progName)s [options] [test] [...]
Note not all the functional tests take note of the --uri argument yet --
some currently always access the internet regardless of the --uri and
--run-local-server options.
Options:
-l, --run-local-server
Run a local Twisted HTTP server for the functional
tests. You need Twisted installed for this to work.
The server is run on the port given in the --uri
option. If --run-local-server is given but no --uri is
given, http://127.0.0.1:8000 is used as the base URI.
Also, if you're on Windows and don't have pywin32 or
ctypes installed, this option won't work, and you'll
have to start up test-tools/localserver.py manually.
--uri=URL Base URI for functional tests
(test.py does not access the network, unless you tell
it to run module functional_tests;
functional_tests.py does access the network)
e.g. --uri=http://127.0.0.1:8000/
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
The following options are only available through test.py (you can still run the
functional tests through test.py, just give 'functional_tests' as the module
name to run):
-u Skip plain (non-doctest) unittests
-d Skip doctests
-c Run coverage (requires coverage.py, seems buggy)
-t Display tracebacks using cgitb's text mode
"""
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s test_cookies
- run module 'test_cookies'
%(progName)s test_cookies.CookieTests
- run all 'test*' test methods in test_cookies.CookieTests
%(progName)s test_cookies.CookieTests.test_expires
- run test_cookies.CookieTests.test_expires
%(progName)s functional_tests
- run the functional tests
%(progName)s -l functional_tests
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
"""
def __init__(self, moduleNames, localServerProcess, defaultTest=None,
argv=None, testRunner=None, testLoader=defaultTestLoader,
- defaultUri="http://wwwsearch.sf.net/",
+ defaultUri="http://wwwsearch.sourceforge.net/",
usageExamples=USAGE_EXAMPLES,
):
self.modules = []
for moduleName in moduleNames:
module = __import__(moduleName)
for part in moduleName.split('.')[1:]:
module = getattr(module, part)
self.modules.append(module)
self.uri = None
self._defaultUri = defaultUri
if argv is None:
argv = sys.argv
self.verbosity = 1
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.usageExamples = usageExamples
self.runLocalServer = False
self.parseArgs(argv)
if self.runLocalServer:
import urllib
from mechanize._rfc3986 import urlsplit
authority = urlsplit(self.uri)[1]
host, port = urllib.splitport(authority)
if port is None:
port = "80"
try:
port = int(port)
except:
self.usageExit("port in --uri value must be an integer "
"(try --uri=http://127.0.0.1:8000/)")
self._serverProcess = localServerProcess
def report(msg):
print "%s: %s" % (localServerProcess.name, msg)
localServerProcess.port = port
localServerProcess.report_hook = report
def usageExit(self, msg=None):
if msg: print msg
print (self.USAGE + self.usageExamples) % self.__dict__
sys.exit(2)
def parseArgs(self, argv):
import getopt
try:
options, args = getopt.getopt(
argv[1:],
'hHvql',
['help','verbose','quiet', 'uri=', 'run-local-server'],
)
uri = None
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('--uri',):
uri = value
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-l', '--run-local-server'):
self.runLocalServer = True
if uri is None:
if self.runLocalServer:
uri = "http://127.0.0.1:8000"
else:
uri = self._defaultUri
self.uri = uri
if len(args) == 0 and self.defaultTest is None:
suite = TestSuite()
for module in self.modules:
test = self.testLoader.loadTestsFromModule(module)
suite.addTest(test)
self.test = suite
add_uri_attribute_to_test_cases(self.test, self.uri)
return
if len(args) > 0:
self.testNames = args
else:
self.testNames = (self.defaultTest,)
self.createTests()
add_uri_attribute_to_test_cases(self.test, self.uri)
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
self.test = self.testLoader.loadTestsFromNames(self.testNames)
def runTests(self):
if self.testRunner is None:
self.testRunner = TextTestRunner(verbosity=self.verbosity)
if self.runLocalServer:
self._serverProcess.start()
try:
result = self.testRunner.run(self.test)
finally:
if self.runLocalServer:
self._serverProcess.stop()
return result
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
index 8ca8a66..730b9db 100644
--- a/test-tools/twisted-localserver.py
+++ b/test-tools/twisted-localserver.py
@@ -1,124 +1,124 @@
#!/usr/bin/env python
"""
%prog port
e.g. %prog 8000
Runs a local server to point the mechanize functional tests at. Example:
python test-tools/twisted-localserver.py 8042
python functional_tests.py --uri=http://localhost:8042/
-You need Twisted XXX version to run it:
+You need twisted.web2 to run it. On ubuntu feisty, you can install it like so:
-XXX installation instructions
+sudo apt-get install python-twisted-web2
"""
import sys, re
from twisted.web2 import server, http, resource, channel, \
http_headers, responsecode, twcgi
from twisted.internet import reactor
def html(title=None):
f = open("README.html", "r")
html = f.read()
if title is not None:
html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
return html
MECHANIZE_HTML = html()
ROOT_HTML = html("Python bits")
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
REFERER_TEST_HTML = """\
<html>
<head>
<title>mechanize Referer (sic) test page</title>
</head>
<body>
<p>This page exists to test the Referer functionality of <a href="/mechanize">mechanize</a>.
<p><a href="/cgi-bin/cookietest.cgi">Here</a> is a link to a page that displays the Referer header.
</body>
</html>"""
class Page(resource.Resource):
addSlash = True
content_type = http_headers.MimeType("text", "html")
def render(self, ctx):
return http.Response(
responsecode.OK,
{"content-type": self.content_type},
self.text)
def _make_page(parent, name, text,
content_type="text/html",
leaf=False):
page = Page()
page.text = text
base_type, specific_type = content_type.split("/")
page.content_type = http_headers.MimeType(base_type, specific_type)
page.addSlash = not leaf
setattr(parent, "child_"+name, page)
return page
def make_page(parent, name, text,
content_type="text/html"):
return _make_page(parent, name, text, content_type, leaf=False)
def make_leaf_page(parent, name, text,
content_type="text/html"):
return _make_page(parent, name, text, content_type, leaf=True)
def make_redirect(parent, name, location_relative_ref):
redirect = resource.RedirectResource(path=location_relative_ref)
setattr(parent, "child_"+name, redirect)
return redirect
def make_cgi_bin(parent, name, dir_name):
cgi_bin = twcgi.CGIDirectory(dir_name)
setattr(parent, "child_"+name, cgi_bin)
return cgi_bin
def main():
root = Page()
root.text = ROOT_HTML
make_page(root, "mechanize", MECHANIZE_HTML)
make_leaf_page(root, "robots.txt",
"User-Agent: *\nDisallow: /norobots",
"text/plain")
make_leaf_page(root, "robots", "Hello, robots.", "text/plain")
make_leaf_page(root, "norobots", "Hello, non-robots.", "text/plain")
bits = make_page(root, "bits", "GeneralFAQ.html")
make_leaf_page(bits, "cctest2.txt",
"Hello ClientCookie functional test suite.",
"text/plain")
make_leaf_page(bits, "referertest.html", REFERER_TEST_HTML)
make_leaf_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
make_redirect(root, "redirected", "/doesnotexist")
make_cgi_bin(root, "cgi-bin", "test-tools")
site = server.Site(root)
reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
reactor.run()
main()
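# Orientation note (an assumption about layout, derived only from main()
# above): the resource tree the local server exposes.
#   /                  -> ROOT_HTML ("Python bits")
#   /mechanize/        -> MECHANIZE_HTML
#   /robots.txt        -> "User-Agent: *\nDisallow: /norobots"
#   /bits/referertest.html, /bits/mechanize_reload_test.html, /bits/cctest2.txt
#   /redirected        -> redirect to /doesnotexist
#   /cgi-bin/          -> CGI scripts served from test-tools/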
|
Almad/Mechanize
|
24686d51cf4ebb0871e95eded456547d7554775e
|
Change default mechanize.UserAgent (hence mechanize.Browser) Refresh behaviour: * Don't follow Refreshes > 30 seconds * honor_time is now False by default
|
diff --git a/mechanize/_useragent.py b/mechanize/_useragent.py
index 272605c..0bec126 100644
--- a/mechanize/_useragent.py
+++ b/mechanize/_useragent.py
@@ -1,347 +1,347 @@
"""Convenient HTTP UserAgent class.
This is a subclass of urllib2.OpenerDirector.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import sys, warnings, urllib2
import _opener
import _urllib2
import _auth
import _gzip
import _response
class UserAgentBase(_opener.OpenerDirector):
"""Convenient user-agent class.
Do not use .add_handler() to add a handler for something already dealt with
by this code.
The only reason at present for the distinction between UserAgent and
UserAgentBase is so that classes that depend on .seek()able responses
(e.g. mechanize.Browser) can inherit from UserAgentBase. The subclass
UserAgent exposes a .set_seekable_responses() method that allows switching
off the adding of a .seek() method to responses.
Public attributes:
addheaders: list of (name, value) pairs specifying headers to send with
every request, unless they are overridden in the Request instance.
>>> ua = UserAgentBase()
>>> ua.addheaders = [
... ("User-agent", "Mozilla/5.0 (compatible)"),
... ("From", "[email protected]")]
"""
handler_classes = {
# scheme handlers
"http": _urllib2.HTTPHandler,
# CacheFTPHandler is buggy, at least in 2.3, so we don't use it
"ftp": _urllib2.FTPHandler,
"file": _urllib2.FileHandler,
# other handlers
"_unknown": _urllib2.UnknownHandler,
# HTTP{S,}Handler depend on HTTPErrorProcessor too
"_http_error": _urllib2.HTTPErrorProcessor,
"_http_request_upgrade": _urllib2.HTTPRequestUpgradeProcessor,
"_http_default_error": _urllib2.HTTPDefaultErrorHandler,
# feature handlers
"_basicauth": _urllib2.HTTPBasicAuthHandler,
"_digestauth": _urllib2.HTTPDigestAuthHandler,
"_redirect": _urllib2.HTTPRedirectHandler,
"_cookies": _urllib2.HTTPCookieProcessor,
"_refresh": _urllib2.HTTPRefreshProcessor,
"_equiv": _urllib2.HTTPEquivProcessor,
"_proxy": _urllib2.ProxyHandler,
"_proxy_basicauth": _urllib2.ProxyBasicAuthHandler,
"_proxy_digestauth": _urllib2.ProxyDigestAuthHandler,
"_robots": _urllib2.HTTPRobotRulesProcessor,
"_gzip": _gzip.HTTPGzipProcessor, # experimental!
# debug handlers
"_debug_redirect": _urllib2.HTTPRedirectDebugProcessor,
"_debug_response_body": _urllib2.HTTPResponseDebugProcessor,
}
default_schemes = ["http", "ftp", "file"]
default_others = ["_unknown", "_http_error", "_http_request_upgrade",
"_http_default_error",
]
default_features = ["_redirect", "_cookies",
"_refresh", "_equiv",
"_basicauth", "_digestauth",
"_proxy", "_proxy_basicauth", "_proxy_digestauth",
"_robots",
]
if hasattr(_urllib2, 'HTTPSHandler'):
handler_classes["https"] = _urllib2.HTTPSHandler
default_schemes.append("https")
def __init__(self):
_opener.OpenerDirector.__init__(self)
ua_handlers = self._ua_handlers = {}
for scheme in (self.default_schemes+
self.default_others+
self.default_features):
klass = self.handler_classes[scheme]
ua_handlers[scheme] = klass()
for handler in ua_handlers.itervalues():
self.add_handler(handler)
# Yuck.
# Ensure correct default constructor args were passed to
# HTTPRefreshProcessor and HTTPEquivProcessor.
if "_refresh" in ua_handlers:
self.set_handle_refresh(True)
if "_equiv" in ua_handlers:
self.set_handle_equiv(True)
# Ensure default password managers are installed.
pm = ppm = None
if "_basicauth" in ua_handlers or "_digestauth" in ua_handlers:
pm = _urllib2.HTTPPasswordMgrWithDefaultRealm()
if ("_proxy_basicauth" in ua_handlers or
"_proxy_digestauth" in ua_handlers):
ppm = _auth.HTTPProxyPasswordMgr()
self.set_password_manager(pm)
self.set_proxy_password_manager(ppm)
# set default certificate manager
if "https" in ua_handlers:
cm = _urllib2.HTTPSClientCertMgr()
self.set_client_cert_manager(cm)
def close(self):
_opener.OpenerDirector.close(self)
self._ua_handlers = None
# XXX
## def set_timeout(self, timeout):
## self._timeout = timeout
## def set_http_connection_cache(self, conn_cache):
## self._http_conn_cache = conn_cache
## def set_ftp_connection_cache(self, conn_cache):
## # XXX ATM, FTP has cache as part of handler; should it be separate?
## self._ftp_conn_cache = conn_cache
def set_handled_schemes(self, schemes):
"""Set sequence of URL scheme (protocol) strings.
For example: ua.set_handled_schemes(["http", "ftp"])
If this fails (with ValueError) because you've passed an unknown
scheme, the set of handled schemes will not be changed.
"""
want = {}
for scheme in schemes:
if scheme.startswith("_"):
raise ValueError("not a scheme '%s'" % scheme)
if scheme not in self.handler_classes:
raise ValueError("unknown scheme '%s'")
want[scheme] = None
# get rid of scheme handlers we don't want
for scheme, oldhandler in self._ua_handlers.items():
if scheme.startswith("_"): continue # not a scheme handler
if scheme not in want:
self._replace_handler(scheme, None)
else:
del want[scheme] # already got it
# add the scheme handlers that are missing
for scheme in want.keys():
self._set_handler(scheme, True)
def set_cookiejar(self, cookiejar):
"""Set a mechanize.CookieJar, or None."""
self._set_handler("_cookies", obj=cookiejar)
# XXX could use Greg Stein's httpx for some of this instead?
# or httplib2??
def set_proxies(self, proxies):
"""Set a dictionary mapping URL scheme to proxy specification, or None.
e.g. {"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com"}
"""
self._set_handler("_proxy", obj=proxies)
def add_password(self, url, user, password, realm=None):
self._password_manager.add_password(realm, url, user, password)
def add_proxy_password(self, user, password, hostport=None, realm=None):
self._proxy_password_manager.add_password(
realm, hostport, user, password)
def add_client_certificate(self, url, key_file, cert_file):
"""Add an SSL client certificate, for HTTPS client auth.
key_file and cert_file must be filenames of the key and certificate
files, in PEM format. You can use e.g. OpenSSL to convert a p12 (PKCS
12) file to PEM format:
openssl pkcs12 -clcerts -nokeys -in cert.p12 -out cert.pem
openssl pkcs12 -nocerts -in cert.p12 -out key.pem
Note that client certificate password input is very inflexible ATM. At
the moment this seems to be console only, which is presumably the
default behaviour of libopenssl. In future mechanize may support
third-party libraries that (I assume) allow more options here.
"""
self._client_cert_manager.add_key_cert(url, key_file, cert_file)
# the following are rarely useful -- use add_password / add_proxy_password
# instead
def set_password_manager(self, password_manager):
"""Set a mechanize.HTTPPasswordMgrWithDefaultRealm, or None."""
self._password_manager = password_manager
self._set_handler("_basicauth", obj=password_manager)
self._set_handler("_digestauth", obj=password_manager)
def set_proxy_password_manager(self, password_manager):
"""Set a mechanize.HTTPProxyPasswordMgr, or None."""
self._proxy_password_manager = password_manager
self._set_handler("_proxy_basicauth", obj=password_manager)
self._set_handler("_proxy_digestauth", obj=password_manager)
def set_client_cert_manager(self, cert_manager):
"""Set a mechanize.HTTPClientCertMgr, or None."""
self._client_cert_manager = cert_manager
handler = self._ua_handlers["https"]
handler.client_cert_manager = cert_manager
# these methods all take a boolean parameter
def set_handle_robots(self, handle):
"""Set whether to observe rules from robots.txt."""
self._set_handler("_robots", handle)
def set_handle_redirect(self, handle):
"""Set whether to handle HTTP 30x redirections."""
self._set_handler("_redirect", handle)
- def set_handle_refresh(self, handle, max_time=None, honor_time=True):
+ def set_handle_refresh(self, handle, max_time=30.0, honor_time=False):
"""Set whether to handle HTTP Refresh headers."""
self._set_handler("_refresh", handle, constructor_kwds=
{"max_time": max_time, "honor_time": honor_time})
def set_handle_equiv(self, handle, head_parser_class=None):
"""Set whether to treat HTML http-equiv headers like HTTP headers.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
if head_parser_class is not None:
constructor_kwds = {"head_parser_class": head_parser_class}
else:
constructor_kwds={}
self._set_handler("_equiv", handle, constructor_kwds=constructor_kwds)
def set_handle_gzip(self, handle):
"""Handle gzip transfer encoding.
"""
if handle:
warnings.warn(
"gzip transfer encoding is experimental!", stacklevel=2)
self._set_handler("_gzip", handle)
def set_debug_redirects(self, handle):
"""Log information about HTTP redirects (including refreshes).
Logging is performed using module logging. The logger name is
"mechanize.http_redirects". To actually print some debug output,
eg:
import sys, logging
logger = logging.getLogger("mechanize.http_redirects")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
Other logger names relevant to this module:
"mechanize.http_responses"
"mechanize.cookies" (or "cookielib" if running Python 2.4)
To turn on everything:
import sys, logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
"""
self._set_handler("_debug_redirect", handle)
def set_debug_responses(self, handle):
"""Log HTTP response bodies.
See docstring for .set_debug_redirects() for details of logging.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
self._set_handler("_debug_response_body", handle)
def set_debug_http(self, handle):
"""Print HTTP headers to sys.stdout."""
level = int(bool(handle))
for scheme in "http", "https":
h = self._ua_handlers.get(scheme)
if h is not None:
h.set_http_debuglevel(level)
def _set_handler(self, name, handle=None, obj=None,
constructor_args=(), constructor_kwds={}):
if handle is None:
handle = obj is not None
if handle:
handler_class = self.handler_classes[name]
if obj is not None:
newhandler = handler_class(obj)
else:
newhandler = handler_class(*constructor_args, **constructor_kwds)
else:
newhandler = None
self._replace_handler(name, newhandler)
def _replace_handler(self, name, newhandler=None):
# first, if handler was previously added, remove it
if name is not None:
handler = self._ua_handlers.get(name)
if handler:
try:
self.handlers.remove(handler)
except ValueError:
pass
# then add the replacement, if any
if newhandler is not None:
self.add_handler(newhandler)
self._ua_handlers[name] = newhandler
class UserAgent(UserAgentBase):
def __init__(self):
UserAgentBase.__init__(self)
self._seekable = False
def set_seekable_responses(self, handle):
"""Make response objects .seek()able."""
self._seekable = bool(handle)
def open(self, fullurl, data=None):
if self._seekable:
def bound_open(fullurl, data=None):
return UserAgentBase.open(self, fullurl, data)
response = _opener.wrapped_open(
bound_open, _response.seek_wrapped_response, fullurl, data)
else:
response = UserAgentBase.open(self, fullurl, data)
return response
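# Illustrative sketch (an assumption, not repository code): what the changed
# set_handle_refresh defaults above mean in practice. With no keyword
# arguments it now installs an HTTPRefreshProcessor that skips Refresh
# headers requesting a pause longer than 30 seconds and never sleeps.
ua = UserAgentBase()
ua.set_handle_refresh(True)                  # max_time=30.0, honor_time=False
ua.set_handle_refresh(True, max_time=None)   # follow any Refresh, still no sleep
ua.set_handle_refresh(True, max_time=None, honor_time=True)  # old behaviour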
|
Almad/Mechanize
|
5a06e199ab9702cef62f5beef31fe294179b0129
|
* Log skipped Refreshes * Add some more Refresh tests
|
diff --git a/mechanize/_http.py b/mechanize/_http.py
index d73f3f4..5075da4 100644
--- a/mechanize/_http.py
+++ b/mechanize/_http.py
@@ -31,699 +31,703 @@ debug = logging.getLogger("mechanize").debug
## def urllib2_str(self):
## return 'HTTP Error %s: %s (%s)' % (
## self.code, self.msg, self.geturl())
## urllib2.HTTPError.__str__ = urllib2_str
CHUNK = 1024 # size of chunks fed to HTML HEAD parser, in bytes
DEFAULT_ENCODING = 'latin-1'
# This adds "refresh" to the list of redirectables and provides a redirection
# algorithm that doesn't go into a loop in the presence of cookies
# (Python 2.4 has this new algorithm, 2.3 doesn't).
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
# Implementation notes:
# To avoid the server sending us into an infinite loop, the request
# object needs to track what URLs we have already seen. Do this by
# adding a handler-specific attribute to the Request object. The value
# of the dict is used to count the number of times the same URL has
# been visited. This is needed because visiting the same URL twice
# does not necessarily imply a loop, thanks to state introduced by
# cookies.
# Always unhandled redirection codes:
# 300 Multiple Choices: should not handle this here.
# 304 Not Modified: no need to handle here: only of interest to caches
# that do conditional GETs
# 305 Use Proxy: probably not worth dealing with here
# 306 Unused: what was this for in the previous versions of protocol??
def redirect_request(self, newurl, req, fp, code, msg, headers):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a redirection
response is received. If a redirection should take place, return a
new Request to allow http_error_30x to perform the redirect;
otherwise, return None to indicate that an HTTPError should be
raised.
"""
if code in (301, 302, 303, "refresh") or \
(code == 307 and not req.has_data()):
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# XXX really refresh redirections should be visiting; tricky to
# fix, so this will wait until post-stable release
new = Request(newurl,
headers=req.headers,
origin_req_host=req.get_origin_req_host(),
unverifiable=True,
visit=False,
)
new._origin_req = getattr(req, "_origin_req", req)
return new
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if headers.has_key('location'):
newurl = headers.getheaders('location')[0]
elif headers.has_key('uri'):
newurl = headers.getheaders('uri')[0]
else:
return
newurl = _rfc3986.clean_url(newurl, "latin-1")
newurl = _rfc3986.urljoin(req.get_full_url(), newurl)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(newurl, req, fp, code, msg, headers)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new)
http_error_301 = http_error_303 = http_error_307 = http_error_302
http_error_refresh = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
# XXX would self.reset() work, instead of raising this exception?
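# Illustrative sketch (an assumption, not repository code): the loop-detection
# bookkeeping used by http_error_302 above, isolated from the handler.
def note_redirect(redirect_dict, newurl, max_repeats=4, max_redirections=10):
    """Record a visit to newurl, raising if a redirect loop is apparent."""
    if (redirect_dict.get(newurl, 0) >= max_repeats or
        len(redirect_dict) >= max_redirections):
        raise ValueError("apparently in a redirect loop: %s" % newurl)
    redirect_dict[newurl] = redirect_dict.get(newurl, 0) + 1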
class EndOfHeadError(Exception): pass
class AbstractHeadParser:
# only these elements are allowed in or before HEAD of document
head_elems = ("html", "head",
"title", "base",
"script", "style", "meta", "link", "object")
_entitydefs = htmlentitydefs.name2codepoint
_encoding = DEFAULT_ENCODING
def __init__(self):
self.http_equiv = []
def start_meta(self, attrs):
http_equiv = content = None
for key, value in attrs:
if key == "http-equiv":
http_equiv = self.unescape_attr_if_required(value)
elif key == "content":
content = self.unescape_attr_if_required(value)
if http_equiv is not None and content is not None:
self.http_equiv.append((http_equiv, content))
def end_head(self):
raise EndOfHeadError()
def handle_entityref(self, name):
#debug("%s", name)
self.handle_data(unescape(
'&%s;' % name, self._entitydefs, self._encoding))
def handle_charref(self, name):
#debug("%s", name)
self.handle_data(unescape_charref(name, self._encoding))
def unescape_attr(self, name):
#debug("%s", name)
return unescape(name, self._entitydefs, self._encoding)
def unescape_attrs(self, attrs):
#debug("%s", attrs)
escaped_attrs = {}
for key, val in attrs.items():
escaped_attrs[key] = self.unescape_attr(val)
return escaped_attrs
def unknown_entityref(self, ref):
self.handle_data("&%s;" % ref)
def unknown_charref(self, ref):
self.handle_data("&#%s;" % ref)
try:
import HTMLParser
except ImportError:
pass
else:
class XHTMLCompatibleHeadParser(AbstractHeadParser,
HTMLParser.HTMLParser):
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'start_' + tag)
except AttributeError:
try:
method = getattr(self, 'do_' + tag)
except AttributeError:
pass # unknown tag
else:
method(attrs)
else:
method(attrs)
def handle_endtag(self, tag):
if tag not in self.head_elems:
raise EndOfHeadError()
try:
method = getattr(self, 'end_' + tag)
except AttributeError:
pass # unknown tag
else:
method()
def unescape(self, name):
# Use the entitydefs passed into constructor, not
# HTMLParser.HTMLParser's entitydefs.
return self.unescape_attr(name)
def unescape_attr_if_required(self, name):
return name # HTMLParser.HTMLParser already did it
class HeadParser(AbstractHeadParser, sgmllib.SGMLParser):
def _not_called(self):
assert False
def __init__(self):
sgmllib.SGMLParser.__init__(self)
AbstractHeadParser.__init__(self)
def handle_starttag(self, tag, method, attrs):
if tag not in self.head_elems:
raise EndOfHeadError()
if tag == "meta":
method(attrs)
def unknown_starttag(self, tag, attrs):
self.handle_starttag(tag, self._not_called, attrs)
def handle_endtag(self, tag, method):
if tag in self.head_elems:
method()
else:
raise EndOfHeadError()
def unescape_attr_if_required(self, name):
return self.unescape_attr(name)
def parse_head(fileobj, parser):
"""Return a list of key, value pairs."""
while 1:
data = fileobj.read(CHUNK)
try:
parser.feed(data)
except EndOfHeadError:
break
if len(data) != CHUNK:
# this should only happen if there is no HTML body, or if
# CHUNK is big
break
return parser.http_equiv
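# Illustrative usage sketch in the module's own doctest style (an assumption,
# not repository code); HeadParser is defined further down in this file.
# >>> from StringIO import StringIO
# >>> parse_head(StringIO('<head><meta http-equiv="refresh" '
# ...                     'content="1; http://example.com/"></head>'),
# ...            HeadParser())
# [('refresh', '1; http://example.com/')]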
class HTTPEquivProcessor(BaseHandler):
"""Append META HTTP-EQUIV headers to regular HTTP headers."""
handler_order = 300 # before handlers that look at HTTP headers
def __init__(self, head_parser_class=HeadParser,
i_want_broken_xhtml_support=False,
):
self.head_parser_class = head_parser_class
self._allow_xhtml = i_want_broken_xhtml_support
def http_response(self, request, response):
if not hasattr(response, "seek"):
response = response_seek_wrapper(response)
http_message = response.info()
url = response.geturl()
ct_hdrs = http_message.getheaders("content-type")
if is_html(ct_hdrs, url, self._allow_xhtml):
try:
try:
html_headers = parse_head(response, self.head_parser_class())
finally:
response.seek(0)
except (HTMLParser.HTMLParseError,
sgmllib.SGMLParseError):
pass
else:
for hdr, val in html_headers:
# add a header
http_message.dict[hdr.lower()] = val
text = hdr + ": " + val
for line in text.split("\n"):
http_message.headers.append(line + "\n")
return response
https_response = http_response
class HTTPCookieProcessor(BaseHandler):
"""Handle HTTP cookies.
Public attributes:
cookiejar: CookieJar instance
"""
def __init__(self, cookiejar=None):
if cookiejar is None:
cookiejar = CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
try:
import robotparser
except ImportError:
pass
else:
class MechanizeRobotFileParser(robotparser.RobotFileParser):
def __init__(self, url='', opener=None):
import _opener
robotparser.RobotFileParser.__init__(self, url)
self._opener = opener
def set_opener(self, opener=None):
if opener is None:
opener = _opener.OpenerDirector()
self._opener = opener
def read(self):
"""Reads the robots.txt URL and feeds it to the parser."""
if self._opener is None:
self.set_opener()
req = Request(self.url, unverifiable=True, visit=False)
try:
f = self._opener.open(req)
except HTTPError, f:
pass
except (IOError, socket.error, OSError), exc:
robotparser._debug("ignoring error opening %r: %s" %
(self.url, exc))
return
lines = []
line = f.readline()
while line:
lines.append(line.strip())
line = f.readline()
status = f.code
if status == 401 or status == 403:
self.disallow_all = True
robotparser._debug("disallow all")
elif status >= 400:
self.allow_all = True
robotparser._debug("allow all")
elif status == 200 and lines:
robotparser._debug("parse lines")
self.parse(lines)
class RobotExclusionError(urllib2.HTTPError):
def __init__(self, request, *args):
apply(urllib2.HTTPError.__init__, (self,)+args)
self.request = request
class HTTPRobotRulesProcessor(BaseHandler):
# before redirections, after everything else
handler_order = 800
try:
from httplib import HTTPMessage
except:
from mimetools import Message
http_response_class = Message
else:
http_response_class = HTTPMessage
def __init__(self, rfp_class=MechanizeRobotFileParser):
self.rfp_class = rfp_class
self.rfp = None
self._host = None
def http_request(self, request):
scheme = request.get_type()
if scheme not in ["http", "https"]:
# robots exclusion only applies to HTTP
return request
if request.get_selector() == "/robots.txt":
# /robots.txt is always OK to fetch
return request
host = request.get_host()
# robots.txt requests don't need to be allowed by robots.txt :-)
origin_req = getattr(request, "_origin_req", None)
if (origin_req is not None and
origin_req.get_selector() == "/robots.txt" and
origin_req.get_host() == host
):
return request
if host != self._host:
self.rfp = self.rfp_class()
try:
self.rfp.set_opener(self.parent)
except AttributeError:
debug("%r instance does not support set_opener" %
self.rfp.__class__)
self.rfp.set_url(scheme+"://"+host+"/robots.txt")
self.rfp.read()
self._host = host
ua = request.get_header("User-agent", "")
if self.rfp.can_fetch(ua, request.get_full_url()):
return request
else:
# XXX This should really have raised URLError. Too late now...
msg = "request disallowed by robots.txt"
raise RobotExclusionError(
request,
request.get_full_url(),
403, msg,
self.http_response_class(StringIO()), StringIO(msg))
https_request = http_request
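# Illustrative sketch (an assumption, not repository code): the early-exit
# decisions http_request above makes before consulting robots.txt at all.
def robots_check_needed(request):
    if request.get_type() not in ["http", "https"]:
        return False  # robots exclusion only applies to HTTP(S)
    if request.get_selector() == "/robots.txt":
        return False  # fetching robots.txt itself is always allowed
    return True       # otherwise, ask the (per-host cached) parser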
class HTTPRefererProcessor(BaseHandler):
"""Add Referer header to requests.
This only makes sense if you use each RefererProcessor for a single
chain of requests only (so, for example, if you use a single
HTTPRefererProcessor to fetch a series of URLs extracted from a single
page, this will break).
There's a proper implementation of this in mechanize.Browser.
"""
def __init__(self):
self.referer = None
def http_request(self, request):
if ((self.referer is not None) and
not request.has_header("Referer")):
request.add_unredirected_header("Referer", self.referer)
return request
def http_response(self, request, response):
self.referer = response.geturl()
return response
https_request = http_request
https_response = http_response
def clean_refresh_url(url):
# e.g. Firefox 1.5 does (something like) this
if ((url.startswith('"') and url.endswith('"')) or
(url.startswith("'") and url.endswith("'"))):
url = url[1:-1]
return _rfc3986.clean_url(url, "latin-1") # XXX encoding
def parse_refresh_header(refresh):
"""
>>> parse_refresh_header("1; url=http://example.com/")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1; url='http://example.com/'")
(1.0, 'http://example.com/')
>>> parse_refresh_header("1")
(1.0, None)
>>> parse_refresh_header("blah")
Traceback (most recent call last):
ValueError: invalid literal for float(): blah
"""
ii = refresh.find(";")
if ii != -1:
pause, newurl_spec = float(refresh[:ii]), refresh[ii+1:]
jj = newurl_spec.find("=")
key = None
if jj != -1:
key, newurl = newurl_spec[:jj], newurl_spec[jj+1:]
newurl = clean_refresh_url(newurl)
if key is None or key.strip().lower() != "url":
raise ValueError()
else:
pause, newurl = float(refresh), None
return pause, newurl
class HTTPRefreshProcessor(BaseHandler):
"""Perform HTTP Refresh redirections.
Note that if a non-200 HTTP code has occurred (for example, a 30x
redirect), this processor will do nothing.
By default, only zero-time Refresh headers are redirected. Use the
max_time attribute / constructor argument to allow Refresh with longer
pauses. Use the honor_time attribute / constructor argument to control
whether the requested pause is honoured (with a time.sleep()) or
skipped in favour of immediate redirection.
Public attributes:
max_time: see above
honor_time: see above
"""
handler_order = 1000
def __init__(self, max_time=0, honor_time=True):
self.max_time = max_time
self.honor_time = honor_time
+ self._sleep = time.sleep
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code == 200 and hdrs.has_key("refresh"):
refresh = hdrs.getheaders("refresh")[0]
try:
pause, newurl = parse_refresh_header(refresh)
except ValueError:
debug("bad Refresh header: %r" % refresh)
return response
+
if newurl is None:
newurl = response.geturl()
if (self.max_time is None) or (pause <= self.max_time):
if pause > 1E-3 and self.honor_time:
- time.sleep(pause)
+ self._sleep(pause)
hdrs["location"] = newurl
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response,
"refresh", msg, hdrs)
+ else:
+ debug("Refresh header ignored: %r" % refresh)
return response
https_response = http_response
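# Illustrative sketch (an assumption, not repository code): the decision the
# processor above makes for a parsed Refresh header, written to match the
# test_refresh_honor_time expectations added in test/test_urllib2.py below.
def refresh_action(pause, max_time=0, honor_time=True):
    """Return (follow_redirect, seconds_to_sleep)."""
    if max_time is not None and pause > max_time:
        return False, 0       # pause too long: log and ignore the Refresh
    if honor_time and pause > 1E-3:
        return True, pause    # follow, after sleeping as requested
    return True, 0            # follow immediately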
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses.
The purpose of this handler is to allow other response processors a
look-in by removing the call to parent.error() from
AbstractHTTPHandler.
For non-200 error codes, this just passes the job on to the
Handler.<proto>_error_<code> methods, via the OpenerDirector.error
method. Eventually, urllib2.HTTPDefaultErrorHandler will raise an
HTTPError if no other handler handles the error.
"""
handler_order = 1000 # after all other processors
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
if code != 200:
# hardcoded http is NOT a bug
response = self.parent.error(
"http", request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
# why these error methods took the code, msg, headers args in the first
# place rather than a response object, I don't know, but to avoid
# multiple wrapping, we're discarding them
if isinstance(fp, urllib2.HTTPError):
response = fp
else:
response = urllib2.HTTPError(
req.get_full_url(), code, msg, hdrs, fp)
assert code == response.code
assert msg == response.msg
assert hdrs == response.hdrs
raise response
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
scheme, sel = urllib.splittype(request.get_selector())
sel_host, sel_path = urllib.splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host or host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
h = http_class(host) # will parse host:port
h.set_debuglevel(self._debuglevel)
headers = dict(req.headers)
headers.update(req.unredirected_hdrs)
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
[(name.title(), val) for name, val in headers.items()])
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
r = h.getresponse()
except socket.error, err: # XXX what error?
raise URLError(err)
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r)
resp = closeable_response(fp, r.msg, req.get_full_url(),
r.status, r.reason)
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSConnectionFactory:
def __init__(self, key_file, cert_file):
self._key_file = key_file
self._cert_file = cert_file
def __call__(self, hostport):
return httplib.HTTPSConnection(
hostport,
key_file=self._key_file, cert_file=self._cert_file)
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, client_cert_manager=None):
AbstractHTTPHandler.__init__(self)
self.client_cert_manager = client_cert_manager
def https_open(self, req):
if self.client_cert_manager is not None:
key_file, cert_file = self.client_cert_manager.find_key_cert(
req.get_full_url())
conn_factory = HTTPSConnectionFactory(key_file, cert_file)
else:
conn_factory = httplib.HTTPSConnection
return self.do_open(conn_factory, req)
https_request = AbstractHTTPHandler.do_request_
diff --git a/test/test_urllib2.py b/test/test_urllib2.py
index dc25410..58bfea6 100644
--- a/test/test_urllib2.py
+++ b/test/test_urllib2.py
@@ -410,871 +410,913 @@ def sanepathname2url(path):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class MockRobotFileParserClass:
def __init__(self):
self.calls = []
self._can_fetch = True
def clear(self):
self.calls = []
def __call__(self):
self.calls.append("__call__")
return self
def set_url(self, url):
self.calls.append(("set_url", url))
def set_opener(self, opener):
self.calls.append(("set_opener", opener))
def read(self):
self.calls.append("read")
def can_fetch(self, ua, url):
self.calls.append(("can_fetch", ua, url))
return self._can_fetch
class MockPasswordManager:
def add_password(self, realm, uri, user, password):
self.realm = realm
self.url = uri
self.user = user
self.password = password
def find_user_password(self, realm, authuri):
self.target_realm = realm
self.target_url = authuri
return self.user, self.password
class HandlerTests(unittest.TestCase):
if hasattr(sys, "version_info") and sys.version_info > (2, 1, 3, "final", 0):
def test_ftp(self):
import ftplib, socket
data = "rheum rhaponicum"
h = NullFTPHandler(data)
o = h.parent = MockOpener()
for url, host, port, type_, dirs, filename, mimetype in [
("ftp://localhost/foo/bar/baz.html",
"localhost", ftplib.FTP_PORT, "I",
["foo", "bar"], "baz.html", "text/html"),
# XXXX Bug: FTPHandler tries to gethostbyname "localhost:80",
# with the port still there.
#("ftp://localhost:80/foo/bar/",
# "localhost", 80, "D",
# ["foo", "bar"], "", None),
# XXXX bug: second use of splitattr() in FTPHandler should be
# splitvalue()
#("ftp://localhost/baz.gif;type=a",
# "localhost", ftplib.FTP_PORT, "A",
# [], "baz.gif", "image/gif"),
]:
r = h.ftp_open(Request(url))
# ftp authentication not yet implemented by FTPHandler
self.assert_(h.user == h.passwd == "")
self.assert_(h.host == socket.gethostbyname(host))
self.assert_(h.port == port)
self.assert_(h.dirs == dirs)
self.assert_(h.ftpwrapper.filename == filename)
self.assert_(h.ftpwrapper.filetype == type_)
headers = r.info()
self.assert_(headers["Content-type"] == mimetype)
self.assert_(int(headers["Content-length"]) == len(data))
def test_file(self):
import time, rfc822, socket
h = mechanize.FileHandler()
o = h.parent = MockOpener()
#TESTFN = test_support.TESTFN
TESTFN = "test.txt"
urlpath = sanepathname2url(os.path.abspath(TESTFN))
towrite = "hello, world\n"
try:
fqdn = socket.gethostbyname(socket.gethostname())
except socket.gaierror:
fqdn = "localhost"
for url in [
"file://localhost%s" % urlpath,
"file://%s" % urlpath,
"file://%s%s" % (socket.gethostbyname('localhost'), urlpath),
"file://%s%s" % (fqdn, urlpath)
]:
f = open(TESTFN, "wb")
try:
try:
f.write(towrite)
finally:
f.close()
r = h.file_open(Request(url))
try:
data = r.read()
headers = r.info()
newurl = r.geturl()
finally:
r.close()
stats = os.stat(TESTFN)
modified = rfc822.formatdate(stats.st_mtime)
finally:
os.remove(TESTFN)
self.assertEqual(data, towrite)
self.assertEqual(headers["Content-type"], "text/plain")
self.assertEqual(headers["Content-length"], "13")
self.assertEqual(headers["Last-modified"], modified)
for url in [
"file://localhost:80%s" % urlpath,
# XXXX bug: these fail with socket.gaierror, should be URLError
## "file://%s:80%s/%s" % (socket.gethostbyname('localhost'),
## os.getcwd(), TESTFN),
## "file://somerandomhost.ontheinternet.com%s/%s" %
## (os.getcwd(), TESTFN),
]:
try:
f = open(TESTFN, "wb")
try:
f.write(towrite)
finally:
f.close()
self.assertRaises(mechanize.URLError,
h.file_open, Request(url))
finally:
os.remove(TESTFN)
h = mechanize.FileHandler()
o = h.parent = MockOpener()
# XXXX why does // mean ftp (and /// mean not ftp!), and where
# is file: scheme specified? I think this is really a bug, and
# what was intended was to distinguish between URLs like:
# file:/blah.txt (a file)
# file://localhost/blah.txt (a file)
# file:///blah.txt (a file)
# file://ftp.example.com/blah.txt (an ftp URL)
for url, ftp in [
("file://ftp.example.com//foo.txt", True),
("file://ftp.example.com///foo.txt", False),
# XXXX bug: fails with OSError, should be URLError
("file://ftp.example.com/foo.txt", False),
]:
req = Request(url)
try:
h.file_open(req)
# XXXX remove OSError when bug fixed
except (mechanize.URLError, OSError):
self.assert_(not ftp)
else:
self.assert_(o.req is req)
self.assertEqual(req.type, "ftp")
def test_http(self):
h = AbstractHTTPHandler()
o = h.parent = MockOpener()
url = "http://example.com/"
for method, data in [("GET", None), ("POST", "blah")]:
req = Request(url, data, {"Foo": "bar"})
req.add_unredirected_header("Spam", "eggs")
http = MockHTTPClass()
r = h.do_open(http, req)
# result attributes
r.read; r.readline # wrapped MockFile methods
r.info; r.geturl # addinfourl methods
self.assertEqual((r.code, r.msg), (200, "OK"))  # added from MockHTTPClass.getreply()
hdrs = r.info()
hdrs.get; hdrs.has_key # r.info() gives dict from .getreply()
self.assert_(r.geturl() == url)
self.assert_(http.host == "example.com")
self.assert_(http.level == 0)
self.assert_(http.method == method)
self.assert_(http.selector == "/")
http.req_headers.sort()
self.assert_(http.req_headers == [
("Connection", "close"),
("Foo", "bar"), ("Spam", "eggs")])
self.assert_(http.data == data)
# check socket.error converted to URLError
http.raise_on_endheaders = True
self.assertRaises(mechanize.URLError, h.do_open, http, req)
# check adding of standard headers
o.addheaders = [("Spam", "eggs")]
for data in "", None: # POST, GET
req = Request("http://example.com/", data)
r = MockResponse(200, "OK", {}, "")
newreq = h.do_request_(req)
if data is None: # GET
self.assert_(not req.unredirected_hdrs.has_key("Content-length"))
self.assert_(not req.unredirected_hdrs.has_key("Content-type"))
else: # POST
# No longer true, due to workaround for buggy httplib
# in Python versions < 2.4:
#self.assert_(req.unredirected_hdrs["Content-length"] == "0")
self.assert_(req.unredirected_hdrs["Content-type"] ==
"application/x-www-form-urlencoded")
# XXX the details of Host could be better tested
self.assert_(req.unredirected_hdrs["Host"] == "example.com")
self.assert_(req.unredirected_hdrs["Spam"] == "eggs")
# don't clobber existing headers
req.add_unredirected_header("Content-length", "foo")
req.add_unredirected_header("Content-type", "bar")
req.add_unredirected_header("Host", "baz")
req.add_unredirected_header("Spam", "foo")
newreq = h.do_request_(req)
self.assert_(req.unredirected_hdrs["Content-length"] == "foo")
self.assert_(req.unredirected_hdrs["Content-type"] == "bar")
self.assert_(req.unredirected_hdrs["Host"] == "baz")
self.assert_(req.unredirected_hdrs["Spam"] == "foo")
def test_request_upgrade(self):
import urllib2
new_req_class = hasattr(urllib2.Request, "has_header")
h = HTTPRequestUpgradeProcessor()
o = h.parent = MockOpener()
# urllib2.Request gets upgraded, unless it's the new Request
# class from 2.4
req = urllib2.Request("http://example.com/")
newreq = h.http_request(req)
if new_req_class:
self.assert_(newreq is req)
else:
self.assert_(newreq is not req)
if new_req_class:
self.assert_(newreq.__class__ is not Request)
else:
self.assert_(newreq.__class__ is Request)
# ClientCookie._urllib2_support.Request doesn't get upgraded
req = Request("http://example.com/")
newreq = h.http_request(req)
self.assert_(newreq is req)
self.assert_(newreq.__class__ is Request)
def test_referer(self):
h = HTTPRefererProcessor()
o = h.parent = MockOpener()
# normal case
url = "http://example.com/"
req = Request(url)
r = MockResponse(200, "OK", {}, "", url)
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(h.referer == url)
newreq = h.http_request(req)
self.assert_(req is newreq)
self.assert_(req.unredirected_hdrs["Referer"] == url)
# don't clobber existing Referer
ref = "http://set.by.user.com/"
req.add_unredirected_header("Referer", ref)
newreq = h.http_request(req)
self.assert_(req is newreq)
self.assert_(req.unredirected_hdrs["Referer"] == ref)
def test_errors(self):
from mechanize import _response
h = HTTPErrorProcessor()
o = h.parent = MockOpener()
req = Request("http://example.com")
# 200 OK is passed through
r = _response.test_response()
newr = h.http_response(req, r)
self.assert_(r is newr)
self.assert_(not hasattr(o, "proto")) # o.error not called
# anything else calls o.error (and MockOpener returns None, here)
r = _response.test_response(code=201, msg="Created")
self.assert_(h.http_response(req, r) is None)
self.assert_(o.proto == "http") # o.error called
self.assert_(o.args == (req, r, 201, "Created", AlwaysEqual()))
def test_raise_http_errors(self):
# HTTPDefaultErrorHandler should raise HTTPError if no error handler
# handled the error response
from mechanize import _response
h = mechanize.HTTPDefaultErrorHandler()
url = "http://example.com"; code = 500; msg = "Error"
request = mechanize.Request(url)
response = _response.test_response(url=url, code=code, msg=msg)
# case 1. it's not an HTTPError
try:
h.http_error_default(
request, response, code, msg, response.info())
except mechanize.HTTPError, exc:
self.assert_(exc is not response)
self.assert_(exc.fp is response)
else:
self.assert_(False)
# case 2. response object is already an HTTPError, so just re-raise it
error = mechanize.HTTPError(
url, code, msg, "fake headers", response)
try:
h.http_error_default(
request, error, code, msg, error.info())
except mechanize.HTTPError, exc:
self.assert_(exc is error)
else:
self.assert_(False)
def test_robots(self):
# XXX useragent
try:
import robotparser
except ImportError:
return # skip test
else:
from mechanize import HTTPRobotRulesProcessor
opener = OpenerDirector()
rfpc = MockRobotFileParserClass()
h = HTTPRobotRulesProcessor(rfpc)
opener.add_handler(h)
url = "http://example.com:80/foo/bar.html"
req = Request(url)
# first time: initialise and set up robots.txt parser before checking
# whether OK to fetch URL
h.http_request(req)
self.assert_(rfpc.calls == [
"__call__",
("set_opener", opener),
("set_url", "http://example.com:80/robots.txt"),
"read",
("can_fetch", "", url),
])
# second time: just use existing parser
rfpc.clear()
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
("can_fetch", "", url),
])
# different URL on same server: same again
rfpc.clear()
url = "http://example.com:80/blah.html"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
("can_fetch", "", url),
])
# disallowed URL
rfpc.clear()
rfpc._can_fetch = False
url = "http://example.com:80/rhubarb.html"
req = Request(url)
try:
h.http_request(req)
except mechanize.HTTPError, e:
self.assert_(e.request == req)
self.assert_(e.code == 403)
# new host: reload robots.txt (even though the host and port are
# unchanged, we treat this as a new host because
# "example.com" != "example.com:80")
rfpc.clear()
rfpc._can_fetch = True
url = "http://example.com/rhubarb.html"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
"__call__",
("set_opener", opener),
("set_url", "http://example.com/robots.txt"),
"read",
("can_fetch", "", url),
])
# https url -> should fetch robots.txt from https url too
rfpc.clear()
url = "https://example.org/rhubarb.html"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [
"__call__",
("set_opener", opener),
("set_url", "https://example.org/robots.txt"),
"read",
("can_fetch", "", url),
])
# non-HTTP URL -> ignore robots.txt
rfpc.clear()
url = "ftp://example.com/"
req = Request(url)
h.http_request(req)
self.assert_(rfpc.calls == [])
def test_redirected_robots_txt(self):
# redirected robots.txt fetch shouldn't result in another attempted
# robots.txt fetch to check the redirection is allowed!
import mechanize
from mechanize import build_opener, HTTPHandler, \
HTTPDefaultErrorHandler, HTTPRedirectHandler, \
HTTPRobotRulesProcessor
class MockHTTPHandler(mechanize.BaseHandler):
def __init__(self):
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if req.get_full_url() == "http://example.com/robots.txt":
hdr = "Location: http://example.com/en/robots.txt\r\n\r\n"
msg = mimetools.Message(StringIO(hdr))
return self.parent.error(
"http", req, test_response(), 302, "Blah", msg)
else:
return test_response("Allow: *", [], req.get_full_url())
hh = MockHTTPHandler()
hdeh = HTTPDefaultErrorHandler()
hrh = HTTPRedirectHandler()
rh = HTTPRobotRulesProcessor()
o = build_test_opener(hh, hdeh, hrh, rh)
o.open("http://example.com/")
self.assertEqual([req.get_full_url() for req in hh.requests],
["http://example.com/robots.txt",
"http://example.com/en/robots.txt",
"http://example.com/",
])
def test_cookies(self):
cj = MockCookieJar()
h = HTTPCookieProcessor(cj)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = MockResponse(200, "OK", {}, "")
newreq = h.http_request(req)
self.assert_(cj.ach_req is req is newreq)
self.assert_(req.origin_req_host == "example.com")
self.assert_(cj.ach_u == False)
newr = h.http_response(req, r)
self.assert_(cj.ec_req is req)
self.assert_(cj.ec_r is r is newr)
self.assert_(cj.ec_u == False)
def test_seekable(self):
hide_deprecations()
try:
h = SeekableProcessor()
finally:
reset_deprecations()
o = h.parent = MockOpener()
req = mechanize.Request("http://example.com/")
class MockUnseekableResponse:
code = 200
msg = "OK"
def info(self): pass
def geturl(self): return ""
r = MockUnseekableResponse()
newr = h.any_response(req, r)
self.assert_(not hasattr(r, "seek"))
self.assert_(hasattr(newr, "seek"))
def test_http_equiv(self):
from mechanize import _response
h = HTTPEquivProcessor()
o = h.parent = MockOpener()
data = ('<html><head>'
'<meta http-equiv="Refresh" content="spam&eggs">'
'</head></html>'
)
headers = [("Foo", "Bar"),
("Content-type", "text/html"),
("Refresh", "blah"),
]
url = "http://example.com/"
req = Request(url)
r = _response.make_response(data, headers, url, 200, "OK")
newr = h.http_response(req, r)
new_headers = newr.info()
self.assertEqual(new_headers["Foo"], "Bar")
self.assertEqual(new_headers["Refresh"], "spam&eggs")
self.assertEqual(new_headers.getheaders("Refresh"),
["blah", "spam&eggs"])
def test_refresh(self):
# XXX test processor constructor optional args
h = HTTPRefreshProcessor(max_time=None, honor_time=False)
for val, valid in [
('0; url="http://example.com/foo/"', True),
("2", True),
# in the past, this failed with UnboundLocalError
('0; "http://example.com/foo/"', False),
]:
o = h.parent = MockOpener()
req = Request("http://example.com/")
headers = http_message({"refresh": val})
r = MockResponse(200, "OK", headers, "", "http://example.com/")
newr = h.http_response(req, r)
if valid:
self.assertEqual(o.proto, "http")
self.assertEqual(o.args, (req, r, "refresh", "OK", headers))
+ def test_refresh_honor_time(self):
+ class SleepTester:
+ def __init__(self, test, seconds):
+ self._test = test
+ if seconds == 0:
+ seconds = None # don't expect a sleep for 0 seconds
+ self._expected = seconds
+ self._got = None
+ def sleep(self, seconds):
+ self._got = seconds
+ def verify(self):
+ self._test.assertEqual(self._expected, self._got)
+ class Opener:
+ called = False
+ def error(self, *args, **kwds):
+ self.called = True
+ def test(rp, header, refresh_after):
+ expect_refresh = refresh_after is not None
+ opener = Opener()
+ rp.parent = opener
+ st = SleepTester(self, refresh_after)
+ rp._sleep = st.sleep
+ rp.http_response(Request("http://example.com"),
+ test_response(headers=[("Refresh", header)]),
+ )
+ self.assertEqual(expect_refresh, opener.called)
+ st.verify()
+
+ # by default, only zero-time refreshes are honoured
+ test(HTTPRefreshProcessor(), "0", 0)
+ test(HTTPRefreshProcessor(), "2", None)
+
+ # if requested, more than zero seconds are allowed
+ test(HTTPRefreshProcessor(max_time=None), "2", 2)
+ test(HTTPRefreshProcessor(max_time=30), "2", 2)
+
+ # no sleep if we don't "honor_time"
+ test(HTTPRefreshProcessor(max_time=30, honor_time=False), "2", 0)
+
+ # request for too-long wait before refreshing --> no refresh occurs
+ test(HTTPRefreshProcessor(max_time=30), "60", None)
+
def test_redirect(self):
from_url = "http://example.com/a.html"
to_url = "http://example.com/b.html"
h = HTTPRedirectHandler()
o = h.parent = MockOpener()
# ordinary redirect behaviour
for code in 301, 302, 303, 307, "refresh":
for data in None, "blah\nblah\n":
method = getattr(h, "http_error_%s" % code)
req = Request(from_url, data)
req.add_header("Nonsense", "viking=withhold")
req.add_unredirected_header("Spam", "spam")
req.origin_req_host = "example.com" # XXX
try:
method(req, MockFile(), code, "Blah",
http_message({"location": to_url}))
except mechanize.HTTPError:
# 307 in response to POST requires user OK
self.assert_(code == 307 and data is not None)
self.assert_(o.req.get_full_url() == to_url)
try:
self.assert_(o.req.get_method() == "GET")
except AttributeError:
self.assert_(not o.req.has_data())
self.assert_(o.req.headers["Nonsense"] == "viking=withhold")
self.assert_(not o.req.headers.has_key("Spam"))
self.assert_(not o.req.unredirected_hdrs.has_key("Spam"))
# loop detection
def redirect(h, req, url=to_url):
h.http_error_302(req, MockFile(), 302, "Blah",
http_message({"location": url}))
# Note that the *original* request shares the same record of
# redirections with the sub-requests caused by the redirections.
# detect infinite loop redirect of a URL to itself
req = Request(from_url)
req.origin_req_host = "example.com"
count = 0
try:
while 1:
redirect(h, req, "http://example.com/")
count = count + 1
except mechanize.HTTPError:
# don't stop until max_repeats, because cookies may introduce state
self.assert_(count == HTTPRedirectHandler.max_repeats)
# detect endless non-repeating chain of redirects
req = Request(from_url)
req.origin_req_host = "example.com"
count = 0
try:
while 1:
redirect(h, req, "http://example.com/%d" % count)
count = count + 1
except mechanize.HTTPError:
self.assert_(count == HTTPRedirectHandler.max_redirections)
def test_redirect_bad_uri(self):
# bad URIs should be cleaned up before redirection
from mechanize._response import test_html_response
from_url = "http://example.com/a.html"
bad_to_url = "http://example.com/b. |html"
good_to_url = "http://example.com/b.%20%7Chtml"
h = HTTPRedirectHandler()
o = h.parent = MockOpener()
req = Request(from_url)
h.http_error_302(req, test_html_response(), 302, "Blah",
http_message({"location": bad_to_url}),
)
self.assertEqual(o.req.get_full_url(), good_to_url)
def test_refresh_bad_uri(self):
# bad URIs should be cleaned up before redirection
from mechanize._response import test_html_response
from_url = "http://example.com/a.html"
bad_to_url = "http://example.com/b. |html"
good_to_url = "http://example.com/b.%20%7Chtml"
h = HTTPRefreshProcessor(max_time=None, honor_time=False)
o = h.parent = MockOpener()
req = Request("http://example.com/")
r = test_html_response(
headers=[("refresh", '0; url="%s"' % bad_to_url)])
newr = h.http_response(req, r)
headers = o.args[-1]
self.assertEqual(headers["Location"], good_to_url)
def test_cookie_redirect(self):
# cookies shouldn't leak into redirected requests
import mechanize
from mechanize import CookieJar, build_opener, HTTPHandler, \
HTTPCookieProcessor, HTTPError, HTTPDefaultErrorHandler, \
HTTPRedirectHandler
from test_cookies import interact_netscape
cj = CookieJar()
interact_netscape(cj, "http://www.example.com/", "spam=eggs")
hh = MockHTTPHandler(302, "Location: http://www.cracker.com/\r\n\r\n")
hdeh = HTTPDefaultErrorHandler()
hrh = HTTPRedirectHandler()
cp = HTTPCookieProcessor(cj)
o = build_test_opener(hh, hdeh, hrh, cp)
o.open("http://www.example.com/")
self.assert_(not hh.req.has_header("Cookie"))
def test_proxy(self):
o = OpenerDirector()
ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
o.add_handler(ph)
meth_spec = [
[("http_open", "return response")]
]
handlers = add_ordered_mock_handlers(o, meth_spec)
o._maybe_reindex_handlers()
req = Request("http://acme.example.com/")
self.assertEqual(req.get_host(), "acme.example.com")
r = o.open(req)
self.assertEqual(req.get_host(), "proxy.example.com:3128")
self.assertEqual([(handlers[0], "http_open")],
[tup[0:2] for tup in o.calls])
def test_basic_auth(self):
opener = OpenerDirector()
password_manager = MockPasswordManager()
auth_handler = mechanize.HTTPBasicAuthHandler(password_manager)
realm = "ACME Widget Store"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
def test_proxy_basic_auth(self):
opener = OpenerDirector()
ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
opener.add_handler(ph)
password_manager = MockPasswordManager()
auth_handler = mechanize.ProxyBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
407, 'Proxy-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(auth_handler)
opener.add_handler(http_handler)
self._test_basic_auth(opener, auth_handler, "Proxy-authorization",
realm, http_handler, password_manager,
"http://acme.example.com:3128/protected",
"proxy.example.com:3128",
)
def test_basic_and_digest_auth_handlers(self):
# HTTPDigestAuthHandler threw an exception if it couldn't handle a 40*
# response (http://python.org/sf/1479302), where it should instead
# return None to allow another handler (especially
# HTTPBasicAuthHandler) to handle the response.
# Also (http://python.org/sf/1479302, RFC 2617 section 1.2), we must
# try digest first (since it's the strongest auth scheme), so we record
# order of calls here to check digest comes first:
class RecordingOpenerDirector(OpenerDirector):
def __init__(self):
OpenerDirector.__init__(self)
self.recorded = []
def record(self, info):
self.recorded.append(info)
class TestDigestAuthHandler(mechanize.HTTPDigestAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("digest")
mechanize.HTTPDigestAuthHandler.http_error_401(self,
*args, **kwds)
class TestBasicAuthHandler(mechanize.HTTPBasicAuthHandler):
def http_error_401(self, *args, **kwds):
self.parent.record("basic")
mechanize.HTTPBasicAuthHandler.http_error_401(self,
*args, **kwds)
opener = RecordingOpenerDirector()
password_manager = MockPasswordManager()
digest_handler = TestDigestAuthHandler(password_manager)
basic_handler = TestBasicAuthHandler(password_manager)
realm = "ACME Networks"
http_handler = MockHTTPHandler(
401, 'WWW-Authenticate: Basic realm="%s"\r\n\r\n' % realm)
opener.add_handler(digest_handler)
opener.add_handler(basic_handler)
opener.add_handler(http_handler)
opener._maybe_reindex_handlers()
# check basic auth isn't blocked by digest handler failing
self._test_basic_auth(opener, basic_handler, "Authorization",
realm, http_handler, password_manager,
"http://acme.example.com/protected",
"http://acme.example.com/protected",
)
# check digest was tried before basic (twice, because
# _test_basic_auth called .open() twice)
self.assertEqual(opener.recorded, ["digest", "basic"]*2)
def _test_basic_auth(self, opener, auth_handler, auth_header,
realm, http_handler, password_manager,
request_url, protected_url):
import base64, httplib
user, password = "wile", "coyote"
# .add_password() fed through to password manager
auth_handler.add_password(realm, request_url, user, password)
self.assertEqual(realm, password_manager.realm)
self.assertEqual(request_url, password_manager.url)
self.assertEqual(user, password_manager.user)
self.assertEqual(password, password_manager.password)
r = opener.open(request_url)
# should have asked the password manager for the username/password
self.assertEqual(password_manager.target_realm, realm)
self.assertEqual(password_manager.target_url, protected_url)
# expect one request without authorization, then one with
self.assertEqual(len(http_handler.requests), 2)
self.failIf(http_handler.requests[0].has_header(auth_header))
userpass = '%s:%s' % (user, password)
auth_hdr_value = 'Basic '+base64.encodestring(userpass).strip()
self.assertEqual(http_handler.requests[1].get_header(auth_header),
auth_hdr_value)
# if the password manager can't find a password, the handler won't
# handle the HTTP auth error
password_manager.user = password_manager.password = None
http_handler.reset()
r = opener.open(request_url)
self.assertEqual(len(http_handler.requests), 1)
self.failIf(http_handler.requests[0].has_header(auth_header))
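# Editor's note: a minimal sketch (not part of the original test
# suite) of how the expected Authorization value checked above is
# built; "wile"/"coyote" are just the fixture credentials.
def example_basic_auth_header(user, password):
    import base64
    userpass = '%s:%s' % (user, password)
    # encodestring() appends a trailing newline, hence the strip()
    return 'Basic ' + base64.encodestring(userpass).strip()
# example_basic_auth_header("wile", "coyote") == 'Basic d2lsZTpjb3lvdGU='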
class HeadParserTests(unittest.TestCase):
def test(self):
# XXX XHTML
from mechanize import HeadParser
htmls = [
("""<meta http-equiv="refresh" content="1; http://example.com/">
""",
[("refresh", "1; http://example.com/")]
),
("""
<html><head>
<meta http-equiv="refresh" content="1; http://example.com/">
<meta name="spam" content="eggs">
<meta http-equiv="foo" content="bar">
<p> <!-- p is not allowed in head, so parsing should stop here-->
<meta http-equiv="moo" content="cow">
</html>
""",
[("refresh", "1; http://example.com/"), ("foo", "bar")]),
("""<meta http-equiv="refresh">
""",
[])
]
for html, result in htmls:
self.assertEqual(parse_head(StringIO.StringIO(html), HeadParser()), result)
def build_test_opener(*handler_instances):
opener = OpenerDirector()
for h in handler_instances:
opener.add_handler(h)
return opener
class MockHTTPHandler(mechanize.BaseHandler):
# useful for testing redirections and auth
# sends supplied headers and code as first response
# sends 200 OK as second response
def __init__(self, code, headers):
self.code = code
self.headers = headers
self.reset()
def reset(self):
self._count = 0
self.requests = []
def http_open(self, req):
import mimetools, httplib, copy
from StringIO import StringIO
self.requests.append(copy.deepcopy(req))
if self._count == 0:
self._count = self._count + 1
msg = mimetools.Message(StringIO(self.headers))
return self.parent.error(
"http", req, test_response(), self.code, "Blah", msg)
else:
self.req = req
return test_response("", [], req.get_full_url())
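# Editor's note: a hedged usage sketch for MockHTTPHandler (the URL
# and realm are illustrative): wired into an opener, the first
# request gets the canned error response, the second a 200 OK.
# handler = MockHTTPHandler(
#     401, 'WWW-Authenticate: Basic realm="x"\r\n\r\n')
# opener = build_test_opener(handler)
# opener.open("http://example.com/")  # first hits the 401 branch
# len(handler.requests)  # -> number of requests seen so far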
class MyHTTPHandler(HTTPHandler): pass
class FooHandler(mechanize.BaseHandler):
def foo_open(self): pass
class BarHandler(mechanize.BaseHandler):
def bar_open(self): pass
class A:
def a(self): pass
class B(A):
def a(self): pass
def b(self): pass
class C(A):
def c(self): pass
class D(C, B):
def a(self): pass
def d(self): pass
class FunctionTests(unittest.TestCase):
def test_build_opener(self):
o = build_opener(FooHandler, BarHandler)
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# can take a mix of classes and instances
o = build_opener(FooHandler, BarHandler())
self.opener_has_handler(o, FooHandler)
self.opener_has_handler(o, BarHandler)
# subclasses of default handlers override default handlers
o = build_opener(MyHTTPHandler)
self.opener_has_handler(o, MyHTTPHandler)
# a particular case of overriding: default handlers can be passed
# in explicitly
o = build_opener()
self.opener_has_handler(o, HTTPHandler)
o = build_opener(HTTPHandler)
self.opener_has_handler(o, HTTPHandler)
o = build_opener(HTTPHandler())
self.opener_has_handler(o, HTTPHandler)
def opener_has_handler(self, opener, handler_class):
for h in opener.handlers:
if h.__class__ == handler_class:
break
else:
self.assert_(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
|
Almad/Mechanize
|
9c3214bb89a384ec08b896e8173c05f901ba94eb
|
* Use SO_REUSEADDR for test server. * Raise exception if local server fails to start.
|
diff --git a/test-tools/testprogram.py b/test-tools/testprogram.py
index 627e753..ee5ef75 100644
--- a/test-tools/testprogram.py
+++ b/test-tools/testprogram.py
@@ -1,311 +1,314 @@
"""Local server and cgitb support."""
import cgitb
#cgitb.enable(format="text")
import sys, os, traceback, logging, glob, time
from unittest import defaultTestLoader, TextTestRunner, TestSuite, TestCase, \
_TextTestResult
class ServerProcess:
def __init__(self, filename, name=None):
if filename is None:
raise ValueError('filename arg must be a string')
if name is None:
name = filename
self.name = os.path.basename(name)
self.port = None
self.report_hook = lambda msg: None
self._filename = filename
def _get_args(self):
"""Return list of command line arguments.
Override me.
"""
return []
def start(self):
self.report_hook("starting (%s)" % (
[sys.executable, self._filename]+self._get_args()))
self._pid = os.spawnv(
os.P_NOWAIT,
sys.executable,
[sys.executable, self._filename]+self._get_args())
self.report_hook("waiting for startup")
self._wait_for_startup()
self.report_hook("running")
def _wait_for_startup(self):
import socket
def connect():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(1.0)
try:
sock.connect(('127.0.0.1', self.port))
finally:
sock.close()
backoff(connect, (socket.error,))
def stop(self):
"""Kill process (forcefully if necessary)."""
if os.name == 'nt':
kill_windows(self._pid, self.report_hook)
else:
kill_posix(self._pid, self.report_hook)
def backoff(func, errors,
initial_timeout=1., hard_timeout=60., factor=1.2):
starttime = time.time()
timeout = initial_timeout
while time.time() < starttime + hard_timeout - 0.01:
try:
func()
except errors, exc:
time.sleep(timeout)
timeout *= factor
hard_limit = hard_timeout - (time.time() - starttime)
timeout = min(timeout, hard_limit)
else:
break
+ else:
+ raise
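# Editor's note: with the defaults above (initial_timeout=1.,
# factor=1.2) the sleeps between retries grow roughly
# 1.0, 1.2, 1.44, 1.73, ... seconds, each clipped so the total stays
# under hard_timeout; the while/else `raise` added here re-raises the
# last caught error if no attempt ever succeeds.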
def kill_windows(handle, report_hook):
try:
import win32api
except ImportError:
import ctypes
ctypes.windll.kernel32.TerminateProcess(int(handle), -1)
else:
win32api.TerminateProcess(int(handle), -1)
def kill_posix(pid, report_hook):
import signal, errno  # errno is needed for the ECHILD check below
os.kill(pid, signal.SIGTERM)
timeout = 10.
starttime = time.time()
report_hook("waiting for exit")
def do_nothing(*args):
pass
old_handler = signal.signal(signal.SIGCHLD, do_nothing)
try:
while time.time() < starttime + timeout - 0.01:
pid, sts = os.waitpid(pid, os.WNOHANG)
if pid != 0:
# exited, or error
break
newtimeout = timeout - (time.time() - starttime) - 1.
time.sleep(newtimeout) # wait for signal
else:
report_hook("forcefully killing")
try:
os.kill(pid, signal.SIGKILL)
except OSError, exc:
if exc.errno != errno.ECHILD:
raise
finally:
signal.signal(signal.SIGCHLD, old_handler)
class TwistedServerProcess(ServerProcess):
def __init__(self, name=None):
top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
path = os.path.join(top_level_dir, "test-tools/twisted-localserver.py")
ServerProcess.__init__(self, path, name)
def _get_args(self):
return [str(self.port)]
class CgitbTextResult(_TextTestResult):
def _exc_info_to_string(self, err, test):
"""Converts a sys.exc_info()-style tuple of values into a string."""
exctype, value, tb = err
# Skip test runner traceback levels
while tb and self._is_relevant_tb_level(tb):
tb = tb.tb_next
# cgitb.text() takes no traceback-length argument, so (unlike the
# unittest base class) failures and errors are rendered identically
return cgitb.text((exctype, value, tb))
class CgitbTextTestRunner(TextTestRunner):
def _makeResult(self):
return CgitbTextResult(self.stream, self.descriptions, self.verbosity)
def add_uri_attribute_to_test_cases(suite, uri):
for test in suite._tests:
if isinstance(test, TestCase):
test.uri = uri
else:
try:
add_uri_attribute_to_test_cases(test, uri)
except AttributeError:
pass
class TestProgram:
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
USAGE = """\
Usage: %(progName)s [options] [test] [...]
Note not all the functional tests take note of the --uri argument yet --
some currently always access the internet regardless of the --uri and
--run-local-server options.
Options:
-l, --run-local-server
Run a local Twisted HTTP server for the functional
tests. You need Twisted installed for this to work.
The server is run on the port given in the --uri
option. If --run-local-server is given but no --uri is
given, http://127.0.0.1:8000 is used as the base URI.
Also, if you're on Windows and don't have pywin32 or
ctypes installed, this option won't work, and you'll
have to start up test-tools/localserver.py manually.
--uri=URL Base URI for functional tests
(test.py does not access the network, unless you tell
it to run module functional_tests;
functional_tests.py does access the network)
e.g. --uri=http://127.0.0.1:8000/
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
The following options are only available through test.py (you can still run the
functional tests through test.py, just give 'functional_tests' as the module
name to run):
-u Skip plain (non-doctest) unittests
-d Skip doctests
-c Run coverage (requires coverage.py, seems buggy)
-t Display tracebacks using cgitb's text mode
"""
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s test_cookies
- run module 'test_cookies'
%(progName)s test_cookies.CookieTests
- run all 'test*' test methods in test_cookies.CookieTests
%(progName)s test_cookies.CookieTests.test_expires
- run test_cookies.CookieTests.test_expires
%(progName)s functional_tests
- run the functional tests
%(progName)s -l functional_tests
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
"""
def __init__(self, moduleNames, localServerProcess, defaultTest=None,
argv=None, testRunner=None, testLoader=defaultTestLoader,
defaultUri="http://wwwsearch.sf.net/",
usageExamples=USAGE_EXAMPLES,
):
self.modules = []
for moduleName in moduleNames:
module = __import__(moduleName)
for part in moduleName.split('.')[1:]:
module = getattr(module, part)
self.modules.append(module)
self.uri = None
self._defaultUri = defaultUri
if argv is None:
argv = sys.argv
self.verbosity = 1
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.usageExamples = usageExamples
self.runLocalServer = False
self.parseArgs(argv)
if self.runLocalServer:
import urllib
from mechanize._rfc3986 import urlsplit
authority = urlsplit(self.uri)[1]
host, port = urllib.splitport(authority)
if port is None:
port = "80"
try:
port = int(port)
except ValueError:
self.usageExit("port in --uri value must be an integer "
"(try --uri=http://127.0.0.1:8000/)")
self._serverProcess = localServerProcess
def report(msg):
print "%s: %s" % (localServerProcess.name, msg)
localServerProcess.port = port
localServerProcess.report_hook = report
def usageExit(self, msg=None):
if msg: print msg
print (self.USAGE + self.usageExamples) % self.__dict__
sys.exit(2)
def parseArgs(self, argv):
import getopt
try:
options, args = getopt.getopt(
argv[1:],
'hHvql',
['help','verbose','quiet', 'uri=', 'run-local-server'],
)
uri = None
for opt, value in options:
if opt in ('-h','-H','--help'):
self.usageExit()
if opt in ('--uri',):
uri = value
if opt in ('-q','--quiet'):
self.verbosity = 0
if opt in ('-v','--verbose'):
self.verbosity = 2
if opt in ('-l', '--run-local-server'):
self.runLocalServer = True
if uri is None:
if self.runLocalServer:
uri = "http://127.0.0.1:8000"
else:
uri = self._defaultUri
self.uri = uri
if len(args) == 0 and self.defaultTest is None:
suite = TestSuite()
for module in self.modules:
test = self.testLoader.loadTestsFromModule(module)
suite.addTest(test)
self.test = suite
add_uri_attribute_to_test_cases(self.test, self.uri)
return
if len(args) > 0:
self.testNames = args
else:
self.testNames = (self.defaultTest,)
self.createTests()
add_uri_attribute_to_test_cases(self.test, self.uri)
except getopt.error, msg:
self.usageExit(msg)
def createTests(self):
self.test = self.testLoader.loadTestsFromNames(self.testNames)
def runTests(self):
if self.testRunner is None:
self.testRunner = TextTestRunner(verbosity=self.verbosity)
if self.runLocalServer:
self._serverProcess.start()
try:
result = self.testRunner.run(self.test)
finally:
if self.runLocalServer:
self._serverProcess.stop()
return result
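# Editor's note: a hedged sketch of driving TestProgram directly
# (module names are hypothetical, not taken from this repository):
# prog = TestProgram(["test_browser"],
#                    localServerProcess=TwistedServerProcess())
# result = prog.runTests()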
|
Almad/Mechanize
|
0d6c5ed162f521e3a382df20cff21eb60a62693a
|
Fix BeautifulSoup RobustLinksFactory (hence RobustFactory) link text parsing for case of link text containing tags ([email protected])
|
diff --git a/mechanize/_html.py b/mechanize/_html.py
index 2d562c9..90f9ded 100644
--- a/mechanize/_html.py
+++ b/mechanize/_html.py
@@ -1,607 +1,607 @@
"""HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import re, copy, htmlentitydefs
import sgmllib, HTMLParser, ClientForm
import _request
from _headersutil import split_header_words, is_html as _is_html
import _rfc3986
DEFAULT_ENCODING = "latin-1"
# the base class is purely for backwards compatibility
class ParseError(ClientForm.ParseError): pass
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
return v
return self._default_encoding
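# Editor's note: a minimal sketch (assumed header value) of the
# charset lookup EncodingFinder.encoding() performs above, using the
# same split_header_words helper imported at the top of this module.
def example_charset_from_content_type():
    ct = "text/html; charset=ISO-8859-2"
    for k, v in split_header_words([ct])[0]:
        if k == "charset":
            return v  # -> "ISO-8859-2"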
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
# idea for this argument-processing trick is from Peter Otten
class Args:
def __init__(self, args_map):
self.dictionary = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
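# Editor's note: an illustrative Link (values are made up), showing
# how absolute_url is derived from base_url via _rfc3986.urljoin:
# link = Link("http://example.com/a/", "b.html", "b", "a",
#             [("href", "b.html")])
# link.absolute_url  # -> "http://example.com/a/b.html"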
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
name = attrs.get("name")
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for eg.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying ClientForm.HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ClientForm.ParseResponse
argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
import ClientForm
self.select_default = select_default
if form_parser_class is None:
form_parser_class = ClientForm.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
import ClientForm
encoding = self.encoding
try:
forms = ClientForm.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
except ClientForm.ParseError, exc:
raise ParseError(exc)
self.global_form = forms[0]
return forms[1:]
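# Editor's note: a hedged usage sketch for FormsFactory; `response`
# is assumed to be a mechanize response object, not defined here.
# factory = FormsFactory()
# factory.set_response(response, "latin-1")
# forms = list(factory.forms())   # HTMLForm-like objects
# factory.global_form             # controls outside any FORM element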
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return p.get_text()
except sgmllib.SGMLParseError, exc:
raise ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
name, base= name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
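# Editor's note: worked examples for unescape_charref above:
# unescape_charref("65", None)       -> u"A"  (decimal reference)
# unescape_charref("x41", None)      -> u"A"  (hex reference)
# unescape_charref("x2603", "ascii") -> "&#x2603;"  (unencodable, so
#                                       the reference is rebuilt)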
# bizarre import gymnastics for bundled BeautifulSoup
import _beautifulsoup
import ClientForm
RobustFormParser, NestingRobustFormParser = ClientForm._create_bs_classes(
_beautifulsoup.BeautifulSoup, _beautifulsoup.ICantBelieveItsBeautifulSoup
)
# monkeypatch sgmllib to fix http://www.python.org/sf/803422 :-(
import sgmllib
sgmllib.charref = re.compile("&#(x?[0-9a-fA-F]+)[^0-9a-fA-F]")
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = re.compile(r"\s+")
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _beautifulsoup
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
import _beautifulsoup
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
- text = link.firstText(lambda t: True)
- if text is _beautifulsoup.Null:
+ text = link.fetchText(lambda t: True)
+ if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
- text = self.compress_re.sub(" ", text.strip())
+ text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
import ClientForm
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
import _beautifulsoup
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
return title.firstText(lambda t: True)
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
default_encoding: character encoding to use if encoding cannot be
determined (or guessed) from the response. You should turn on
HTTP-EQUIV handling if you want the best chance of getting this right
without resorting to this default. The default value of this
parameter (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set urllib2.Request class.
ClientForm.HTMLForm instances returned by .forms() will return
instances of this class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by urllib2.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over ClientForm.HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
import _beautifulsoup
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
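# Editor's note: a hedged sketch of the usual entry point for this
# module's classes -- passing a RobustFactory to mechanize.Browser
# (Browser itself appears in _mechanize.py, later in this document):
# import mechanize
# br = mechanize.Browser(factory=mechanize.RobustFactory())
# br.open("http://example.com/")
# br.title()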
diff --git a/test/test_html.doctest b/test/test_html.doctest
index 9bd1ab5..9efa7fb 100644
--- a/test/test_html.doctest
+++ b/test/test_html.doctest
@@ -1,163 +1,215 @@
>>> import mechanize
>>> from mechanize._response import test_html_response
>>> from mechanize._html import LinksFactory, FormsFactory, TitleFactory, \
... MechanizeBs, \
... RobustLinksFactory, RobustFormsFactory, RobustTitleFactory
mechanize.ParseError should be raised on parsing erroneous HTML.
For backwards compatibility, mechanize.ParseError derives from
exception classes that mechanize used to raise, prior to version
0.1.6.
>>> import sgmllib
>>> import HTMLParser
>>> import ClientForm
>>> issubclass(mechanize.ParseError, sgmllib.SGMLParseError)
True
>>> issubclass(mechanize.ParseError, HTMLParser.HTMLParseError)
True
>>> issubclass(mechanize.ParseError, ClientForm.ParseError)
True
>>> def create_response(error=True):
... extra = ""
... if error:
... extra = "<!!!>"
... html = """\
... <html>
... <head>
... <title>Title</title>
... %s
... </head>
... <body>
... <p>Hello world
... </body>
... </html>
... """ % extra
... return test_html_response(html)
>>> f = LinksFactory()
>>> f.set_response(create_response(), "http://example.com", "latin-1")
>>> list(f.links()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> f = FormsFactory()
>>> f.set_response(create_response(), "latin-1")
>>> list(f.forms()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> f = TitleFactory()
>>> f.set_response(create_response(), "latin-1")
>>> f.title() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
Accessing attributes on Factory may also raise ParseError
>>> def factory_getattr(attr_name):
... fact = mechanize.DefaultFactory()
... fact.set_response(create_response())
... getattr(fact, attr_name)
>>> factory_getattr("title") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
>>> factory_getattr("global_form") # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
BeautifulSoup ParseErrors:
XXX If I could come up with examples that break links and forms
parsing, I'd uncomment these!
>>> def create_soup(html):
... r = test_html_response(html)
... return MechanizeBs("latin-1", r.read())
#>>> f = RobustLinksFactory()
#>>> html = """\
#... <a href="a">
#... <frame src="b">
#... <a href="c">
#... <iframe src="d">
#... </a>
#... </area>
#... </frame>
#... """
#>>> f.set_soup(create_soup(html), "http://example.com", "latin-1")
#>>> list(f.links()) # doctest: +IGNORE_EXCEPTION_DETAIL
#Traceback (most recent call last):
#ParseError:
>>> html = """\
... <table>
... <tr><td>
... <input name='broken'>
... </td>
... </form>
... </tr>
... </form>
... """
>>> f = RobustFormsFactory()
>>> f.set_response(create_response(), "latin-1")
>>> list(f.forms()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ParseError:
#>>> f = RobustTitleFactory()
#>>> f.set_soup(create_soup(""), "latin-1")
#>>> f.title() # doctest: +IGNORE_EXCEPTION_DETAIL
#Traceback (most recent call last):
#ParseError:
Utility class for caching forms etc.
>>> from mechanize._html import CachingGeneratorFunction
>>> i = [1]
>>> func = CachingGeneratorFunction(i)
>>> list(func())
[1]
>>> list(func())
[1]
>>> i = [1, 2, 3]
>>> func = CachingGeneratorFunction(i)
>>> list(func())
[1, 2, 3]
>>> i = func()
>>> i.next()
1
>>> i.next()
2
>>> i.next()
3
>>> i = func()
>>> j = func()
>>> i.next()
1
>>> j.next()
1
>>> i.next()
2
>>> j.next()
2
>>> j.next()
3
>>> i.next()
3
>>> i.next()
Traceback (most recent call last):
...
StopIteration
>>> j.next()
Traceback (most recent call last):
...
StopIteration
+
+
+Link text parsing
+
+>>> def get_first_link_text_bs(html):
+... factory = RobustLinksFactory()
+... soup = MechanizeBs("utf-8", html)
+... factory.set_soup(soup, "http://example.com/", "utf-8")
+... return list(factory.links())[0].text
+
+>>> def get_first_link_text_sgmllib(html):
+... factory = LinksFactory()
+... response = test_html_response(html)
+... factory.set_response(response, "http://example.com/", "utf-8")
+... return list(factory.links())[0].text
+
+Whitespace gets compressed down to single spaces. Tags are removed.
+
+>>> html = ("""\
+... <html><head><title>Title</title></head><body>
+... <p><a href="http://example.com/">The quick\tbrown fox jumps
+... over the <i><b>lazy</b></i> dog </a>
+... </body></html>
+... """)
+>>> get_first_link_text_bs(html)
+'The quick brown fox jumps over the lazy dog'
+>>> get_first_link_text_sgmllib(html)
+'The quick brown fox jumps over the lazy dog'
+
+Empty <a> links have empty link text
+
+>>> html = ("""\
+... <html><head><title>Title</title></head><body>
+... <p><a href="http://example.com/"></a>
+... </body></html>
+... """)
+>>> get_first_link_text_bs(html)
+''
+>>> get_first_link_text_sgmllib(html)
+''
+
+But for backwards-compatibility, empty non-<a> links have None link text
+
+>>> html = ("""\
+... <html><head><title>Title</title></head><body>
+... <p><frame src="http://example.com/"></frame>
+... </body></html>
+... """)
+>>> print get_first_link_text_bs(html)
+None
+>>> print get_first_link_text_sgmllib(html)
+None
|
Almad/Mechanize
|
f88d4bfae183b09c2c62078511ffaf050185b4c0
|
Fix docstring typo and remove empty docstring
|
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index a0bf6e1..b5ac64d 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,679 +1,678 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import urllib2, sys, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
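# Editor's note: illustrative behaviour of sanepathname2url on
# Windows (path made up): pathname2url(r"C:\tmp\x.html") returns
# "///C:/tmp/x.html"; stripping the leading "//" gives
# "/C:/tmp/x.html", so open_local_file() below builds the URL
# "file:///C:/tmp/x.html".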
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - eg., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
request_class: Request class to use. Defaults to mechanize.Request
for Pythons older than 2.4, urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request.
This base class does not implement this feature (so don't turn this on
if you're using this base class directly), but the subclass
mechanize.Browser does.
"""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
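# Editor's note: the fragment stripping above, worked through on a
# made-up URL: _rfc3986.urlsplit("http://h/p?q=1#frag") gives
# ("http", "h", "/p", "q=1", "frag"); replacing the fragment with
# None and urlunsplitting yields "http://h/p?q=1", which is what is
# sent as the Referer header (RFC 2616 14.36).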
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
The browser state (including .request, .response(), history, forms and
links) are all left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
- for example, it is not appropriate to set the cookie based on the
+ for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants of
any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
- """"""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
"""Return title, or None if there is no title element in the document.
Tags are stripped or textified as described in docs for
PullParser.get_text() method of pullparser module.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
first). Note that form 0 is the first form matching all the other
arguments (if supplied); it is not necessarily the first form in the
document. The "global form" (consisting of all form controls not contained
in any FORM element) is considered not to be part of this sequence and
to have no name, so will not be matched unless both name and nr are
None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
global_form = self._factory.global_form
if nr is None and name is None and \
predicate is not None and predicate(global_form):
self.form = global_form
return
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
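# Editor's note: hedged usage sketches for select_form (names are
# made up): br.select_form(name="login"); br.select_form(nr=0);
# br.select_form(predicate=lambda f: f.attrs.get("id") == "login").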
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for ClientForm.HTMLForm.click().
Return value is same as for Browser.open().
"""
return self.open(self.click(*args, **kwds))
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: eg. <a href="blah">this bit</a> (as
returned by pullparser.get_compressed_text(), ie. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
text_regex: link text between tag (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, eg. "a"
predicate: a function taking a Link object as its single argument,
returning a boolean result, indicating whether the link matches, if
supplied
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
def __getattr__(self, name):
# pass through ClientForm / DOMForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
found_links = []
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
|
Almad/Mechanize
|
6ed6c6ca57705ce5e70c0b9fd0499f31bcf2a337
|
Fix selection of global form using .select_form() ([email protected])
|
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index 8fa8cae..a0bf6e1 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,670 +1,679 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import urllib2, sys, copy, re, os, urllib
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
def sanepathname2url(path):
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - eg., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
request_class: Request class to use. Defaults to mechanize.Request
for Pythons older than 2.4, urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request.
This base class does not implement this feature (so don't turn this on
if you're using this base class directly), but the subclass
mechanize.Browser does.
"""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
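# Illustrative sketch (editorial note; URLs hypothetical): the rules above
# only add a Referer for http/https -> http/https navigation, and never when
# stepping down from https to http:
#
#     br = Browser()
#     br.open("https://example.com/secure.html")
#     req = br.click_link(nr=0)       # suppose this link points at http://...
#     req.has_header("Referer")       # -> False: https -> http is blocked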
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
The browser state (including .request, .response(), history, forms and
links) is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
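# Illustrative usage (editorial note; URLs hypothetical):
#
#     br = Browser()
#     br.open("http://example.com/page.html")
#     img = br.open_novisit("http://example.com/logo.png")   # e.g. an image
#     data = img.read()
#     br.geturl()   # still "http://example.com/page.html": state unchanged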
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
def open_local_file(self, filename):
path = sanepathname2url(os.path.abspath(filename))
url = 'file://'+path
return self.open(url)
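# Illustrative usage (editorial note; the path is hypothetical):
#
#     br = Browser()
#     response = br.open_local_file("testdata/page.html")
#     # equivalent to br.open("file://" + <absolute, URL-quoted path>)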
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
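# Illustrative sketch (editorial note; URLs hypothetical): .back() closes the
# current response, pops history, and transparently .reload()s when the popped
# response had not been read to completion:
#
#     br.open("http://example.com/1")
#     br.open("http://example.com/2")
#     r1 = br.back()    # a copy of page 1, possibly re-fetched via .reload()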
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
for example, if it is not appropriate to set the cookie based on the
current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants of
any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
def encoding(self):
""""""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
"""Return title, or None if there is no title element in the document.
Tags are stripped or textified as described in docs for
PullParser.get_text() method of pullparser module.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
first). Note that form 0 is the first form matching all the other
arguments (if supplied); it is not necessarily the first form in the
- document.
+ document. The "global form" (consisting of all form controls not contained
+ in any FORM element) is considered not to be part of this sequence and
+ to have no name, so will not be matched unless both name and nr are
+ None.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
+ global_form = self._factory.global_form
+ if nr is None and name is None and \
+ predicate is not None and predicate(global_form):
+ self.form = global_form
+ return
+
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
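# Illustrative usage (editorial note; form names hypothetical):
#
#     br.select_form(name="login")     # by name
#     br.select_form(nr=0)             # first form (the global form excluded)
#     br.select_form(predicate=lambda f: f.attrs.get("id") == "search")
#
# Per the code above, a predicate-only call is also tried against the global
# form before the ordinary forms are scanned.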
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for ClientForm.HTMLForm.click().
Return value is same as for Browser.open().
"""
return self.open(self.click(*args, **kwds))
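# Illustrative fill-and-submit flow (editorial note; names hypothetical):
#
#     br.select_form(name="login")
#     br["user"] = "joe"        # item assignment is passed through to the
#                               # selected form (see test_forms)
#     response = br.submit()    # click the form's first clickable control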
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: eg. <a href="blah">this bit</a> (as
returned by pullparser.get_compressed_text(), ie. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
text_regex: link text between tag (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, eg. "a"
predicate: a function taking a Link object as its single argument,
returning a boolean result indicating whether the link matched
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
def __getattr__(self, name):
# pass through ClientForm / DOMForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
found_links = []
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
diff --git a/test/test_browser.doctest b/test/test_browser.doctest
index a7c966a..cba1922 100644
--- a/test/test_browser.doctest
+++ b/test/test_browser.doctest
@@ -1,254 +1,279 @@
>>> import mechanize
>>> from mechanize._response import test_response
>>> from test_browser import TestBrowser2, make_mock_handler
Opening a new response should close the old one.
>>> class TestHttpHandler(mechanize.BaseHandler):
... def http_open(self, request):
... return test_response(url=request.get_full_url())
>>> class TestHttpBrowser(TestBrowser2):
... handler_classes = TestBrowser2.handler_classes.copy()
... handler_classes["http"] = TestHttpHandler
... default_schemes = ["http"]
>>> def response_impl(response):
... return response.wrapped.fp.__class__.__name__
>>> br = TestHttpBrowser()
>>> r = br.open("http://example.com")
>>> print response_impl(r)
StringI
>>> r2 = br.open("http://example.com")
>>> print response_impl(r2)
StringI
>>> print response_impl(r)
eofresponse
So should .set_response()
>>> br.set_response(test_response())
>>> print response_impl(r2)
eofresponse
.visit_response() works very similarly to .open()
>>> br = TestHttpBrowser()
>>> r = br.open("http://example.com")
>>> r2 = test_response(url="http://example.com/2")
>>> print response_impl(r2)
StringI
>>> br.visit_response(r2)
>>> print response_impl(r)
eofresponse
>>> br.geturl() == br.request.get_full_url() == "http://example.com/2"
True
>>> junk = br.back()
>>> br.geturl() == br.request.get_full_url() == "http://example.com"
True
.back() may reload if the complete response was not read. If so, it
should return the new response, not the old one
>>> class ReloadCheckBrowser(TestHttpBrowser):
... reloaded = False
... def reload(self):
... self.reloaded = True
... return TestHttpBrowser.reload(self)
>>> br = ReloadCheckBrowser()
>>> old = br.open("http://example.com")
>>> junk = br.open("http://example.com/2")
>>> new = br.back()
>>> br.reloaded
True
>>> new.wrapped is not old.wrapped
True
Warn early about some mistakes setting a response object
>>> import StringIO
>>> br = TestBrowser2()
>>> br.set_response("blah")
Traceback (most recent call last):
...
ValueError: not a response object
>>> br.set_response(StringIO.StringIO())
Traceback (most recent call last):
...
ValueError: not a response object
.open() without an appropriate scheme handler should fail with
URLError
>>> br = TestBrowser2()
>>> br.open("http://example.com")
Traceback (most recent call last):
...
URLError: <urlopen error unknown url type: http>
Reload after failed .open() should fail due to failure to open, not
with BrowserStateError
>>> br.reload()
Traceback (most recent call last):
...
URLError: <urlopen error unknown url type: http>
.clear_history() should do what it says on the tin. Note that the
history does not include the current response!
>>> br = TestBrowser2()
>>> br.add_handler(make_mock_handler(test_response)([("http_open", None)]))
>>> br.response() is None
True
>>> len(br._history._history)
0
>>> r = br.open("http://example.com/1")
>>> br.response() is not None
True
>>> len(br._history._history)
0
>>> br.clear_history()
>>> br.response() is not None
True
>>> len(br._history._history)
0
>>> r = br.open("http://example.com/2")
>>> br.response() is not None
True
>>> len(br._history._history)
1
>>> br.clear_history()
>>> br.response() is not None
True
>>> len(br._history._history)
0
.open()ing a Request with False .visit does not affect Browser state.
Redirections during such a non-visiting request should also be
non-visiting.
>>> from mechanize import BrowserStateError, Request, HTTPRedirectHandler
>>> from test_urllib2 import MockHTTPHandler
>>> req = Request("http://example.com")
>>> req.visit = False
>>> br = TestBrowser2()
>>> hh = MockHTTPHandler(302, "Location: http://example.com/\r\n\r\n")
>>> br.add_handler(hh)
>>> br.add_handler(HTTPRedirectHandler())
>>> def raises(exc_class, fn, *args, **kwds):
... try:
... fn(*args, **kwds)
... except exc_class, exc:
... return True
... return False
>>> def test_state(br):
... return (br.request is None and
... br.response() is None and
... raises(BrowserStateError, br.back)
... )
>>> test_state(br)
True
>>> r = br.open(req)
>>> test_state(br)
True
...in fact, any redirection (but not refresh), proxy request, basic or
digest auth request, or robots.txt request should be non-visiting,
even if .visit is True:
>>> from test_urllib2 import MockPasswordManager
>>> def test_one_visit(handlers):
... br = TestBrowser2()
... for handler in handlers: br.add_handler(handler)
... req = Request("http://example.com")
... req.visit = True
... br.open(req)
... return br
>>> def test_state(br):
... # XXX the _history._history check is needed because of the weird
... # throwing-away of history entries by .back() where response is
... # None, which makes the .back() check insufficient to tell if a
... # history entry was .add()ed. I don't want to change this until
... # post-stable.
... return (
... br.response() and
... br.request and
... len(br._history._history) == 0 and
... raises(BrowserStateError, br.back))
>>> hh = MockHTTPHandler(302, "Location: http://example.com/\r\n\r\n")
>>> br = test_one_visit([hh, HTTPRedirectHandler()])
>>> test_state(br)
True
>>> class MockPasswordManager:
... def add_password(self, realm, uri, user, password): pass
... def find_user_password(self, realm, authuri): return '', ''
>>> ah = mechanize.HTTPBasicAuthHandler(MockPasswordManager())
>>> hh = MockHTTPHandler(
... 401, 'WWW-Authenticate: Basic realm="realm"\r\n\r\n')
>>> test_state(test_one_visit([hh, ah]))
True
>>> ph = mechanize.ProxyHandler(dict(http="proxy.example.com:3128"))
>>> ah = mechanize.ProxyBasicAuthHandler(MockPasswordManager())
>>> hh = MockHTTPHandler(
... 407, 'Proxy-Authenticate: Basic realm="realm"\r\n\r\n')
>>> test_state(test_one_visit([ph, hh, ah]))
True
XXX Can't really fix this one properly without significant changes --
the refresh should go onto the history *after* the call, but currently
all redirects, including refreshes, are done by recursive .open()
calls, which gets the history wrong in this case. Will have to wait
until after stable release:
#>>> hh = MockHTTPHandler(
#... "refresh", 'Location: http://example.com/\r\n\r\n')
#>>> br = test_one_visit([hh, HTTPRedirectHandler()])
#>>> br.response() is not None
#True
#>>> br.request is not None
#True
#>>> r = br.back()
XXX digest, robots
.global_form() is separate from the other forms (partly for backwards-
compatibility reasons).
>>> from mechanize._response import test_response
>>> br = TestBrowser2()
>>> html = """\
... <html><body>
... <input type="text" name="a" />
... <form><input type="text" name="b" /></form>
... </body></html>
... """
>>> response = test_response(html, headers=[("Content-type", "text/html")])
>>> br.global_form()
Traceback (most recent call last):
BrowserStateError: not viewing any document
>>> br.set_response(response)
>>> br.global_form().find_control(nr=0).name
'a'
>>> len(list(br.forms()))
1
>>> iter(br.forms()).next().find_control(nr=0).name
'b'
+
+
+
+.select_form() works with the global form
+
+>>> import ClientForm
+>>> from mechanize._response import test_html_response
+>>> br = TestBrowser2()
+>>> br.visit_response(test_html_response("""\
+... <html><head><title></title></head><body>
+... <input type="text" name="a" value="b"></input>
+... <form>
+... <input type="text" name="p" value="q"></input>
+... </form>
+... </body></html>"""))
+>>> def has_a(form):
+... try:
+... form.find_control(name="a")
+... except ClientForm.ControlNotFoundError:
+... return False
+... else:
+... return True
+>>> br.select_form(predicate=has_a)
+>>> br.form.find_control(name="a").value
+'b'
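A complementary sketch (editorial note, not part of the original test): an
ordinary form is still reachable by number, since the global form is not part
of the .forms() sequence:

#>>> br.select_form(nr=0)
#>>> br.form.find_control(name="p").value
#'q'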
diff --git a/test/test_browser.py b/test/test_browser.py
index e763674..1bc318f 100644
--- a/test/test_browser.py
+++ b/test/test_browser.py
@@ -1,770 +1,770 @@
#!/usr/bin/env python
"""Tests for mechanize.Browser."""
import sys, os, random
from unittest import TestCase
import StringIO, re, urllib2
import mechanize
from mechanize._response import test_html_response
FACTORY_CLASSES = [mechanize.DefaultFactory, mechanize.RobustFactory]
# XXX these 'mock' classes are badly in need of simplification / removal
# (note this stuff is also used by test_useragent.py and test_browser.doctest)
class MockMethod:
def __init__(self, meth_name, action, handle):
self.meth_name = meth_name
self.handle = handle
self.action = action
def __call__(self, *args):
return apply(self.handle, (self.meth_name, self.action)+args)
class MockHeaders(dict):
def getheaders(self, name):
name = name.lower()
return [v for k, v in self.iteritems() if name == k.lower()]
class MockResponse:
closeable_response = None
def __init__(self, url="http://example.com/", data=None, info=None):
self.url = url
self.fp = StringIO.StringIO(data)
if info is None: info = {}
self._info = MockHeaders(info)
def info(self): return self._info
def geturl(self): return self.url
def read(self, size=-1): return self.fp.read(size)
def seek(self, whence):
assert whence == 0
self.fp.seek(0)
def close(self): pass
def get_data(self): pass
def make_mock_handler(response_class=MockResponse):
class MockHandler:
processor_order = 500
handler_order = -1
def __init__(self, methods):
self._define_methods(methods)
def _define_methods(self, methods):
for name, action in methods:
if name.endswith("_open"):
meth = MockMethod(name, action, self.handle)
else:
meth = MockMethod(name, action, self.process)
setattr(self.__class__, name, meth)
def handle(self, fn_name, response, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if response:
if isinstance(response, urllib2.HTTPError):
raise response
r = response
r.seek(0)
else:
r = response_class()
req = args[0]
r.url = req.get_full_url()
return r
def process(self, fn_name, action, *args, **kwds):
self.parent.calls.append((self, fn_name, args, kwds))
if fn_name.endswith("_request"):
return args[0]
else:
return args[1]
def close(self): pass
def add_parent(self, parent):
self.parent = parent
self.parent.calls = []
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
return MockHandler
class TestBrowser(mechanize.Browser):
default_features = []
default_others = []
default_schemes = []
class TestBrowser2(mechanize.Browser):
# XXX better name!
# As TestBrowser, this is neutered so it doesn't know about protocol handling,
# but still knows what to do with unknown schemes, etc., because
# UserAgent's default_others list is left intact, including classes like
# UnknownHandler
default_features = []
default_schemes = []
class BrowserTests(TestCase):
def test_referer(self):
b = TestBrowser()
url = "http://www.example.com/"
r = MockResponse(url,
"""<html>
<head><title>Title</title></head>
<body>
<form name="form1">
<input type="hidden" name="foo" value="bar"></input>
<input type="submit"></input>
</form>
<a href="http://example.com/foo/bar.html" name="apples"></a>
<a href="https://example.com/spam/eggs.html" name="secure"></a>
<a href="blah://example.com/" name="pears"></a>
</body>
</html>
""", {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
# Referer not added by .open()...
req = mechanize.Request(url)
b.open(req)
self.assert_(req.get_header("Referer") is None)
# ...even if we're visiting a document
b.open(req)
self.assert_(req.get_header("Referer") is None)
# Referer added by .click_link() and .click()
b.select_form("form1")
req2 = b.click()
self.assertEqual(req2.get_header("Referer"), url)
r2 = b.open(req2)
req3 = b.click_link(name="apples")
self.assertEqual(req3.get_header("Referer"), url+"?foo=bar")
# Referer not added when going from https to http URL
b.add_handler(make_mock_handler()([("https_open", r)]))
r3 = b.open(req3)
req4 = b.click_link(name="secure")
self.assertEqual(req4.get_header("Referer"),
"http://example.com/foo/bar.html")
r4 = b.open(req4)
req5 = b.click_link(name="apples")
self.assert_(not req5.has_header("Referer"))
# Referer not added for non-http, non-https requests
b.add_handler(make_mock_handler()([("blah_open", r)]))
req6 = b.click_link(name="pears")
self.assert_(not req6.has_header("Referer"))
# Referer not added when going from non-http, non-https URL
r4 = b.open(req6)
req7 = b.click_link(name="apples")
self.assert_(not req7.has_header("Referer"))
# XXX Referer added for redirect
def test_encoding(self):
import mechanize
from StringIO import StringIO
import urllib, mimetools
# always take first encoding, since that's the one from the real HTTP
# headers, rather than from HTTP-EQUIV
b = mechanize.Browser()
for s, ct in [("", mechanize._html.DEFAULT_ENCODING),
("Foo: Bar\r\n\r\n", mechanize._html.DEFAULT_ENCODING),
("Content-Type: text/html; charset=UTF-8\r\n\r\n",
"UTF-8"),
("Content-Type: text/html; charset=UTF-8\r\n"
"Content-Type: text/html; charset=KOI8-R\r\n\r\n",
"UTF-8"),
]:
msg = mimetools.Message(StringIO(s))
r = urllib.addinfourl(StringIO(""), msg, "http://www.example.com/")
b.set_response(r)
self.assertEqual(b.encoding(), ct)
def test_history(self):
import mechanize
from mechanize import _response
def same_response(ra, rb):
return ra.wrapped is rb.wrapped
class Handler(mechanize.BaseHandler):
def http_open(self, request):
r = _response.test_response(url=request.get_full_url())
# these tests aren't interested in auto-.reload() behaviour of
# .back(), so read the response to prevent that happening
r.get_data()
return r
b = TestBrowser2()
b.add_handler(Handler())
self.assertRaises(mechanize.BrowserStateError, b.back)
r1 = b.open("http://example.com/")
self.assertRaises(mechanize.BrowserStateError, b.back)
r2 = b.open("http://example.com/foo")
self.assert_(same_response(b.back(), r1))
r3 = b.open("http://example.com/bar")
r4 = b.open("http://example.com/spam")
self.assert_(same_response(b.back(), r3))
self.assert_(same_response(b.back(), r1))
self.assertEquals(b.geturl(), "http://example.com/")
self.assertRaises(mechanize.BrowserStateError, b.back)
# reloading does a real HTTP fetch rather than using history cache
r5 = b.reload()
self.assert_(not same_response(r5, r1))
# .geturl() gets fed through to b.response
self.assertEquals(b.geturl(), "http://example.com/")
# can go back n times
r6 = b.open("spam")
self.assertEquals(b.geturl(), "http://example.com/spam")
r7 = b.open("/spam")
self.assert_(same_response(b.response(), r7))
self.assertEquals(b.geturl(), "http://example.com/spam")
self.assert_(same_response(b.back(2), r5))
self.assertEquals(b.geturl(), "http://example.com/")
self.assertRaises(mechanize.BrowserStateError, b.back, 2)
r8 = b.open("/spam")
# even if we get an HTTPError, history, .response() and .request should
# still get updated
class Handler2(mechanize.BaseHandler):
def https_open(self, request):
r = urllib2.HTTPError(
"https://example.com/bad", 503, "Oops",
MockHeaders(), StringIO.StringIO())
return r
b.add_handler(Handler2())
self.assertRaises(urllib2.HTTPError, b.open, "https://example.com/badreq")
self.assertEqual(b.response().geturl(), "https://example.com/bad")
self.assertEqual(b.request.get_full_url(), "https://example.com/badreq")
self.assert_(same_response(b.back(), r8))
# .close() should make use of Browser methods and attributes complain
# noisily, since they should not be called after .close()
b.form = "blah"
b.close()
for attr in ("form open error retrieve add_handler "
"request response set_response geturl reload back "
"clear_history set_cookie links forms viewing_html "
"encoding title select_form click submit click_link "
"follow_link find_link".split()
):
self.assert_(getattr(b, attr) is None)
def test_reload_read_incomplete(self):
import mechanize
from mechanize._response import test_response
class Browser(TestBrowser):
def __init__(self):
TestBrowser.__init__(self)
self.reloaded = False
def reload(self):
self.reloaded = True
TestBrowser.reload(self)
br = Browser()
data = "<html><head><title></title></head><body>%s</body></html>"
data = data % ("The quick brown fox jumps over the lazy dog."*100)
class Handler(mechanize.BaseHandler):
def http_open(self, request):
return test_response(data, [("content-type", "text/html")])
br.add_handler(Handler())
# .reload() on .back() if the whole response hasn't already been read
# (.read_incomplete is True)
r = br.open("http://example.com")
r.read(10)
br.open('http://www.example.com/blah')
self.failIf(br.reloaded)
br.back()
self.assert_(br.reloaded)
# don't reload if already read
br.reloaded = False
br.response().read()
br.open('http://www.example.com/blah')
br.back()
self.failIf(br.reloaded)
def test_viewing_html(self):
# XXX not testing multiple Content-Type headers
import mechanize
url = "http://example.com/"
for allow_xhtml in False, True:
for ct, expect in [
(None, False),
("text/plain", False),
("text/html", True),
# don't try to handle XML until we can do it right!
("text/xhtml", allow_xhtml),
("text/xml", allow_xhtml),
("application/xml", allow_xhtml),
("application/xhtml+xml", allow_xhtml),
("text/html; charset=blah", True),
(" text/html ; charset=ook ", True),
]:
b = TestBrowser(mechanize.DefaultFactory(
i_want_broken_xhtml_support=allow_xhtml))
hdrs = {}
if ct is not None:
hdrs["Content-Type"] = ct
b.add_handler(make_mock_handler()([("http_open",
MockResponse(url, "", hdrs))]))
r = b.open(url)
self.assertEqual(b.viewing_html(), expect)
for allow_xhtml in False, True:
for ext, expect in [
(".htm", True),
(".html", True),
# don't try to handle XML until we can do it right!
(".xhtml", allow_xhtml),
(".html?foo=bar&a=b;whelk#kool", True),
(".txt", False),
(".xml", False),
("", False),
]:
b = TestBrowser(mechanize.DefaultFactory(
i_want_broken_xhtml_support=allow_xhtml))
url = "http://example.com/foo"+ext
b.add_handler(make_mock_handler()(
[("http_open", MockResponse(url, "", {}))]))
r = b.open(url)
self.assertEqual(b.viewing_html(), expect)
def test_empty(self):
import mechanize
url = "http://example.com/"
b = TestBrowser()
self.assert_(b.response() is None)
# To open a relative reference (often called a "relative URL"), you
# have to have already opened a URL for it "to be relative to".
self.assertRaises(mechanize.BrowserStateError, b.open, "relative_ref")
# we can still clear the history even if we've not visited any URL
b.clear_history()
# most methods raise BrowserStateError...
def test_state_error(method_names):
for attr in method_names:
method = getattr(b, attr)
#print attr
self.assertRaises(mechanize.BrowserStateError, method)
self.assertRaises(mechanize.BrowserStateError, b.select_form,
name="blah")
self.assertRaises(mechanize.BrowserStateError, b.find_link,
name="blah")
# ...if not visiting a URL...
test_state_error(("geturl reload back viewing_html encoding "
"click links forms title select_form".split()))
self.assertRaises(mechanize.BrowserStateError, b.set_cookie, "foo=bar")
self.assertRaises(mechanize.BrowserStateError, b.submit, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.click_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.follow_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.find_link, nr=0)
# ...and lots do so if visiting a non-HTML URL
b.add_handler(make_mock_handler()(
[("http_open", MockResponse(url, "", {}))]))
r = b.open(url)
self.assert_(not b.viewing_html())
test_state_error("click links forms title select_form".split())
self.assertRaises(mechanize.BrowserStateError, b.submit, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.click_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.follow_link, nr=0)
self.assertRaises(mechanize.BrowserStateError, b.find_link, nr=0)
b = TestBrowser()
r = MockResponse(url,
"""<html>
<head><title>Title</title></head>
<body>
</body>
</html>
""", {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
self.assertEqual(b.title(), "Title")
self.assertEqual(len(list(b.links())), 0)
self.assertEqual(len(list(b.forms())), 0)
self.assertRaises(ValueError, b.select_form)
self.assertRaises(mechanize.FormNotFoundError, b.select_form,
name="blah")
self.assertRaises(mechanize.FormNotFoundError, b.select_form,
- predicate=lambda x: True)
+ predicate=lambda form: form is not b.global_form())
self.assertRaises(mechanize.LinkNotFoundError, b.find_link,
name="blah")
self.assertRaises(mechanize.LinkNotFoundError, b.find_link,
predicate=lambda x: True)
def test_forms(self):
for factory_class in FACTORY_CLASSES:
self._test_forms(factory_class())
def _test_forms(self, factory):
import mechanize
url = "http://example.com"
b = TestBrowser(factory=factory)
r = test_html_response(
url=url,
headers=[("content-type", "text/html")],
data="""\
<html>
<head><title>Title</title></head>
<body>
<form name="form1">
<input type="text"></input>
<input type="checkbox" name="cheeses" value="cheddar"></input>
<input type="checkbox" name="cheeses" value="edam"></input>
<input type="submit" name="one"></input>
</form>
<a href="http://example.com/foo/bar.html" name="apples">
<form name="form2">
<input type="submit" name="two">
</form>
</body>
</html>
"""
)
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
forms = list(b.forms())
self.assertEqual(len(forms), 2)
for got, expect in zip([f.name for f in forms], [
"form1", "form2"]):
self.assertEqual(got, expect)
self.assertRaises(mechanize.FormNotFoundError, b.select_form, "foo")
# no form is set yet
self.assertRaises(AttributeError, getattr, b, "possible_items")
b.select_form("form1")
# now unknown methods are fed through to selected ClientForm.HTMLForm
self.assertEqual(
[i.name for i in b.find_control("cheeses").items],
["cheddar", "edam"])
b["cheeses"] = ["cheddar", "edam"]
self.assertEqual(b.click_pairs(), [
("cheeses", "cheddar"), ("cheeses", "edam"), ("one", "")])
b.select_form(nr=1)
self.assertEqual(b.name, "form2")
self.assertEqual(b.click_pairs(), [("two", "")])
def test_link_encoding(self):
for factory_class in FACTORY_CLASSES:
self._test_link_encoding(factory_class())
def _test_link_encoding(self, factory):
import urllib
import mechanize
from mechanize._rfc3986 import clean_url
url = "http://example.com/"
for encoding in ["UTF-8", "latin-1"]:
encoding_decl = "; charset=%s" % encoding
b = TestBrowser(factory=factory)
r = MockResponse(url, """\
<a href="http://example.com/foo/bar——.html"
name="name0——">blah——</a>
""", #"
{"content-type": "text/html%s" % encoding_decl})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
Link = mechanize.Link
try:
mdashx2 = u"\u2014".encode(encoding)*2
except UnicodeError:
mdashx2 = '——'
qmdashx2 = clean_url(mdashx2, encoding)
# base_url, url, text, tag, attrs
exp = Link(url, "http://example.com/foo/bar%s.html" % qmdashx2,
"blah"+mdashx2, "a",
[("href", "http://example.com/foo/bar%s.html" % mdashx2),
("name", "name0%s" % mdashx2)])
# nr
link = b.find_link()
## print
## print exp
## print link
self.assertEqual(link, exp)
def test_link_whitespace(self):
from mechanize import Link
for factory_class in FACTORY_CLASSES:
base_url = "http://example.com/"
url = " http://example.com/foo.html%20+ "
stripped_url = url.strip()
html = '<a href="%s"></a>' % url
b = TestBrowser(factory=factory_class())
r = MockResponse(base_url, html, {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(base_url)
link = b.find_link(nr=0)
self.assertEqual(
link,
Link(base_url, stripped_url, "", "a", [("href", url)])
)
def test_links(self):
for factory_class in FACTORY_CLASSES:
self._test_links(factory_class())
def _test_links(self, factory):
import mechanize
from mechanize import Link
url = "http://example.com/"
b = TestBrowser(factory=factory)
r = MockResponse(url,
"""<html>
<head><title>Title</title></head>
<body>
<a href="http://example.com/foo/bar.html" name="apples"></a>
<a name="pears"></a>
<a href="spam" name="pears"></a>
<area href="blah" name="foo"></area>
<form name="form2">
<input type="submit" name="two">
</form>
<frame name="name" href="href" src="src"></frame>
<iframe name="name2" href="href" src="src"></iframe>
<a name="name3" href="one">yada yada</a>
<a name="pears" href="two" weird="stuff">rhubarb</a>
<a></a>
<iframe src="foo"></iframe>
</body>
</html>
""", {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
exp_links = [
# base_url, url, text, tag, attrs
Link(url, "http://example.com/foo/bar.html", "", "a",
[("href", "http://example.com/foo/bar.html"),
("name", "apples")]),
Link(url, "spam", "", "a", [("href", "spam"), ("name", "pears")]),
Link(url, "blah", None, "area",
[("href", "blah"), ("name", "foo")]),
Link(url, "src", None, "frame",
[("name", "name"), ("href", "href"), ("src", "src")]),
Link(url, "src", None, "iframe",
[("name", "name2"), ("href", "href"), ("src", "src")]),
Link(url, "one", "yada yada", "a",
[("name", "name3"), ("href", "one")]),
Link(url, "two", "rhubarb", "a",
[("name", "pears"), ("href", "two"), ("weird", "stuff")]),
Link(url, "foo", None, "iframe",
[("src", "foo")]),
]
links = list(b.links())
self.assertEqual(len(links), len(exp_links))
for got, expect in zip(links, exp_links):
self.assertEqual(got, expect)
# nr
l = b.find_link()
self.assertEqual(l.url, "http://example.com/foo/bar.html")
l = b.find_link(nr=1)
self.assertEqual(l.url, "spam")
# text
l = b.find_link(text="yada yada")
self.assertEqual(l.url, "one")
self.assertRaises(mechanize.LinkNotFoundError,
b.find_link, text="da ya")
l = b.find_link(text_regex=re.compile("da ya"))
self.assertEqual(l.url, "one")
l = b.find_link(text_regex="da ya")
self.assertEqual(l.url, "one")
# name
l = b.find_link(name="name3")
self.assertEqual(l.url, "one")
l = b.find_link(name_regex=re.compile("oo"))
self.assertEqual(l.url, "blah")
l = b.find_link(name_regex="oo")
self.assertEqual(l.url, "blah")
# url
l = b.find_link(url="spam")
self.assertEqual(l.url, "spam")
l = b.find_link(url_regex=re.compile("pam"))
self.assertEqual(l.url, "spam")
l = b.find_link(url_regex="pam")
self.assertEqual(l.url, "spam")
# tag
l = b.find_link(tag="area")
self.assertEqual(l.url, "blah")
# predicate
l = b.find_link(predicate=
lambda l: dict(l.attrs).get("weird") == "stuff")
self.assertEqual(l.url, "two")
# combinations
l = b.find_link(name="pears", nr=1)
self.assertEqual(l.text, "rhubarb")
l = b.find_link(url="src", nr=0, name="name2")
self.assertEqual(l.tag, "iframe")
self.assertEqual(l.url, "src")
self.assertRaises(mechanize.LinkNotFoundError, b.find_link,
url="src", nr=1, name="name2")
l = b.find_link(tag="a", predicate=
lambda l: dict(l.attrs).get("weird") == "stuff")
self.assertEqual(l.url, "two")
# .links()
self.assertEqual(list(b.links(url="src")), [
Link(url, url="src", text=None, tag="frame",
attrs=[("name", "name"), ("href", "href"), ("src", "src")]),
Link(url, url="src", text=None, tag="iframe",
attrs=[("name", "name2"), ("href", "href"), ("src", "src")]),
])
def test_base_uri(self):
import mechanize
url = "http://example.com/"
for html, urls in [
(
"""<base href="http://www.python.org/foo/">
<a href="bar/baz.html"></a>
<a href="/bar/baz.html"></a>
<a href="http://example.com/bar %2f%2Fblah;/baz@~._-.html"></a>
""",
[
"http://www.python.org/foo/bar/baz.html",
"http://www.python.org/bar/baz.html",
"http://example.com/bar%20%2f%2Fblah;/baz@~._-.html",
]),
(
"""<a href="bar/baz.html"></a>
<a href="/bar/baz.html"></a>
<a href="http://example.com/bar/baz.html"></a>
""",
[
"http://example.com/bar/baz.html",
"http://example.com/bar/baz.html",
"http://example.com/bar/baz.html",
]
),
]:
b = TestBrowser()
r = MockResponse(url, html, {"content-type": "text/html"})
b.add_handler(make_mock_handler()([("http_open", r)]))
r = b.open(url)
self.assertEqual([link.absolute_url for link in b.links()], urls)
def test_set_cookie(self):
class CookieTestBrowser(TestBrowser):
default_features = list(TestBrowser.default_features)+["_cookies"]
# have to be visiting HTTP/HTTPS URL
url = "ftp://example.com/"
br = CookieTestBrowser()
r = mechanize.make_response(
"<html><head><title>Title</title></head><body></body></html>",
[("content-type", "text/html")],
url,
200, "OK",
)
br.add_handler(make_mock_handler()([("http_open", r)]))
handler = br._ua_handlers["_cookies"]
cj = handler.cookiejar
self.assertRaises(mechanize.BrowserStateError,
br.set_cookie, "foo=bar")
self.assertEqual(len(cj), 0)
url = "http://example.com/"
br = CookieTestBrowser()
r = mechanize.make_response(
"<html><head><title>Title</title></head><body></body></html>",
[("content-type", "text/html")],
url,
200, "OK",
)
br.add_handler(make_mock_handler()([("http_open", r)]))
handler = br._ua_handlers["_cookies"]
cj = handler.cookiejar
# have to be visiting a URL
self.assertRaises(mechanize.BrowserStateError,
br.set_cookie, "foo=bar")
self.assertEqual(len(cj), 0)
# normal case
br.open(url)
br.set_cookie("foo=bar")
self.assertEqual(len(cj), 1)
self.assertEqual(cj._cookies["example.com"]["/"]["foo"].value, "bar")
class ResponseTests(TestCase):
def test_set_response(self):
import copy
from mechanize import response_seek_wrapper
br = TestBrowser()
url = "http://example.com/"
html = """<html><body><a href="spam">click me</a></body></html>"""
headers = {"content-type": "text/html"}
r = response_seek_wrapper(MockResponse(url, html, headers))
br.add_handler(make_mock_handler()([("http_open", r)]))
r = br.open(url)
self.assertEqual(r.read(), html)
r.seek(0)
self.assertEqual(copy.copy(r).read(), html)
self.assertEqual(list(br.links())[0].url, "spam")
newhtml = """<html><body><a href="eggs">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, "spam")
r.seek(0)
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "eggs")
def test_str(self):
import mimetools
from mechanize import _response
br = TestBrowser()
self.assertEqual(
str(br),
"<TestBrowser (not visiting a URL)>"
)
fp = StringIO.StringIO('<html><form name="f"><input /></form></html>')
headers = mimetools.Message(
StringIO.StringIO("Content-type: text/html"))
response = _response.response_seek_wrapper(
_response.closeable_response(
fp, headers, "http://example.com/", 200, "OK"))
br.set_response(response)
self.assertEqual(
str(br),
"<TestBrowser visiting http://example.com/>"
)
br.select_form(nr=0)
self.assertEqual(
str(br),
"""\
<TestBrowser visiting http://example.com/
selected form:
<f GET http://example.com/ application/x-www-form-urlencoded
<TextControl(<None>=)>>
>""")
if __name__ == "__main__":
import unittest
unittest.main()
|
Almad/Mechanize
|
3c9d42e322a7fb822097ce65ca873678f19c604e
|
Make bundled BeautifulSoup 2 emacs syntax highlighting-friendly
|
diff --git a/mechanize/_beautifulsoup.py b/mechanize/_beautifulsoup.py
index 2541dcc..268b305 100644
--- a/mechanize/_beautifulsoup.py
+++ b/mechanize/_beautifulsoup.py
@@ -1,552 +1,552 @@
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
v2.1.1
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup parses arbitrarily invalid XML- or HTML-like substance
into a tree representation. It provides methods and Pythonic idioms
that make it easy to search and modify the tree.
A well-formed XML/HTML document will yield a well-formed data
structure. An ill-formed XML/HTML document will yield a
correspondingly ill-formed data structure. If your document is only
locally well-formed, you can use this library to find and process the
well-formed part of it. The BeautifulSoup class has heuristics for
obtaining a sensible parse tree in the face of common HTML errors.
Beautiful Soup has no external dependencies. It works with Python 2.2
and up.
Beautiful Soup defines classes for four different parsing strategies:
* BeautifulStoneSoup, for parsing XML, SGML, or your domain-specific
language that kind of looks like XML.
* BeautifulSoup, for parsing run-of-the-mill HTML code, be it valid
or invalid.
* ICantBelieveItsBeautifulSoup, for parsing valid but bizarre HTML
that trips up BeautifulSoup.
* BeautifulSOAP, for making it easier to parse XML documents that use
lots of subelements containing a single string, where you'd prefer
they put that string into an attribute (such as SOAP messages).
You can subclass BeautifulStoneSoup or BeautifulSoup to create a
parsing strategy specific to an XML schema or a particular bizarre
HTML document. Typically your subclass would just override
SELF_CLOSING_TAGS and/or NESTABLE_TAGS.
-"""
+""" #"
from __future__ import generators
__author__ = "Leonard Richardson ([email protected])"
__version__ = "2.1.1"
__date__ = "$Date: 2004/10/18 00:14:20 $"
__copyright__ = "Copyright (c) 2004-2005 Leonard Richardson"
__license__ = "PSF"
from sgmllib import SGMLParser, SGMLParseError
import types
import re
import sgmllib
#This code makes Beautiful Soup able to parse XML with namespaces
sgmllib.tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*')
class NullType(object):
"""Similar to NoneType with a corresponding singleton instance
'Null' that, unlike None, accepts any message and returns itself.
Examples:
>>> Null("send", "a", "message")("and one more",
... "and what you get still") is Null
True
"""
def __new__(cls): return Null
def __call__(self, *args, **kwargs): return Null
## def __getstate__(self, *args): return Null
def __getattr__(self, attr): return Null
def __getitem__(self, item): return Null
def __setattr__(self, attr, value): pass
def __setitem__(self, item, value): pass
def __len__(self): return 0
# FIXME: is this a python bug? otherwise ``for x in Null: pass``
# never terminates...
def __iter__(self): return iter([])
def __contains__(self, item): return False
def __repr__(self): return "Null"
Null = object.__new__(NullType)
class PageElement:
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
def setup(self, parent=Null, previous=Null):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous = previous
self.next = Null
self.previousSibling = Null
self.nextSibling = Null
if self.parent and self.parent.contents:
self.previousSibling = self.parent.contents[-1]
self.previousSibling.nextSibling = self
def findNext(self, name=None, attrs={}, text=None):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._first(self.fetchNext, name, attrs, text)
firstNext = findNext
def fetchNext(self, name=None, attrs={}, text=None, limit=None):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.nextGenerator)
def findNextSibling(self, name=None, attrs={}, text=None):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._first(self.fetchNextSiblings, name, attrs, text)
firstNextSibling = findNextSibling
def fetchNextSiblings(self, name=None, attrs={}, text=None, limit=None):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.nextSiblingGenerator)
def findPrevious(self, name=None, attrs={}, text=None):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._first(self.fetchPrevious, name, attrs, text)
def fetchPrevious(self, name=None, attrs={}, text=None, limit=None):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._fetch(name, attrs, text, limit, self.previousGenerator)
firstPrevious = findPrevious
def findPreviousSibling(self, name=None, attrs={}, text=None):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._first(self.fetchPreviousSiblings, name, attrs, text)
firstPreviousSibling = findPreviousSibling
def fetchPreviousSiblings(self, name=None, attrs={}, text=None,
limit=None):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._fetch(name, attrs, text, limit,
self.previousSiblingGenerator)
def findParent(self, name=None, attrs={}):
"""Returns the closest parent of this Tag that matches the given
criteria."""
r = Null
l = self.fetchParents(name, attrs, 1)
if l:
r = l[0]
return r
firstParent = findParent
def fetchParents(self, name=None, attrs={}, limit=None):
"""Returns the parents of this Tag that match the given
criteria."""
return self._fetch(name, attrs, None, limit, self.parentGenerator)
#These methods do the real heavy lifting.
def _first(self, method, name, attrs, text):
r = Null
l = method(name, attrs, text, 1)
if l:
r = l[0]
return r
def _fetch(self, name, attrs, text, limit, generator):
"Iterates over a generator looking for things that match."
if not hasattr(attrs, 'items'):
attrs = {'class' : attrs}
results = []
g = generator()
while True:
try:
i = g.next()
except StopIteration:
break
found = None
if isinstance(i, Tag):
if not text:
if not name or self._matches(i, name):
match = True
for attr, matchAgainst in attrs.items():
check = i.get(attr)
if not self._matches(check, matchAgainst):
match = False
break
if match:
found = i
elif text:
if self._matches(i, text):
found = i
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#Generators that can be used to navigate starting from both
#NavigableTexts and Tags.
def nextGenerator(self):
i = self
while i:
i = i.next
yield i
def nextSiblingGenerator(self):
i = self
while i:
i = i.nextSibling
yield i
def previousGenerator(self):
i = self
while i:
i = i.previous
yield i
def previousSiblingGenerator(self):
i = self
while i:
i = i.previousSibling
yield i
def parentGenerator(self):
i = self
while i:
i = i.parent
yield i
def _matches(self, chunk, howToMatch):
#print 'looking for %s in %s' % (howToMatch, chunk)
#
# If given a list of items, return true if the list contains a
# text element that matches.
if isList(chunk) and not isinstance(chunk, Tag):
for tag in chunk:
if isinstance(tag, NavigableText) and self._matches(tag, howToMatch):
return True
return False
if callable(howToMatch):
return howToMatch(chunk)
if isinstance(chunk, Tag):
#Custom match methods take the tag as an argument, but all other
#ways of matching match the tag name as a string
chunk = chunk.name
#Now we know that chunk is a string
if not isinstance(chunk, basestring):
chunk = str(chunk)
if hasattr(howToMatch, 'match'):
# It's a regexp object.
return howToMatch.search(chunk)
if isList(howToMatch):
return chunk in howToMatch
if hasattr(howToMatch, 'items'):
return howToMatch.has_key(chunk)
#It's just a string
return str(howToMatch) == chunk
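# Illustrative sketch (editorial note): _matches() is what makes the fetch()/
# first() criteria flexible - a name or attribute value may be a plain string,
# a compiled regexp, a list, a dict (key presence), or a callable, e.g.:
#
#     soup.fetch('a')                                  # by tag name
#     soup.fetch('a', {'class': re.compile('nav')})    # attribute regexp
#     soup.fetch(['a', 'area'])                        # any of several names
#     soup.fetch(lambda tag: len(tag.attrs) == 3)      # custom predicate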
class NavigableText(PageElement):
def __getattr__(self, attr):
"For backwards compatibility, text.string gives you text"
if attr == 'string':
return self
else:
raise AttributeError, "'%s' object has no attribute '%s'" % (self.__class__.__name__, attr)
class NavigableString(str, NavigableText):
pass
class NavigableUnicodeString(unicode, NavigableText):
pass
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, name, attrs=None, parent=Null, previous=Null):
"Basic constructor."
self.name = name
if attrs == None:
attrs = []
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self._getAttrMap().get(key, default)
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self._getAttrMap()[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self._getAttrMap()
self.attrMap[key] = value
found = False
for i in range(0, len(self.attrs)):
if self.attrs[i][0] == key:
self.attrs[i] = (key, value)
found = True
if not found:
self.attrs.append((key, value))
self._getAttrMap()[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
for item in self.attrs:
if item[0] == key:
self.attrs.remove(item)
#We don't break because bad HTML can define the same
#attribute multiple times.
self._getAttrMap()
if self.attrMap.has_key(key):
del self.attrMap[key]
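    # Mapping-style attribute access, illustrative (assumes the BeautifulSoup
    # class defined later in this module):
    #   tag = BeautifulSoup('<p id="x">hi</p>').first('p')
    #   tag['id']              # -> 'x'; a missing key raises KeyError
    #   tag.get('lang', 'en')  # -> 'en'; .get() never raises
    #   tag['class'] = 'intro' # updates both self.attrs and the attr map
    #   del tag['class']       # removes every 'class' attribute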
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
fetch() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return apply(self.fetch, args, kwargs)
def __getattr__(self, tag):
if len(tag) > 3 and tag.rfind('Tag') == len(tag)-3:
return self.first(tag[:-3])
elif tag.find('__') != 0:
return self.first(tag)
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag.
NOTE: right now this will return false if two tags have the
same attributes in a different order. Should this be fixed?"""
        if (not hasattr(other, 'name') or not hasattr(other, 'attrs') or
                not hasattr(other, 'contents') or self.name != other.name or
                self.attrs != other.attrs or len(self) != len(other)):
return False
for i in range(0, len(self.contents)):
if self.contents[i] != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self):
"""Renders this tag as a string."""
return str(self)
def __unicode__(self):
return self.__str__(1)
def __str__(self, needUnicode=None, showStructureIndent=None):
"""Returns a string or Unicode representation of this tag and
its contents.
NOTE: since Python's HTML parser consumes whitespace, this
method is not certain to reproduce the whitespace present in
the original string."""
attrs = []
if self.attrs:
for key, val in self.attrs:
attrs.append('%s="%s"' % (key, val))
close = ''
closeTag = ''
if self.isSelfClosing():
close = ' /'
else:
closeTag = '</%s>' % self.name
indentIncrement = None
if showStructureIndent != None:
indentIncrement = showStructureIndent
if not self.hidden:
indentIncrement += 1
contents = self.renderContents(indentIncrement, needUnicode=needUnicode)
if showStructureIndent:
space = '\n%s' % (' ' * showStructureIndent)
if self.hidden:
s = contents
else:
s = []
attributeString = ''
if attrs:
attributeString = ' ' + ' '.join(attrs)
if showStructureIndent:
s.append(space)
s.append('<%s%s%s>' % (self.name, attributeString, close))
s.append(contents)
if closeTag and showStructureIndent != None:
s.append(space)
s.append(closeTag)
s = ''.join(s)
isUnicode = type(s) == types.UnicodeType
if needUnicode and not isUnicode:
s = unicode(s)
elif isUnicode and needUnicode==False:
s = str(s)
return s
def prettify(self, needUnicode=None):
return self.__str__(needUnicode, showStructureIndent=True)
def renderContents(self, showStructureIndent=None, needUnicode=None):
"""Renders the contents of this tag as a (possibly Unicode)
string."""
s=[]
for c in self:
text = None
if isinstance(c, NavigableUnicodeString) or type(c) == types.UnicodeType:
text = unicode(c)
elif isinstance(c, Tag):
s.append(c.__str__(needUnicode, showStructureIndent))
elif needUnicode:
text = unicode(c)
else:
text = str(c)
if text:
if showStructureIndent != None:
if text[-1] == '\n':
text = text[:-1]
s.append(text)
return ''.join(s)
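    # Rendering notes: str(tag) gives a compact single-string rendering;
    # tag.prettify() is the same markup with each element on its own indented
    # line; unicode(tag) forces a Unicode result via __unicode__ above.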
#Soup methods
def firstText(self, text, recursive=True):
"""Convenience method to retrieve the first piece of text matching the
given criteria. 'text' can be a string, a regular expression object,
a callable that takes a string and returns whether or not the
string 'matches', etc."""
return self.first(recursive=recursive, text=text)
def fetchText(self, text, recursive=True, limit=None):
"""Convenience method to retrieve all pieces of text matching the
given criteria. 'text' can be a string, a regular expression object,
a callable that takes a string and returns whether or not the
string 'matches', etc."""
return self.fetch(recursive=recursive, text=text, limit=limit)
def first(self, name=None, attrs={}, recursive=True, text=None):
"""Return only the first child of this
Tag matching the given criteria."""
r = Null
l = self.fetch(name, attrs, recursive, text, 1)
if l:
r = l[0]
return r
findChild = first
def fetch(self, name=None, attrs={}, recursive=True, text=None,
limit=None):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.recursiveChildGenerator
if not recursive:
generator = self.childGenerator
return self._fetch(name, attrs, text, limit, generator)
fetchChildren = fetch
#Utility methods
def isSelfClosing(self):
"""Returns true iff this is a self-closing tag as defined in the HTML
standard.
TODO: This is specific to BeautifulSoup and its subclasses, but it's
used by __str__"""
return self.name in BeautifulSoup.SELF_CLOSING_TAGS
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.contents.append(tag)
#Private methods
def _getAttrMap(self):
"""Initializes a map representation of this tag's attributes,
if not already initialized."""
        if not getattr(self, 'attrMap', None):
self.attrMap = {}
for (key, value) in self.attrs:
self.attrMap[key] = value
return self.attrMap
#Generator methods
def childGenerator(self):
for i in range(0, len(self.contents)):
yield self.contents[i]
raise StopIteration
    def recursiveChildGenerator(self):
        # Depth-first, pre-order traversal using an explicit stack of
        # (tag, resume-index) pairs, so iteration can descend into a child
        # Tag and later resume with that child's next sibling.
        stack = [(self, 0)]
        while stack:
            tag, start = stack.pop()
            if isinstance(tag, Tag):
                for i in range(start, len(tag.contents)):
                    a = tag.contents[i]
                    yield a
                    if isinstance(a, Tag) and tag.contents:
                        if i < len(tag.contents) - 1:
                            # Remember where to resume in this tag...
                            stack.append((tag, i+1))
                        # ...then descend into the child first.
                        stack.append((a, 0))
                        break
        raise StopIteration
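
def _search_api_examples():
    """Illustrative only -- never called. A sketch of the search API defined
    on Tag above; assumes the BeautifulSoup class defined later in this
    module."""
    soup = BeautifulSoup('<div><p id="a">one</p></div><p id="b">two</p>')
    soup.fetch('p')                   # both <p> tags, in document order
    soup.fetch('p', recursive=False)  # only direct children of the soup
    soup.fetch('p', limit=1)          # stop after the first match
    soup.first('p')                   # first match, or Null if none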
def isList(l):
"""Convenience method that works with all 2.x versions of Python
to determine whether or not something is listlike."""
return hasattr(l, '__iter__') \
or (type(l) in (types.ListType, types.TupleType))
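# isList() examples: isList([1]) and isList((1,)) are true; isList(iter([]))
# is true via the __iter__ check; isList('abc') is false under Python 2,
# since 2.x strings define no __iter__.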
def buildTagMap(default, *args):
"""Turns a list of maps, lists, or scalars into a single map.
Used to build the SELF_CLOSING_TAGS and NESTABLE_TAGS maps out
of lists and partial maps."""
built = {}
|
Almad/Mechanize
|
3bdda2aef8d72092fa46ca2a915aeeeb0b377746
|
Add convenience method Browser.open_local_file(filename)
|
diff --git a/functional_tests.py b/functional_tests.py
index 670c46c..93e02f9 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,413 +1,421 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
import os, sys
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
# XXX
# document twisted.web2 install (I forgot how I did it -- reinstall!)
# implement remaining stuff used by functional_tests.py
# in twisted-localserver.py:
# - 302 followed by 404 response
# - helper cgi script for cookies &c.
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
+ def test_open_local_file(self):
+ # Since the file: URL scheme is not well standardised, Browser has a
+ # special method to open files by name, for convenience:
+ br = mechanize.Browser()
+ response = br.open_local_file("mechanize/_mechanize.py")
+ self.assert_("def open_local_file(self, filename):" in
+ response.get_data())
+
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = urljoin(self.uri, "bits/referertest.html")
info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
Note not all the functional tests use the local server yet
-- some currently always access the internet regardless of
this option and the --uri option.
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/mechanize/_mechanize.py b/mechanize/_mechanize.py
index 09f3d39..8fa8cae 100644
--- a/mechanize/_mechanize.py
+++ b/mechanize/_mechanize.py
@@ -1,656 +1,670 @@
"""Stateful programmatic WWW navigation, after Perl's WWW::Mechanize.
Copyright 2003-2006 John J. Lee <[email protected]>
Copyright 2003 Andy Lester (original Perl code)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
-import urllib2, sys, copy, re
+import urllib2, sys, copy, re, os, urllib
+
from _useragent import UserAgentBase
from _html import DefaultFactory
import _response
import _request
import _rfc3986
__version__ = (0, 1, 8, "b", None) # 0.1.8b
class BrowserStateError(Exception): pass
class LinkNotFoundError(Exception): pass
class FormNotFoundError(Exception): pass
+def sanepathname2url(path):
+ urlpath = urllib.pathname2url(path)
+ if os.name == "nt" and urlpath.startswith("///"):
+ urlpath = urlpath[2:]
+ # XXX don't ask me about the mac...
+ return urlpath
+
+
class History:
"""
Though this will become public, the implied interface is not yet stable.
"""
def __init__(self):
self._history = [] # LIFO
def add(self, request, response):
self._history.append((request, response))
def back(self, n, _response):
response = _response # XXX move Browser._response into this class?
while n > 0 or response is None:
try:
request, response = self._history.pop()
except IndexError:
raise BrowserStateError("already at start of history")
n -= 1
return request, response
def clear(self):
del self._history[:]
def close(self):
for request, response in self._history:
if response is not None:
response.close()
del self._history[:]
class HTTPRefererProcessor(urllib2.BaseHandler):
def http_request(self, request):
# See RFC 2616 14.36. The only times we know the source of the
# request URI has a URI associated with it are redirect, and
# Browser.click() / Browser.submit() / Browser.follow_link().
# Otherwise, it's the user's job to add any Referer header before
# .open()ing.
if hasattr(request, "redirect_dict"):
request = self.parent._add_referer_header(
request, origin_request=False)
return request
https_request = http_request
class Browser(UserAgentBase):
"""Browser-like class with support for history, forms and links.
BrowserStateError is raised whenever the browser is in the wrong state to
complete the requested operation - eg., when .back() is called when the
browser history is empty, or when .follow_link() is called when the current
response does not contain HTML data.
Public attributes:
request: current request (mechanize.Request or urllib2.Request)
form: currently selected form (see .select_form())
"""
handler_classes = copy.copy(UserAgentBase.handler_classes)
handler_classes["_referer"] = HTTPRefererProcessor
default_features = copy.copy(UserAgentBase.default_features)
default_features.append("_referer")
def __init__(self,
factory=None,
history=None,
request_class=None,
):
"""
Only named arguments should be passed to this constructor.
factory: object implementing the mechanize.Factory interface.
history: object implementing the mechanize.History interface. Note
this interface is still experimental and may change in future.
        request_class: Request class to use.  Defaults to mechanize.Request
            for Pythons older than 2.4, and to urllib2.Request otherwise.
The Factory and History objects passed in are 'owned' by the Browser,
so they should not be shared across Browsers. In particular,
factory.set_response() should not be called except by the owning
Browser itself.
Note that the supplied factory's request_class is overridden by this
constructor, to ensure only one Request class is used.
"""
self._handle_referer = True
if history is None:
history = History()
self._history = history
if request_class is None:
if not hasattr(urllib2.Request, "add_unredirected_header"):
request_class = _request.Request
else:
request_class = urllib2.Request # Python >= 2.4
if factory is None:
factory = DefaultFactory()
factory.set_request_class(request_class)
self._factory = factory
self.request_class = request_class
self.request = None
self._set_response(None, False)
# do this last to avoid __getattr__ problems
UserAgentBase.__init__(self)
def close(self):
UserAgentBase.close(self)
if self._response is not None:
self._response.close()
if self._history is not None:
self._history.close()
self._history = None
# make use after .close easy to spot
self.form = None
self.request = self._response = None
self.request = self.response = self.set_response = None
self.geturl = self.reload = self.back = None
self.clear_history = self.set_cookie = self.links = self.forms = None
self.viewing_html = self.encoding = self.title = None
self.select_form = self.click = self.submit = self.click_link = None
self.follow_link = self.find_link = None
def set_handle_referer(self, handle):
"""Set whether to add Referer header to each request.
This base class does not implement this feature (so don't turn this on
if you're using this base class directly), but the subclass
mechanize.Browser does.
"""
self._set_handler("_referer", handle)
self._handle_referer = bool(handle)
def _add_referer_header(self, request, origin_request=True):
if self.request is None:
return request
scheme = request.get_type()
original_scheme = self.request.get_type()
if scheme not in ["http", "https"]:
return request
if not origin_request and not self.request.has_header("Referer"):
return request
if (self._handle_referer and
original_scheme in ["http", "https"] and
not (original_scheme == "https" and scheme != "https")):
# strip URL fragment (RFC 2616 14.36)
parts = _rfc3986.urlsplit(self.request.get_full_url())
parts = parts[:-1]+(None,)
referer = _rfc3986.urlunsplit(parts)
request.add_unredirected_header("Referer", referer)
return request
def open_novisit(self, url, data=None):
"""Open a URL without visiting it.
        The browser state (including .request, .response(), history, forms
        and links) is left unchanged by calling this function.
The interface is the same as for .open().
This is useful for things like fetching images.
See also .retrieve().
"""
return self._mech_open(url, data, visit=False)
def open(self, url, data=None):
return self._mech_open(url, data)
def _mech_open(self, url, data=None, update_history=True, visit=None):
try:
url.get_full_url
except AttributeError:
# string URL -- convert to absolute URL if required
scheme, authority = _rfc3986.urlsplit(url)[:2]
if scheme is None:
# relative URL
if self._response is None:
raise BrowserStateError(
"can't fetch relative reference: "
"not viewing any document")
url = _rfc3986.urljoin(self._response.geturl(), url)
request = self._request(url, data, visit)
visit = request.visit
if visit is None:
visit = True
if visit:
self._visit_request(request, update_history)
success = True
try:
response = UserAgentBase.open(self, request, data)
except urllib2.HTTPError, error:
success = False
if error.fp is None: # not a response
raise
response = error
## except (IOError, socket.error, OSError), error:
## # Yes, urllib2 really does raise all these :-((
## # See test_urllib2.py for examples of socket.gaierror and OSError,
## # plus note that FTPHandler raises IOError.
## # XXX I don't seem to have an example of exactly socket.error being
## # raised, only socket.gaierror...
## # I don't want to start fixing these here, though, since this is a
## # subclass of OpenerDirector, and it would break old code. Even in
## # Python core, a fix would need some backwards-compat. hack to be
## # acceptable.
## raise
if visit:
self._set_response(response, False)
response = copy.copy(self._response)
elif response is not None:
response = _response.upgrade_response(response)
if not success:
raise response
return response
def __str__(self):
text = []
text.append("<%s " % self.__class__.__name__)
if self._response:
text.append("visiting %s" % self._response.geturl())
else:
text.append("(not visiting a URL)")
if self.form:
text.append("\n selected form:\n %s\n" % str(self.form))
text.append(">")
return "".join(text)
def response(self):
"""Return a copy of the current response.
The returned object has the same interface as the object returned by
.open() (or urllib2.urlopen()).
"""
return copy.copy(self._response)
+ def open_local_file(self, filename):
+ path = sanepathname2url(os.path.abspath(filename))
+ url = 'file://'+path
+ return self.open(url)
+
def set_response(self, response):
"""Replace current response with (a copy of) response.
response may be None.
This is intended mostly for HTML-preprocessing.
"""
self._set_response(response, True)
def _set_response(self, response, close_current):
# sanity check, necessary but far from sufficient
if not (response is None or
(hasattr(response, "info") and hasattr(response, "geturl") and
hasattr(response, "read")
)
):
raise ValueError("not a response object")
self.form = None
if response is not None:
response = _response.upgrade_response(response)
if close_current and self._response is not None:
self._response.close()
self._response = response
self._factory.set_response(response)
def visit_response(self, response, request=None):
"""Visit the response, as if it had been .open()ed.
Unlike .set_response(), this updates history rather than replacing the
current response.
"""
if request is None:
request = _request.Request(response.geturl())
self._visit_request(request, True)
self._set_response(response, False)
def _visit_request(self, request, update_history):
if self._response is not None:
self._response.close()
if self.request is not None and update_history:
self._history.add(self.request, self._response)
self._response = None
# we want self.request to be assigned even if UserAgentBase.open
# fails
self.request = request
def geturl(self):
"""Get URL of current document."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._response.geturl()
def reload(self):
"""Reload current document, and return response object."""
if self.request is None:
raise BrowserStateError("no URL has yet been .open()ed")
if self._response is not None:
self._response.close()
return self._mech_open(self.request, update_history=False)
def back(self, n=1):
"""Go back n steps in history, and return response object.
n: go back this number of steps (default 1 step)
"""
if self._response is not None:
self._response.close()
self.request, response = self._history.back(n, self._response)
self.set_response(response)
if not response.read_complete:
return self.reload()
return copy.copy(response)
def clear_history(self):
self._history.clear()
def set_cookie(self, cookie_string):
"""Request to set a cookie.
Note that it is NOT necessary to call this method under ordinary
circumstances: cookie handling is normally entirely automatic. The
intended use case is rather to simulate the setting of a cookie by
client script in a web page (e.g. JavaScript). In that case, use of
this method is necessary because mechanize currently does not support
JavaScript, VBScript, etc.
The cookie is added in the same way as if it had arrived with the
current response, as a result of the current request. This means that,
        for example, if it is not appropriate to set the cookie based on the
        current request, no cookie will be set.
The cookie will be returned automatically with subsequent responses
made by the Browser instance whenever that's appropriate.
cookie_string should be a valid value of the Set-Cookie header.
For example:
browser.set_cookie(
"sid=abcdef; expires=Wednesday, 09-Nov-06 23:12:40 GMT")
        Currently, this method does not allow for adding RFC 2965 cookies.
This limitation will be lifted if anybody requests it.
"""
if self._response is None:
raise BrowserStateError("not viewing any document")
if self.request.get_type() not in ["http", "https"]:
raise BrowserStateError("can't set cookie for non-HTTP/HTTPS "
"transactions")
cookiejar = self._ua_handlers["_cookies"].cookiejar
response = self.response() # copy
headers = response.info()
headers["Set-cookie"] = cookie_string
cookiejar.extract_cookies(response, self.request)
def links(self, **kwds):
"""Return iterable over links (mechanize.Link objects)."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
links = self._factory.links()
if kwds:
return self._filter_links(links, **kwds)
else:
return links
def forms(self):
"""Return iterable over forms.
The returned form objects implement the ClientForm.HTMLForm interface.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.forms()
def global_form(self):
"""Return the global form object, or None if the factory implementation
did not supply one.
The "global" form object contains all controls that are not descendants of
any FORM element.
The returned form object implements the ClientForm.HTMLForm interface.
This is a separate method since the global form is not regarded as part
of the sequence of forms in the document -- mostly for
backwards-compatibility.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.global_form
def viewing_html(self):
"""Return whether the current response contains HTML data."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.is_html
    def encoding(self):
        """Return the encoding of the current response."""
if self._response is None:
raise BrowserStateError("not viewing any document")
return self._factory.encoding
def title(self):
"""Return title, or None if there is no title element in the document.
Tags are stripped or textified as described in docs for
PullParser.get_text() method of pullparser module.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
return self._factory.title
def select_form(self, name=None, predicate=None, nr=None):
"""Select an HTML form for input.
This is a bit like giving a form the "input focus" in a browser.
If a form is selected, the Browser object supports the HTMLForm
interface, so you can call methods like .set_value(), .set(), and
.click().
Another way to select a form is to assign to the .form attribute. The
form assigned should be one of the objects returned by the .forms()
method.
At least one of the name, predicate and nr arguments must be supplied.
If no matching form is found, mechanize.FormNotFoundError is raised.
If name is specified, then the form must have the indicated name.
If predicate is specified, then the form must match that function. The
predicate function is passed the HTMLForm as its single argument, and
should return a boolean value indicating whether the form matched.
nr, if supplied, is the sequence number of the form (where 0 is the
        first).  Note that form 0 is the first form matching all the other
        arguments (if supplied); it is not necessarily the first form in the
        document.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if (name is None) and (predicate is None) and (nr is None):
raise ValueError(
"at least one argument must be supplied to specify form")
orig_nr = nr
for form in self.forms():
if name is not None and name != form.name:
continue
if predicate is not None and not predicate(form):
continue
if nr:
nr -= 1
continue
self.form = form
break # success
else:
# failure
description = []
if name is not None: description.append("name '%s'" % name)
if predicate is not None:
description.append("predicate %s" % predicate)
if orig_nr is not None: description.append("nr %d" % orig_nr)
description = ", ".join(description)
raise FormNotFoundError("no form matching "+description)
def click(self, *args, **kwds):
"""See ClientForm.HTMLForm.click for documentation."""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
request = self.form.click(*args, **kwds)
return self._add_referer_header(request)
def submit(self, *args, **kwds):
"""Submit current form.
Arguments are as for ClientForm.HTMLForm.click().
Return value is same as for Browser.open().
"""
return self.open(self.click(*args, **kwds))
def click_link(self, link=None, **kwds):
"""Find a link and return a Request object for it.
Arguments are as for .find_link(), except that a link may be supplied
as the first argument.
"""
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
if not link:
link = self.find_link(**kwds)
else:
if kwds:
raise ValueError(
"either pass a Link, or keyword arguments, not both")
request = self.request_class(link.absolute_url)
return self._add_referer_header(request)
def follow_link(self, link=None, **kwds):
"""Find a link and .open() it.
Arguments are as for .click_link().
Return value is same as for Browser.open().
"""
return self.open(self.click_link(link, **kwds))
def find_link(self, **kwds):
"""Find a link in current page.
Links are returned as mechanize.Link objects.
# Return third link that .search()-matches the regexp "python"
# (by ".search()-matches", I mean that the regular expression method
# .search() is used, rather than .match()).
find_link(text_regex=re.compile("python"), nr=2)
# Return first http link in the current page that points to somewhere
# on python.org whose link text (after tags have been removed) is
# exactly "monty python".
find_link(text="monty python",
url_regex=re.compile("http.*python.org"))
# Return first link with exactly three HTML attributes.
find_link(predicate=lambda link: len(link.attrs) == 3)
Links include anchors (<a>), image maps (<area>), and frames (<frame>,
<iframe>).
All arguments must be passed by keyword, not position. Zero or more
arguments may be supplied. In order to find a link, all arguments
supplied must match.
If a matching link is not found, mechanize.LinkNotFoundError is raised.
text: link text between link tags: eg. <a href="blah">this bit</a> (as
returned by pullparser.get_compressed_text(), ie. without tags but
with opening tags "textified" as per the pullparser docs) must compare
equal to this argument, if supplied
text_regex: link text between tag (as defined above) must match the
regular expression object or regular expression string passed as this
argument, if supplied
name, name_regex: as for text and text_regex, but matched against the
name HTML attribute of the link tag
url, url_regex: as for text and text_regex, but matched against the
URL of the link tag (note this matches against Link.url, which is a
relative or absolute URL according to how it was written in the HTML)
tag: element name of opening tag, eg. "a"
predicate: a function taking a Link object as its single argument,
        returning a boolean result, indicating whether the links match
nr: matches the nth link that matches all other criteria (default 0)
"""
try:
return self._filter_links(self._factory.links(), **kwds).next()
except StopIteration:
raise LinkNotFoundError()
def __getattr__(self, name):
# pass through ClientForm / DOMForm methods and attributes
form = self.__dict__.get("form")
if form is None:
raise AttributeError(
"%s instance has no attribute %s (perhaps you forgot to "
".select_form()?)" % (self.__class__, name))
return getattr(form, name)
def _filter_links(self, links,
text=None, text_regex=None,
name=None, name_regex=None,
url=None, url_regex=None,
tag=None,
predicate=None,
nr=0
):
if not self.viewing_html():
raise BrowserStateError("not viewing HTML")
found_links = []
orig_nr = nr
for link in links:
if url is not None and url != link.url:
continue
if url_regex is not None and not re.search(url_regex, link.url):
continue
if (text is not None and
(link.text is None or text != link.text)):
continue
if (text_regex is not None and
(link.text is None or not re.search(text_regex, link.text))):
continue
if name is not None and name != dict(link.attrs).get("name"):
continue
if name_regex is not None:
link_name = dict(link.attrs).get("name")
if link_name is None or not re.search(name_regex, link_name):
continue
if tag is not None and tag != link.tag:
continue
if predicate is not None and not predicate(link):
continue
if nr:
nr -= 1
continue
yield link
nr = orig_nr
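
# Usage sketch for the convenience method added in this commit (illustrative,
# not part of the diff; the filename and URL are assumptions):
import mechanize
br = mechanize.Browser()
response = br.open_local_file("mechanize/_mechanize.py")
print response.geturl()                       # file:// URL via sanepathname2url()
br.open("http://wwwsearch.sourceforge.net/")  # ordinary open() still works
br.back()                                     # pops back to the local file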
|
Almad/Mechanize
|
6c88e95464785acd7ff454ee225b03a35bc823bc
|
Fix test failure
|
diff --git a/test/test_useragent.py b/test/test_useragent.py
index 387a4bc..3e8e325 100644
--- a/test/test_useragent.py
+++ b/test/test_useragent.py
@@ -1,58 +1,58 @@
#!/usr/bin/env python
from unittest import TestCase
import mechanize
from test_browser import make_mock_handler
class UserAgentTests(TestCase):
def test_set_handled_schemes(self):
class MockHandlerClass(make_mock_handler()):
def __call__(self): return self
class BlahHandlerClass(MockHandlerClass): pass
class BlahProcessorClass(MockHandlerClass): pass
BlahHandler = BlahHandlerClass([("blah_open", None)])
BlahProcessor = BlahProcessorClass([("blah_request", None)])
class TestUserAgent(mechanize.UserAgent):
default_others = []
default_features = []
handler_classes = mechanize.UserAgent.handler_classes.copy()
handler_classes.update(
{"blah": BlahHandler, "_blah": BlahProcessor})
ua = TestUserAgent()
- self.assertEqual(len(ua.handlers), 5)
+ self.assertEqual(len(ua.handlers), 4)
ua.set_handled_schemes(["http", "https"])
self.assertEqual(len(ua.handlers), 2)
self.assertRaises(ValueError,
ua.set_handled_schemes, ["blah", "non-existent"])
self.assertRaises(ValueError,
ua.set_handled_schemes, ["blah", "_blah"])
ua.set_handled_schemes(["blah"])
req = mechanize.Request("blah://example.com/")
r = ua.open(req)
exp_calls = [("blah_open", (req,), {})]
assert len(ua.calls) == len(exp_calls)
for got, expect in zip(ua.calls, exp_calls):
self.assertEqual(expect, got[1:])
ua.calls = []
req = mechanize.Request("blah://example.com/")
ua._set_handler("_blah", True)
r = ua.open(req)
exp_calls = [
("blah_request", (req,), {}),
("blah_open", (req,), {})]
assert len(ua.calls) == len(exp_calls)
for got, expect in zip(ua.calls, exp_calls):
self.assertEqual(expect, got[1:])
ua._set_handler("_blah", True)
if __name__ == "__main__":
import unittest
unittest.main()
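
# Sketch of the scheme-handler API exercised by the fixed assertion above
# (illustrative, not part of the diff; the exact handler count depends on the
# default_schemes compiled into _useragent.py):
import mechanize
ua = mechanize.UserAgent()
ua.set_handled_schemes(["http", "https"])      # keep only these scheme handlers
try:
    ua.set_handled_schemes(["http", "bogus"])  # unknown scheme is rejected...
except ValueError:
    pass                                       # ...and the handler set is unchanged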
|
Almad/Mechanize
|
16e9a3beb31a268aab241cfa3b1981862f4f1aa2
|
Remove gopher support, since Python 2.6 no longer supports it ([email protected])
|
diff --git a/mechanize/__init__.py b/mechanize/__init__.py
index 8bea889..983ca7b 100644
--- a/mechanize/__init__.py
+++ b/mechanize/__init__.py
@@ -1,125 +1,123 @@
__all__ = [
'AbstractBasicAuthHandler',
'AbstractDigestAuthHandler',
'BaseHandler',
'Browser',
'BrowserStateError',
'CacheFTPHandler',
'ContentTooShortError',
'Cookie',
'CookieJar',
'CookiePolicy',
'DefaultCookiePolicy',
'DefaultFactory',
'FTPHandler',
'Factory',
'FileCookieJar',
'FileHandler',
'FormNotFoundError',
'FormsFactory',
- 'GopherError',
- 'GopherHandler',
'HTTPBasicAuthHandler',
'HTTPCookieProcessor',
'HTTPDefaultErrorHandler',
'HTTPDigestAuthHandler',
'HTTPEquivProcessor',
'HTTPError',
'HTTPErrorProcessor',
'HTTPHandler',
'HTTPPasswordMgr',
'HTTPPasswordMgrWithDefaultRealm',
'HTTPProxyPasswordMgr',
'HTTPRedirectDebugProcessor',
'HTTPRedirectHandler',
'HTTPRefererProcessor',
'HTTPRefreshProcessor',
'HTTPRequestUpgradeProcessor',
'HTTPResponseDebugProcessor',
'HTTPRobotRulesProcessor',
'HTTPSClientCertMgr',
'HTTPSHandler',
'HeadParser',
'History',
'LWPCookieJar',
'Link',
'LinkNotFoundError',
'LinksFactory',
'LoadError',
'MSIECookieJar',
'MozillaCookieJar',
'OpenerDirector',
'OpenerFactory',
'ParseError',
'ProxyBasicAuthHandler',
'ProxyDigestAuthHandler',
'ProxyHandler',
'Request',
'ResponseUpgradeProcessor',
'RobotExclusionError',
'RobustFactory',
'RobustFormsFactory',
'RobustLinksFactory',
'RobustTitleFactory',
'SeekableProcessor',
'SeekableResponseOpener',
'TitleFactory',
'URLError',
'USE_BARE_EXCEPT',
'UnknownHandler',
'UserAgent',
'UserAgentBase',
'XHTMLCompatibleHeadParser',
'__version__',
'build_opener',
'install_opener',
'lwp_cookie_str',
'make_response',
'request_host',
'response_seek_wrapper', # XXX deprecate in public interface?
    'seek_wrapped_response', # XXX should probably use this internally in place of response_seek_wrapper()
'str2time',
'urlopen',
'urlretrieve']
from _mechanize import __version__
# high-level stateful browser-style interface
from _mechanize import \
Browser, History, \
BrowserStateError, LinkNotFoundError, FormNotFoundError
# configurable URL-opener interface
from _useragent import UserAgentBase, UserAgent
from _html import \
ParseError, \
Link, \
Factory, DefaultFactory, RobustFactory, \
FormsFactory, LinksFactory, TitleFactory, \
RobustFormsFactory, RobustLinksFactory, RobustTitleFactory
# urllib2 work-alike interface (part from mechanize, part from urllib2)
# This is a superset of the urllib2 interface.
from _urllib2 import *
# misc
from _opener import ContentTooShortError, OpenerFactory, urlretrieve
from _util import http2time as str2time
from _response import \
response_seek_wrapper, seek_wrapped_response, make_response
from _http import HeadParser
try:
from _http import XHTMLCompatibleHeadParser
except ImportError:
pass
# cookies
from _clientcookie import Cookie, CookiePolicy, DefaultCookiePolicy, \
CookieJar, FileCookieJar, LoadError, request_host
from _lwpcookiejar import LWPCookieJar, lwp_cookie_str
from _mozillacookiejar import MozillaCookieJar
from _msiecookiejar import MSIECookieJar
# If you hate the idea of turning bugs into warnings, do:
# import mechanize; mechanize.USE_BARE_EXCEPT = False
USE_BARE_EXCEPT = True
diff --git a/mechanize/_urllib2.py b/mechanize/_urllib2.py
index d35df21..8020f3b 100644
--- a/mechanize/_urllib2.py
+++ b/mechanize/_urllib2.py
@@ -1,62 +1,60 @@
# urllib2 work-alike interface
# ...from urllib2...
from urllib2 import \
URLError, \
- HTTPError, \
- GopherError
+ HTTPError
# ...and from mechanize
from _opener import OpenerDirector, \
SeekableResponseOpener, \
build_opener, install_opener, urlopen
from _auth import \
HTTPPasswordMgr, \
HTTPPasswordMgrWithDefaultRealm, \
AbstractBasicAuthHandler, \
AbstractDigestAuthHandler, \
HTTPProxyPasswordMgr, \
ProxyHandler, \
ProxyBasicAuthHandler, \
ProxyDigestAuthHandler, \
HTTPBasicAuthHandler, \
HTTPDigestAuthHandler, \
HTTPSClientCertMgr
from _request import \
Request
from _http import \
RobotExclusionError
# handlers...
# ...from urllib2...
from urllib2 import \
BaseHandler, \
UnknownHandler, \
FTPHandler, \
CacheFTPHandler, \
- FileHandler, \
- GopherHandler
+ FileHandler
# ...and from mechanize
from _http import \
HTTPHandler, \
HTTPDefaultErrorHandler, \
HTTPRedirectHandler, \
HTTPEquivProcessor, \
HTTPCookieProcessor, \
HTTPRefererProcessor, \
HTTPRefreshProcessor, \
HTTPErrorProcessor, \
HTTPRobotRulesProcessor
from _upgrade import \
HTTPRequestUpgradeProcessor, \
ResponseUpgradeProcessor
from _debug import \
HTTPResponseDebugProcessor, \
HTTPRedirectDebugProcessor
from _seek import \
SeekableProcessor
# crap ATM
## from _gzip import \
## HTTPGzipProcessor
import httplib
if hasattr(httplib, 'HTTPS'):
from _http import HTTPSHandler
del httplib
diff --git a/mechanize/_useragent.py b/mechanize/_useragent.py
index a6d5769..272605c 100644
--- a/mechanize/_useragent.py
+++ b/mechanize/_useragent.py
@@ -1,348 +1,347 @@
"""Convenient HTTP UserAgent class.
This is a subclass of urllib2.OpenerDirector.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import sys, warnings, urllib2
import _opener
import _urllib2
import _auth
import _gzip
import _response
class UserAgentBase(_opener.OpenerDirector):
"""Convenient user-agent class.
Do not use .add_handler() to add a handler for something already dealt with
by this code.
The only reason at present for the distinction between UserAgent and
UserAgentBase is so that classes that depend on .seek()able responses
(e.g. mechanize.Browser) can inherit from UserAgentBase. The subclass
UserAgent exposes a .set_seekable_responses() method that allows switching
off the adding of a .seek() method to responses.
Public attributes:
addheaders: list of (name, value) pairs specifying headers to send with
every request, unless they are overridden in the Request instance.
>>> ua = UserAgentBase()
>>> ua.addheaders = [
... ("User-agent", "Mozilla/5.0 (compatible)"),
... ("From", "[email protected]")]
"""
handler_classes = {
# scheme handlers
"http": _urllib2.HTTPHandler,
# CacheFTPHandler is buggy, at least in 2.3, so we don't use it
"ftp": _urllib2.FTPHandler,
"file": _urllib2.FileHandler,
- "gopher": _urllib2.GopherHandler,
# other handlers
"_unknown": _urllib2.UnknownHandler,
# HTTP{S,}Handler depend on HTTPErrorProcessor too
"_http_error": _urllib2.HTTPErrorProcessor,
"_http_request_upgrade": _urllib2.HTTPRequestUpgradeProcessor,
"_http_default_error": _urllib2.HTTPDefaultErrorHandler,
# feature handlers
"_basicauth": _urllib2.HTTPBasicAuthHandler,
"_digestauth": _urllib2.HTTPDigestAuthHandler,
"_redirect": _urllib2.HTTPRedirectHandler,
"_cookies": _urllib2.HTTPCookieProcessor,
"_refresh": _urllib2.HTTPRefreshProcessor,
"_equiv": _urllib2.HTTPEquivProcessor,
"_proxy": _urllib2.ProxyHandler,
"_proxy_basicauth": _urllib2.ProxyBasicAuthHandler,
"_proxy_digestauth": _urllib2.ProxyDigestAuthHandler,
"_robots": _urllib2.HTTPRobotRulesProcessor,
"_gzip": _gzip.HTTPGzipProcessor, # experimental!
# debug handlers
"_debug_redirect": _urllib2.HTTPRedirectDebugProcessor,
"_debug_response_body": _urllib2.HTTPResponseDebugProcessor,
}
- default_schemes = ["http", "ftp", "file", "gopher"]
+ default_schemes = ["http", "ftp", "file"]
default_others = ["_unknown", "_http_error", "_http_request_upgrade",
"_http_default_error",
]
default_features = ["_redirect", "_cookies",
"_refresh", "_equiv",
"_basicauth", "_digestauth",
"_proxy", "_proxy_basicauth", "_proxy_digestauth",
"_robots",
]
if hasattr(_urllib2, 'HTTPSHandler'):
handler_classes["https"] = _urllib2.HTTPSHandler
default_schemes.append("https")
def __init__(self):
_opener.OpenerDirector.__init__(self)
ua_handlers = self._ua_handlers = {}
for scheme in (self.default_schemes+
self.default_others+
self.default_features):
klass = self.handler_classes[scheme]
ua_handlers[scheme] = klass()
for handler in ua_handlers.itervalues():
self.add_handler(handler)
# Yuck.
# Ensure correct default constructor args were passed to
# HTTPRefreshProcessor and HTTPEquivProcessor.
if "_refresh" in ua_handlers:
self.set_handle_refresh(True)
if "_equiv" in ua_handlers:
self.set_handle_equiv(True)
# Ensure default password managers are installed.
pm = ppm = None
if "_basicauth" in ua_handlers or "_digestauth" in ua_handlers:
pm = _urllib2.HTTPPasswordMgrWithDefaultRealm()
if ("_proxy_basicauth" in ua_handlers or
"_proxy_digestauth" in ua_handlers):
ppm = _auth.HTTPProxyPasswordMgr()
self.set_password_manager(pm)
self.set_proxy_password_manager(ppm)
# set default certificate manager
if "https" in ua_handlers:
cm = _urllib2.HTTPSClientCertMgr()
self.set_client_cert_manager(cm)
def close(self):
_opener.OpenerDirector.close(self)
self._ua_handlers = None
# XXX
## def set_timeout(self, timeout):
## self._timeout = timeout
## def set_http_connection_cache(self, conn_cache):
## self._http_conn_cache = conn_cache
## def set_ftp_connection_cache(self, conn_cache):
## # XXX ATM, FTP has cache as part of handler; should it be separate?
## self._ftp_conn_cache = conn_cache
def set_handled_schemes(self, schemes):
"""Set sequence of URL scheme (protocol) strings.
For example: ua.set_handled_schemes(["http", "ftp"])
If this fails (with ValueError) because you've passed an unknown
scheme, the set of handled schemes will not be changed.
"""
want = {}
for scheme in schemes:
if scheme.startswith("_"):
raise ValueError("not a scheme '%s'" % scheme)
if scheme not in self.handler_classes:
raise ValueError("unknown scheme '%s'")
want[scheme] = None
# get rid of scheme handlers we don't want
for scheme, oldhandler in self._ua_handlers.items():
if scheme.startswith("_"): continue # not a scheme handler
if scheme not in want:
self._replace_handler(scheme, None)
else:
del want[scheme] # already got it
# add the scheme handlers that are missing
for scheme in want.keys():
self._set_handler(scheme, True)
def set_cookiejar(self, cookiejar):
"""Set a mechanize.CookieJar, or None."""
self._set_handler("_cookies", obj=cookiejar)
# XXX could use Greg Stein's httpx for some of this instead?
# or httplib2??
def set_proxies(self, proxies):
"""Set a dictionary mapping URL scheme to proxy specification, or None.
e.g. {"http": "joe:[email protected]:3128",
"ftp": "proxy.example.com"}
"""
self._set_handler("_proxy", obj=proxies)
def add_password(self, url, user, password, realm=None):
self._password_manager.add_password(realm, url, user, password)
def add_proxy_password(self, user, password, hostport=None, realm=None):
self._proxy_password_manager.add_password(
realm, hostport, user, password)
def add_client_certificate(self, url, key_file, cert_file):
"""Add an SSL client certificate, for HTTPS client auth.
key_file and cert_file must be filenames of the key and certificate
files, in PEM format. You can use e.g. OpenSSL to convert a p12 (PKCS
12) file to PEM format:
openssl pkcs12 -clcerts -nokeys -in cert.p12 -out cert.pem
openssl pkcs12 -nocerts -in cert.p12 -out key.pem
Note that client certificate password input is very inflexible ATM. At
the moment this seems to be console only, which is presumably the
default behaviour of libopenssl. In future mechanize may support
third-party libraries that (I assume) allow more options here.
"""
self._client_cert_manager.add_key_cert(url, key_file, cert_file)
# the following are rarely useful -- use add_password / add_proxy_password
# instead
def set_password_manager(self, password_manager):
"""Set a mechanize.HTTPPasswordMgrWithDefaultRealm, or None."""
self._password_manager = password_manager
self._set_handler("_basicauth", obj=password_manager)
self._set_handler("_digestauth", obj=password_manager)
def set_proxy_password_manager(self, password_manager):
"""Set a mechanize.HTTPProxyPasswordMgr, or None."""
self._proxy_password_manager = password_manager
self._set_handler("_proxy_basicauth", obj=password_manager)
self._set_handler("_proxy_digestauth", obj=password_manager)
def set_client_cert_manager(self, cert_manager):
"""Set a mechanize.HTTPClientCertMgr, or None."""
self._client_cert_manager = cert_manager
handler = self._ua_handlers["https"]
handler.client_cert_manager = cert_manager
# these methods all take a boolean parameter
def set_handle_robots(self, handle):
"""Set whether to observe rules from robots.txt."""
self._set_handler("_robots", handle)
def set_handle_redirect(self, handle):
"""Set whether to handle HTTP 30x redirections."""
self._set_handler("_redirect", handle)
def set_handle_refresh(self, handle, max_time=None, honor_time=True):
"""Set whether to handle HTTP Refresh headers."""
self._set_handler("_refresh", handle, constructor_kwds=
{"max_time": max_time, "honor_time": honor_time})
def set_handle_equiv(self, handle, head_parser_class=None):
"""Set whether to treat HTML http-equiv headers like HTTP headers.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
if head_parser_class is not None:
constructor_kwds = {"head_parser_class": head_parser_class}
else:
constructor_kwds={}
self._set_handler("_equiv", handle, constructor_kwds=constructor_kwds)
def set_handle_gzip(self, handle):
"""Handle gzip transfer encoding.
"""
if handle:
warnings.warn(
"gzip transfer encoding is experimental!", stacklevel=2)
self._set_handler("_gzip", handle)
def set_debug_redirects(self, handle):
"""Log information about HTTP redirects (including refreshes).
Logging is performed using module logging. The logger name is
"mechanize.http_redirects". To actually print some debug output,
eg:
import sys, logging
logger = logging.getLogger("mechanize.http_redirects")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
Other logger names relevant to this module:
"mechanize.http_responses"
"mechanize.cookies" (or "cookielib" if running Python 2.4)
To turn on everything:
import sys, logging
logger = logging.getLogger("mechanize")
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
"""
self._set_handler("_debug_redirect", handle)
def set_debug_responses(self, handle):
"""Log HTTP response bodies.
See docstring for .set_debug_redirects() for details of logging.
Response objects may be .seek()able if this is set (currently returned
responses are, raised HTTPError exception responses are not).
"""
self._set_handler("_debug_response_body", handle)
def set_debug_http(self, handle):
"""Print HTTP headers to sys.stdout."""
level = int(bool(handle))
for scheme in "http", "https":
h = self._ua_handlers.get(scheme)
if h is not None:
h.set_http_debuglevel(level)
def _set_handler(self, name, handle=None, obj=None,
constructor_args=(), constructor_kwds={}):
if handle is None:
handle = obj is not None
if handle:
handler_class = self.handler_classes[name]
if obj is not None:
newhandler = handler_class(obj)
else:
newhandler = handler_class(*constructor_args, **constructor_kwds)
else:
newhandler = None
self._replace_handler(name, newhandler)
def _replace_handler(self, name, newhandler=None):
# first, if handler was previously added, remove it
if name is not None:
handler = self._ua_handlers.get(name)
if handler:
try:
self.handlers.remove(handler)
except ValueError:
pass
# then add the replacement, if any
if newhandler is not None:
self.add_handler(newhandler)
self._ua_handlers[name] = newhandler
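# Editor's note, tracing the dispatch above: ua.set_handle_robots(True)
# calls _set_handler("_robots", True), which looks up
# self.handler_classes["_robots"], instantiates it with no arguments, and
# passes the instance to _replace_handler().  _replace_handler() first
# removes any handler previously registered under "_robots" from
# self.handlers, then add_handler()s the replacement and records it in
# self._ua_handlers.  With handle=False (or handle unset and obj=None),
# newhandler is None, so the old handler is removed and nothing replaces
# it -- that is how a feature is switched off.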
class UserAgent(UserAgentBase):
def __init__(self):
UserAgentBase.__init__(self)
self._seekable = False
def set_seekable_responses(self, handle):
"""Make response objects .seek()able."""
self._seekable = bool(handle)
def open(self, fullurl, data=None):
if self._seekable:
def bound_open(fullurl, data=None):
return UserAgentBase.open(self, fullurl, data)
response = _opener.wrapped_open(
bound_open, _response.seek_wrapped_response, fullurl, data)
else:
response = UserAgentBase.open(self, fullurl, data)
return response
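# Editor's usage sketch (not part of the original module) of the seekable
# behaviour above; the URL is illustrative:
#
#   ua = mechanize.UserAgent()
#   ua.set_seekable_responses(True)
#   r = ua.open("http://example.com/")
#   data = r.read()
#   r.seek(0)              # works because open() wrapped the response
#   assert r.read() == data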
|
Almad/Mechanize
|
a662cea1e66a200fe9c3972d897e1542c63b4027
|
Clean up local server imports
|
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
index 6bf8be3..8ca8a66 100644
--- a/test-tools/twisted-localserver.py
+++ b/test-tools/twisted-localserver.py
@@ -1,124 +1,124 @@
#!/usr/bin/env python
"""
%prog port
e.g. %prog 8000
Runs a local server to point the mechanize functional tests at. Example:
python test-tools/twisted-localserver.py 8042
python functional_tests.py --uri=http://localhost:8042/
You need Twisted version XXX to run it:
XXX installation instructions
"""
-import os, sys, re, time
-from twisted.web2 import server, http, resource, channel, \
- static, http_headers, responsecode, twcgi
+import sys, re
+from twisted.web2 import server, http, resource, channel, \
+ http_headers, responsecode, twcgi
from twisted.internet import reactor
def html(title=None):
f = open("README.html", "r")
html = f.read()
if title is not None:
html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
return html
MECHANIZE_HTML = html()
ROOT_HTML = html("Python bits")
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
REFERER_TEST_HTML = """\
<html>
<head>
<title>mechanize Referer (sic) test page</title>
</head>
<body>
<p>This page exists to test the Referer functionality of <a href="/mechanize">mechanize</a>.
<p><a href="/cgi-bin/cookietest.cgi">Here</a> is a link to a page that displays the Referer header.
</body>
</html>"""
class Page(resource.Resource):
addSlash = True
content_type = http_headers.MimeType("text", "html")
def render(self, ctx):
return http.Response(
responsecode.OK,
{"content-type": self.content_type},
self.text)
def _make_page(parent, name, text,
content_type="text/html",
leaf=False):
page = Page()
page.text = text
base_type, specific_type = content_type.split("/")
page.content_type = http_headers.MimeType(base_type, specific_type)
page.addSlash = not leaf
setattr(parent, "child_"+name, page)
return page
def make_page(parent, name, text,
content_type="text/html"):
return _make_page(parent, name, text, content_type, leaf=False)
def make_leaf_page(parent, name, text,
content_type="text/html"):
return _make_page(parent, name, text, content_type, leaf=True)
def make_redirect(parent, name, location_relative_ref):
redirect = resource.RedirectResource(path=location_relative_ref)
setattr(parent, "child_"+name, redirect)
return redirect
def make_cgi_bin(parent, name, dir_name):
cgi_bin = twcgi.CGIDirectory(dir_name)
setattr(parent, "child_"+name, cgi_bin)
return cgi_bin
def main():
root = Page()
root.text = ROOT_HTML
make_page(root, "mechanize", MECHANIZE_HTML)
make_leaf_page(root, "robots.txt",
"User-Agent: *\nDisallow: /norobots",
"text/plain")
make_leaf_page(root, "robots", "Hello, robots.", "text/plain")
make_leaf_page(root, "norobots", "Hello, non-robots.", "text/plain")
bits = make_page(root, "bits", "GeneralFAQ.html")
make_leaf_page(bits, "cctest2.txt",
"Hello ClientCookie functional test suite.",
"text/plain")
make_leaf_page(bits, "referertest.html", REFERER_TEST_HTML)
make_leaf_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
make_redirect(root, "redirected", "/doesnotexist")
make_cgi_bin(root, "cgi-bin", "test-tools")
site = server.Site(root)
reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
reactor.run()
main()
|
Almad/Mechanize
|
c3f5c3bbd40adf5ce514b6bc232e4328dc1bff69
|
Move cookietest.cgi from examples into test-tools
|
diff --git a/examples/cookietest.cgi b/test-tools/cookietest.cgi
similarity index 91%
rename from examples/cookietest.cgi
rename to test-tools/cookietest.cgi
index 4daf737..b66d20c 100755
--- a/examples/cookietest.cgi
+++ b/test-tools/cookietest.cgi
@@ -1,43 +1,42 @@
#!/usr/bin/python
# -*-python-*-
-# The copy of this script that lives at wwwsearch.sf.net is used by the
-# mechanize functional tests.
+# This is used by functional_tests.py
print "Content-Type: text/html"
print "Set-Cookie: foo=bar\n"
import sys, os, string, cgi, Cookie
from types import ListType
print "<html><head><title>Cookies and form submission parameters</title>"
cookie = Cookie.SimpleCookie()
cookieHdr = os.environ.get("HTTP_COOKIE", "")
cookie.load(cookieHdr)
if not cookie.has_key("foo"):
print '<meta http-equiv="refresh" content="5">'
print "</head>"
print "<p>Received cookies:</p>"
print "<pre>"
print cgi.escape(os.environ.get("HTTP_COOKIE", ""))
print "</pre>"
if cookie.has_key("foo"):
print "Your browser supports cookies!"
print "<p>Referer:</p>"
print "<pre>"
print cgi.escape(os.environ.get("HTTP_REFERER", ""))
print "</pre>"
form = cgi.FieldStorage()
print "<p>Received parameters:</p>"
print "<pre>"
for k in form.keys():
v = form[k]
if isinstance(v, ListType):
vs = []
for item in v:
vs.append(item.value)
text = string.join(vs, ", ")
else:
text = v.value
print "%s: %s" % (cgi.escape(k), cgi.escape(text))
print "</pre></html>"
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
index f7a305a..6bf8be3 100644
--- a/test-tools/twisted-localserver.py
+++ b/test-tools/twisted-localserver.py
@@ -1,124 +1,124 @@
#!/usr/bin/env python
"""
%prog port
e.g. %prog 8000
Runs a local server to point the mechanize functional tests at. Example:
python test-tools/twisted-localserver.py 8042
python functional_tests.py --uri=http://localhost:8042/
You need Twisted version XXX to run it:
XXX installation instructions
"""
import os, sys, re, time
from twisted.web2 import server, http, resource, channel, \
static, http_headers, responsecode, twcgi
from twisted.internet import reactor
def html(title=None):
f = open("README.html", "r")
html = f.read()
if title is not None:
html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
return html
MECHANIZE_HTML = html()
ROOT_HTML = html("Python bits")
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
REFERER_TEST_HTML = """\
<html>
<head>
<title>mechanize Referer (sic) test page</title>
</head>
<body>
<p>This page exists to test the Referer functionality of <a href="/mechanize">mechanize</a>.
<p><a href="/cgi-bin/cookietest.cgi">Here</a> is a link to a page that displays the Referer header.
</body>
</html>"""
class Page(resource.Resource):
addSlash = True
content_type = http_headers.MimeType("text", "html")
def render(self, ctx):
return http.Response(
responsecode.OK,
{"content-type": self.content_type},
self.text)
def _make_page(parent, name, text,
content_type="text/html",
leaf=False):
page = Page()
page.text = text
base_type, specific_type = content_type.split("/")
page.content_type = http_headers.MimeType(base_type, specific_type)
page.addSlash = not leaf
setattr(parent, "child_"+name, page)
return page
def make_page(parent, name, text,
content_type="text/html"):
return _make_page(parent, name, text, content_type, leaf=False)
def make_leaf_page(parent, name, text,
content_type="text/html"):
return _make_page(parent, name, text, content_type, leaf=True)
def make_redirect(parent, name, location_relative_ref):
redirect = resource.RedirectResource(path=location_relative_ref)
setattr(parent, "child_"+name, redirect)
return redirect
def make_cgi_bin(parent, name, dir_name):
cgi_bin = twcgi.CGIDirectory(dir_name)
setattr(parent, "child_"+name, cgi_bin)
return cgi_bin
def main():
root = Page()
root.text = ROOT_HTML
make_page(root, "mechanize", MECHANIZE_HTML)
make_leaf_page(root, "robots.txt",
"User-Agent: *\nDisallow: /norobots",
"text/plain")
make_leaf_page(root, "robots", "Hello, robots.", "text/plain")
make_leaf_page(root, "norobots", "Hello, non-robots.", "text/plain")
bits = make_page(root, "bits", "GeneralFAQ.html")
make_leaf_page(bits, "cctest2.txt",
"Hello ClientCookie functional test suite.",
"text/plain")
make_leaf_page(bits, "referertest.html", REFERER_TEST_HTML)
make_leaf_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
make_redirect(root, "redirected", "/doesnotexist")
- make_cgi_bin(root, "cgi-bin", "examples")
+ make_cgi_bin(root, "cgi-bin", "test-tools")
site = server.Site(root)
reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
reactor.run()
main()
|
Almad/Mechanize
|
765d16166afbea3b224d00b32d727e199acf6ec0
|
Minor refactoring
|
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
index 84b81ef..f7a305a 100644
--- a/test-tools/twisted-localserver.py
+++ b/test-tools/twisted-localserver.py
@@ -1,124 +1,124 @@
#!/usr/bin/env python
"""
%prog port
e.g. %prog 8000
Runs a local server to point the mechanize functional tests at. Example:
python test-tools/twisted-localserver.py 8042
python functional_tests.py --uri=http://localhost:8042/
You need Twisted version XXX to run it:
XXX installation instructions
"""
import os, sys, re, time
from twisted.web2 import server, http, resource, channel, \
static, http_headers, responsecode, twcgi
from twisted.internet import reactor
def html(title=None):
f = open("README.html", "r")
html = f.read()
if title is not None:
html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
return html
MECHANIZE_HTML = html()
ROOT_HTML = html("Python bits")
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
REFERER_TEST_HTML = """\
<html>
<head>
<title>mechanize Referer (sic) test page</title>
</head>
<body>
<p>This page exists to test the Referer functionality of <a href="/mechanize">mechanize</a>.
<p><a href="/cgi-bin/cookietest.cgi">Here</a> is a link to a page that displays the Referer header.
</body>
</html>"""
class Page(resource.Resource):
addSlash = True
content_type = http_headers.MimeType("text", "html")
def render(self, ctx):
return http.Response(
responsecode.OK,
{"content-type": self.content_type},
self.text)
-def _make_page(root, name, text,
+def _make_page(parent, name, text,
content_type="text/html",
leaf=False):
page = Page()
page.text = text
base_type, specific_type = content_type.split("/")
page.content_type = http_headers.MimeType(base_type, specific_type)
page.addSlash = not leaf
- setattr(root, "child_"+name, page)
+ setattr(parent, "child_"+name, page)
return page
-def make_page(root, name, text,
+def make_page(parent, name, text,
content_type="text/html"):
- return _make_page(root, name, text, content_type, leaf=False)
+ return _make_page(parent, name, text, content_type, leaf=False)
-def make_leaf_page(root, name, text,
+def make_leaf_page(parent, name, text,
content_type="text/html"):
- return _make_page(root, name, text, content_type, leaf=True)
+ return _make_page(parent, name, text, content_type, leaf=True)
-def make_redirect(root, name, location_relative_ref):
+def make_redirect(parent, name, location_relative_ref):
redirect = resource.RedirectResource(path=location_relative_ref)
- setattr(root, "child_"+name, redirect)
+ setattr(parent, "child_"+name, redirect)
return redirect
def make_cgi_bin(parent, name, dir_name):
cgi_bin = twcgi.CGIDirectory(dir_name)
setattr(parent, "child_"+name, cgi_bin)
return cgi_bin
def main():
root = Page()
root.text = ROOT_HTML
make_page(root, "mechanize", MECHANIZE_HTML)
make_leaf_page(root, "robots.txt",
"User-Agent: *\nDisallow: /norobots",
"text/plain")
make_leaf_page(root, "robots", "Hello, robots.", "text/plain")
make_leaf_page(root, "norobots", "Hello, non-robots.", "text/plain")
bits = make_page(root, "bits", "GeneralFAQ.html")
make_leaf_page(bits, "cctest2.txt",
"Hello ClientCookie functional test suite.",
"text/plain")
make_leaf_page(bits, "referertest.html", REFERER_TEST_HTML)
make_leaf_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
make_redirect(root, "redirected", "/doesnotexist")
make_cgi_bin(root, "cgi-bin", "examples")
site = server.Site(root)
reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
reactor.run()
main()
|
Almad/Mechanize
|
0da1566c879baf1120e78af304cb64f97297ec65
|
Get rid of the remaining hard-coded URLs in functional tests :-)
|
diff --git a/functional_tests.py b/functional_tests.py
index ef29371..670c46c 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,414 +1,413 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
import os, sys
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
# XXX
# document twisted.web2 install (I forgot how I did it -- reinstall!)
# implement remaining stuff used by functional_tests.py
# in twisted-localserver.py:
# - 302 followed by 404 response
# - helper cgi script for cookies &c.
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
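# Editor's note on the NT branch above: on Windows, urllib.pathname2url()
# can return something like "///C:/dir/file" for r"C:\dir\file"; dropping
# the first two slashes leaves "/C:/dir/file", so that prefixing "file://"
# (as test_file_url below does) yields a well-formed "file:///C:/dir/file".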
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 and 404 (/redirected is configured to redirect
# to a non-existent URL /nonexistent) has caused problems in the past
# due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
- referer = "http://wwwsearch.sourceforge.net/bits/referertest.html"
- info = "http://wwwsearch.sourceforge.net/cgi-bin/cookietest.cgi"
+ referer = urljoin(self.uri, "bits/referertest.html")
+ info = urljoin(self.uri, "/cgi-bin/cookietest.cgi")
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
- r = urlopen(
- "http://wwwsearch.sourceforge.net/cgi-bin/cookietest.cgi")
+ r = urlopen(urljoin(self.uri, "/cgi-bin/cookietest.cgi"))
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
- url = "http://www.python.org/"
+ url = urljoin(self.uri, "/mechanize/")
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
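# Editor's note on the magic numbers above: RELOAD_TEST_HTML pads the page
# with ("0123456789ABCDEF"*4 + "\n") * 61, i.e. 61 lines of 65 bytes =
# 3965 bytes of filler, giving 4202 bytes for the whole page.  The "< 4097"
# assertion says at most the first 4096 bytes were read (roughly what HEAD
# parsing consumes), which is why the link near the end is only visible
# after a full reload.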
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
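# Editor's sketch (not part of the original tests): CallbackVerifier checks
# the urlretrieve reporthook protocol -- the hook is called with
# (block_nr, block_size, total_size), block_nr counting up from 0.  A
# minimal real hook with the same signature might be:
#
#   def progress(block_nr, block_size, total_size):
#       print("read %d of %d bytes" % (block_nr * block_size, total_size))
#
#   opener = mechanize.build_opener()
#   filename, headers = opener.retrieve(url, reporthook=progress)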
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
Note not all the functional tests use the local server yet
-- some currently always access the internet regardless of
this option and the --uri option.
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
index 8f51265..84b81ef 100644
--- a/test-tools/twisted-localserver.py
+++ b/test-tools/twisted-localserver.py
@@ -1,107 +1,124 @@
#!/usr/bin/env python
"""
%prog port
e.g. %prog 8000
Runs a local server to point the mechanize functional tests at. Example:
python test-tools/twisted-localserver.py 8042
python functional_tests.py --uri=http://localhost:8042/
You need Twisted version XXX to run it:
XXX installation instructions
"""
import os, sys, re, time
from twisted.web2 import server, http, resource, channel, \
- static, http_headers, responsecode
+ static, http_headers, responsecode, twcgi
from twisted.internet import reactor
def html(title=None):
f = open("README.html", "r")
html = f.read()
if title is not None:
html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
return html
MECHANIZE_HTML = html()
ROOT_HTML = html("Python bits")
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
+REFERER_TEST_HTML = """\
+<html>
+<head>
+<title>mechanize Referer (sic) test page</title>
+</head>
+<body>
+<p>This page exists to test the Referer functionality of <a href="/mechanize">mechanize</a>.
+<p><a href="/cgi-bin/cookietest.cgi">Here</a> is a link to a page that displays the Referer header.
+</body>
+</html>"""
class Page(resource.Resource):
addSlash = True
content_type = http_headers.MimeType("text", "html")
def render(self, ctx):
return http.Response(
responsecode.OK,
{"content-type": self.content_type},
self.text)
def _make_page(root, name, text,
content_type="text/html",
leaf=False):
page = Page()
page.text = text
base_type, specific_type = content_type.split("/")
page.content_type = http_headers.MimeType(base_type, specific_type)
page.addSlash = not leaf
setattr(root, "child_"+name, page)
return page
def make_page(root, name, text,
content_type="text/html"):
return _make_page(root, name, text, content_type, leaf=False)
def make_leaf_page(root, name, text,
content_type="text/html"):
return _make_page(root, name, text, content_type, leaf=True)
def make_redirect(root, name, location_relative_ref):
redirect = resource.RedirectResource(path=location_relative_ref)
setattr(root, "child_"+name, redirect)
return redirect
+def make_cgi_bin(parent, name, dir_name):
+ cgi_bin = twcgi.CGIDirectory(dir_name)
+ setattr(parent, "child_"+name, cgi_bin)
+ return cgi_bin
+
def main():
root = Page()
root.text = ROOT_HTML
make_page(root, "mechanize", MECHANIZE_HTML)
make_leaf_page(root, "robots.txt",
"User-Agent: *\nDisallow: /norobots",
"text/plain")
make_leaf_page(root, "robots", "Hello, robots.", "text/plain")
make_leaf_page(root, "norobots", "Hello, non-robots.", "text/plain")
bits = make_page(root, "bits", "GeneralFAQ.html")
make_leaf_page(bits, "cctest2.txt",
"Hello ClientCookie functional test suite.",
"text/plain")
+ make_leaf_page(bits, "referertest.html", REFERER_TEST_HTML)
make_leaf_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
make_redirect(root, "redirected", "/doesnotexist")
+ make_cgi_bin(root, "cgi-bin", "examples")
site = server.Site(root)
reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
reactor.run()
main()
|
Almad/Mechanize
|
f89e9461f0de68534d850b40e49ff53120b9ea61
|
A bit more work on the local twisted server for functional tests: add a 302 redirection page to get rid of one of the hardcoded wwwsearch URLs, and fix some issues in the server.
|
diff --git a/functional_tests.py b/functional_tests.py
index 8ef3524..ef29371 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,414 +1,414 @@
#!/usr/bin/env python
# These tests access the network.
# thanks Moof (aka Giles Antonio Radford) for some of these
import os, sys
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
from mechanize._rfc3986 import urljoin
# XXX
# document twisted.web2 install (I forgot how I did it -- reinstall!)
# implement remaining stuff used by functional_tests.py
# in twisted-localserver.py:
# - 302 followed by 404 response
# - helper cgi script for cookies &c.
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
## logger.addHandler(logging.StreamHandler(sys.stdout))
## #logger.setLevel(logging.DEBUG)
## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
- # the combination of 302 (caused by use of "sf.net") and 404 has caused
- # problems in the past due to accidental double-wrapping of the error
- # response
+ # the combination of 302 and 404 (/redirected is configured to redirect
+ # to a non-existent URL /nonexistent) has caused problems in the past
+ # due to accidental double-wrapping of the error response
import urllib2
self.assertRaises(
urllib2.HTTPError,
- self.browser.open, "http://wwwsearch.sf.net/doesnotexist",
+ self.browser.open, urljoin(self.uri, "/redirected"),
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = "http://wwwsearch.sourceforge.net/bits/referertest.html"
info = "http://wwwsearch.sourceforge.net/cgi-bin/cookietest.cgi"
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
r = urlopen(
"http://wwwsearch.sourceforge.net/cgi-bin/cookietest.cgi")
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = "http://www.python.org/"
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
import sys
sys.path.insert(0, "test-tools")
import testprogram
USAGE_EXAMPLES = """
Examples:
%(progName)s
- run all tests
%(progName)s functional_tests.SimpleTests
- run all 'test*' test methods in class SimpleTests
%(progName)s functional_tests.SimpleTests.test_redirect
- run SimpleTests.test_redirect
%(progName)s -l
- start a local Twisted HTTP server and run the functional
tests against that, rather than against SourceForge
(quicker!)
Note not all the functional tests use the local server yet
-- some currently always access the internet regardless of
this option and the --uri option.
"""
prog = testprogram.TestProgram(
["functional_tests"],
localServerProcess=testprogram.TwistedServerProcess(),
usageExamples=USAGE_EXAMPLES,
)
result = prog.runTests()
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
index a73e300..8f51265 100644
--- a/test-tools/twisted-localserver.py
+++ b/test-tools/twisted-localserver.py
@@ -1,91 +1,107 @@
#!/usr/bin/env python
"""
%prog port
e.g. %prog 8000
Runs a local server to point the mechanize functional tests at. Example:
python test-tools/twisted-localserver.py 8042
python functional_tests.py --uri=http://localhost:8042/
You need Twisted version XXX to run it:
XXX installation instructions
"""
import os, sys, re, time
from twisted.web2 import server, http, resource, channel, \
static, http_headers, responsecode
from twisted.internet import reactor
def html(title=None):
f = open("README.html", "r")
html = f.read()
if title is not None:
html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
return html
MECHANIZE_HTML = html()
ROOT_HTML = html("Python bits")
RELOAD_TEST_HTML = """\
<html>
<head><title>Title</title></head>
<body>
<a href="/mechanize">near the start</a>
<p>Now some data to prevent HEAD parsing from reading the link near
the end.
<pre>
%s</pre>
<a href="/mechanize">near the end</a>
</body>
</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
class Page(resource.Resource):
addSlash = True
content_type = http_headers.MimeType("text", "html")
def render(self, ctx):
return http.Response(
responsecode.OK,
{"content-type": self.content_type},
self.text)
-def make_page(root, name, text,
- content_type="text/html"):
+def _make_page(root, name, text,
+ content_type="text/html",
+ leaf=False):
page = Page()
page.text = text
base_type, specific_type = content_type.split("/")
page.content_type = http_headers.MimeType(base_type, specific_type)
+ page.addSlash = not leaf
setattr(root, "child_"+name, page)
return page
+def make_page(root, name, text,
+ content_type="text/html"):
+ return _make_page(root, name, text, content_type, leaf=False)
+
+def make_leaf_page(root, name, text,
+ content_type="text/html"):
+ return _make_page(root, name, text, content_type, leaf=True)
+
+def make_redirect(root, name, location_relative_ref):
+ redirect = resource.RedirectResource(path=location_relative_ref)
+ setattr(root, "child_"+name, redirect)
+ return redirect
+
def main():
root = Page()
root.text = ROOT_HTML
make_page(root, "mechanize", MECHANIZE_HTML)
- bits = make_page(root, "robots.txt",
- "User-Agent: *\nDisallow: /norobots",
- "text/plain")
- bits = make_page(root, "robots", "Hello, robots.", "text/plain")
- bits = make_page(root, "norobots", "Hello, non-robots.", "text/plain")
+ make_leaf_page(root, "robots.txt",
+ "User-Agent: *\nDisallow: /norobots",
+ "text/plain")
+ make_leaf_page(root, "robots", "Hello, robots.", "text/plain")
+ make_leaf_page(root, "norobots", "Hello, non-robots.", "text/plain")
bits = make_page(root, "bits", "GeneralFAQ.html")
- make_page(bits, "cctest2.txt",
- "Hello ClientCookie functional test suite.",
- "text/plain")
- make_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
+ make_leaf_page(bits, "cctest2.txt",
+ "Hello ClientCookie functional test suite.",
+ "text/plain")
+ make_leaf_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
+ make_redirect(root, "redirected", "/doesnotexist")
site = server.Site(root)
reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
reactor.run()
main()
|
Almad/Mechanize
|
56fe4a874e0ba859c8a4d4c514231b1822803636
|
Add an -l option to run the functional tests against a local twisted.web2-based server. There are still a few tests that always run against the wwwsearch.sf.net site -- need to extend the local server a bit to cover these.
|
diff --git a/functional_tests.py b/functional_tests.py
index dedfb32..8ef3524 100755
--- a/functional_tests.py
+++ b/functional_tests.py
@@ -1,382 +1,414 @@
#!/usr/bin/env python
# These tests access the network.
-import os
+# thanks Moof (aka Giles Antonio Radford) for some of these
+
+import os, sys
from unittest import TestCase
import mechanize
from mechanize import build_opener, install_opener, urlopen, urlretrieve
from mechanize import CookieJar, HTTPCookieProcessor, \
HTTPHandler, HTTPRefreshProcessor, \
HTTPEquivProcessor, HTTPRedirectHandler, \
HTTPRedirectDebugProcessor, HTTPResponseDebugProcessor
+from mechanize._rfc3986 import urljoin
+
+# XXX
+# document twisted.web2 install (I forgot how I did it -- reinstall!)
+# implement remaining stuff used by functional_tests.py
+# in twisted-localserver.py:
+# - 302 followed by 404 response
+# - helper cgi script for cookies &c.
#from cookielib import CookieJar
#from urllib2 import build_opener, install_opener, urlopen
#from urllib2 import HTTPCookieProcessor, HTTPHandler
#from mechanize import CreateBSDDBCookieJar
## import logging
## logger = logging.getLogger("mechanize")
-## logger.addHandler(logging.StreamHandler())
-## logger.setLevel(logging.DEBUG)
+## logger.addHandler(logging.StreamHandler(sys.stdout))
+## #logger.setLevel(logging.DEBUG)
+## logger.setLevel(logging.INFO)
def sanepathname2url(path):
import urllib
urlpath = urllib.pathname2url(path)
if os.name == "nt" and urlpath.startswith("///"):
urlpath = urlpath[2:]
# XXX don't ask me about the mac...
return urlpath
class SimpleTests(TestCase):
# thanks Moof (aka Giles Antonio Radford)
def setUp(self):
self.browser = mechanize.Browser()
def test_simple(self):
- self.browser.open('http://wwwsearch.sourceforge.net/')
+ self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
# relative URL
self.browser.open('/mechanize/')
self.assertEqual(self.browser.title(), 'mechanize')
def test_302_and_404(self):
# the combination of 302 (caused by use of "sf.net") and 404 has caused
# problems in the past due to accidental double-wrapping of the error
# response
import urllib2
self.assertRaises(
urllib2.HTTPError,
- self.browser.open, "http://wwwsearch.sf.net/doesnotexist"
+ self.browser.open, "http://wwwsearch.sf.net/doesnotexist",
)
def test_reread(self):
# closing response shouldn't stop methods working (this happens also to
# be true for e.g. mechanize.OpenerDirector when mechanize's own
# handlers are in use, but is guaranteed to be true for
# mechanize.Browser)
- r = self.browser.open('http://wwwsearch.sourceforge.net/')
+ r = self.browser.open(self.uri)
data = r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(), data)
self.assertEqual(self.browser.response().read(), data)
def test_error_recovery(self):
self.assertRaises(OSError, self.browser.open,
'file:///c|thisnoexistyiufheiurgbueirgbue')
- self.browser.open('http://wwwsearch.sourceforge.net/')
+ self.browser.open(self.uri)
self.assertEqual(self.browser.title(), 'Python bits')
def test_redirect(self):
# 301 redirect due to missing final '/'
- r = self.browser.open('http://wwwsearch.sourceforge.net/bits')
+ r = self.browser.open(urljoin(self.uri, "bits"))
self.assertEqual(r.code, 200)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_file_url(self):
url = "file://%s" % sanepathname2url(
os.path.abspath('functional_tests.py'))
r = self.browser.open(url)
self.assert_("this string appears in this file ;-)" in r.read())
def test_open_novisit(self):
def test_state(br):
self.assert_(br.request is None)
self.assert_(br.response() is None)
self.assertRaises(mechanize.BrowserStateError, br.back)
test_state(self.browser)
# note this involves a redirect, which should itself be non-visiting
- r = self.browser.open_novisit("http://wwwsearch.sourceforge.net/bits")
+ r = self.browser.open_novisit(urljoin(self.uri, "bits"))
test_state(self.browser)
self.assert_("GeneralFAQ.html" in r.read(2048))
def test_non_seekable(self):
# check everything still works without response_seek_wrapper and
# the .seek() method on response objects
ua = mechanize.UserAgent()
ua.set_seekable_responses(False)
ua.set_handle_equiv(False)
- response = ua.open('http://wwwsearch.sourceforge.net/')
+ response = ua.open(self.uri)
self.failIf(hasattr(response, "seek"))
data = response.read()
self.assert_("Python bits" in data)
class ResponseTests(TestCase):
def test_seek(self):
br = mechanize.Browser()
- r = br.open("http://wwwsearch.sourceforge.net/")
+ r = br.open(self.uri)
html = r.read()
r.seek(0)
self.assertEqual(r.read(), html)
def test_seekable_response_opener(self):
opener = mechanize.OpenerFactory(
mechanize.SeekableResponseOpener).build_opener()
- r = opener.open("http://wwwsearch.sourceforge.net/bits/cctest2.txt")
+ r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.seek(0)
self.assertEqual(r.read(),
r.get_data(),
"Hello ClientCookie functional test suite.\n")
def test_no_seek(self):
# should be possible to turn off UserAgent's .seek() functionality
def check_no_seek(opener):
- r = opener.open(
- "http://wwwsearch.sourceforge.net/bits/cctest2.txt")
+ r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
self.assert_(not hasattr(r, "seek"))
try:
- opener.open("http://wwwsearch.sourceforge.net/nonexistent")
+ opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
self.assert_(not hasattr(exc, "seek"))
# mechanize.UserAgent
opener = mechanize.UserAgent()
opener.set_handle_equiv(False)
opener.set_seekable_responses(False)
opener.set_debug_http(False)
check_no_seek(opener)
# mechanize.OpenerDirector
opener = mechanize.build_opener()
check_no_seek(opener)
def test_consistent_seek(self):
# if we explicitly request that returned response objects have the
# .seek() method, then raised HTTPError exceptions should also have the
# .seek() method
def check(opener, excs_also):
- r = opener.open(
- "http://wwwsearch.sourceforge.net/bits/cctest2.txt")
+ r = opener.open(urljoin(self.uri, "bits/cctest2.txt"))
data = r.read()
r.seek(0)
self.assertEqual(data, r.read(), r.get_data())
try:
- opener.open("http://wwwsearch.sourceforge.net/nonexistent")
+ opener.open(urljoin(self.uri, "nonexistent"))
except mechanize.HTTPError, exc:
data = exc.read()
if excs_also:
exc.seek(0)
self.assertEqual(data, exc.read(), exc.get_data())
else:
self.assert_(False)
opener = mechanize.UserAgent()
opener.set_debug_http(False)
# Here, only the .set_handle_equiv() causes .seek() to be present, so
# exceptions don't necessarily support the .seek() method (and do not,
# at present).
opener.set_handle_equiv(True)
opener.set_seekable_responses(False)
check(opener, excs_also=False)
# Here, (only) the explicit .set_seekable_responses() causes .seek() to
# be present (different mechanism from .set_handle_equiv()). Since
# there's an explicit request, ALL responses are seekable, even
# exception responses (HTTPError instances).
opener.set_handle_equiv(False)
opener.set_seekable_responses(True)
check(opener, excs_also=True)
def test_set_response(self):
br = mechanize.Browser()
- r = br.open("http://wwwsearch.sourceforge.net/")
+ r = br.open(self.uri)
html = r.read()
self.assertEqual(br.title(), "Python bits")
newhtml = """<html><body><a href="spam">click me</a></body></html>"""
r.set_data(newhtml)
self.assertEqual(r.read(), newhtml)
self.assertEqual(br.response().read(), html)
br.response().set_data(newhtml)
self.assertEqual(br.response().read(), html)
self.assertEqual(list(br.links())[0].url, 'http://sourceforge.net')
br.set_response(r)
self.assertEqual(br.response().read(), newhtml)
self.assertEqual(list(br.links())[0].url, "spam")
def test_new_response(self):
br = mechanize.Browser()
data = "<html><head><title>Test</title></head><body><p>Hello.</p></body></html>"
response = mechanize.make_response(
data,
[("Content-type", "text/html")],
"http://example.com/",
200,
"OK"
)
br.set_response(response)
self.assertEqual(br.response().get_data(), data)
def hidden_test_close_pickle_load(self):
print ("Test test_close_pickle_load is expected to fail unless Python "
"standard library patch http://python.org/sf/1144636 has been "
"applied")
import pickle
b = mechanize.Browser()
- r = b.open("http://wwwsearch.sourceforge.net/bits/cctest2.txt")
+ r = b.open(urljoin(self.uri, "bits/cctest2.txt"))
r.read()
r.close()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
HIGHEST_PROTOCOL = -1
p = pickle.dumps(b, HIGHEST_PROTOCOL)
b = pickle.loads(p)
r = b.response()
r.seek(0)
self.assertEqual(r.read(),
"Hello ClientCookie functional test suite.\n")
class FunctionalTests(TestCase):
def test_referer(self):
br = mechanize.Browser()
referer = "http://wwwsearch.sourceforge.net/bits/referertest.html"
info = "http://wwwsearch.sourceforge.net/cgi-bin/cookietest.cgi"
r = br.open(info)
self.assert_(referer not in r.get_data())
br.open(referer)
r = br.follow_link(text="Here")
self.assert_(referer in r.get_data())
def test_cookies(self):
import urllib2
# this test page depends on cookies, and an http-equiv refresh
#cj = CreateBSDDBCookieJar("/home/john/db.db")
cj = CookieJar()
handlers = [
HTTPCookieProcessor(cj),
HTTPRefreshProcessor(max_time=None, honor_time=False),
HTTPEquivProcessor(),
HTTPRedirectHandler(), # needed for Refresh handling in 2.4.0
# HTTPHandler(True),
# HTTPRedirectDebugProcessor(),
# HTTPResponseDebugProcessor(),
]
o = apply(build_opener, handlers)
try:
install_opener(o)
try:
r = urlopen(
"http://wwwsearch.sourceforge.net/cgi-bin/cookietest.cgi")
except urllib2.URLError, e:
#print e.read()
raise
data = r.read()
#print data
self.assert_(
data.find("Your browser supports cookies!") >= 0)
self.assert_(len(cj) == 1)
# test response.seek() (added by HTTPEquivProcessor)
r.seek(0)
samedata = r.read()
r.close()
self.assert_(samedata == data)
finally:
o.close()
install_opener(None)
def test_robots(self):
plain_opener = mechanize.build_opener(mechanize.HTTPRobotRulesProcessor)
browser = mechanize.Browser()
for opener in plain_opener, browser:
- r = opener.open("http://wwwsearch.sourceforge.net/robots")
+ r = opener.open(urljoin(self.uri, "robots"))
self.assertEqual(r.code, 200)
self.assertRaises(
mechanize.RobotExclusionError,
- opener.open, "http://wwwsearch.sourceforge.net/norobots")
+ opener.open, urljoin(self.uri, "norobots"))
def test_urlretrieve(self):
url = "http://www.python.org/"
test_filename = "python.html"
def check_retrieve(opener, filename, headers):
self.assertEqual(headers.get('Content-Type'), 'text/html')
f = open(filename)
data = f.read()
f.close()
opener.close()
from urllib import urlopen
r = urlopen(url)
self.assertEqual(data, r.read())
r.close()
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, test_filename, verif.callback)
try:
self.assertEqual(filename, test_filename)
check_retrieve(opener, filename, headers)
self.assert_(os.path.isfile(filename))
finally:
os.remove(filename)
opener = mechanize.build_opener()
verif = CallbackVerifier(self)
filename, headers = opener.retrieve(url, reporthook=verif.callback)
check_retrieve(opener, filename, headers)
# closing the opener removed the temporary file
self.failIf(os.path.isfile(filename))
def test_reload_read_incomplete(self):
from mechanize import Browser
browser = Browser()
- r1 = browser.open(
- "http://wwwsearch.sourceforge.net/bits/mechanize_reload_test.html")
+ r1 = browser.open(urljoin(self.uri, "bits/mechanize_reload_test.html"))
# if we don't do anything and go straight to another page, most of the
# last page's response won't be .read()...
- r2 = browser.open("http://wwwsearch.sourceforge.net/mechanize")
+ r2 = browser.open(urljoin(self.uri, "mechanize"))
self.assert_(len(r1.get_data()) < 4097) # we only .read() a little bit
# ...so if we then go back, .follow_link() for a link near the end (a
# few kb in, past the point that always gets read in HTML files because
# of HEAD parsing) will only work if it causes a .reload()...
r3 = browser.back()
browser.follow_link(text="near the end")
# ... good, no LinkNotFoundError, so we did reload.
# we have .read() the whole file
self.assertEqual(len(r3._seek_wrapper__cache.getvalue()), 4202)
## def test_cacheftp(self):
## from urllib2 import CacheFTPHandler, build_opener
## o = build_opener(CacheFTPHandler())
## r = o.open("ftp://ftp.python.org/pub/www.python.org/robots.txt")
## data1 = r.read()
## r.close()
## r = o.open("ftp://ftp.python.org/pub/www.python.org/2.3.2/announce.txt")
## data2 = r.read()
## r.close()
## self.assert_(data1 != data2)
class CallbackVerifier:
# for .test_urlretrieve()
def __init__(self, testcase):
self._count = 0
self._testcase = testcase
def callback(self, block_nr, block_size, total_size):
self._testcase.assertEqual(block_nr, self._count)
self._count = self._count + 1
if __name__ == "__main__":
- import unittest
- unittest.main()
+ import sys
+ sys.path.insert(0, "test-tools")
+ import testprogram
+ USAGE_EXAMPLES = """
+Examples:
+ %(progName)s
+ - run all tests
+ %(progName)s functional_tests.SimpleTests
+ - run all 'test*' test methods in class SimpleTests
+ %(progName)s functional_tests.SimpleTests.test_redirect
+ - run SimpleTests.test_redirect
+
+ %(progName)s -l
+ - start a local Twisted HTTP server and run the functional
+ tests against that, rather than against SourceForge
+ (quicker!)
+ Note that not all the functional tests use the local server yet
+ -- some currently always access the internet regardless of
+ this option and the --uri option.
+"""
+ prog = testprogram.TestProgram(
+ ["functional_tests"],
+ localServerProcess=testprogram.TwistedServerProcess(),
+ usageExamples=USAGE_EXAMPLES,
+ )
+ result = prog.runTests()
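The pattern running through the hunks above: hard-coded wwwsearch.sourceforge.net URLs become urljoin(self.uri, relative_path), with uri injected into every TestCase by the testprogram module below. A minimal sketch of a test written against that convention (the class name, default uri, and assertion are illustrative, not from the patch):

    from unittest import TestCase
    from urlparse import urljoin   # urllib.parse.urljoin on Python 3

    import mechanize

    class ExampleTest(TestCase):
        # `uri` is normally set by add_uri_attribute_to_test_cases();
        # this default mirrors the --run-local-server case.
        uri = "http://127.0.0.1:8000/"

        def test_fetch_bits(self):
            br = mechanize.Browser()
            r = br.open(urljoin(self.uri, "bits/cctest2.txt"))
            self.assert_("ClientCookie" in r.read())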
diff --git a/test-tools/testprogram.py b/test-tools/testprogram.py
new file mode 100644
index 0000000..627e753
--- /dev/null
+++ b/test-tools/testprogram.py
@@ -0,0 +1,311 @@
+"""Local server and cgitb support."""
+
+import cgitb
+#cgitb.enable(format="text")
+
+import sys, os, errno, traceback, logging, glob, time
+from unittest import defaultTestLoader, TextTestRunner, TestSuite, TestCase, \
+ _TextTestResult
+
+
+class ServerProcess:
+
+ def __init__(self, filename, name=None):
+ if filename is None:
+ raise ValueError('filename arg must be a string')
+ if name is None:
+ name = filename
+ self.name = os.path.basename(name)
+ self.port = None
+ self.report_hook = lambda msg: None
+ self._filename = filename
+
+ def _get_args(self):
+ """Return list of command line arguments.
+
+ Override me.
+ """
+ return []
+
+ def start(self):
+ self.report_hook("starting (%s)" % (
+ [sys.executable, self._filename]+self._get_args()))
+ self._pid = os.spawnv(
+ os.P_NOWAIT,
+ sys.executable,
+ [sys.executable, self._filename]+self._get_args())
+ self.report_hook("waiting for startup")
+ self._wait_for_startup()
+ self.report_hook("running")
+
+ def _wait_for_startup(self):
+ import socket
+ def connect():
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(1.0)
+ try:
+ sock.connect(('127.0.0.1', self.port))
+ finally:
+ sock.close()
+ backoff(connect, (socket.error,))
+
+ def stop(self):
+ """Kill process (forcefully if necessary)."""
+ if os.name == 'nt':
+ kill_windows(self._pid, self.report_hook)
+ else:
+ kill_posix(self._pid, self.report_hook)
+
+def backoff(func, errors,
+ initial_timeout=1., hard_timeout=60., factor=1.2):
+ starttime = time.time()
+ timeout = initial_timeout
+ while time.time() < starttime + hard_timeout - 0.01:
+ try:
+ func()
+ except errors, exc:
+ time.sleep(timeout)
+ timeout *= factor
+ hard_limit = hard_timeout - (time.time() - starttime)
+ timeout = min(timeout, hard_limit)
+ else:
+ break
+
+def kill_windows(handle, report_hook):
+ try:
+ import win32api
+ except ImportError:
+ import ctypes
+ ctypes.windll.kernel32.TerminateProcess(int(handle), -1)
+ else:
+ win32api.TerminateProcess(int(handle), -1)
+
+def kill_posix(pid, report_hook):
+ import signal
+ os.kill(pid, signal.SIGTERM)
+
+ timeout = 10.
+ starttime = time.time()
+ report_hook("waiting for exit")
+ def do_nothing(*args):
+ pass
+ old_handler = signal.signal(signal.SIGCHLD, do_nothing)
+ try:
+ while time.time() < starttime + timeout - 0.01:
+ pid, sts = os.waitpid(pid, os.WNOHANG)
+ if pid != 0:
+ # exited, or error
+ break
+ newtimeout = timeout - (time.time() - starttime) - 1.
+ time.sleep(newtimeout) # wait for signal
+ else:
+ report_hook("forcefully killing")
+ try:
+ os.kill(pid, signal.SIGKILL)
+ except OSError, exc:
+ if exc.errno != errno.ECHILD:
+ raise
+ finally:
+ signal.signal(signal.SIGCHLD, old_handler)
+
+class TwistedServerProcess(ServerProcess):
+
+ def __init__(self, name=None):
+ top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
+ path = os.path.join(top_level_dir, "test-tools/twisted-localserver.py")
+ ServerProcess.__init__(self, path, name)
+
+ def _get_args(self):
+ return [str(self.port)]
+
+
+class CgitbTextResult(_TextTestResult):
+ def _exc_info_to_string(self, err, test):
+ """Converts a sys.exc_info()-style tuple of values into a string."""
+ exctype, value, tb = err
+ # Skip test runner traceback levels
+ while tb and self._is_relevant_tb_level(tb):
+ tb = tb.tb_next
+ if exctype is test.failureException:
+ # Skip assert*() traceback levels
+ length = self._count_relevant_tb_levels(tb)
+ return cgitb.text((exctype, value, tb))
+ return cgitb.text((exctype, value, tb))
+
+class CgitbTextTestRunner(TextTestRunner):
+ def _makeResult(self):
+ return CgitbTextResult(self.stream, self.descriptions, self.verbosity)
+
+def add_uri_attribute_to_test_cases(suite, uri):
+ for test in suite._tests:
+ if isinstance(test, TestCase):
+ test.uri = uri
+ else:
+ try:
+ add_uri_attribute_to_test_cases(test, uri)
+ except AttributeError:
+ pass
+
+
+class TestProgram:
+ """A command-line program that runs a set of tests; this is primarily
+ for making test modules conveniently executable.
+ """
+ USAGE = """\
+Usage: %(progName)s [options] [test] [...]
+
+Note that not all the functional tests honor the --uri argument yet --
+some currently always access the internet regardless of the --uri and
+--run-local-server options.
+
+Options:
+ -l, --run-local-server
+ Run a local Twisted HTTP server for the functional
+ tests. You need Twisted installed for this to work.
+ The server is run on the port given in the --uri
+ option. If --run-local-server is given but no --uri is
+ given, http://127.0.0.1:8000 is used as the base URI.
+ Also, if you're on Windows and don't have pywin32 or
+ ctypes installed, this option won't work, and you'll
+ have to start up test-tools/localserver.py manually.
+ --uri=URL Base URI for functional tests
+ (test.py does not access the network, unless you tell
+ it to run module functional_tests;
+ functional_tests.py does access the network)
+ e.g. --uri=http://127.0.0.1:8000/
+ -h, --help Show this message
+ -v, --verbose Verbose output
+ -q, --quiet Minimal output
+
+The following options are only available through test.py (you can still run the
+functional tests through test.py, just give 'functional_tests' as the module
+name to run):
+
+ -u Skip plain (non-doctest) unittests
+ -d Skip doctests
+ -c Run coverage (requires coverage.py, seems buggy)
+ -t Display tracebacks using cgitb's text mode
+
+"""
+ USAGE_EXAMPLES = """
+Examples:
+ %(progName)s
+ - run all tests
+ %(progName)s test_cookies
+ - run module 'test_cookies'
+ %(progName)s test_cookies.CookieTests
+ - run all 'test*' test methods in test_cookies.CookieTests
+ %(progName)s test_cookies.CookieTests.test_expires
+ - run test_cookies.CookieTests.test_expires
+
+ %(progName)s functional_tests
+ - run the functional tests
+ %(progName)s -l functional_tests
+ - start a local Twisted HTTP server and run the functional
+ tests against that, rather than against SourceForge
+ (quicker!)
+"""
+ def __init__(self, moduleNames, localServerProcess, defaultTest=None,
+ argv=None, testRunner=None, testLoader=defaultTestLoader,
+ defaultUri="http://wwwsearch.sf.net/",
+ usageExamples=USAGE_EXAMPLES,
+ ):
+ self.modules = []
+ for moduleName in moduleNames:
+ module = __import__(moduleName)
+ for part in moduleName.split('.')[1:]:
+ module = getattr(module, part)
+ self.modules.append(module)
+ self.uri = None
+ self._defaultUri = defaultUri
+ if argv is None:
+ argv = sys.argv
+ self.verbosity = 1
+ self.defaultTest = defaultTest
+ self.testRunner = testRunner
+ self.testLoader = testLoader
+ self.progName = os.path.basename(argv[0])
+ self.usageExamples = usageExamples
+ self.runLocalServer = False
+ self.parseArgs(argv)
+ if self.runLocalServer:
+ import urllib
+ from mechanize._rfc3986 import urlsplit
+ authority = urlsplit(self.uri)[1]
+ host, port = urllib.splitport(authority)
+ if port is None:
+ port = "80"
+ try:
+ port = int(port)
+ except:
+ self.usageExit("port in --uri value must be an integer "
+ "(try --uri=http://127.0.0.1:8000/)")
+ self._serverProcess = localServerProcess
+ def report(msg):
+ print "%s: %s" % (localServerProcess.name, msg)
+ localServerProcess.port = port
+ localServerProcess.report_hook = report
+
+ def usageExit(self, msg=None):
+ if msg: print msg
+ print (self.USAGE + self.usageExamples) % self.__dict__
+ sys.exit(2)
+
+ def parseArgs(self, argv):
+ import getopt
+ try:
+ options, args = getopt.getopt(
+ argv[1:],
+ 'hHvql',
+ ['help','verbose','quiet', 'uri=', 'run-local-server'],
+ )
+ uri = None
+ for opt, value in options:
+ if opt in ('-h','-H','--help'):
+ self.usageExit()
+ if opt in ('--uri',):
+ uri = value
+ if opt in ('-q','--quiet'):
+ self.verbosity = 0
+ if opt in ('-v','--verbose'):
+ self.verbosity = 2
+ if opt in ('-l', '--run-local-server'):
+ self.runLocalServer = True
+ if uri is None:
+ if self.runLocalServer:
+ uri = "http://127.0.0.1:8000"
+ else:
+ uri = self._defaultUri
+ self.uri = uri
+ if len(args) == 0 and self.defaultTest is None:
+ suite = TestSuite()
+ for module in self.modules:
+ test = self.testLoader.loadTestsFromModule(module)
+ suite.addTest(test)
+ self.test = suite
+ add_uri_attribute_to_test_cases(self.test, self.uri)
+ return
+ if len(args) > 0:
+ self.testNames = args
+ else:
+ self.testNames = (self.defaultTest,)
+ self.createTests()
+ add_uri_attribute_to_test_cases(self.test, self.uri)
+ except getopt.error, msg:
+ self.usageExit(msg)
+
+ def createTests(self):
+ self.test = self.testLoader.loadTestsFromNames(self.testNames)
+
+ def runTests(self):
+ if self.testRunner is None:
+ self.testRunner = TextTestRunner(verbosity=self.verbosity)
+
+ if self.runLocalServer:
+ self._serverProcess.start()
+ try:
+ result = self.testRunner.run(self.test)
+ finally:
+ if self.runLocalServer:
+ self._serverProcess.stop()
+ return result
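A minimal sketch of using the backoff() helper above on its own -- polling a port until something is listening. The host and port here are assumptions, and note that backoff() returns silently even if it never succeeds within hard_timeout:

    import socket
    from testprogram import backoff   # the helper defined above

    def wait_until_listening(port, host="127.0.0.1"):
        def connect():
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(1.0)
            try:
                sock.connect((host, port))
            finally:
                sock.close()
        # retries connect() on socket.error, sleeping 1.0s, 1.2s, 1.44s, ...
        backoff(connect, (socket.error,))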
diff --git a/test-tools/twisted-localserver.py b/test-tools/twisted-localserver.py
new file mode 100644
index 0000000..a73e300
--- /dev/null
+++ b/test-tools/twisted-localserver.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+%prog port
+
+e.g. %prog 8000
+
+Runs a local server to point the mechanize functional tests at. Example:
+
+python test-tools/twisted-localserver.py 8042
+python functional_tests.py --uri=http://localhost:8042/
+
+You need Twisted XXX version to run it:
+
+XXX installation instructions
+"""
+
+import os, sys, re, time
+from twisted.web2 import server, http, resource, channel, \
+ static, http_headers, responsecode
+
+from twisted.internet import reactor
+
+def html(title=None):
+ f = open("README.html", "r")
+ html = f.read()
+ if title is not None:
+ html = re.sub("<title>(.*)</title>", "<title>%s</title>" % title, html)
+ return html
+
+MECHANIZE_HTML = html()
+ROOT_HTML = html("Python bits")
+RELOAD_TEST_HTML = """\
+<html>
+<head><title>Title</title></head>
+<body>
+
+<a href="/mechanize">near the start</a>
+
+<p>Now some data to prevent HEAD parsing from reading the link near
+the end.
+
+<pre>
+%s</pre>
+
+<a href="/mechanize">near the end</a>
+
+</body>
+
+</html>""" % (("0123456789ABCDEF"*4+"\n")*61)
+
+
+class Page(resource.Resource):
+
+ addSlash = True
+ content_type = http_headers.MimeType("text", "html")
+
+ def render(self, ctx):
+ return http.Response(
+ responsecode.OK,
+ {"content-type": self.content_type},
+ self.text)
+
+def make_page(root, name, text,
+ content_type="text/html"):
+ page = Page()
+ page.text = text
+ base_type, specific_type = content_type.split("/")
+ page.content_type = http_headers.MimeType(base_type, specific_type)
+ setattr(root, "child_"+name, page)
+ return page
+
+def main():
+ root = Page()
+ root.text = ROOT_HTML
+ make_page(root, "mechanize", MECHANIZE_HTML)
+ bits = make_page(root, "robots.txt",
+ "User-Agent: *\nDisallow: /norobots",
+ "text/plain")
+ bits = make_page(root, "robots", "Hello, robots.", "text/plain")
+ bits = make_page(root, "norobots", "Hello, non-robots.", "text/plain")
+ bits = make_page(root, "bits", "GeneralFAQ.html")
+ make_page(bits, "cctest2.txt",
+ "Hello ClientCookie functional test suite.",
+ "text/plain")
+ make_page(bits, "mechanize_reload_test.html", RELOAD_TEST_HTML)
+
+ site = server.Site(root)
+ reactor.listenTCP(int(sys.argv[1]), channel.HTTPFactory(site))
+ reactor.run()
+
+main()
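A note on the magic numbers in RELOAD_TEST_HTML above: each filler line is 64 hex characters plus a newline, i.e. 65 bytes, repeated 61 times. A quick check of the arithmetic (the "fewer than 4097" and 4202 figures come from test_reload_read_incomplete earlier in this document):

    padding = ("0123456789ABCDEF" * 4 + "\n") * 61
    assert len(padding) == 65 * 61 == 3965
    # With the surrounding HTML the test expects the full page to be
    # 4202 bytes, so the link "near the end" sits past the ~4K that
    # HEAD parsing always reads (the test asserts the first open read
    # fewer than 4097 bytes) -- exactly what forces the .reload().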
diff --git a/test.py b/test.py
index ade2722..8de048c 100755
--- a/test.py
+++ b/test.py
@@ -1,245 +1,149 @@
#!/usr/bin/env python
"""Test runner.
For further help, enter this at a command prompt:
python test.py --help
"""
-import cgitb
-#cgitb.enable(format="text")
-
# Modules containing tests to run -- a test is anything named *Tests, which
# should be classes deriving from unittest.TestCase.
MODULE_NAMES = ["test_date", "test_browser", "test_response", "test_cookies",
"test_headers", "test_urllib2", "test_pullparser",
"test_useragent", "test_html", "test_opener",
]
-import sys, os, traceback, logging, glob
-from unittest import defaultTestLoader, TextTestRunner, TestSuite, TestCase, \
- _TextTestResult
+import sys, os, logging, glob
#level = logging.DEBUG
#level = logging.INFO
#level = logging.WARNING
#level = logging.NOTSET
#logging.getLogger("mechanize").setLevel(level)
#logging.getLogger("mechanize").addHandler(logging.StreamHandler(sys.stdout))
-class CgitbTextResult(_TextTestResult):
- def _exc_info_to_string(self, err, test):
- """Converts a sys.exc_info()-style tuple of values into a string."""
- exctype, value, tb = err
- # Skip test runner traceback levels
- while tb and self._is_relevant_tb_level(tb):
- tb = tb.tb_next
- if exctype is test.failureException:
- # Skip assert*() traceback levels
- length = self._count_relevant_tb_levels(tb)
- return cgitb.text((exctype, value, tb))
- return cgitb.text((exctype, value, tb))
-
-class CgitbTextTestRunner(TextTestRunner):
- def _makeResult(self):
- return CgitbTextResult(self.stream, self.descriptions, self.verbosity)
-
-
-class TestProgram:
- """A command-line program that runs a set of tests; this is primarily
- for making test modules conveniently executable.
- """
- USAGE = """\
-Usage: %(progName)s [options] [test] [...]
-
-Options:
- -h, --help Show this message
- -v, --verbose Verbose output
- -q, --quiet Minimal output
-
-Examples:
- %(progName)s - run default set of tests
- %(progName)s MyTestSuite - run suite 'MyTestSuite'
- %(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
- %(progName)s MyTestCase - run all 'test*' test methods
- in MyTestCase
-"""
- def __init__(self, moduleNames, defaultTest=None,
- argv=None, testRunner=None, testLoader=defaultTestLoader):
- self.modules = []
- for moduleName in moduleNames:
- module = __import__(moduleName)
- for part in moduleName.split('.')[1:]:
- module = getattr(module, part)
- self.modules.append(module)
- if argv is None:
- argv = sys.argv
- self.verbosity = 1
- self.defaultTest = defaultTest
- self.testRunner = testRunner
- self.testLoader = testLoader
- self.progName = os.path.basename(argv[0])
- self.parseArgs(argv)
-
- def usageExit(self, msg=None):
- if msg: print msg
- print self.USAGE % self.__dict__
- sys.exit(2)
-
- def parseArgs(self, argv):
- import getopt
- try:
- options, args = getopt.getopt(argv[1:], 'hHvq',
- ['help','verbose','quiet'])
- for opt, value in options:
- if opt in ('-h','-H','--help'):
- self.usageExit()
- if opt in ('-q','--quiet'):
- self.verbosity = 0
- if opt in ('-v','--verbose'):
- self.verbosity = 2
- if len(args) == 0 and self.defaultTest is None:
- suite = TestSuite()
- for module in self.modules:
- test = self.testLoader.loadTestsFromModule(module)
- suite.addTest(test)
- self.test = suite
- return
- if len(args) > 0:
- self.testNames = args
- else:
- self.testNames = (self.defaultTest,)
- self.createTests()
- except getopt.error, msg:
- self.usageExit(msg)
-
- def createTests(self):
- self.test = self.testLoader.loadTestsFromNames(self.testNames)
-
- def runTests(self):
- if self.testRunner is None:
- self.testRunner = TextTestRunner(verbosity=self.verbosity)
- result = self.testRunner.run(self.test)
- return result
-
-
if __name__ == "__main__":
-## sys.path.insert(0, '/home/john/comp/dev/rl/jjlee/lib/python')
-## import jjl
-## import __builtin__
-## __builtin__.jjl = jjl
+ # XXX
+ # temporary stop-gap to run doctests &c.
+ # should switch to nose or something
- # XXX temporary stop-gap to run doctests
+ top_level_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
# XXXX coverage output seems incorrect ATM
run_coverage = "-c" in sys.argv
if run_coverage:
sys.argv.remove("-c")
use_cgitb = "-t" in sys.argv
if use_cgitb:
sys.argv.remove("-t")
run_doctests = "-d" not in sys.argv
if not run_doctests:
sys.argv.remove("-d")
run_unittests = "-u" not in sys.argv
if not run_unittests:
sys.argv.remove("-u")
# import local copy of Python 2.5 doctest
assert os.path.isdir("test")
sys.path.insert(0, "test")
# needed for recent doctest / linecache -- this is only for testing
# purposes, these don't get installed
# doctest.py revision 45701 and linecache.py revision 45940. Since
# linecache is used by Python itself, linecache.py is renamed
# linecache_copy.py, and this copy of doctest is modified (only) to use
# that renamed module.
sys.path.insert(0, "test-tools")
import doctest
+ import testprogram
import coverage
if run_coverage:
print 'running coverage'
coverage.erase()
coverage.start()
import mechanize
class DefaultResult:
def wasSuccessful(self):
return True
result = DefaultResult()
if run_doctests:
# run .doctest files needing special support
common_globs = {"mechanize": mechanize}
- pm_doctest_filename = os.path.join("test", "test_password_manager.doctest")
+ pm_doctest_filename = os.path.join(
+ "test", "test_password_manager.doctest")
for globs in [
{"mgr_class": mechanize.HTTPPasswordMgr},
{"mgr_class": mechanize.HTTPProxyPasswordMgr},
]:
globs.update(common_globs)
doctest.testfile(
pm_doctest_filename,
#os.path.join("test", "test_scratch.doctest"),
globs=globs,
)
# run .doctest files
special_doctests = [pm_doctest_filename,
os.path.join("test", "test_scratch.doctest"),
]
doctest_files = glob.glob(os.path.join("test", "*.doctest"))
for dt in special_doctests:
if dt in doctest_files:
doctest_files.remove(dt)
for df in doctest_files:
doctest.testfile(df)
# run doctests in docstrings
from mechanize import _headersutil, _auth, _clientcookie, _pullparser, \
_http, _rfc3986
doctest.testmod(_headersutil)
doctest.testmod(_rfc3986)
doctest.testmod(_auth)
doctest.testmod(_clientcookie)
doctest.testmod(_pullparser)
doctest.testmod(_http)
if run_unittests:
# run vanilla unittest tests
import unittest
test_path = os.path.join(os.path.dirname(sys.argv[0]), "test")
sys.path.insert(0, test_path)
test_runner = None
if use_cgitb:
- test_runner = CgitbTextTestRunner()
- prog = TestProgram(MODULE_NAMES, testRunner=test_runner)
+ test_runner = testprogram.CgitbTextTestRunner()
+ prog = testprogram.TestProgram(
+ MODULE_NAMES,
+ testRunner=test_runner,
+ localServerProcess=testprogram.TwistedServerProcess(),
+ )
result = prog.runTests()
if run_coverage:
# HTML coverage report
import colorize
try:
os.mkdir("coverage")
except OSError:
pass
private_modules = glob.glob("mechanize/_*.py")
private_modules.remove("mechanize/__init__.py")
for module_filename in private_modules:
module_name = module_filename.replace("/", ".")[:-3]
print module_name
module = sys.modules[module_name]
f, s, m, mf = coverage.analysis(module)
fo = open(os.path.join('coverage', os.path.basename(f)+'.html'), 'wb')
colorize.colorize_file(f, outstream=fo, not_covered=mf)
fo.close()
coverage.report(module)
#print coverage.analysis(module)
# XXX exit status is wrong -- does not take account of doctests
sys.exit(not result.wasSuccessful())
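The XXX comment above is accurate: only the unittest result feeds the exit status, and every doctest.testfile()/testmod() result is discarded. One possible repair, sketched for the plain .doctest loop only, under the assumption that the bundled doctest's testfile() returns a (failure_count, test_count) pair:

    doctest_failures = 0
    for df in doctest_files:             # the list built earlier above
        failed, attempted = doctest.testfile(df)
        doctest_failures += failed
    sys.exit(not (result.wasSuccessful() and doctest_failures == 0))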
|
devnev/skyways
|
82440178231ad43533660fa7d08114e530e859b8
|
Added install command to README, copied to README.rst.
|
diff --git a/README b/README
index d257313..f3a65bc 100644
--- a/README
+++ b/README
@@ -1,27 +1,27 @@
SkyWays
=======
Skyways is a toy project to create a game roughly based on the DOS game called
"Skyroads". The aim is not to recreate the original game, just make something
similarly fun, and to explore possibilities along the way.
Depends
-------
One or more of: SDL, Qt4, freeglut, OpenGlut.
All of: Boost, OpenGL, FTGL
To explicitly select the backends to be built, use::
./configure --enable-{qt,sdl,glut}={yes,no,test}
Build
-----
-::
+Commands to build & install from source::
autoreconf
./configure
make
- # there is currently no install step
+ make install
diff --git a/README.rst b/README.rst
new file mode 100644
index 0000000..f3a65bc
--- /dev/null
+++ b/README.rst
@@ -0,0 +1,27 @@
+SkyWays
+=======
+
+Skyways is a toy project to create a game roughly based on the DOS game called
+"Skyroads". The aim is not to recreate the original game, just make something
+similarly fun, and to explore possibilities along the way.
+
+Depends
+-------
+
+One or more of: SDL, Qt4, freeglut, OpenGlut.
+
+All of: Boost, OpenGL, FTGL
+
+To explicitly select the backends to be built, use::
+
+ ./configure --enable-{qt,sdl,glut}={yes,no,test}
+
+Build
+-----
+
+Commands to build & install from source::
+
+ autoreconf
+ ./configure
+ make
+ make install
|
devnev/skyways
|
051bc82d8633e6d101ad725d5d90da17427a2a02
|
Fixed build/deps directories creation.
|
diff --git a/Makefile.in b/Makefile.in
index 492b8ff..dff942a 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1,46 +1,48 @@
#!/usr/bin/make -f
# @configure_input@
default: all
prefix=@prefix@
datarootdir=@datarootdir@
datadir=@datadir@
pkgdatadir=$(datadir)/@PACKAGE_TARNAME@
srcdir=@srcdir@
bindir=@bindir@
docdir=@docdir@
builddir=@builddir@
exec_prefix=@exec_prefix@
DESTDIR=
PACKAGE_TARNAME=@PACKAGE_TARNAME@
vpath %.c $(srcdir)
vpath %.cpp $(srcdir)
vpath %.hpp $(srcdir)
vpath %.o $(builddir)
vpath %.d $(builddir)
VPATH = $(srcdir)
+BUILD_MAKEFILE=$(builddir)/Makefile
+
MODULES := @modules@
@glut_backend_stmt@SkywaysGlut_BINARY := skyways.glut
@qt_backend_stmt@SkywaysQt_BINARY := skyways.qt
@sdl_backend_stmt@SkywaysSdl_BINARY := skyways.sdl
CFLAGS := @CFLAGS@ -Wall $(CFLAGS)
CXXFLAGS := @CXXFLAGS@ -Wall $(CXXFLAGS)
CPPFLAGS := @CPPFLAGS@ -Wall @FTGL_CFLAGS@ @BOOST_CPPFLAGS@ -I@top_srcdir@/src -DDATADIR='"$(pkgdatadir)"' $(CPPFLAGS)
LDFLAGS := @LDFLAGS@ @BOOST_LDFLAGS@ $(LDFLAGS)
LIBS := @LIBS@ @FTGL_LIBS@ @BOOST_FILESYSTEM_LIB@ $(LIBS)
@glut_backend_stmt@SkywaysGlut_LIBS=@GLUT_LIBS@ @BOOST_PROGRAM_OPTIONS_LIB@
@qt_backend_stmt@SkywaysQt_CPPFLAGS=-D_REENTRANT -DQT_NO_DEBUG -DQT_OPENGL_LIB -DQT_GUI_LIB -DQT_CORE_LIB @QT_CFLAGS@
@qt_backend_stmt@SkywaysQt_LIBS=@QT_LIBS@
@qt_backend_stmt@SkywaysQt_OBJECTS=src/backends/moc_qtwindow_SkywaysQt.o
@sdl_backend_stmt@SkywaysSdl_CPPFLAGS=@SDL_CFLAGS@
@sdl_backend_stmt@SkywaysSdl_LIBS=@SDL_LIBS@ @BOOST_PROGRAM_OPTIONS_LIB@
ADDON_MK := Dirs C_Cpp Dist
MK_INCLUDE:=1
include $(srcdir)/mk/Build.mk
diff --git a/mk/C_Cpp.mk b/mk/C_Cpp.mk
index d3e4099..bfec4d8 100644
--- a/mk/C_Cpp.mk
+++ b/mk/C_Cpp.mk
@@ -1,77 +1,77 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
DEPDIR?=.deps
#{{{ per-directory-and-module source listing hooks
define csrcall_moddir_tpl
CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
endef
define cxxsrcall_moddir_tpl
CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
endef
define hdrall_moddir_tpl
HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
endef
define objects_moddir_tpl
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
endef
MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) csrcall cxxsrcall hdrall objects
#}}}
#{{{ per-module compilation flag hooks
define FLAGS_module_tpl
$$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
endef
define C_module_tpl
%_$(1).o: %.c
$$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define CXX_module_tpl
%_$(1).o: %.cpp
$$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define LD_module_tpl
$$($(1)_BINARY): $$(OBJECTS_$(1))
$$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
endef
MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS C CXX LD
#}}}
#{{{ post-module combination lists
define targets_postmod_tpl
OBJECTS:=$$(foreach mod,$(MODULES),$$(OBJECTS_$$(mod)))
BINARIES:=$$(foreach mod,$(MODULES),$$($$(mod)_BINARY))
#DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
CLEAN:=$$(CLEAN) $$(OBJECTS) $$(BINARIES) $$(DEPENDS)
endef
POSTMOD_TEMPLATES := $(POSTMOD_TEMPLATES) targets
#}}}
#{{{ dependency rule
define depends_rules_tpl
-include $$(DEPENDS)
all: $$(BINARIES)
- $(OBJECTS): build-dirs deps-dirs
- .PHONY: build-dirs deps-dirs
- build-dirs:
- mkdir -p $(patsubst %,$(builddir)/%,$(DIRECTORIES))
- deps-dirs:
- mkdir -p $(patsubst %,$(builddir)/%/$(DEPDIR),$(DIRECTORIES))
+ .PHONY: deps-dirs
+ $$(builddir)/.deps-dirs-stamp: $$(builddir)/.build-dirs-stamp
+ mkdir -p $$(patsubst %,$$(builddir)/%/$$(DEPDIR),$$(DIRECTORIES))
+ touch $$@
+ deps-dirs: $$(builddir)/.deps-dirs-stamp
+ $$(OBJECTS): $$(builddir)/.deps-dirs-stamp
endef
RULES_TEMPLATES := $(RULES_TEMPATE) depends
#}}}
diff --git a/mk/Dirs.mk b/mk/Dirs.mk
index 9de91ec..4379242 100644
--- a/mk/Dirs.mk
+++ b/mk/Dirs.mk
@@ -1,69 +1,82 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
#{{{ directory enter hooks
define checkdir_enter_tpl
$$(if $$(directory),,$$(error Set the `directory` variable before including this makefile in other makefiles))
endef
define enter_enter_tpl
supsp := $$(sp)
sp := $$(lastsp).x
lastsp := $$(sp)
sp_list := $$(sp_list) $$(sp)
dirstack_$$(sp) := $$(d)
d := $$(directory)
DIRECTORIES := $$(DIRECTORIES) $$(d)
endef
define subdirs_enter_tpl
SUBDIRS :=
endef
define enter_directory
$(foreach tpl,checkdir enter subdirs $(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
endef
#}}}
#{{{ directory leave hooks
define include_subdir
directory := $$(d)/$(1)
include $$(srcdir)/$$(directory)/Dir.mk
endef
define include_subdir_list
$$(foreach subdir,$(1),$$(eval $$(call include_subdir,$$(subdir))))
endef
define subdirs_leave_tpl
SUBDIRS_$$(sp) := $$(SUBDIRS)
$$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
SUBDIRS :=
endef
define leave_leave_tpl
d := $$(dirstack_$$(sp))
sp := $$(supsp)
endef
define leave_directory
$(foreach tpl,$(LEAVE_TEMPLATES) subdirs leave,$$(eval $$(call $(tpl)_leave_tpl)))
endef
#}}}
#{{{ hook top-level directory into loading
define dirs_config_tpl
directory := .
include $(srcdir)/Dir.mk
endef
CONFIG_TEMPLATES := dirs $(CONFIG_TEMPLATES)
#}}}
#{{{ create hook mechanism for per-module-and-dir config hooks
define process_module_directory
$$(foreach tpl,$$(MODDIR_TEMPLATES),$$(eval $$(call $$(tpl)_moddir_tpl,$(1),$(2))))
endef
define dirs_module_tpl
$$(foreach sp,$$(sp_list),$$(eval $$(call process_module_directory,$(1),$$(sp))))
endef
MOD_TEMPLATES := dirs $(MOD_TEMPLATES)
#}}}
+#{{{ rule hook to create build directories
+
+define builddirs_rules_tpl
+ $$(builddir)/.build-dirs-stamp:
+ mkdir -p $$(patsubst %,$$(builddir)/%,$$(DIRECTORIES))
+ touch $$@
+ .PHONY: build-dirs
+ build-dirs: $$(builddir)/.build-dirs-stamp
+ $$(BUILD_MAKEFILE): build-dirs
+endef
+RULES_TEMPLATES := $(RULES_TEMPLATES) builddirs
+
+#}}}
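The point of the .build-dirs-stamp / .deps-dirs-stamp files introduced above is to turn "directories exist" into an ordinary up-to-date target, so mkdir -p runs once instead of on every invocation. The same idea in Python, purely as an illustrative analogue of the make pattern:

    import os

    def ensure_dirs(directories, stamp=".build-dirs-stamp"):
        if os.path.exists(stamp):
            return                        # done on an earlier run; skip
        for d in directories:
            if not os.path.isdir(d):
                os.makedirs(d)            # mkdir -p equivalent
        open(stamp, "w").close()          # touch the stamp last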
|
devnev/skyways
|
8063db79f27f29282adc3ac7c02eb4702dc4c013
|
Properly modularized clean rule.
|
diff --git a/mk/Build.mk b/mk/Build.mk
index a05f374..d88bf95 100644
--- a/mk/Build.mk
+++ b/mk/Build.mk
@@ -1,37 +1,34 @@
#!/usr/bin/make -f
default: all
MK_INCLUDE:=1
SRCDIR:=.
BUILDDIR:=.
-DEPDIR:=.deps
ADDONDIR:=$(SRCDIR)/mk
include $(BUILDDIR)/Config.mk
$(foreach MK,$(ADDON_MK),$(eval include $(ADDONDIR)/$(MK).mk))
define config_top_tpl
$$(foreach tpl,$$(CONFIG_TEMPLATES),$$(eval $$(call $$(tpl)_config_tpl)))
endef
define post_config_top_tpl
$$(foreach tpl,$$(POSTCONF_TEMPLATES),$$(eval $$(call $$(tpl)_postconfig_tpl)))
endef
define modules_top_tpl
$$(foreach mod,$(MODULES),$$(foreach tpl,$$(MOD_TEMPLATES),$$(eval $$(call $$(tpl)_module_tpl,$$(mod)))))
endef
define post_modules_top_tpl
$$(foreach tpl,$$(POSTMOD_TEMPLATES),$$(eval $$(call $$(tpl)_postmod_tpl)))
endef
define rules_top_tpl
$$(foreach tpl,$$(RULES_TEMPLATES),$$(eval $$(call $$(tpl)_rules_tpl)))
endef
TOP_TEMPLATES := config post_config modules post_modules rules
$(foreach tpl,$(TOP_TEMPLATES),$(eval $(call $(tpl)_top_tpl)))
-CLEAN:=$(CLEAN) $(OBJECTS) $(BINARIES) $(DEPENDS)
-
.PHONY: default all clean
clean:
- rm -f $(CLEAN) || true
+ $(if $(CLEAN),rm -f $(CLEAN) || true,@:)
diff --git a/mk/C_Cpp.mk b/mk/C_Cpp.mk
index d7c3779..fb23ef2 100644
--- a/mk/C_Cpp.mk
+++ b/mk/C_Cpp.mk
@@ -1,81 +1,83 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
ifndef CCPP_MK_INCLUDED
CCPP_MK_INCLUDED := 1
include $(ADDONDIR)/Dirs.mk
+DEPDIR?=.deps
#{{{ per-directory-and-module source listing hooks
define csrcall_moddir_tpl
CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
endef
define cxxsrcall_moddir_tpl
CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
endef
define hdrall_moddir_tpl
HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
endef
define objects_moddir_tpl
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
endef
MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) csrcall cxxsrcall hdrall objects
#}}}
#{{{ per-module compilation flag hooks
define FLAGS_module_tpl
$$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
endef
define C_module_tpl
%_$(1).o: %.c
$$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define CXX_module_tpl
%_$(1).o: %.cpp
$$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define LD_module_tpl
$$($(1)_BINARY): $$(OBJECTS_$(1))
$$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
endef
MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS C CXX LD
#}}}
#{{{ post-module combination lists
define targets_postmod_tpl
OBJECTS:=$$(foreach mod,$(MODULES),$$(OBJECTS_$$(mod)))
BINARIES:=$$(foreach mod,$(MODULES),$$($$(mod)_BINARY))
#DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
+ CLEAN:=$$(CLEAN) $$(OBJECTS) $$(BINARIES) $$(DEPENDS)
endef
POSTMOD_TEMPLATES := $(POSTMOD_TEMPLATES) targets
#}}}
#{{{ dependency rule
define depends_rules_tpl
-include $$(DEPENDS)
all: $$(BINARIES)
$(OBJECTS): build-dirs deps-dirs
.PHONY: build-dirs deps-dirs
build-dirs:
mkdir -p $(patsubst %,$(BUILDDIR)/%,$(DIRECTORIES))
deps-dirs:
mkdir -p $(patsubst %,$(BUILDDIR)/%/$(DEPDIR),$(DIRECTORIES))
endef
RULES_TEMPLATES := $(RULES_TEMPATE) depends
#}}}
endif
|
devnev/skyways
|
4144b5b44b1de7d7907e11b5a7b92c452325a56f
|
Fixed generating build directory tree structure.
|
diff --git a/mk/Build.mk b/mk/Build.mk
index 9546777..a05f374 100644
--- a/mk/Build.mk
+++ b/mk/Build.mk
@@ -1,42 +1,37 @@
#!/usr/bin/make -f
default: all
MK_INCLUDE:=1
SRCDIR:=.
BUILDDIR:=.
DEPDIR:=.deps
ADDONDIR:=$(SRCDIR)/mk
include $(BUILDDIR)/Config.mk
$(foreach MK,$(ADDON_MK),$(eval include $(ADDONDIR)/$(MK).mk))
define config_top_tpl
$$(foreach tpl,$$(CONFIG_TEMPLATES),$$(eval $$(call $$(tpl)_config_tpl)))
endef
define post_config_top_tpl
$$(foreach tpl,$$(POSTCONF_TEMPLATES),$$(eval $$(call $$(tpl)_postconfig_tpl)))
endef
define modules_top_tpl
$$(foreach mod,$(MODULES),$$(foreach tpl,$$(MOD_TEMPLATES),$$(eval $$(call $$(tpl)_module_tpl,$$(mod)))))
endef
define post_modules_top_tpl
$$(foreach tpl,$$(POSTMOD_TEMPLATES),$$(eval $$(call $$(tpl)_postmod_tpl)))
endef
define rules_top_tpl
$$(foreach tpl,$$(RULES_TEMPLATES),$$(eval $$(call $$(tpl)_rules_tpl)))
endef
TOP_TEMPLATES := config post_config modules post_modules rules
$(foreach tpl,$(TOP_TEMPLATES),$(eval $(call $(tpl)_top_tpl)))
CLEAN:=$(CLEAN) $(OBJECTS) $(BINARIES) $(DEPENDS)
-.PHONY: default all clean force
+.PHONY: default all clean
clean:
rm -f $(CLEAN) || true
-force:
- @/bin/true
-
-$(SRCDIR)/mk/Build.mk: force
- @mkdir -p $(patsubst %,$(BUILDDIR)/%,$(DIRECTORIES)) $(patsubst %,$(BUILDDIR)/%/$(DEPDIR),$(DIRECTORIES))
diff --git a/mk/C_Cpp.mk b/mk/C_Cpp.mk
index d837703..d7c3779 100644
--- a/mk/C_Cpp.mk
+++ b/mk/C_Cpp.mk
@@ -1,76 +1,81 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
ifndef CCPP_MK_INCLUDED
CCPP_MK_INCLUDED := 1
include $(ADDONDIR)/Dirs.mk
#{{{ per-directory-and-module source listing hooks
define csrcall_moddir_tpl
CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
endef
define cxxsrcall_moddir_tpl
CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
endef
define hdrall_moddir_tpl
HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
endef
define objects_moddir_tpl
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
endef
MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) csrcall cxxsrcall hdrall objects
#}}}
#{{{ per-module compilation flag hooks
define FLAGS_module_tpl
$$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
endef
define C_module_tpl
%_$(1).o: %.c
$$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define CXX_module_tpl
%_$(1).o: %.cpp
$$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define LD_module_tpl
$$($(1)_BINARY): $$(OBJECTS_$(1))
$$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
endef
MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS C CXX LD
#}}}
#{{{ post-module combination lists
define targets_postmod_tpl
OBJECTS:=$$(foreach mod,$(MODULES),$$(OBJECTS_$$(mod)))
BINARIES:=$$(foreach mod,$(MODULES),$$($$(mod)_BINARY))
#DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
- -include $$(DEPENDS)
endef
POSTMOD_TEMPLATES := $(POSTMOD_TEMPLATES) targets
#}}}
#{{{ dependency rule
define depends_rules_tpl
-include $$(DEPENDS)
all: $$(BINARIES)
+ $(OBJECTS): build-dirs deps-dirs
+ .PHONY: build-dirs deps-dirs
+ build-dirs:
+ mkdir -p $(patsubst %,$(BUILDDIR)/%,$(DIRECTORIES))
+ deps-dirs:
+ mkdir -p $(patsubst %,$(BUILDDIR)/%/$(DEPDIR),$(DIRECTORIES))
endef
RULES_TEMPLATES := $(RULES_TEMPATE) depends
#}}}
endif
|
devnev/skyways
|
a1c2de33367dd928dc57fcc32a40f2337861c665
|
Fixed docs installation.
|
diff --git a/Dir.mk b/Dir.mk
index 5fbff60..0461402 100644
--- a/Dir.mk
+++ b/Dir.mk
@@ -1,12 +1,12 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
$(eval $(call enter_directory))
DATA_$(sp) := $(d)/world $(d)/DejaVuSans.ttf
-DOC_$(sp) := $(d)/COPYING $(d)/README
+DOCS_$(sp) := $(d)/COPYING $(d)/README
SUBDIRS := blocks shaders src
$(eval $(call leave_directory))
# vim: ft=make
diff --git a/testbuild.sh b/testbuild.sh
index 873e06c..80b73ed 100755
--- a/testbuild.sh
+++ b/testbuild.sh
@@ -1,6 +1,6 @@
#!/bin/sh
mkdir -p build && \
cd build && \
../configure --prefix=`pwd` && \
make $MAKEOPTS pkgdatadir=`pwd` && \
- make $MAKEOPTS install pkgdatadir=`pwd`
+ make $MAKEOPTS install pkgdatadir=`pwd` docdir=`pwd`/doc
|
devnev/skyways
|
2ab7e04c220d59e60a95d3175f2455847b0e9a36
|
Added install targets.
|
diff --git a/Config.mk.in b/Config.mk.in
index 1f752b5..44b51e4 100644
--- a/Config.mk.in
+++ b/Config.mk.in
@@ -1,36 +1,42 @@
#!/usr/bin/make -f
# @configure_input@
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
prefix=@prefix@
datarootdir=@datarootdir@
datadir=@datadir@
-appdatadir=${datadir}/@PACKAGE_TARNAME@
+pkgdatadir=$(datadir)/@PACKAGE_TARNAME@
srcdir=@srcdir@
+bindir=@bindir@
+docdir=@docdir@
+exec_prefix=@exec_prefix@
+DESTDIR=
+PACKAGE_TARNAME=@PACKAGE_TARNAME@
vpath %.c @srcdir@
vpath %.cpp @srcdir@
vpath %.hpp @srcdir@
vpath %.o @builddir@
vpath %.d @builddir@
+VPATH = @srcdir@
MODULES := @modules@
@glut_backend_stmt@SkywaysGlut_BINARY := skyways.glut
@qt_backend_stmt@SkywaysQt_BINARY := skyways.qt
@sdl_backend_stmt@SkywaysSdl_BINARY := skyways.sdl
CFLAGS := @CFLAGS@ -Wall $(CFLAGS)
CXXFLAGS := @CXXFLAGS@ -Wall $(CXXFLAGS)
-CPPFLAGS := @CPPFLAGS@ -Wall @FTGL_CFLAGS@ @BOOST_CPPFLAGS@ -I@top_srcdir@/src -DDATADIR='"$(appdatadir)"' $(CPPFLAGS)
+CPPFLAGS := @CPPFLAGS@ -Wall @FTGL_CFLAGS@ @BOOST_CPPFLAGS@ -I@top_srcdir@/src -DDATADIR='"$(pkgdatadir)"' $(CPPFLAGS)
LDFLAGS := @LDFLAGS@ @BOOST_LDFLAGS@ $(LDFLAGS)
LIBS := @LIBS@ @FTGL_LIBS@ @BOOST_FILESYSTEM_LIB@ $(LIBS)
@glut_backend_stmt@SkywaysGlut_LIBS=@GLUT_LIBS@ @BOOST_PROGRAM_OPTIONS_LIB@
@qt_backend_stmt@SkywaysQt_CPPFLAGS=-D_REENTRANT -DQT_NO_DEBUG -DQT_OPENGL_LIB -DQT_GUI_LIB -DQT_CORE_LIB @QT_CFLAGS@
@qt_backend_stmt@SkywaysQt_LIBS=@QT_LIBS@
@qt_backend_stmt@SkywaysQt_OBJECTS=src/backends/moc_qtwindow_SkywaysQt.o
@sdl_backend_stmt@SkywaysSdl_CPPFLAGS=@SDL_CFLAGS@
@sdl_backend_stmt@SkywaysSdl_LIBS=@SDL_LIBS@ @BOOST_PROGRAM_OPTIONS_LIB@
-ADDON_MK := Dirs C_Cpp
+ADDON_MK := Dirs C_Cpp Dist
diff --git a/Dir.mk b/Dir.mk
index 6bccc68..5fbff60 100644
--- a/Dir.mk
+++ b/Dir.mk
@@ -1,10 +1,12 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
$(eval $(call enter_directory))
-SUBDIRS := src
+DATA_$(sp) := $(d)/world $(d)/DejaVuSans.ttf
+DOC_$(sp) := $(d)/COPYING $(d)/README
+SUBDIRS := blocks shaders src
$(eval $(call leave_directory))
# vim: ft=make
diff --git a/blocks/Dir.mk b/blocks/Dir.mk
new file mode 100644
index 0000000..e129323
--- /dev/null
+++ b/blocks/Dir.mk
@@ -0,0 +1,10 @@
+#!/usr/bin/make -f
+
+$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
+$(eval $(call enter_directory))
+
+DATA_$(sp) := $(d)/cube $(d)/flat $(d)/tunnel
+
+$(eval $(call leave_directory))
+
+# vim: ft=make
diff --git a/mk/Dirs.mk b/mk/Dirs.mk
index ef89213..c0407f2 100644
--- a/mk/Dirs.mk
+++ b/mk/Dirs.mk
@@ -1,78 +1,74 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
ifndef DIRS_MK_INCLUDED
DIRS_MK_INCLUDED := 1
#{{{ directory enter hooks
define checkdir_enter_tpl
$$(if $$(directory),,$$(error Set the `directory` variable before including this makefile in other makefiles))
endef
define enter_enter_tpl
supsp := $$(sp)
sp := $$(lastsp).x
lastsp := $$(sp)
sp_list := $$(sp_list) $$(sp)
dirstack_$$(sp) := $$(d)
d := $$(directory)
DIRECTORIES := $$(DIRECTORIES) $$(d)
endef
define subdirs_enter_tpl
SUBDIRS :=
endef
define enter_directory
$(foreach tpl,checkdir enter subdirs $(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
endef
#}}}
#{{{ directory leave hooks
-define addextras_leave_tpl
- DATA := $(DATA) $(DATA_$(sp))
- DOC := $(DOC) $(DOC_$(sp))
-endef
define include_subdir
directory := $$(d)/$(1)
include $$(SRCDIR)/$$(directory)/Dir.mk
endef
define include_subdir_list
$$(foreach subdir,$(1),$$(eval $$(call include_subdir,$$(subdir))))
endef
define subdirs_leave_tpl
SUBDIRS_$$(sp) := $$(SUBDIRS)
$$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
SUBDIRS :=
endef
define leave_leave_tpl
d := $$(dirstack_$$(sp))
sp := $$(supsp)
endef
define leave_directory
$(foreach tpl,$(LEAVE_TEMPLATES) subdirs leave,$$(eval $$(call $(tpl)_leave_tpl)))
endef
#}}}
#{{{ hook top-level directory into loading
define dirs_config_tpl
directory := .
include $(SRCDIR)/Dir.mk
endef
CONFIG_TEMPLATES := dirs $(CONFIG_TEMPLATES)
#}}}
#{{{ create hook mechanism for per-module-and-dir config hooks
define process_module_directory
$$(foreach tpl,$$(MODDIR_TEMPLATES),$$(eval $$(call $$(tpl)_moddir_tpl,$(1),$(2))))
endef
define dirs_module_tpl
$$(foreach sp,$$(sp_list),$$(eval $$(call process_module_directory,$(1),$$(sp))))
endef
MOD_TEMPLATES := dirs $(MOD_TEMPLATES)
#}}}
endif
diff --git a/mk/Dist.mk b/mk/Dist.mk
new file mode 100644
index 0000000..54c4dd6
--- /dev/null
+++ b/mk/Dist.mk
@@ -0,0 +1,57 @@
+#!/usr/bin/make -f
+
+$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
+
+ifndef DIST_MK_INCLUDED
+DIST_MK_INCLUDED := 1
+
+include $(ADDONDIR)/Dirs.mk
+
+install = install
+install_dir = $(install) -d
+install_data = $(install) -c -m 644
+install_doc = $(install_data)
+install_program = $(install) -c
+
+define distfiles_leave_tpl
+ DOCS:=$$(DOCS)$$(if $$(DOCS_$$(sp)), $$(DOCS_$$(sp)),)
+ DATA:=$$(DATA)$$(if $$(DATA_$$(sp)), $$(DATA_$$(sp)),)
+ $$(if $$(DATA_$$(sp)),$$(eval DATA_DIRS:=$$(DATA_DIRS) $$(d)))
+ NOINST:=$$(NOINST)$$(if $$(NOINST_$$(sp)), $$(NOINST_$$(sp)),)
+endef
+LEAVE_TEMPLATES := $(LEAVE_TEMPLATES) distfiles
+
+define install_doc_rule
+ .PHONY: $$(DESTDIR)$$(docdir)/$$(notdir $(1))
+ $$(DESTDIR)$$(docdir)/$$(notdir $(1)): $(1) install-doc-dirs
+ $$(install_doc) $$< $$@
+endef
+define install_data_rule
+ .PHONY: $$(DESTDIR)$$(pkgdatadir)/$(1)
+ $$(DESTDIR)$$(pkgdatadir)/$(1): $(1) install-data-dirs
+ $$(install_data) $$< $$@
+endef
+define install_prog_rule
+ .PHONY: $$(DESTDIR)$$(bindir)/$(1)
+ $$(DESTDIR)$$(bindir)/$(1): $(1) install-prog-dirs
+ $$(install_program) $$< $$@
+endef
+define install_rules_tpl
+ .PHONY: install-doc-dirs install-data-dirs install-prog-dirs install-dirs install-docs install-data install-progs install
+ install-doc-dirs:
+ $$(install_dir) $$(DESTDIR)$$(docdir)
+ install-docs: $$(foreach doc,$$(DOCS),$$(DESTDIR)$$(docdir)/$$(notdir $$(doc)))
+ $$(foreach doc,$$(DOCS),$$(eval $$(call install_doc_rule,$$(doc))))
+ install-data-dirs:
+ $$(install_dir) $$(addprefix $$(DESTDIR)$$(pkgdatadir)/,$$(DATA_DIRS))
+ install-data: $$(foreach data,$$(DATA),$$(DESTDIR)$$(pkgdatadir)/$$(data))
+ $$(foreach data,$$(DATA),$$(eval $$(call install_data_rule,$$(data))))
+ install-prog-dirs:
+ $$(install_dir) $$(DESTDIR)$$(bindir)
+ install-progs: $$(foreach bin,$$(BINARIES),$$(DESTDIR)$$(bindir)/$$(notdir $$(bin)))
+ $$(foreach bin,$$(BINARIES),$$(eval $$(call install_prog_rule,$$(bin))))
+ install: install-progs install-data install-docs
+endef
+RULES_TEMPLATES := $(RULES_TEMPLATES) install
+
+endif
diff --git a/shaders/Dir.mk b/shaders/Dir.mk
new file mode 100644
index 0000000..fefe00e
--- /dev/null
+++ b/shaders/Dir.mk
@@ -0,0 +1,10 @@
+#!/usr/bin/make -f
+
+$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
+$(eval $(call enter_directory))
+
+DATA_$(sp) := $(d)/shader.glslf $(d)/shader.glslv
+
+$(eval $(call leave_directory))
+
+# vim: ft=make
diff --git a/testbuild.sh b/testbuild.sh
index ea552c2..873e06c 100755
--- a/testbuild.sh
+++ b/testbuild.sh
@@ -1,5 +1,6 @@
#!/bin/sh
mkdir -p build && \
cd build && \
../configure --prefix=`pwd` && \
- make appdatadir=.
+ make $MAKEOPTS pkgdatadir=`pwd` && \
+ make $MAKEOPTS install pkgdatadir=`pwd`
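For readers unfamiliar with install(1): the install_data macro above expands to `install -c -m 644`, i.e. copy the file into place and set its mode to 644. An illustrative Python equivalent (the function name is mine, not from these makefiles):

    import os, shutil

    def install_data(src, destdir):
        dest = os.path.join(destdir, os.path.basename(src))
        shutil.copy(src, dest)            # install -c: copy the file
        os.chmod(dest, 0o644)             # -m 644: rw-r--r--
        return dest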
|
devnev/skyways
|
22756297c3bcb23e5c7f06661c1127445e7ca9f8
|
Fixed dir enter/leave hooks to force ordering.
|
diff --git a/mk/Dirs.mk b/mk/Dirs.mk
index 33ce990..ef89213 100644
--- a/mk/Dirs.mk
+++ b/mk/Dirs.mk
@@ -1,80 +1,78 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
ifndef DIRS_MK_INCLUDED
DIRS_MK_INCLUDED := 1
#{{{ directory enter hooks
define checkdir_enter_tpl
$$(if $$(directory),,$$(error Set the `directory` variable before including this makefile in other makefiles))
endef
define enter_enter_tpl
supsp := $$(sp)
sp := $$(lastsp).x
lastsp := $$(sp)
sp_list := $$(sp_list) $$(sp)
dirstack_$$(sp) := $$(d)
d := $$(directory)
DIRECTORIES := $$(DIRECTORIES) $$(d)
endef
define subdirs_enter_tpl
SUBDIRS :=
endef
-ENTER_TEMPLATES := checkdir enter subdirs $(ENTER_TEMPLATES)
define enter_directory
- $(foreach tpl,$(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
+ $(foreach tpl,checkdir enter subdirs $(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
endef
#}}}
#{{{ directory leave hooks
define addextras_leave_tpl
DATA := $(DATA) $(DATA_$(sp))
DOC := $(DOC) $(DOC_$(sp))
endef
define include_subdir
directory := $$(d)/$(1)
include $$(SRCDIR)/$$(directory)/Dir.mk
endef
define include_subdir_list
$$(foreach subdir,$(1),$$(eval $$(call include_subdir,$$(subdir))))
endef
define subdirs_leave_tpl
SUBDIRS_$$(sp) := $$(SUBDIRS)
$$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
SUBDIRS :=
endef
define leave_leave_tpl
d := $$(dirstack_$$(sp))
sp := $$(supsp)
endef
-LEAVE_TEMPLATES := $(LEAVE_TEMPLATES) addextras subdirs leave
define leave_directory
- $(foreach tpl,$(LEAVE_TEMPLATES),$$(eval $$(call $(tpl)_leave_tpl)))
+ $(foreach tpl,$(LEAVE_TEMPLATES) subdirs leave,$$(eval $$(call $(tpl)_leave_tpl)))
endef
#}}}
#{{{ hook top-level directory into loading
define dirs_config_tpl
directory := .
include $(SRCDIR)/Dir.mk
endef
CONFIG_TEMPLATES := dirs $(CONFIG_TEMPLATES)
#}}}
#{{{ create hook mechanism for per-module-and-dir config hooks
define process_module_directory
$$(foreach tpl,$$(MODDIR_TEMPLATES),$$(eval $$(call $$(tpl)_moddir_tpl,$(1),$(2))))
endef
define dirs_module_tpl
$$(foreach sp,$$(sp_list),$$(eval $$(call process_module_directory,$(1),$$(sp))))
endef
MOD_TEMPLATES := dirs $(MOD_TEMPLATES)
#}}}
endif
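What the enter/leave templates above amount to is a depth-first traversal with an explicit directory stack: enter_directory pushes d and appends it to DIRECTORIES, the subdirs hook includes each child's Dir.mk, and leave_directory pops. A Python sketch of the same traversal, with an illustrative tree:

    def walk(subdirs_of, d=".", directories=None):
        if directories is None:
            directories = []
        directories.append(d)                  # DIRECTORIES += d  (enter)
        for name in subdirs_of.get(d, []):     # SUBDIRS of this Dir.mk
            walk(subdirs_of, d + "/" + name, directories)
        return directories                     # caller's d restored (leave)

    subdirs_of = {".": ["blocks", "shaders", "src"], "./src": ["backends"]}
    print(walk(subdirs_of))
    # ['.', './blocks', './shaders', './src', './src/backends']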
|
devnev/skyways
|
e1fc574a68a39632d7df881b2cffd6486cb66b16
|
Exclude system headers from generated dep makefiles.
|
diff --git a/mk/C_Cpp.mk b/mk/C_Cpp.mk
index 4d016ae..d837703 100644
--- a/mk/C_Cpp.mk
+++ b/mk/C_Cpp.mk
@@ -1,76 +1,76 @@
#!/usr/bin/make -f
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
ifndef CCPP_MK_INCLUDED
CCPP_MK_INCLUDED := 1
include $(ADDONDIR)/Dirs.mk
#{{{ per-directory-and-module source listing hooks
define csrcall_moddir_tpl
CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
endef
define cxxsrcall_moddir_tpl
CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
endef
define hdrall_moddir_tpl
HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
endef
define objects_moddir_tpl
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
endef
MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) csrcall cxxsrcall hdrall objects
#}}}
#{{{ per-module compilation flag hooks
define FLAGS_module_tpl
$$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
endef
define C_module_tpl
%_$(1).o: %.c
- $$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
+ $$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define CXX_module_tpl
%_$(1).o: %.cpp
- $$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
+ $$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MMD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define LD_module_tpl
$$($(1)_BINARY): $$(OBJECTS_$(1))
$$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
endef
MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS C CXX LD
#}}}
#{{{ post-module combination lists
define targets_postmod_tpl
OBJECTS:=$$(foreach mod,$(MODULES),$$(OBJECTS_$$(mod)))
BINARIES:=$$(foreach mod,$(MODULES),$$($$(mod)_BINARY))
#DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
-include $$(DEPENDS)
endef
POSTMOD_TEMPLATES := $(POSTMOD_TEMPLATES) targets
#}}}
#{{{ dependency rule
define depends_rules_tpl
-include $$(DEPENDS)
all: $$(BINARIES)
endef
RULES_TEMPLATES := $(RULES_TEMPATE) depends
#}}}
endif
|
devnev/skyways
|
500cf8dced8923e7daf361cb32713e8f63ff46c7
|
Split Rules.mk into multiple modules.
|
diff --git a/Config.mk.in b/Config.mk.in
index 495edce..1f752b5 100644
--- a/Config.mk.in
+++ b/Config.mk.in
@@ -1,34 +1,36 @@
#!/usr/bin/make -f
# @configure_input@
$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
prefix=@prefix@
datarootdir=@datarootdir@
datadir=@datadir@
appdatadir=${datadir}/@PACKAGE_TARNAME@
srcdir=@srcdir@
vpath %.c @srcdir@
vpath %.cpp @srcdir@
vpath %.hpp @srcdir@
vpath %.o @builddir@
vpath %.d @builddir@
MODULES := @modules@
@glut_backend_stmt@SkywaysGlut_BINARY := skyways.glut
@qt_backend_stmt@SkywaysQt_BINARY := skyways.qt
@sdl_backend_stmt@SkywaysSdl_BINARY := skyways.sdl
CFLAGS := @CFLAGS@ -Wall $(CFLAGS)
CXXFLAGS := @CXXFLAGS@ -Wall $(CXXFLAGS)
CPPFLAGS := @CPPFLAGS@ -Wall @FTGL_CFLAGS@ @BOOST_CPPFLAGS@ -I@top_srcdir@/src -DDATADIR='"$(appdatadir)"' $(CPPFLAGS)
LDFLAGS := @LDFLAGS@ @BOOST_LDFLAGS@ $(LDFLAGS)
LIBS := @LIBS@ @FTGL_LIBS@ @BOOST_FILESYSTEM_LIB@ $(LIBS)
@glut_backend_stmt@SkywaysGlut_LIBS=@GLUT_LIBS@ @BOOST_PROGRAM_OPTIONS_LIB@
@qt_backend_stmt@SkywaysQt_CPPFLAGS=-D_REENTRANT -DQT_NO_DEBUG -DQT_OPENGL_LIB -DQT_GUI_LIB -DQT_CORE_LIB @QT_CFLAGS@
@qt_backend_stmt@SkywaysQt_LIBS=@QT_LIBS@
@qt_backend_stmt@SkywaysQt_OBJECTS=src/backends/moc_qtwindow_SkywaysQt.o
@sdl_backend_stmt@SkywaysSdl_CPPFLAGS=@SDL_CFLAGS@
@sdl_backend_stmt@SkywaysSdl_LIBS=@SDL_LIBS@ @BOOST_PROGRAM_OPTIONS_LIB@
+
+ADDON_MK := Dirs C_Cpp
diff --git a/Makefile.in b/Makefile.in
index a65567c..74483d9 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -1,10 +1,10 @@
#!/usr/bin/make
# @configure_input@
#
default: all
@:
.PHONY: default
%::
- $(MAKE) -r --no-print-directory -f @top_srcdir@/Rules.mk BUILDDIR=@builddir@ SRCDIR=@top_srcdir@ $@
+ $(MAKE) -r --no-print-directory -f @top_srcdir@/mk/Build.mk BUILDDIR=@builddir@ SRCDIR=@top_srcdir@ $@
diff --git a/Rules.mk b/Rules.mk
deleted file mode 100644
index f0dd5a8..0000000
--- a/Rules.mk
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/make
-
-default: all
-
-MK_INCLUDE:=1
-SRCDIR:=.
-BUILDDIR:=.
-DEPDIR:=.deps
-
-#{{{ load configuration
-#{{{ include build configuration
-
-include $(BUILDDIR)/Config.mk
-
-#}}}
-#{{{ directory enter and leave hooks
-
-define checkdir_enter_tpl
- $$(if $$(directory),,$$(error Set the `directory` variable before including this makefile in other makefiles))
-endef
-define enter_enter_tpl
- supsp := $$(sp)
- sp := $$(lastsp).x
- lastsp := $$(sp)
- sp_list := $$(sp_list) $$(sp)
- dirstack_$$(sp) := $$(d)
- d := $$(directory)
- DIRECTORIES := $$(DIRECTORIES) $$(d)
-endef
-define subdirs_enter_tpl
- SUBDIRS :=
-endef
-ENTER_TEMPLATES := checkdir enter subdirs $(ENTER_TEMPLATES)
-define enter_directory
- $(foreach tpl,$(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
-endef
-
-define include_subdir
- directory := $$(d)/$(1)
- include $$(SRCDIR)/$$(directory)/Dir.mk
-endef
-define include_subdir_list
- $$(foreach subdir,$(1),$$(eval $$(call include_subdir,$$(subdir))))
-endef
-define subdirs_leave_tpl
- SUBDIRS_$$(sp) := $$(SUBDIRS)
- $$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
- SUBDIRS :=
-endef
-define leave_leave_tpl
- d := $$(dirstack_$$(sp))
- sp := $$(supsp)
-endef
-LEAVE_TEMPLATES := $(LEAVE_TEMPLATES) subdirs leave
-define leave_directory
- $(foreach tpl,$(LEAVE_TEMPLATES),$$(eval $$(call $(tpl)_leave_tpl)))
-endef
-
-#}}}
-#{{{ include top directory configuration
-
-directory := .
-include $(SRCDIR)/Dir.mk
-
-#}}}
-#}}}
-#{{{ process configuration
-#{{{ per-directory-and-module hooks
-
-define csrcall_moddir_tpl
- CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
- CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
-endef
-define cxxsrcall_moddir_tpl
- CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
- CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
-endef
-define hdrall_moddir_tpl
- HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
- HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
-endef
-define objects_moddir_tpl
- OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
- OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
- OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
-endef
-MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) \
- csrcall_moddir_tpl cxxsrcall_moddir_tpl hdrall_moddir_tpl objects_moddir_tpl
-define process_module_directory
- $$(foreach tpl,$$(MODDIR_TEMPLATES),$$(eval $$(call $$(tpl),$(1),$(2))))
-endef
-
-#}}}
-#{{{ per-modules hooks
-
-define FLAGS_mod_template
- $$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
-endef
-define C_mod_template
- %_$(1).o: %.c
- $$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
- mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
-endef
-define CXX_mod_template
- %_$(1).o: %.cpp
- $$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
- mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
-endef
-define LD_mod_template
- $$($(1)_BINARY): $$(OBJECTS_$(1))
- $$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
-endef
-MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS_mod_template C_mod_template CXX_mod_template LD_mod_template
-define process_module
- $$(foreach sp,$$(sp_list),$$(eval $$(call process_module_directory,$(1),$$(sp))))
- $$(foreach tpl,$$(MOD_TEMPLATES),$$(eval $$(call $$(tpl),$(1))))
-endef
-
-#}}}
-#{{{ process module list
-
-define process_modules
- $$(foreach mod,$(1),$$(eval $$(call process_module,$$(mod))))
- OBJECTS:=$$(foreach mod,$(1),$$(OBJECTS_$$(mod)))
- BINARIES:=$$(foreach mod,$(1),$$($$(mod)_BINARY))
- #DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
- DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
-endef
-$(eval $(call process_modules,$(MODULES)))
-
-#}}}
-#}}}
-#{{{ misc
-
--include $(DEPENDS)
-
-CLEAN:=$(CLEAN) $(OBJECTS) $(BINARIES) $(DEPENDS)
-
-.PHONY: default all
-all: $(BINARIES)
-
-.PHONY: clean
-clean:
- rm -f $(CLEAN) || true
-
-.PHONY: force
-force:
- @/bin/true
-
-$(SRCDIR)/Rules.mk: force
- @mkdir -p $(patsubst %,$(BUILDDIR)/%,$(DIRECTORIES)) $(patsubst %,$(BUILDDIR)/%/$(DEPDIR),$(DIRECTORIES))
-
-#}}}
-# vim: fdm=marker
diff --git a/mk/Build.mk b/mk/Build.mk
new file mode 100644
index 0000000..9546777
--- /dev/null
+++ b/mk/Build.mk
@@ -0,0 +1,42 @@
+#!/usr/bin/make -f
+
+default: all
+
+MK_INCLUDE:=1
+SRCDIR:=.
+BUILDDIR:=.
+DEPDIR:=.deps
+ADDONDIR:=$(SRCDIR)/mk
+
+include $(BUILDDIR)/Config.mk
+$(foreach MK,$(ADDON_MK),$(eval include $(ADDONDIR)/$(MK).mk))
+
+define config_top_tpl
+ $$(foreach tpl,$$(CONFIG_TEMPLATES),$$(eval $$(call $$(tpl)_config_tpl)))
+endef
+define post_config_top_tpl
+ $$(foreach tpl,$$(POSTCONF_TEMPLATES),$$(eval $$(call $$(tpl)_postconfig_tpl)))
+endef
+define modules_top_tpl
+ $$(foreach mod,$(MODULES),$$(foreach tpl,$$(MOD_TEMPLATES),$$(eval $$(call $$(tpl)_module_tpl,$$(mod)))))
+endef
+define post_modules_top_tpl
+ $$(foreach tpl,$$(POSTMOD_TEMPLATES),$$(eval $$(call $$(tpl)_postmod_tpl)))
+endef
+define rules_top_tpl
+ $$(foreach tpl,$$(RULES_TEMPLATES),$$(eval $$(call $$(tpl)_rules_tpl)))
+endef
+TOP_TEMPLATES := config post_config modules post_modules rules
+$(foreach tpl,$(TOP_TEMPLATES),$(eval $(call $(tpl)_top_tpl)))
+
+CLEAN:=$(CLEAN) $(OBJECTS) $(BINARIES) $(DEPENDS)
+
+.PHONY: default all clean force
+clean:
+ rm -f $(CLEAN) || true
+
+force:
+ @/bin/true
+
+$(SRCDIR)/mk/Build.mk: force
+ @mkdir -p $(patsubst %,$(BUILDDIR)/%,$(DIRECTORIES)) $(patsubst %,$(BUILDDIR)/%/$(DEPDIR),$(DIRECTORIES))
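Build.mk reduces the top level to five template phases (config, post_config, modules, post_modules, rules), each draining its own hook list. As a sketch of where a registration lands — the `banner` hook name is hypothetical, not part of the commit:

define banner_config_tpl
  $$(info building modules: $$(MODULES))
endef
CONFIG_TEMPLATES := $(CONFIG_TEMPLATES) banner

Placed in Config.mk or an addon .mk, this fires during the first (config) phase, before any per-module templates are expanded.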
diff --git a/mk/C_Cpp.mk b/mk/C_Cpp.mk
new file mode 100644
index 0000000..4d016ae
--- /dev/null
+++ b/mk/C_Cpp.mk
@@ -0,0 +1,76 @@
+#!/usr/bin/make -f
+
+$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
+
+ifndef CCPP_MK_INCLUDED
+CCPP_MK_INCLUDED := 1
+
+include $(ADDONDIR)/Dirs.mk
+
+#{{{ per-directory-and-module source listing hooks
+
+define csrcall_moddir_tpl
+ CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
+ CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
+endef
+define cxxsrcall_moddir_tpl
+ CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
+ CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
+endef
+define hdrall_moddir_tpl
+ HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
+ HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
+endef
+define objects_moddir_tpl
+ OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
+ OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
+ OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
+endef
+MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) csrcall cxxsrcall hdrall objects
+
+#}}}
+#{{{ per-module compilation flag hooks
+
+define FLAGS_module_tpl
+ $$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
+endef
+define C_module_tpl
+ %_$(1).o: %.c
+ $$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
+ mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
+endef
+define CXX_module_tpl
+ %_$(1).o: %.cpp
+ $$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
+ mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
+endef
+define LD_module_tpl
+ $$($(1)_BINARY): $$(OBJECTS_$(1))
+ $$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
+endef
+MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS C CXX LD
+
+#}}}
+#{{{ post-module combination lists
+
+define targets_postmod_tpl
+ OBJECTS:=$$(foreach mod,$(MODULES),$$(OBJECTS_$$(mod)))
+ BINARIES:=$$(foreach mod,$(MODULES),$$($$(mod)_BINARY))
+ #DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
+ DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
+ -include $$(DEPENDS)
+endef
+POSTMOD_TEMPLATES := $(POSTMOD_TEMPLATES) targets
+
+#}}}
+#{{{ dependency rule
+
+define depends_rules_tpl
+ -include $$(DEPENDS)
+ all: $$(BINARIES)
+endef
+RULES_TEMPLATES := $(RULES_TEMPLATES) depends
+
+#}}}
+
+endif
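The compile templates use the guarded auto-dependency idiom. Expanded for a module named `app` (the name is assumed for illustration), C_module_tpl yields effectively:

%_app.o: %.c
	$(CC) $(CFLAGS_app) $(CPPFLAGS_app) -MT $@ -MD -MP -MF $(dir $*)$(DEPDIR)/$(notdir $*)_app.Td -c -o $@ $<
	mv -f $(dir $*)$(DEPDIR)/$(notdir $*)_app.Td $(dir $*)$(DEPDIR)/$(notdir $*)_app.d

Dependencies are written to a temporary .Td and renamed to .d only after the compiler exits successfully, so an interrupted build cannot leave a truncated dependency file for `-include $(DEPENDS)` to pick up.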
diff --git a/mk/Dirs.mk b/mk/Dirs.mk
new file mode 100644
index 0000000..33ce990
--- /dev/null
+++ b/mk/Dirs.mk
@@ -0,0 +1,80 @@
+#!/usr/bin/make -f
+
+$(if $(MK_INCLUDE),,$(error This makefile is meant for inclusion by other makefiles))
+
+ifndef DIRS_MK_INCLUDED
+DIRS_MK_INCLUDED := 1
+
+#{{{ directory enter hooks
+
+define checkdir_enter_tpl
+ $$(if $$(directory),,$$(error Set the `directory` variable before including this makefile in other makefiles))
+endef
+define enter_enter_tpl
+ supsp := $$(sp)
+ sp := $$(lastsp).x
+ lastsp := $$(sp)
+ sp_list := $$(sp_list) $$(sp)
+ dirstack_$$(sp) := $$(d)
+ d := $$(directory)
+ DIRECTORIES := $$(DIRECTORIES) $$(d)
+endef
+define subdirs_enter_tpl
+ SUBDIRS :=
+endef
+ENTER_TEMPLATES := checkdir enter subdirs $(ENTER_TEMPLATES)
+define enter_directory
+ $(foreach tpl,$(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
+endef
+
+#}}}
+#{{{ directory leave hooks
+
+define addextras_leave_tpl
+ DATA := $(DATA) $(DATA_$(sp))
+ DOC := $(DOC) $(DOC_$(sp))
+endef
+define include_subdir
+ directory := $$(d)/$(1)
+ include $$(SRCDIR)/$$(directory)/Dir.mk
+endef
+define include_subdir_list
+ $$(foreach subdir,$(1),$$(eval $$(call include_subdir,$$(subdir))))
+endef
+define subdirs_leave_tpl
+ SUBDIRS_$$(sp) := $$(SUBDIRS)
+ $$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
+ SUBDIRS :=
+endef
+define leave_leave_tpl
+ d := $$(dirstack_$$(sp))
+ sp := $$(supsp)
+endef
+LEAVE_TEMPLATES := $(LEAVE_TEMPLATES) addextras subdirs leave
+define leave_directory
+ $(foreach tpl,$(LEAVE_TEMPLATES),$$(eval $$(call $(tpl)_leave_tpl)))
+endef
+
+#}}}
+#{{{ hook top-level directory into loading
+
+define dirs_config_tpl
+ directory := .
+ include $(SRCDIR)/Dir.mk
+endef
+CONFIG_TEMPLATES := dirs $(CONFIG_TEMPLATES)
+
+#}}}
+#{{{ create hook mechanism for per-module-and-dir config hooks
+
+define process_module_directory
+ $$(foreach tpl,$$(MODDIR_TEMPLATES),$$(eval $$(call $$(tpl)_moddir_tpl,$(1),$(2))))
+endef
+define dirs_module_tpl
+ $$(foreach sp,$$(sp_list),$$(eval $$(call process_module_directory,$(1),$$(sp))))
+endef
+MOD_TEMPLATES := dirs $(MOD_TEMPLATES)
+
+#}}}
+
+endif
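With that, a tree consumes the layout through Config.mk plus one Dir.mk per directory. A minimal sketch follows — the module name `app`, the `src` subdirectory, and the source list are assumptions, not part of the commit:

# Config.mk
ADDON_MK := C_Cpp              # Build.mk includes mk/C_Cpp.mk, which pulls in mk/Dirs.mk
MODULES := app
app_BINARY := app

# Dir.mk (top level)
$(eval $(enter_directory))
SUBDIRS := src                 # src/Dir.mk is included by the subdirs leave hook
$(eval $(leave_directory))

# src/Dir.mk
$(eval $(enter_directory))
CSOURCES_$(sp) := $(d)/main.c  # collected into OBJECTS_app by the moddir hooks
$(eval $(leave_directory))

Each Dir.mk brackets its settings with the enter/leave macros so that `sp` and `d` always name the directory currently being read.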
|
devnev/skyways
|
2cf926930c8293c32c902f16e40486b98da95a52
|
Made Rules.mk more modular by allowing various hooks.
|
diff --git a/Rules.mk b/Rules.mk
index 9f3d662..f0dd5a8 100644
--- a/Rules.mk
+++ b/Rules.mk
@@ -1,116 +1,154 @@
+#!/usr/bin/make
default: all
MK_INCLUDE:=1
SRCDIR:=.
BUILDDIR:=.
DEPDIR:=.deps
+#{{{ load configuration
+#{{{ include build configuration
+
include $(BUILDDIR)/Config.mk
-define enter_directory
+#}}}
+#{{{ directory enter and leave hooks
+
+define checkdir_enter_tpl
$$(if $$(directory),,$$(error Set the `directory` variable before including this makefile in other makefiles))
+endef
+define enter_enter_tpl
supsp := $$(sp)
sp := $$(lastsp).x
lastsp := $$(sp)
sp_list := $$(sp_list) $$(sp)
dirstack_$$(sp) := $$(d)
d := $$(directory)
DIRECTORIES := $$(DIRECTORIES) $$(d)
endef
-
-define leave_directory
- SUBDIRS_$$(sp) := $$(SUBDIRS)
+define subdirs_enter_tpl
SUBDIRS :=
- $$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
- d := $$(dirstack_$$(sp))
- sp := $$(supsp)
+endef
+ENTER_TEMPLATES := checkdir enter subdirs $(ENTER_TEMPLATES)
+define enter_directory
+ $(foreach tpl,$(ENTER_TEMPLATES),$$(eval $$(call $(tpl)_enter_tpl)))
endef
define include_subdir
directory := $$(d)/$(1)
include $$(SRCDIR)/$$(directory)/Dir.mk
endef
-
define include_subdir_list
$$(foreach subdir,$(1),$$(eval $$(call include_subdir,$$(subdir))))
endef
+define subdirs_leave_tpl
+ SUBDIRS_$$(sp) := $$(SUBDIRS)
+ $$(if $$(SUBDIRS_$$(sp)),$$(eval $$(call include_subdir_list,$$(SUBDIRS_$$(sp)))),)
+ SUBDIRS :=
+endef
+define leave_leave_tpl
+ d := $$(dirstack_$$(sp))
+ sp := $$(supsp)
+endef
+LEAVE_TEMPLATES := $(LEAVE_TEMPLATES) subdirs leave
+define leave_directory
+ $(foreach tpl,$(LEAVE_TEMPLATES),$$(eval $$(call $(tpl)_leave_tpl)))
+endef
+
+#}}}
+#{{{ include top directory configuration
directory := .
include $(SRCDIR)/Dir.mk
+#}}}
+#}}}
+#{{{ process configuration
+#{{{ per-directory-and-module hooks
+
define csrcall_moddir_tpl
CSOURCES_$(1)_$(2):=$$(CSOURCES_$(1)_$(2)) $$(CSOURCES_$(2))
CSOURCES_$(1):=$$(CSOURCES_$(1)) $$(CSOURCES_$(1)_$(2))
endef
define cxxsrcall_moddir_tpl
CXXSOURCES_$(1)_$(2):=$$(CXXSOURCES_$(1)_$(2)) $$(CXXSOURCES_$(2))
CXXSOURCES_$(1):=$$(CXXSOURCES_$(1)) $$(CXXSOURCES_$(1)_$(2))
endef
define hdrall_moddir_tpl
HEADERS_$(1)_$(2):=$$(HEADERS_$(1)_$(2)) $$(HEADERS_$(2))
HEADERS_$(1):=$$(HEADERS_$(1)) $$(HEADERS_$(1)_$(2))
endef
define objects_moddir_tpl
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.c,%_$(1).o,$$(CSOURCES_$(1)_$(2)))
OBJECTS_$(1)_$(2):=$$(OBJECTS_$(1)_$(2)) $$(patsubst %.cpp,%_$(1).o,$$(CXXSOURCES_$(1)_$(2)))
OBJECTS_$(1):=$$(OBJECTS_$(1)) $$(OBJECTS_$(1)_$(2))
endef
MODDIR_TEMPLATES := $(MODDIR_TEMPLATES) \
csrcall_moddir_tpl cxxsrcall_moddir_tpl hdrall_moddir_tpl objects_moddir_tpl
-
define process_module_directory
$$(foreach tpl,$$(MODDIR_TEMPLATES),$$(eval $$(call $$(tpl),$(1),$(2))))
endef
+#}}}
+#{{{ per-modules hooks
+
define FLAGS_mod_template
$$(foreach flag,CFLAGS CXXFLAGS CPPFLAGS LDFLAGS LIBS,$$(eval $$(flag)_$(1):=$$($$(flag)) $$($(1)_$$(flag)) $$($$(flag)_$(1))))
endef
define C_mod_template
%_$(1).o: %.c
$$(CC) $$(CFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define CXX_mod_template
%_$(1).o: %.cpp
$$(CXX) $$(CXXFLAGS_$(1)) $$(CPPFLAGS_$(1)) -MT $$@ -MD -MP -MF $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td -c -o $$@ $$<
mv -f $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).Td $$(dir $$*)$$(DEPDIR)/$$(notdir $$*)_$(1).d
endef
define LD_mod_template
$$($(1)_BINARY): $$(OBJECTS_$(1))
$$(LINK.o) $$(LDFLAGS_$(1)) $$(LIBS_$(1)) $$^ $$(LOADLIBES) $$(LDLIBS) -o $$@
endef
MOD_TEMPLATES := $(MOD_TEMPLATES) FLAGS_mod_template C_mod_template CXX_mod_template LD_mod_template
-
define process_module
$$(foreach sp,$$(sp_list),$$(eval $$(call process_module_directory,$(1),$$(sp))))
$$(foreach tpl,$$(MOD_TEMPLATES),$$(eval $$(call $$(tpl),$(1))))
endef
+#}}}
+#{{{ process module list
+
define process_modules
$$(foreach mod,$(1),$$(eval $$(call process_module,$$(mod))))
OBJECTS:=$$(foreach mod,$(1),$$(OBJECTS_$$(mod)))
BINARIES:=$$(foreach mod,$(1),$$($$(mod)_BINARY))
#DEPENDS:=$$(foreach obj,$$(OBJECTS),$$(dir $$(obj))$$(DEPDIR)/$$(basename $$(obj)).d)
DEPENDS:=$$(join $$(dir $$(OBJECTS)),$$(addprefix $$(DEPDIR)/,$$(addsuffix .d,$$(basename $$(notdir $$(OBJECTS))))))
endef
-
$(eval $(call process_modules,$(MODULES)))
+
+#}}}
+#}}}
+#{{{ misc
+
-include $(DEPENDS)
CLEAN:=$(CLEAN) $(OBJECTS) $(BINARIES) $(DEPENDS)
.PHONY: default all
all: $(BINARIES)
.PHONY: clean
clean:
rm -f $(CLEAN) || true
.PHONY: force
force:
@/bin/true
$(SRCDIR)/Rules.mk: force
@mkdir -p $(patsubst %,$(BUILDDIR)/%,$(DIRECTORIES)) $(patsubst %,$(BUILDDIR)/%/$(DEPDIR),$(DIRECTORIES))
+#}}}
+# vim: fdm=marker
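The point of this rewrite is that the formerly hard-coded enter/leave and module steps are now looked up through template-name lists seeded from whatever Config.mk has already registered (enter hooks run after the built-ins, leave hooks before them). A hedged example of a project-level hook this enables — the `data` name and the DATA list are hypothetical here, though the follow-up commit's mk/Dirs.mk ships a similar `addextras` leave hook:

# In Config.mk:
define data_enter_tpl
  DATA_$$(sp) :=
endef
define data_leave_tpl
  DATA := $$(DATA) $$(DATA_$$(sp))
endef
ENTER_TEMPLATES := data
LEAVE_TEMPLATES := data

Each Dir.mk can then append files to DATA_$(sp), and the per-directory lists are merged into DATA as directories are left.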
|