repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/util/resources/wn/Examples.java |
/**
* Java WordNet Library (JWNL)
* See the documentation for copyright information.
*
* @version 1.1
*/
package cc.mallet.util.resources.wn;
import net.didion.jwnl.JWNL;
import net.didion.jwnl.JWNLException;
import net.didion.jwnl.data.IndexWord;
import net.didion.jwnl.data.POS;
import net.didion.jwnl.data.PointerType;
import net.didion.jwnl.data.PointerUtils;
import net.didion.jwnl.data.list.PointerTargetNodeList;
import net.didion.jwnl.data.list.PointerTargetTree;
import net.didion.jwnl.data.relationship.AsymmetricRelationship;
import net.didion.jwnl.data.relationship.Relationship;
import net.didion.jwnl.data.relationship.RelationshipFinder;
import net.didion.jwnl.data.relationship.RelationshipList;
import net.didion.jwnl.dictionary.Dictionary;
import java.io.FileInputStream;
import java.util.Iterator;
/** A class to demonstrate the functionality of the JWNL package. */
public class Examples {
private static final String USAGE = "java Examples <properties file>";
public static void main(String[] args) {
if (args.length != 1) {
System.out.println(USAGE);
System.exit(-1);
}
String propsFile = args[0];
try {
// initialize JWNL (this must be done before JWNL can be used)
JWNL.initialize(new FileInputStream(propsFile));
new Examples().go();
} catch (Exception ex) {
ex.printStackTrace();
System.exit(-1);
}
}
private IndexWord ACCOMPLISH;
private IndexWord DOG;
private IndexWord CAT;
private IndexWord FUNNY;
private IndexWord DROLL;
private String MORPH_PHRASE = "running-away";
public Examples() throws JWNLException {
ACCOMPLISH = Dictionary.getInstance().getIndexWord(POS.VERB, "accomplish");
DOG = Dictionary.getInstance().getIndexWord(POS.NOUN, "dog");
CAT = Dictionary.getInstance().lookupIndexWord(POS.NOUN, "cat");
FUNNY = Dictionary.getInstance().lookupIndexWord(POS.ADJECTIVE, "funny");
DROLL = Dictionary.getInstance().lookupIndexWord(POS.ADJECTIVE, "droll");
}
public void go() throws JWNLException {
demonstrateMorphologicalAnalysis(MORPH_PHRASE);
demonstrateListOperation(ACCOMPLISH);
demonstrateTreeOperation(DOG);
demonstrateAsymmetricRelationshipOperation(DOG, CAT);
demonstrateSymmetricRelationshipOperation(FUNNY, DROLL);
}
private void demonstrateMorphologicalAnalysis(String phrase) throws JWNLException {
// "running-away" is kind of a hard case because it involves
// two words that are joined by a hyphen, and one of the words
// is not stemmed. So we have to both remove the hyphen and stem
// "running" before we get to an entry that is in WordNet
System.out.println("Base form for \"" + phrase + "\": " +
Dictionary.getInstance().lookupIndexWord(POS.VERB, phrase));
}
private void demonstrateListOperation(IndexWord word) throws JWNLException {
// Get all of the hypernyms (parents) of the first sense of <var>word</var>
PointerTargetNodeList hypernyms = PointerUtils.getInstance().getDirectHypernyms(word.getSense(1));
System.out.println("Direct hypernyms of \"" + word.getLemma() + "\":");
hypernyms.print();
}
private void demonstrateTreeOperation(IndexWord word) throws JWNLException {
// Get all the hyponyms (children) of the first sense of <var>word</var>
PointerTargetTree hyponyms = PointerUtils.getInstance().getHyponymTree(word.getSense(1));
System.out.println("Hyponyms of \"" + word.getLemma() + "\":");
hyponyms.print();
}
private void demonstrateAsymmetricRelationshipOperation(IndexWord start, IndexWord end) throws JWNLException {
// Try to find a relationship between the first sense of <var>start</var> and the first sense of <var>end</var>
RelationshipList list = RelationshipFinder.getInstance().findRelationships(start.getSense(1), end.getSense(1), PointerType.HYPERNYM);
System.out.println("Hypernym relationship between \"" + start.getLemma() + "\" and \"" + end.getLemma() + "\":");
for (Iterator itr = list.iterator(); itr.hasNext();) {
((Relationship) itr.next()).getNodeList().print();
}
System.out.println("Common Parent Index: " + ((AsymmetricRelationship) list.get(0)).getCommonParentIndex());
System.out.println("Depth: " + ((Relationship) list.get(0)).getDepth());
}
private void demonstrateSymmetricRelationshipOperation(IndexWord start, IndexWord end) throws JWNLException {
// find all synonyms that <var>start</var> and <var>end</var> have in common
RelationshipList list = RelationshipFinder.getInstance().findRelationships(start.getSense(1), end.getSense(1), PointerType.SIMILAR_TO);
System.out.println("Synonym relationship between \"" + start.getLemma() + "\" and \"" + end.getLemma() + "\":");
for (Iterator itr = list.iterator(); itr.hasNext();) {
((Relationship) itr.next()).getNodeList().print();
}
System.out.println("Depth: " + ((Relationship) list.get(0)).getDepth());
}
}
| 4,840 | 41.840708 | 137 | java |
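A condensed sketch of the same lookup chain shown in Examples.java above, assuming JWNL is initialized from a properties file passed as the first argument; the class name HypernymLookup and the word "dog" are illustrative placeholders, not part of the original file:
import java.io.FileInputStream;
import net.didion.jwnl.JWNL;
import net.didion.jwnl.data.IndexWord;
import net.didion.jwnl.data.POS;
import net.didion.jwnl.data.PointerUtils;
import net.didion.jwnl.data.list.PointerTargetNodeList;
import net.didion.jwnl.dictionary.Dictionary;
public class HypernymLookup {
    public static void main(String[] args) throws Exception {
        // Initialize JWNL from a properties file before any dictionary access.
        JWNL.initialize(new FileInputStream(args[0]));
        // Look up the noun "dog" and print the direct hypernyms (parents) of its first sense.
        IndexWord dog = Dictionary.getInstance().lookupIndexWord(POS.NOUN, "dog");
        PointerTargetNodeList hypernyms =
                PointerUtils.getInstance().getDirectHypernyms(dog.getSense(1));
        hypernyms.print();
    }
}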
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SimpleTagger.java |
/* XXX Copyright (C) 2003 University of Pennsylvania.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.fst;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Reader;
import java.util.ArrayList;
import java.util.Random;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import cc.mallet.types.Alphabet;
import cc.mallet.types.AugmentableFeatureVector;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelSequence;
import cc.mallet.types.Sequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.util.CommandOption;
import cc.mallet.util.MalletLogger;
/**
* This class's main method trains, tests, or runs a generic CRF-based
* sequence tagger.
* <p>
* Training and test files consist of blocks of lines, one block for each instance,
* separated by blank lines. Each block of lines should have the first form
* specified for the input of {@link SimpleTaggerSentence2FeatureVectorSequence}.
* A variety of command line options control the operation of the main program, as
* described in the comments for {@link #main main}.
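* For example, a single training instance for a three-token sentence could be encoded
* as the block below (the feature names and labels are hypothetical):
* <pre>
* Bill CAPITALIZED noun
* slept non-noun
* here LOWERCASE STOPWORD non-noun
* </pre>
* where the last token on each line is the label and the remaining tokens name the
* binary features that are on for that position.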
*
* @author Fernando Pereira <a href="mailto:[email protected]">[email protected]</a>
* @version 1.0
*/
public class SimpleTagger
{
private static Logger logger =
MalletLogger.getLogger(SimpleTagger.class.getName());
/**
* No <code>SimpleTagger</code> objects allowed.
*/
private SimpleTagger()
{
}
/**
* Converts an external encoding of a sequence of elements with binary
* features to a {@link FeatureVectorSequence}. If target processing
* is on (training or labeled test data), it extracts element labels
* from the external encoding to create a target {@link LabelSequence}.
* Two external encodings are supported:
* <ol>
* <li> A {@link String} containing lines of whitespace-separated tokens.</li>
* <li> a {@link String}<code>[][]</code>.</li>
* </ol>
*
* Both represent rows of tokens. When target processing is on, the last token
* in each row is the label of the sequence element represented by
* this row. All other tokens in the row (or all tokens in the row when target
* processing is off) are the names of features that are on for
* the sequence element described by the row.
*
*/
public static class SimpleTaggerSentence2FeatureVectorSequence extends Pipe
{
// gdruck
// Previously, there was no serialVersionUID. This is the ID that would
// have been automatically generated by the compiler. Therefore,
// other changes should not break serialization.
private static final long serialVersionUID = -2059308802200728625L;
/**
* Creates a new
* <code>SimpleTaggerSentence2FeatureVectorSequence</code> instance.
*/
public SimpleTaggerSentence2FeatureVectorSequence ()
{
super (new Alphabet(), new LabelAlphabet());
}
/**
* Parses a string representing a sequence of rows of tokens into an
* array of arrays of tokens.
*
* @param sentence a <code>String</code>
* @return the corresponding array of arrays of tokens.
*/
private String[][] parseSentence(String sentence)
{
String[] lines = sentence.split("\n");
String[][] tokens = new String[lines.length][];
for (int i = 0; i < lines.length; i++)
tokens[i] = lines[i].split(" ");
return tokens;
}
public Instance pipe (Instance carrier)
{
Object inputData = carrier.getData();
Alphabet features = getDataAlphabet();
LabelAlphabet labels;
LabelSequence target = null;
String [][] tokens;
if (inputData instanceof String)
tokens = parseSentence((String)inputData);
else if (inputData instanceof String[][])
tokens = (String[][])inputData;
else
throw new IllegalArgumentException("Not a String or String[][]; got "+inputData);
FeatureVector[] fvs = new FeatureVector[tokens.length];
if (isTargetProcessing())
{
labels = (LabelAlphabet)getTargetAlphabet();
target = new LabelSequence (labels, tokens.length);
}
for (int l = 0; l < tokens.length; l++) {
int nFeatures;
if (isTargetProcessing())
{
if (tokens[l].length < 1)
throw new IllegalStateException ("Missing label at line " + l + " instance "+carrier.getName ());
nFeatures = tokens[l].length - 1;
target.add(tokens[l][nFeatures]);
}
else nFeatures = tokens[l].length;
ArrayList<Integer> featureIndices = new ArrayList<Integer>();
for (int f = 0; f < nFeatures; f++) {
int featureIndex = features.lookupIndex(tokens[l][f]);
// gdruck
// If the data alphabet's growth is stopped, featureIndex
// will be -1. Ignore these features.
if (featureIndex >= 0) {
featureIndices.add(featureIndex);
}
}
int[] featureIndicesArr = new int[featureIndices.size()];
for (int index = 0; index < featureIndices.size(); index++) {
featureIndicesArr[index] = featureIndices.get(index);
}
fvs[l] = featureInductionOption.value ? new AugmentableFeatureVector(features, featureIndicesArr, null, featureIndicesArr.length) :
new FeatureVector(features, featureIndicesArr);
}
carrier.setData(new FeatureVectorSequence(fvs));
if (isTargetProcessing())
carrier.setTarget(target);
else
carrier.setTarget(new LabelSequence(getTargetAlphabet()));
return carrier;
}
}
private static final CommandOption.Double gaussianVarianceOption = new CommandOption.Double
(SimpleTagger.class, "gaussian-variance", "DECIMAL", true, 10.0,
"The gaussian prior variance used for training.", null);
private static final CommandOption.Boolean trainOption = new CommandOption.Boolean
(SimpleTagger.class, "train", "true|false", true, false,
"Whether to train", null);
private static final CommandOption.Boolean printWeights = new CommandOption.Boolean
(SimpleTagger.class, "printweights", "true|false", true, false,
"print out weights?", null);
private static final CommandOption.String testOption = new CommandOption.String
(SimpleTagger.class, "test", "lab or seg=start-1.continue-1,...,start-n.continue-n",
true, null,
"Test measuring labeling or segmentation (start-i, continue-i) accuracy", null);
private static final CommandOption.String stdinOption = new CommandOption.String
(SimpleTagger.class, "stdin", "true",
true, null,
"Read data from stdin (separated by double newlines)", null);
private static final CommandOption.File modelOption = new CommandOption.File
(SimpleTagger.class, "model-file", "FILENAME", true, null,
"The filename for reading (train/run) or saving (train) the model.", null);
private static final CommandOption.Double trainingFractionOption = new CommandOption.Double
(SimpleTagger.class, "training-proportion", "DECIMAL", true, 0.5,
"Fraction of data to use for training in a random split.", null);
private static final CommandOption.Integer randomSeedOption = new CommandOption.Integer
(SimpleTagger.class, "random-seed", "INTEGER", true, 0,
"The random seed for randomly selecting a proportion of the instance list for training", null);
private static final CommandOption.IntegerArray ordersOption = new CommandOption.IntegerArray
(SimpleTagger.class, "orders", "COMMA-SEP-DECIMALS", true, new int[]{1},
"List of label Markov orders (main and backoff) ", null);
private static final CommandOption.String forbiddenOption = new CommandOption.String(
SimpleTagger.class, "forbidden", "REGEXP", true,
"\\s", "label1,label2 transition forbidden if it matches this", null);
private static final CommandOption.String allowedOption = new CommandOption.String(
SimpleTagger.class, "allowed", "REGEXP", true,
".*", "label1,label2 transition allowed only if it matches this", null);
private static final CommandOption.String defaultOption = new CommandOption.String(
SimpleTagger.class, "default-label", "STRING", true, "O",
"Label for initial context and uninteresting tokens", null);
private static final CommandOption.Integer iterationsOption = new CommandOption.Integer(
SimpleTagger.class, "iterations", "INTEGER", true, 500,
"Number of training iterations", null);
private static final CommandOption.Boolean viterbiOutputOption = new CommandOption.Boolean(
SimpleTagger.class, "viterbi-output", "true|false", true, false,
"Print Viterbi periodically during training", null);
private static final CommandOption.Boolean connectedOption = new CommandOption.Boolean(
SimpleTagger.class, "fully-connected", "true|false", true, true,
"Include all allowed transitions, even those not in training data", null);
private static final CommandOption.String weightsOption = new CommandOption.String(
SimpleTagger.class, "weights", "sparse|some-dense|dense", true, "some-dense",
"Use sparse, some dense (using a heuristic), or dense features on transitions.", null);
private static final CommandOption.Boolean continueTrainingOption = new CommandOption.Boolean(
SimpleTagger.class, "continue-training", "true|false", false, false,
"Continue training from model specified by --model-file", null);
private static final CommandOption.Integer nBestOption = new CommandOption.Integer(
SimpleTagger.class, "n-best", "INTEGER", true, 1,
"How many answers to output", null);
private static final CommandOption.Integer cacheSizeOption = new CommandOption.Integer(
SimpleTagger.class, "cache-size", "INTEGER", true, 100000,
"How much state information to memoize in n-best decoding", null);
private static final CommandOption.Boolean includeInputOption = new CommandOption.Boolean(
SimpleTagger.class, "include-input", "true|false", true, false,
"Whether to include the input features when printing decoding output", null);
private static final CommandOption.Boolean featureInductionOption = new CommandOption.Boolean(
SimpleTagger.class, "feature-induction", "true|false", true, false,
"Whether to perform feature induction during training", null);
private static final CommandOption.Integer numThreads = new CommandOption.Integer(
SimpleTagger.class, "threads", "INTEGER", true, 1,
"Number of threads to use for CRF training.", null);
private static final CommandOption.List commandOptions =
new CommandOption.List (
"Training, testing and running a generic tagger.",
new CommandOption[] {
gaussianVarianceOption,
trainOption,
printWeights,
iterationsOption,
testOption,
stdinOption,
trainingFractionOption,
modelOption,
randomSeedOption,
ordersOption,
forbiddenOption,
allowedOption,
defaultOption,
viterbiOutputOption,
connectedOption,
weightsOption,
continueTrainingOption,
nBestOption,
cacheSizeOption,
includeInputOption,
featureInductionOption,
numThreads
});
/**
* Create and train a CRF model from the given training data,
* optionally testing it on the given test data.
*
* @param training training data
* @param testing test data (possibly <code>null</code>)
* @param eval accuracy evaluator (possibly <code>null</code>)
* @param orders label Markov orders (main and backoff)
* @param defaultLabel default label
* @param forbidden regular expression specifying impossible label
* transitions <em>current</em><code>,</code><em>next</em>
* (<code>null</code> indicates no forbidden transitions)
* @param allowed regular expression specifying allowed label transitions
* (<code>null</code> indicates everything is allowed that is not forbidden)
* @param connected whether to include even transitions not
* occurring in the training data.
* @param iterations number of training iterations
* @param var Gaussian prior variance
* @return the trained model
*/
public static CRF train(InstanceList training, InstanceList testing,
TransducerEvaluator eval, int[] orders,
String defaultLabel,
String forbidden, String allowed,
boolean connected, int iterations, double var, CRF crf)
{
Pattern forbiddenPat = Pattern.compile(forbidden);
Pattern allowedPat = Pattern.compile(allowed);
if (crf == null) {
crf = new CRF(training.getPipe(), (Pipe)null);
String startName =
crf.addOrderNStates(training, orders, null,
defaultLabel, forbiddenPat, allowedPat,
connected);
for (int i = 0; i < crf.numStates(); i++)
crf.getState(i).setInitialWeight (Transducer.IMPOSSIBLE_WEIGHT);
crf.getState(startName).setInitialWeight(0.0);
}
logger.info("Training on " + training.size() + " instances");
if (testing != null)
logger.info("Testing on " + testing.size() + " instances");
assert(numThreads.value > 0);
if (numThreads.value > 1) {
CRFTrainerByThreadedLabelLikelihood crft = new CRFTrainerByThreadedLabelLikelihood (crf,numThreads.value);
crft.setGaussianPriorVariance(var);
if (weightsOption.value.equals("dense")) {
crft.setUseSparseWeights(false);
crft.setUseSomeUnsupportedTrick(false);
}
else if (weightsOption.value.equals("some-dense")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(true);
}
else if (weightsOption.value.equals("sparse")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(false);
}
else {
throw new RuntimeException("Unknown weights option: " + weightsOption.value);
}
if (featureInductionOption.value) {
throw new IllegalArgumentException("Multi-threaded feature induction is not yet supported.");
} else {
boolean converged;
for (int i = 1; i <= iterations; i++) {
converged = crft.train (training, 1);
if (i % 1 == 0 && eval != null) // Change the 1 to higher integer to evaluate less often
eval.evaluate(crft);
if (viterbiOutputOption.value && i % 10 == 0)
new ViterbiWriter("", new InstanceList[] {training, testing}, new String[] {"training", "testing"}).evaluate(crft);
if (converged)
break;
}
}
crft.shutdown();
}
else {
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf);
crft.setGaussianPriorVariance(var);
if (weightsOption.value.equals("dense")) {
crft.setUseSparseWeights(false);
crft.setUseSomeUnsupportedTrick(false);
}
else if (weightsOption.value.equals("some-dense")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(true);
}
else if (weightsOption.value.equals("sparse")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(false);
}
else {
throw new RuntimeException("Unknown weights option: " + weightsOption.value);
}
if (featureInductionOption.value) {
crft.trainWithFeatureInduction(training, null, testing, eval, iterations, 10, 20, 500, 0.5, false, null);
} else {
boolean converged;
for (int i = 1; i <= iterations; i++) {
converged = crft.train (training, 1);
if (i % 1 == 0 && eval != null) // Change the 1 to higher integer to evaluate less often
eval.evaluate(crft);
if (viterbiOutputOption.value && i % 10 == 0)
new ViterbiWriter("", new InstanceList[] {training, testing}, new String[] {"training", "testing"}).evaluate(crft);
if (converged)
break;
}
}
}
return crf;
}
/**
* Test a transducer on the given test data, evaluating accuracy
* with the given evaluator
*
* @param model a <code>Transducer</code>
* @param eval accuracy evaluator
* @param testing test data
*/
public static void test(TransducerTrainer tt, TransducerEvaluator eval,
InstanceList testing)
{
eval.evaluateInstanceList(tt, testing, "Testing");
}
/**
* Apply a transducer to an input sequence to produce the k highest-scoring
* output sequences.
*
* @param model the <code>Transducer</code>
* @param input the input sequence
* @param k the number of answers to return
* @return array of the k highest-scoring output sequences
*/
public static Sequence[] apply(Transducer model, Sequence input, int k)
{
Sequence[] answers;
if (k == 1) {
answers = new Sequence[1];
answers[0] = model.transduce (input);
}
else {
MaxLatticeDefault lattice =
new MaxLatticeDefault (model, input, null, cacheSizeOption.value());
answers = lattice.bestOutputSequences(k).toArray(new Sequence[0]);
}
return answers;
}
/**
* Command-line wrapper to train, test, or run a generic CRF-based tagger.
*
* @param args the command line arguments. Options (shell and Java quoting should be added as needed):
*<dl>
*<dt><code>--help</code> <em>boolean</em></dt>
*<dd>Print this command line option usage information. Give <code>true</code> for longer documentation. Default is <code>false</code>.</dd>
*<dt><code>--prefix-code</code> <em>Java-code</em></dt>
*<dd>Java code you want run before any other interpreted code. Note that the text is interpreted without modification, so unlike some other Java code options, you need to include any necessary 'new's. Default is null.</dd>
*<dt><code>--gaussian-variance</code> <em>positive-number</em></dt>
*<dd>The Gaussian prior variance used for training. Default is 10.0.</dd>
*<dt><code>--train</code> <em>boolean</em></dt>
*<dd>Whether to train. Default is <code>false</code>.</dd>
*<dt><code>--iterations</code> <em>positive-integer</em></dt>
*<dd>Number of training iterations. Default is 500.</dd>
*<dt><code>--test</code> <code>lab</code> or <code>seg=</code><em>start-1</em><code>.</code><em>continue-1</em><code>,</code>...<code>,</code><em>start-n</em><code>.</code><em>continue-n</em></dt>
*<dd>Test measuring labeling or segmentation (<em>start-i</em>, <em>continue-i</em>) accuracy. Default is no testing.</dd>
*<dt><code>--training-proportion</code> <em>number-between-0-and-1</em></dt>
*<dd>Fraction of data to use for training in a random split. Default is 0.5.</dd>
*<dt><code>--model-file</code> <em>filename</em></dt>
*<dd>The filename for reading (train/run) or saving (train) the model. Default is null.</dd>
*<dt><code>--random-seed</code> <em>integer</em></dt>
*<dd>The random seed for randomly selecting a proportion of the instance list for training. Default is 0.</dd>
*<dt><code>--orders</code> <em>comma-separated-integers</em></dt>
*<dd>List of label Markov orders (main and backoff). Default is 1.</dd>
*<dt><code>--forbidden</code> <em>regular-expression</em></dt>
*<dd>If <em>label-1</em><code>,</code><em>label-2</em> matches the expression, the corresponding transition is forbidden. Default is <code>\\s</code> (nothing forbidden).</dd>
*<dt><code>--allowed</code> <em>regular-expression</em></dt>
*<dd>If <em>label-1</em><code>,</code><em>label-2</em> does not match the expression, the corresponding transition is forbidden. Default is <code>.*</code> (everything allowed).</dd>
*<dt><code>--default-label</code> <em>string</em></dt>
*<dd>Label for initial context and uninteresting tokens. Default is <code>O</code>.</dd>
*<dt><code>--viterbi-output</code> <em>boolean</em></dt>
*<dd>Print Viterbi periodically during training. Default is <code>false</code>.</dd>
*<dt><code>--fully-connected</code> <em>boolean</em></dt>
*<dd>Include all allowed transitions, even those not in training data. Default is <code>true</code>.</dd>
*<dt><code>--weights</code> <em>sparse|some-dense|dense</em></dt>
*<dd>Create sparse, some dense (using a heuristic), or dense features on transitions. Default is <code>some-dense</code>.</dd>
*<dt><code>--n-best</code> <em>positive-integer</em></dt>
*<dd>Number of answers to output when applying model. Default is 1.</dd>
*<dt><code>--include-input</code> <em>boolean</em></dt>
*<dd>Whether to include input features when printing decoding output. Default is <code>false</code>.</dd>
*<dt><code>--threads</code> <em>positive-integer</em></dt>
*<dd>Number of threads for CRF training. Default is 1.</dd>
*</dl>
* Remaining arguments:
*<ul>
*<li><em>training-data-file</em> if training </li>
*<li><em>training-and-test-data-file</em>, if training and testing with random split</li>
*<li><em>training-data-file</em> <em>test-data-file</em> if training and testing from separate files</li>
*<li><em>test-data-file</em> if testing</li>
*<li><em>input-data-file</em> if applying to new data (unlabeled)</li>
*</ul>
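*<p>
* For example, a hypothetical training invocation (file names are placeholders) might be
*<pre>
* java cc.mallet.fst.SimpleTagger --train true --model-file nouncrf sample_data
*</pre>
* after which the saved model could be applied to unlabeled data with
*<pre>
* java cc.mallet.fst.SimpleTagger --model-file nouncrf unlabeled_data
*</pre>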
* @exception Exception if an error occurs
*/
public static void main (String[] args) throws Exception
{
Reader trainingFile = null, testFile = null;
InstanceList trainingData = null, testData = null;
int numEvaluations = 0;
int iterationsBetweenEvals = 16;
int restArgs = commandOptions.processOptions(args);
if (restArgs == args.length)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Missing data file(s)");
}
if (trainOption.value)
{
trainingFile = new FileReader(new File(args[restArgs]));
if (testOption.value != null && restArgs < args.length - 1)
testFile = new FileReader(new File(args[restArgs+1]));
} else
testFile = new FileReader(new File(args[restArgs]));
Pipe p = null;
CRF crf = null;
TransducerEvaluator eval = null;
if (continueTrainingOption.value || !trainOption.value) {
if (modelOption.value == null)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Missing model file option");
}
ObjectInputStream s =
new ObjectInputStream(new FileInputStream(modelOption.value));
crf = (CRF) s.readObject();
s.close();
p = crf.getInputPipe();
}
else {
p = new SimpleTaggerSentence2FeatureVectorSequence();
p.getTargetAlphabet().lookupIndex(defaultOption.value);
}
if (trainOption.value)
{
p.setTargetProcessing(true);
trainingData = new InstanceList(p);
trainingData.addThruPipe(
new LineGroupIterator(trainingFile,
Pattern.compile("^\\s*$"), true));
logger.info
("Number of features in training data: "+p.getDataAlphabet().size());
if (testOption.value != null)
{
if (testFile != null)
{
testData = new InstanceList(p);
testData.addThruPipe(
new LineGroupIterator(testFile,
Pattern.compile("^\\s*$"), true));
}
else
{
Random r = new Random (randomSeedOption.value);
InstanceList[] trainingLists =
trainingData.split(
r, new double[] {trainingFractionOption.value,
1-trainingFractionOption.value});
trainingData = trainingLists[0];
testData = trainingLists[1];
}
}
} else if (testOption.value != null)
{
p.setTargetProcessing(true);
testData = new InstanceList(p);
testData.addThruPipe(
new LineGroupIterator(testFile,
Pattern.compile("^\\s*$"), true));
} else
{
p.setTargetProcessing(false);
testData = new InstanceList(p);
testData.addThruPipe(
new LineGroupIterator(testFile,
Pattern.compile("^\\s*$"), true));
}
logger.info ("Number of predicates: "+p.getDataAlphabet().size());
if (printWeights.value) {
crf.print();
System.exit(0);
}
if (testOption.value != null)
{
if (testOption.value.startsWith("lab"))
eval = new TokenAccuracyEvaluator(new InstanceList[] {trainingData, testData}, new String[] {"Training", "Testing"});
else if (testOption.value.startsWith("seg="))
{
String[] pairs = testOption.value.substring(4).split(",");
if (pairs.length < 1)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException(
"Missing segment start/continue labels: " + testOption.value);
}
String startTags[] = new String[pairs.length];
String continueTags[] = new String[pairs.length];
for (int i = 0; i < pairs.length; i++)
{
String[] pair = pairs[i].split("\\.");
if (pair.length != 2)
{
commandOptions.printUsage(true);
throw new
IllegalArgumentException(
"Incorrectly-specified segment start and end labels: " +
pairs[i]);
}
startTags[i] = pair[0];
continueTags[i] = pair[1];
}
eval = new MultiSegmentationEvaluator(new InstanceList[] {trainingData, testData}, new String[] {"Training", "Testing"},
startTags, continueTags);
}
else
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Invalid test option: " +
testOption.value);
}
}
if (p.isTargetProcessing())
{
Alphabet targets = p.getTargetAlphabet();
StringBuffer buf = new StringBuffer("Labels:");
for (int i = 0; i < targets.size(); i++)
buf.append(" ").append(targets.lookupObject(i).toString());
logger.info(buf.toString());
}
if (trainOption.value)
{
crf = train(trainingData, testData, eval,
ordersOption.value, defaultOption.value,
forbiddenOption.value, allowedOption.value,
connectedOption.value, iterationsOption.value,
gaussianVarianceOption.value, crf);
if (modelOption.value != null)
{
ObjectOutputStream s =
new ObjectOutputStream(new FileOutputStream(modelOption.value));
s.writeObject(crf);
s.close();
}
}
else
{
if (crf == null)
{
if (modelOption.value == null)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Missing model file option");
}
ObjectInputStream s =
new ObjectInputStream(new FileInputStream(modelOption.value));
crf = (CRF) s.readObject();
s.close();
}
if (eval != null)
test(new NoopTransducerTrainer(crf), eval, testData);
else
{
boolean includeInput = includeInputOption.value();
for (int i = 0; i < testData.size(); i++)
{
Sequence input = (Sequence)testData.get(i).getData();
Sequence[] outputs = apply(crf, input, nBestOption.value);
int k = outputs.length;
boolean error = false;
for (int a = 0; a < k; a++) {
if (outputs[a].size() != input.size()) {
logger.info("Failed to decode input sequence " + i + ", answer " + a);
error = true;
}
}
if (!error) {
for (int j = 0; j < input.size(); j++)
{
StringBuffer buf = new StringBuffer();
for (int a = 0; a < k; a++)
buf.append(outputs[a].get(j).toString()).append(" ");
if (includeInput) {
FeatureVector fv = (FeatureVector)input.get(j);
buf.append(fv.toString(true));
}
System.out.println(buf.toString());
}
System.out.println();
}
}
}
}
}
}
| 28,782 | 39.653955 | 226 | java |
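The same pipeline can also be driven programmatically. A minimal sketch, assuming a training file train.txt in the block format described in the class comment above; the file name and hyper-parameter values are placeholders that simply mirror the command-line defaults:
import java.io.FileReader;
import java.util.regex.Pattern;
import cc.mallet.fst.CRF;
import cc.mallet.fst.SimpleTagger;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.types.InstanceList;
public class TrainTaggerSketch {
    public static void main(String[] args) throws Exception {
        // Build the pipe and register the default label first.
        Pipe p = new SimpleTagger.SimpleTaggerSentence2FeatureVectorSequence();
        p.getTargetAlphabet().lookupIndex("O");
        p.setTargetProcessing(true);
        // Read blank-line-separated instances from the training file.
        InstanceList training = new InstanceList(p);
        training.addThruPipe(new LineGroupIterator(new FileReader("train.txt"),
                Pattern.compile("^\\s*$"), true));
        // First-order label dependencies, default label "O", default forbidden/allowed
        // patterns, fully connected, 500 iterations, Gaussian prior variance 10.0.
        CRF crf = SimpleTagger.train(training, null, null, new int[]{1}, "O",
                "\\s", ".*", true, 500, 10.0, null);
    }
}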
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFOptimizableByBatchLabelLikelihood.java |
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.logging.Logger;
import cc.mallet.optimize.Optimizable;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.MalletLogger;
/**
* Implements label likelihood gradient computations for batches of data, which can
* be easily parallelized. <p>
*
* The gradient computations are the same as that of
* <tt>CRFOptimizableByLabelLikelihood</tt>. <p>
*
* *Note*: Expectations corresponding to each batch of data can be computed in
* parallel. During gradient computation, the prior and the constraints are
* incorporated into the expectations of the last batch (see
* <tt>getBatchValue, getBatchValueGradient</tt>).
*
* *Note*: This implementation ignores instances with infinite weights (see
* <tt>getExpectationValue</tt>).
*
* @author Gaurav Chandalia
*/
public class CRFOptimizableByBatchLabelLikelihood implements Optimizable.ByCombiningBatchGradient, Serializable {
private static Logger logger = MalletLogger.getLogger(CRFOptimizableByBatchLabelLikelihood.class.getName());
static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1.0;
static final double DEFAULT_HYPERBOLIC_PRIOR_SLOPE = 0.2;
static final double DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS = 10.0;
protected CRF crf;
protected InstanceList trainingSet;
// number of batches of training set
protected int numBatches;
// batch specific expectations
protected List<CRF.Factors> expectations;
// constraints over whole training set
protected CRF.Factors constraints;
// value and gradient for each batch, to avoid sharing
protected double[] cachedValue;
protected List<double[]> cachedGradient;
boolean usingHyperbolicPrior = false;
double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
double hyperbolicPriorSlope = DEFAULT_HYPERBOLIC_PRIOR_SLOPE;
double hyperbolicPriorSharpness = DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS;
public CRFOptimizableByBatchLabelLikelihood(CRF crf, InstanceList ilist, int numBatches) {
// set up
this.crf = crf;
this.trainingSet = ilist;
this.numBatches = numBatches;
cachedValue = new double[this.numBatches];
cachedGradient = new ArrayList<double[]>(this.numBatches);
expectations = new ArrayList<CRF.Factors>(this.numBatches);
int numFactors = crf.parameters.getNumFactors();
for (int i = 0; i < this.numBatches; ++i) {
cachedGradient.add(new double[numFactors]);
expectations.add(new CRF.Factors(crf.parameters));
}
constraints = new CRF.Factors(crf.parameters);
gatherConstraints(ilist);
}
/**
* Set the constraints by running forward-backward with the <i>output label
* sequence provided</i>, thus restricting it to only those paths that agree with
* the label sequence.
*/
protected void gatherConstraints(InstanceList ilist) {
logger.info("Gathering constraints...");
assert (constraints.structureMatches(crf.parameters));
constraints.zero();
for (Instance instance : ilist) {
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
double instanceWeight = ilist.getInstanceWeight(instance);
Transducer.Incrementor incrementor =
instanceWeight == 1.0 ? constraints.new Incrementor()
: constraints.new WeightedIncrementor(instanceWeight);
new SumLatticeDefault (this.crf, input, output, incrementor);
}
constraints.assertNotNaNOrInfinite();
}
/**
* Computes log probability of a batch of training data, fill in corresponding
* expectations as well
*/
protected double getExpectationValue(int batchIndex, int[] batchAssignments) {
// Reset expectations to zero before we fill them again
CRF.Factors batchExpectations = expectations.get(batchIndex);
batchExpectations.zero();
// count the number of instances that have infinite weight
int numInfLabeledWeight = 0;
int numInfUnlabeledWeight = 0;
int numInfWeight = 0;
double value = 0;
double unlabeledWeight, labeledWeight, weight;
for (int ii = batchAssignments[0]; ii < batchAssignments[1]; ii++) {
Instance instance = trainingSet.get(ii);
double instanceWeight = trainingSet.getInstanceWeight(instance);
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
labeledWeight = new SumLatticeDefault (this.crf, input, output, null).getTotalWeight();
if (Double.isInfinite (labeledWeight)) {
++numInfLabeledWeight;
}
Transducer.Incrementor incrementor = instanceWeight == 1.0 ? batchExpectations.new Incrementor()
: batchExpectations.new WeightedIncrementor (instanceWeight);
unlabeledWeight = new SumLatticeDefault (this.crf, input, null, incrementor).getTotalWeight();
if (Double.isInfinite (unlabeledWeight)) {
++numInfUnlabeledWeight;
}
// weight is log(conditional probability correct label sequence)
weight = labeledWeight - unlabeledWeight;
if (Double.isInfinite(weight)) {
++numInfWeight;
} else {
// Weights are log probabilities, and we want to return a log probability
value += weight * instanceWeight;
}
}
batchExpectations.assertNotNaNOrInfinite();
if (numInfLabeledWeight > 0 || numInfUnlabeledWeight > 0 || numInfWeight > 0) {
logger.warning("Batch: " + batchIndex + ", Number of instances with:\n" +
"\t -infinite labeled weight: " + numInfLabeledWeight + "\n" +
"\t -infinite unlabeled weight: " + numInfUnlabeledWeight + "\n" +
"\t -infinite weight: " + numInfWeight);
}
return value;
}
/**
* Returns the log probability of a batch of training sequence labels; if this is the
* last batch, the prior over parameters is incorporated as well.
*/
public double getBatchValue(int batchIndex, int[] batchAssignments) {
assert(batchIndex < this.numBatches) : "Incorrect batch index: " + batchIndex + ", range(0, " +
this.numBatches + ")";
assert(batchAssignments.length == 2 && batchAssignments[0] <= batchAssignments[1])
: "Invalid batch assignments: " + Arrays.toString(batchAssignments);
// Get the value of all the true labels for current batch, also filling in expectations
double value = getExpectationValue(batchIndex, batchAssignments);
if (batchIndex == numBatches-1) {
if (usingHyperbolicPrior) // Hyperbolic prior
value += crf.parameters.hyberbolicPrior(hyperbolicPriorSlope, hyperbolicPriorSharpness);
else // Gaussian prior
value += crf.parameters.gaussianPrior(gaussianPriorVariance);
}
assert(!(Double.isNaN(value) || Double.isInfinite(value)))
: "Label likelihood is NaN/Infinite, batchIndex: " + batchIndex + ", batchAssignments: " + Arrays.toString(batchAssignments);
// update cache
cachedValue[batchIndex] = value;
return value;
}
public void getBatchValueGradient(double[] buffer, int batchIndex, int[] batchAssignments) {
assert(batchIndex < this.numBatches) : "Incorrect batch index: " + batchIndex + ", range(0, " +
this.numBatches + ")";
assert(batchAssignments.length == 2 && batchAssignments[0] <= batchAssignments[1])
: "Invalid batch assignments: " + Arrays.toString(batchAssignments);
CRF.Factors batchExpectations = expectations.get(batchIndex);
if (batchIndex == numBatches-1) {
// crf parameters' check has to be done only once, infinite values are allowed
crf.parameters.assertNotNaN();
// factor the constraints and the prior into the expectations of last batch
// Gradient = (constraints - expectations + prior) = -(expectations - constraints - prior)
// The minus sign is factored in combineGradients method after all gradients are computed
batchExpectations.plusEquals(constraints, -1.0);
if (usingHyperbolicPrior)
batchExpectations.plusEqualsHyperbolicPriorGradient(crf.parameters, -hyperbolicPriorSlope, hyperbolicPriorSharpness);
else
batchExpectations.plusEqualsGaussianPriorGradient(crf.parameters, -gaussianPriorVariance);
batchExpectations.assertNotNaNOrInfinite();
}
double[] gradient = cachedGradient.get(batchIndex);
// set the cached gradient
batchExpectations.getParameters(gradient);
System.arraycopy(gradient, 0, buffer, 0, gradient.length);
}
/**
* Adds gradients from all batches. <p>
* <b>Note:</b> assumes buffer is already initialized.
*/
public void combineGradients(Collection<double[]> batchGradients, double[] buffer) {
assert(buffer.length == crf.parameters.getNumFactors())
: "Incorrect buffer length: " + buffer.length + ", expected: " + crf.parameters.getNumFactors();
Arrays.fill(buffer, 0);
for (double[] gradient : batchGradients) {
MatrixOps.plusEquals(buffer, gradient);
}
// -(...) from getBatchValueGradient
MatrixOps.timesEquals(buffer, -1.0);
}
public int getNumBatches() { return numBatches; }
public void setUseHyperbolicPrior (boolean f) { usingHyperbolicPrior = f; }
public void setHyperbolicPriorSlope (double p) { hyperbolicPriorSlope = p; }
public void setHyperbolicPriorSharpness (double p) { hyperbolicPriorSharpness = p; }
public double getUseHyperbolicPriorSlope () { return hyperbolicPriorSlope; }
public double getUseHyperbolicPriorSharpness () { return hyperbolicPriorSharpness; }
public void setGaussianPriorVariance (double p) { gaussianPriorVariance = p; }
public double getGaussianPriorVariance () { return gaussianPriorVariance; }
public int getNumParameters () {return crf.parameters.getNumFactors();}
public void getParameters (double[] buffer) {
crf.parameters.getParameters(buffer);
}
public double getParameter (int index) {
return crf.parameters.getParameter(index);
}
public void setParameters (double [] buff) {
crf.parameters.setParameters(buff);
crf.weightsValueChanged();
}
public void setParameter (int index, double value) {
crf.parameters.setParameter(index, value);
crf.weightsValueChanged();
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(trainingSet);
out.writeObject(crf);
out.writeInt(numBatches);
out.writeObject(cachedValue);
for (double[] gradient : cachedGradient)
out.writeObject(gradient);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt ();
trainingSet = (InstanceList) in.readObject();
crf = (CRF)in.readObject();
numBatches = in.readInt();
cachedValue = (double[]) in.readObject();
cachedGradient = new ArrayList<double[]>(numBatches);
for (int i = 0; i < numBatches; ++i)
cachedGradient.set(i, (double[]) in.readObject());
}
public static class Factory {
public Optimizable.ByCombiningBatchGradient newCRFOptimizable (CRF crf, InstanceList trainingData, int numBatches) {
return new CRFOptimizableByBatchLabelLikelihood (crf, trainingData, numBatches);
}
}
}
| 11,283 | 36.993266 | 126 | java |
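A sketch of how the batch interface above is meant to be exercised, assuming a CRF and an InstanceList already exist; the variable names, batch count, and prior variance are placeholders. Each batch is addressed by a {start, end} pair of instance indices, and the prior is folded into the value of the last batch, as noted in getBatchValue:
import cc.mallet.fst.CRF;
import cc.mallet.fst.CRFOptimizableByBatchLabelLikelihood;
import cc.mallet.types.InstanceList;
public class BatchValueSketch {
    public static double totalValue(CRF crf, InstanceList training, int numBatches) {
        CRFOptimizableByBatchLabelLikelihood opt =
                new CRFOptimizableByBatchLabelLikelihood(crf, training, numBatches);
        opt.setGaussianPriorVariance(10.0);
        double value = 0;
        int batchSize = training.size() / numBatches;
        for (int b = 0; b < numBatches; b++) {
            int start = b * batchSize;
            int end = (b == numBatches - 1) ? training.size() : start + batchSize;
            // {start, end} is the batchAssignments pair consumed by getBatchValue.
            value += opt.getBatchValue(b, new int[] {start, end});
        }
        return value;
    }
}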
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SumLatticeFactory.java |
package cc.mallet.fst;
import java.io.Serializable;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.Sequence;
/**
* Provides factory methods to create an inference engine (sum lattice) for training a transducer.
*/
public abstract class SumLatticeFactory implements Serializable {
public SumLattice newSumLattice (Transducer trans, Sequence input)
{
return newSumLattice (trans, input, null, (Transducer.Incrementor)null, false, null);
}
public SumLattice newSumLattice (Transducer trans, Sequence input, Transducer.Incrementor incrementor)
{
return newSumLattice (trans, input, null, incrementor, false, null);
}
public SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output)
{
return newSumLattice (trans, input, output, (Transducer.Incrementor)null, false, null);
}
public SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor)
{
return newSumLattice (trans, input, output, incrementor, false, null);
}
public SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor, LabelAlphabet outputAlphabet)
{
return newSumLattice (trans, input, output, incrementor, false, outputAlphabet);
}
public SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor, boolean saveXis)
{
return newSumLattice (trans, input, output, incrementor, saveXis, null);
}
/**
* Returns a SumLattice object to run forward-backward.
*
* @param trans Transducer model
* @param input Input sequence
* @param output If output is null then the forward-backward is not constrained to match the output
* @param incrementor If null then do not update the weights
* @param saveXis If true then save the transition weights as well
* @param outputAlphabet If outputAlphabet is non-null, this will create a LabelVector for each
* position in the output sequence indicating the probability distribution
* over possible outputs at that time index.
*/
public abstract SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor, boolean saveXis, LabelAlphabet outputAlphabet);
}
| 2,267 | 37.440678 | 150 | java |
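A minimal sketch of using the factory, assuming an existing Transducer (e.g. a trained CRF) and an input Sequence, both placeholders; SumLatticeDefault.Factory is the concrete factory defined in SumLatticeDefault below:
import cc.mallet.fst.SumLattice;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.fst.SumLatticeFactory;
import cc.mallet.fst.Transducer;
import cc.mallet.types.Sequence;
public class LatticeFactorySketch {
    public static double logPartition(Transducer model, Sequence input) {
        SumLatticeFactory factory = new SumLatticeDefault.Factory();
        // Unconstrained forward-backward: output is null, weights are not updated.
        SumLattice lattice = factory.newSumLattice(model, input);
        return lattice.getTotalWeight();
    }
}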
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/MaxLattice.java |
package cc.mallet.fst;
import java.util.List;
import cc.mallet.types.Sequence;
import cc.mallet.fst.Transducer.State;
/** The interface to classes implementing the Viterbi algorithm,
* finding the best sequence of states for a given input sequence. */
public interface MaxLattice {
public double getDelta (int inputPosition, int stateIndex);
public Sequence<Object> bestOutputSequence ();
public List<Sequence<Object>> bestOutputSequences (int n);
public Sequence<State> bestStateSequence ();
public List<Sequence<State>> bestStateSequences (int n);
public Transducer getTransducer ();
public double elementwiseAccuracy (Sequence referenceOutput);
}
| 663 | 32.2 | 69 | java |
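A short decoding sketch against this interface, assuming a trained Transducer and an input Sequence (both placeholders). MaxLatticeDefault is the implementation used by SimpleTagger.apply above, and 100000 mirrors that class's default n-best cache size:
import java.util.List;
import cc.mallet.fst.MaxLattice;
import cc.mallet.fst.MaxLatticeDefault;
import cc.mallet.fst.Transducer;
import cc.mallet.types.Sequence;
public class ViterbiSketch {
    public static void decode(Transducer model, Sequence input) {
        MaxLattice lattice = new MaxLatticeDefault(model, input, null, 100000);
        Sequence<Object> best = lattice.bestOutputSequence();          // single best labeling
        List<Sequence<Object>> nBest = lattice.bestOutputSequences(3); // top-3 labelings
        System.out.println(best);
        System.out.println(nBest.size());
    }
}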
twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SumLatticeDefault.java |
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.logging.Level;
import java.util.logging.Logger;
import cc.mallet.fst.Transducer.State;
import cc.mallet.fst.Transducer.TransitionIterator;
import cc.mallet.types.DenseVector;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelVector;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
/** Default, full dynamic programming implementation of the Forward-Backward "Sum-(Product)-Lattice" algorithm */
public class SumLatticeDefault implements SumLattice
{
private static Logger logger = MalletLogger.getLogger(SumLatticeDefault.class.getName());
//{logger.setLevel(Level.FINE);}
// Static variables acting as default values for the correspondingly-named instance variables.
// Can be overridden sort of like named parameters, like this:
// SumLattice lattice = new SumLatticeDefault(transducer, input) {{ saveXis=true; }}
protected static boolean saveXis = false;
// "ip" == "input position", "op" == "output position", "i" == "state index"
Transducer t;
double totalWeight;
Sequence input, output;
LatticeNode[][] nodes; // indexed by ip,i
int latticeLength;
double[][] gammas; // indexed by ip,i
double[][][] xis; // indexed by ip,i,j; saved only if saveXis is true;
LabelVector labelings[]; // indexed by op, created only if "outputAlphabet" is non-null in constructor
// Ensure that instances cannot easily be created by a zero arg constructor.
protected SumLatticeDefault() { }
protected LatticeNode getLatticeNode (int ip, int stateIndex)
{
if (nodes[ip][stateIndex] == null)
nodes[ip][stateIndex] = new LatticeNode (ip, t.getState (stateIndex));
return nodes[ip][stateIndex];
}
public SumLatticeDefault (Transducer trans, Sequence input)
{
this (trans, input, null, (Transducer.Incrementor)null, saveXis, null);
}
public SumLatticeDefault (Transducer trans, Sequence input, boolean saveXis)
{
this (trans, input, null, (Transducer.Incrementor)null, saveXis, null);
}
public SumLatticeDefault (Transducer trans, Sequence input, Transducer.Incrementor incrementor)
{
this (trans, input, null, incrementor, saveXis, null);
}
public SumLatticeDefault (Transducer trans, Sequence input, Sequence output)
{
this (trans, input, output, (Transducer.Incrementor)null, saveXis, null);
}
// You may pass null for output, meaning that the lattice
// is not constrained to match the output
public SumLatticeDefault (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor)
{
this (trans, input, output, incrementor, saveXis, null);
}
public SumLatticeDefault (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor, LabelAlphabet outputAlphabet)
{
this (trans, input, output, incrementor, saveXis, outputAlphabet);
}
// You may pass null for output, meaning that the lattice
// is not constrained to match the output
public SumLatticeDefault (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor, boolean saveXis)
{
this (trans, input, output, incrementor, saveXis, null);
}
// If outputAlphabet is non-null, this will create a LabelVector
// for each position in the output sequence indicating the
// probability distribution over possible outputs at that time
// index
public SumLatticeDefault (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor, boolean saveXis, LabelAlphabet outputAlphabet)
{
assert (output == null || input.size() == output.size());
if (false && logger.isLoggable (Level.FINE)) {
logger.fine ("Starting Lattice");
logger.fine ("Input: ");
for (int ip = 0; ip < input.size(); ip++)
logger.fine (" " + input.get(ip));
logger.fine ("\nOutput: ");
if (output == null)
logger.fine ("null");
else
for (int op = 0; op < output.size(); op++)
logger.fine (" " + output.get(op));
logger.fine ("\n");
}
// Initialize some structures
this.t = trans;
this.input = input;
this.output = output;
// xxx Not very efficient when the lattice is actually sparse,
// especially when the number of states is large and the
// sequence is long.
latticeLength = input.size()+1;
int numStates = t.numStates();
nodes = new LatticeNode[latticeLength][numStates];
// xxx Yipes, this could get big; something sparse might be better?
gammas = new double[latticeLength][numStates];
if (saveXis) xis = new double[latticeLength][numStates][numStates];
double outputCounts[][] = null;
if (outputAlphabet != null)
outputCounts = new double[latticeLength][outputAlphabet.size()];
for (int i = 0; i < numStates; i++) {
for (int ip = 0; ip < latticeLength; ip++)
gammas[ip][i] = Transducer.IMPOSSIBLE_WEIGHT;
if (saveXis)
for (int j = 0; j < numStates; j++)
for (int ip = 0; ip < latticeLength; ip++)
xis[ip][i][j] = Transducer.IMPOSSIBLE_WEIGHT;
}
// Forward pass
logger.fine ("Starting Forward pass");
boolean atLeastOneInitialState = false;
for (int i = 0; i < numStates; i++) {
double initialWeight = t.getState(i).getInitialWeight();
//System.out.println ("Forward pass initialCost = "+initialCost);
if (initialWeight > Transducer.IMPOSSIBLE_WEIGHT) {
getLatticeNode(0, i).alpha = initialWeight;
//System.out.println ("nodes[0][i].alpha="+nodes[0][i].alpha);
atLeastOneInitialState = true;
}
}
if (atLeastOneInitialState == false)
logger.warning ("There are no starting states!");
for (int ip = 0; ip < latticeLength-1; ip++)
for (int i = 0; i < numStates; i++) {
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
// xxx if we end up doing this a lot,
// we could save a list of the non-null ones
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
if (logger.isLoggable (Level.FINE))
logger.fine (" Starting Forward transition iteration from state "
+ s.getName() + " on input " + input.get(ip).toString()
+ " and output "
+ (output==null ? "(null)" : output.get(ip).toString()));
while (iter.hasNext()) {
State destination = iter.nextState();
if (logger.isLoggable (Level.FINE))
logger.fine ("Forward Lattice[inputPos="+ip+"][source="+s.getName()+"][dest="+destination.getName()+"]");
LatticeNode destinationNode = getLatticeNode (ip+1, destination.getIndex());
destinationNode.output = iter.getOutput();
double transitionWeight = iter.getWeight();
if (logger.isLoggable (Level.FINE))
logger.fine ("BEFORE update: destinationNode.alpha="+destinationNode.alpha);
destinationNode.alpha = Transducer.sumLogProb (destinationNode.alpha, nodes[ip][i].alpha + transitionWeight);
if (logger.isLoggable (Level.FINE))
logger.fine ("transitionWeight="+transitionWeight+" nodes["+ip+"]["+i+"].alpha="+nodes[ip][i].alpha
+" destinationNode.alpha="+destinationNode.alpha);
//System.out.println ("destinationNode.alpha <- "+destinationNode.alpha);
}
}
if (logger.isLoggable (Level.FINE)) {
logger.fine("Forward Lattice:");
for (int ip = 0; ip < latticeLength; ip++) {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < numStates; i++)
sb.append (" "+(nodes[ip][i] == null ? "<null>" : nodes[ip][i].alpha));
logger.fine(sb.toString());
}
}
// Calculate total weight of Lattice. This is the normalizer
totalWeight = Transducer.IMPOSSIBLE_WEIGHT;
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
//System.out.println ("Ending alpha, state["+i+"] = "+nodes[latticeLength-1][i].alpha);
//System.out.println ("Ending beta, state["+i+"] = "+t.getState(i).getFinalWeight());
totalWeight = Transducer.sumLogProb (totalWeight, (nodes[latticeLength-1][i].alpha + t.getState(i).getFinalWeight()));
}
logger.fine ("totalWeight="+totalWeight);
// totalWeight is now an "unnormalized weight" of the entire Lattice
// If the sequence has -infinite weight, just return.
// Usefully this avoids calling any incrementX methods.
// It also relies on the fact that the gammas[][] and .alpha (but not .beta) values
// are already initialized to values that reflect -infinite weight
// TODO Is it important to fill in the betas before we return?
if (totalWeight == Transducer.IMPOSSIBLE_WEIGHT)
return;
// Backward pass
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
State s = t.getState(i);
nodes[latticeLength-1][i].beta = s.getFinalWeight();
gammas[latticeLength-1][i] = nodes[latticeLength-1][i].alpha + nodes[latticeLength-1][i].beta - totalWeight;
if (incrementor != null) {
double p = Math.exp(gammas[latticeLength-1][i]);
// gsc: reducing from 1e-10 to 1e-6
// gsc: removing the isNaN check, range check will catch the NaN error as well
// assert (p >= 0.0 && p <= 1.0+1e-10 && !Double.isNaN(p)) : "p="+p+" gamma="+gammas[latticeLength-1][i];
assert (p >= 0.0 && p <= 1.0+1e-6) : "p="+p+", gamma="+gammas[latticeLength-1][i];
incrementor.incrementFinalState (s, p);
}
}
for (int ip = latticeLength-2; ip >= 0; ip--) {
for (int i = 0; i < numStates; i++) {
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
// Note that skipping here based on alpha means that beta values won't
// be correct, but since alpha is infinite anyway, it shouldn't matter.
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
while (iter.hasNext()) {
State destination = iter.nextState();
if (logger.isLoggable (Level.FINE))
logger.fine ("Backward Lattice[inputPos="+ip+"][source="+s.getName()+"][dest="+destination.getName()+"]");
int j = destination.getIndex();
LatticeNode destinationNode = nodes[ip+1][j];
if (destinationNode != null) {
double transitionWeight = iter.getWeight();
assert (!Double.isNaN(transitionWeight));
double oldBeta = nodes[ip][i].beta;
assert (!Double.isNaN(nodes[ip][i].beta));
nodes[ip][i].beta = Transducer.sumLogProb (nodes[ip][i].beta, destinationNode.beta + transitionWeight);
assert (!Double.isNaN(nodes[ip][i].beta))
: "dest.beta="+destinationNode.beta+" trans="+transitionWeight+" sum="+(destinationNode.beta+transitionWeight) + " oldBeta="+oldBeta;
double xi = nodes[ip][i].alpha + transitionWeight + nodes[ip+1][j].beta - totalWeight;
if (saveXis) xis[ip][i][j] = xi;
assert (!Double.isNaN(nodes[ip][i].alpha));
assert (!Double.isNaN(transitionWeight));
assert (!Double.isNaN(nodes[ip+1][j].beta));
assert (!Double.isNaN(totalWeight));
if (incrementor != null || outputAlphabet != null) {
double p = Math.exp(xi);
// gsc: reducing from 1e-10 to 1e-6
// gsc: removing the isNaN check, range check will catch the NaN error as well
// assert (p >= 0.0 && p <= 1.0+1e-10 && !Double.isNaN(p)) : "xis["+ip+"]["+i+"]["+j+"]="+xi;
assert (p >= 0.0 && p <= 1.0+1e-6) : "p="+p+", xis["+ip+"]["+i+"]["+j+"]="+xi;
if (incrementor != null)
incrementor.incrementTransition(iter, p);
if (outputAlphabet != null) {
int outputIndex = outputAlphabet.lookupIndex (iter.getOutput(), false);
assert (outputIndex >= 0);
// xxx This assumes that "ip" == "op"!
outputCounts[ip][outputIndex] += p;
//System.out.println ("CRF Lattice outputCounts["+ip+"]["+outputIndex+"]+="+p);
}
}
}
}
gammas[ip][i] = nodes[ip][i].alpha + nodes[ip][i].beta - totalWeight;
}
}
if (incrementor != null)
for (int i = 0; i < numStates; i++) {
double p = Math.exp(gammas[0][i]);
// gsc: reducing from 1e-10 to 1e-6
// gsc: removing the isNaN check, range check will catch the NaN error as well
// assert (p >= 0.0 && p <= 1.0+1e-10 && !Double.isNaN(p)) : "p="+p;
assert (p >= 0.0 && p <= 1.0+1e-6) : "p="+p;
incrementor.incrementInitialState(t.getState(i), p);
}
if (outputAlphabet != null) {
labelings = new LabelVector[latticeLength];
for (int ip = latticeLength-2; ip >= 0; ip--) {
assert (Math.abs(1.0-MatrixOps.sum (outputCounts[ip])) < 0.000001);
labelings[ip] = new LabelVector (outputAlphabet, outputCounts[ip]);
}
}
if (logger.isLoggable (Level.FINE)) {
logger.fine("Lattice:");
for (int ip = 0; ip < latticeLength; ip++) {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < numStates; i++)
sb.append (" "+gammas[ip][i]);
logger.fine(sb.toString());
}
}
}
public double[][][] getXis(){
return xis;
}
public double[][] getGammas(){
return gammas;
}
public double getTotalWeight () {
assert (!Double.isNaN(totalWeight));
return totalWeight; }
public double getGammaWeight(int inputPosition, State s) {
return gammas[inputPosition][s.getIndex()]; }
public double getGammaWeight(int inputPosition, int stateIndex) {
return gammas[inputPosition][stateIndex]; }
public double getGammaProbability (int inputPosition, State s) {
return Math.exp (gammas[inputPosition][s.getIndex()]); }
public double getGammaProbability (int inputPosition, int stateIndex) {
return Math.exp (gammas[inputPosition][stateIndex]); }
public double getXiProbability (int ip, State s1, State s2) {
if (xis == null)
throw new IllegalStateException ("xis were not saved.");
int i = s1.getIndex ();
int j = s2.getIndex ();
return Math.exp (xis[ip][i][j]);
}
public double getXiWeight(int ip, State s1, State s2)
{
if (xis == null)
throw new IllegalStateException ("xis were not saved.");
int i = s1.getIndex ();
int j = s2.getIndex ();
return xis[ip][i][j];
}
public int length () { return latticeLength; }
public double getAlpha (int ip, State s) {
LatticeNode node = getLatticeNode (ip, s.getIndex ());
return node.alpha;
}
public double getBeta (int ip, State s) {
LatticeNode node = getLatticeNode (ip, s.getIndex ());
return node.beta;
}
public LabelVector getLabelingAtPosition (int outputPosition) {
if (labelings != null)
return labelings[outputPosition];
return null;
}
public Transducer getTransducer ()
{
return t;
}
// A container for some information about a particular input position and state
protected class LatticeNode
{
int inputPosition;
// outputPosition not really needed until we deal with asymmetric epsilon.
State state;
Object output;
double alpha = Transducer.IMPOSSIBLE_WEIGHT;
double beta = Transducer.IMPOSSIBLE_WEIGHT;
LatticeNode (int inputPosition, State state) {
this.inputPosition = inputPosition;
this.state = state;
assert (this.alpha == Transducer.IMPOSSIBLE_WEIGHT); // xxx Remove this check
}
}
public static class Factory extends SumLatticeFactory implements Serializable
{
public SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor, boolean saveXis, LabelAlphabet outputAlphabet)
{
return new SumLatticeDefault (trans, input, output, incrementor, saveXis, outputAlphabet);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt();
}
}
}
| 15,839 | 37.26087 | 160 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFWriter.java |
package cc.mallet.fst;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.logging.Logger;
import cc.mallet.types.InstanceList;
import cc.mallet.util.MalletLogger;
/**
* Saves a trained model to the specified filename. <p>
*
* Can be used to save the model every few iterations, e.g. to save every 5 iterations: <p>
* <code>
* new CRFWriter(filePrefix) { public boolean precondition (TransducerTrainer tt) { return tt.getIteration() % 5 == 0; } };
* </code> <p>
*
* The trained model is saved in the format: filenamePrefix.<iteration>.bin.
*
* @author Gaurav Chandalia
*/
public class CRFWriter extends TransducerEvaluator {
private static Logger logger = MalletLogger.getLogger(CRFWriter.class.getName());
String filenamePrefix;
public CRFWriter (String filenamePrefix) {
super (new InstanceList[]{}, new String[]{});
this.filenamePrefix = filenamePrefix;
}
protected void preamble (TransducerTrainer tt) {
int iteration = tt.getIteration();
String filename = filenamePrefix + "." + iteration + ".bin";
try {
ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(filename));
oos.writeObject(tt.getTransducer());
logger.info("Trained model saved: " + filename + ", iter: " + iteration);
} catch (FileNotFoundException fnfe) {
logger.warning("Could not save model: " + filename + ", iter: " + iteration);
fnfe.printStackTrace();
} catch (IOException ioe) {
logger.warning("Could not save model: " + filename + ", iter: " + iteration);
ioe.printStackTrace();
}
}
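	// Editor's note: hedged usage sketch, not part of the original source.  It shows the
	// pattern suggested in the class javadoc: checkpoint the model every 5 iterations.
	// The call to addEvaluator() assumes the trainer exposes the usual
	// TransducerTrainer.addEvaluator(TransducerEvaluator) method.
	private static void exampleUsage (TransducerTrainer trainer)
	{
		CRFWriter writer = new CRFWriter ("savedModel") {
			public boolean precondition (TransducerTrainer tt) {
				// only write filenamePrefix.<iteration>.bin on every 5th iteration
				return tt.getIteration() % 5 == 0;
			}
		};
		trainer.addEvaluator (writer);
	}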
@Override
public void evaluateInstanceList(TransducerTrainer transducer, InstanceList instances, String description) { }
}
| 1,759 | 31.592593 | 121 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SumLattice.java |
package cc.mallet.fst;
import cc.mallet.fst.Transducer.State;
import cc.mallet.types.LabelVector;
/**
* Interface to perform forward-backward during training of a transducer.
*/
public interface SumLattice {
public double[][][] getXis();
public double[][] getGammas();
public double getTotalWeight ();
public double getGammaWeight (int inputPosition, State s);
public double getGammaProbability (int inputPosition, State s);
public double getXiProbability (int ip, State s1, State s2);
public double getXiWeight (int ip, State s1, State s2);
public int length ();
public double getAlpha (int ip, State s);
public double getBeta (int ip, State s);
public LabelVector getLabelingAtPosition (int outputPosition);
public Transducer getTransducer ();
}
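// Editor's note: hedged usage sketch, not part of the original source.  A trainer usually
// obtains a SumLattice through the transducer's SumLatticeFactory rather than constructing
// one directly; the local variable names below are illustrative only:
//
//   SumLattice lattice = crf.getSumLatticeFactory()
//       .newSumLattice(crf, input, output, null, false, null);
//   double logZ = lattice.getTotalWeight();                 // log normalizer of the lattice
//   double p = lattice.getGammaProbability(ip, state);      // P(state at position ip | input)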
| 764 | 32.26087 | 73 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CacheStaleIndicator.java |
package cc.mallet.fst;
/**
* Indicates when the value/gradient during training becomes stale. <p>
*
* See <tt>ThreadedOptimizable</tt>.
*
* @author Gaurav Chandalia
*/
public interface CacheStaleIndicator {
public boolean isValueStale();
public boolean isGradientStale();
}
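// Editor's note: hedged sketch, not part of the original source.  The simplest possible
// implementation reports the cache as always stale, forcing the caller (e.g.
// ThreadedOptimizable) to recompute value and gradient on every request:
//
//   CacheStaleIndicator alwaysStale = new CacheStaleIndicator () {
//     public boolean isValueStale ()    { return true; }
//     public boolean isGradientStale () { return true; }
//   };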
| 284 | 18 | 71 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/MEMM.java |
/* Copyright (C) 2004 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
MEMM might have been simply implemented with a MaxEnt classifier object at each state,
but I chose not to do that so that tied features could be used in different parts of the
FSM, just as in CRF. So, the expectation-gathering is done (in MEMM-style) without
forward-backward, just with local (normalized) distributions over destination states
from source states, but there is a global MaximizableMEMM, and all the MEMM's parameters
are set together as part of a single optimization.
*/
package cc.mallet.fst;
import java.io.Serializable;
import java.text.DecimalFormat;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.pipe.Pipe;
/**
* A Maximum Entropy Markov Model.
*/
@SuppressWarnings("serial")
public class MEMM extends CRF implements Serializable
{
// private static Logger logger = MalletLogger.getLogger(MEMM.class.getName());
public MEMM (Pipe inputPipe, Pipe outputPipe)
{
super (inputPipe, outputPipe);
}
public MEMM (Alphabet inputAlphabet, Alphabet outputAlphabet)
{
super (inputAlphabet, outputAlphabet);
}
public MEMM (CRF crf)
{
super (crf);
}
protected CRF.State newState (String name, int index,
double initialWeight, double finalWeight,
String[] destinationNames,
String[] labelNames,
String[][] weightNames,
CRF crf)
{
return new State (name, index, initialWeight, finalWeight,
destinationNames, labelNames, weightNames, crf);
}
public static class State extends CRF.State implements Serializable
{
InstanceList trainingSet;
protected State (String name, int index,
double initialCost, double finalCost,
String[] destinationNames,
String[] labelNames,
String[][] weightNames,
CRF crf)
{
super (name, index, initialCost, finalCost, destinationNames, labelNames, weightNames, crf);
}
// Necessary because the CRF4 implementation will return CRF4.TransitionIterator
public Transducer.TransitionIterator transitionIterator (
Sequence inputSequence, int inputPosition,
Sequence outputSequence, int outputPosition)
{
if (inputPosition < 0 || outputPosition < 0)
throw new UnsupportedOperationException ("Epsilon transitions not implemented.");
if (inputSequence == null)
throw new UnsupportedOperationException ("CRFs are not generative models; must have an input sequence.");
return new TransitionIterator (
this, (FeatureVectorSequence)inputSequence, inputPosition,
(outputSequence == null ? null : (String)outputSequence.get(outputPosition)), crf);
}
}
protected static class TransitionIterator extends CRF.TransitionIterator implements Serializable
{
private double sum;
public TransitionIterator (State source, FeatureVectorSequence inputSeq,
int inputPosition, String output, CRF memm)
{
super (source, inputSeq, inputPosition, output, memm);
normalizeCosts ();
}
public TransitionIterator (State source, FeatureVector fv, String output, CRF memm)
{
super (source, fv, output, memm);
normalizeCosts ();
}
private void normalizeCosts ()
{
// Normalize the next-state costs, so they are -(log-probabilities)
// This is the heart of the difference between the locally-normalized MEMM
// and the globally-normalized CRF
sum = Transducer.IMPOSSIBLE_WEIGHT;
for (int i = 0; i < weights.length; i++)
sum = sumLogProb (sum, weights[i]);
assert (!Double.isNaN (sum));
if (!Double.isInfinite (sum)) {
for (int i = 0; i < weights.length; i++)
weights[i] -= sum;
}
}
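		// Editor's note (added illustration, not part of the original source): after
		// normalizeCosts() the destination weights are local log-probabilities.  For
		// example, raw transition weights {0.0, 1.0, 2.0} give
		// logZ = log(e^0 + e^1 + e^2) ~= 2.41, so the stored weights become roughly
		// {-2.41, -1.41, -0.41}, i.e. next-state probabilities {0.09, 0.24, 0.67}.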
public String describeTransition (double cutoff)
{
DecimalFormat f = new DecimalFormat ("0.###");
return super.describeTransition (cutoff) + "Log Z = "+f.format(sum)+"\n";
}
}
}
| 4,375 | 30.941606 | 109 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFTrainerByStochasticGradient.java |
package cc.mallet.fst;
import java.util.ArrayList;
import java.util.Collections;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.fst.TransducerTrainer.ByInstanceIncrements;
/**
* Trains CRF by stochastic gradient. Most effective on large training sets.
*
* @author kedarb
*/
public class CRFTrainerByStochasticGradient extends ByInstanceIncrements {
protected CRF crf;
// t is the decaying factor. lambda is some regularization depending on the
// training set size and the gaussian prior.
protected double learningRate, t, lambda;
protected int iterationCount = 0;
protected boolean converged = false;
protected CRF.Factors expectations, constraints;
public CRFTrainerByStochasticGradient(CRF crf, InstanceList trainingSample) {
this.crf = crf;
this.expectations = new CRF.Factors(crf);
this.constraints = new CRF.Factors(crf);
this.setLearningRateByLikelihood(trainingSample);
}
public CRFTrainerByStochasticGradient(CRF crf, double learningRate) {
this.crf = crf;
this.learningRate = learningRate;
this.expectations = new CRF.Factors(crf);
this.constraints = new CRF.Factors(crf);
}
public int getIteration() {
return iterationCount;
}
public Transducer getTransducer() {
return crf;
}
public boolean isFinishedTraining() {
return converged;
}
// Best way to choose learning rate is to run training on a sample and set
// it to the rate that produces maximum increase in likelihood or accuracy.
// Then, to be conservative just halve the learning rate.
// In general, eta = 1/(lambda*t) where
// lambda=priorVariance*numTrainingInstances
// After an initial eta_0 is set, t_0 = 1/(lambda*eta_0)
// After each training step eta = 1/(lambda*(t+t_0)), t=0,1,2,..,Infinity
/** Automatically sets the learning rate to the value that produced the largest likelihood gain on the sample, then halves it to be conservative. */
public void setLearningRateByLikelihood(InstanceList trainingSample) {
int numIterations = 5; // was 10 -akm 1/25/08
double bestLearningRate = Double.NEGATIVE_INFINITY;
double bestLikelihoodChange = Double.NEGATIVE_INFINITY;
double currLearningRate = 5e-11;
while (currLearningRate < 1) {
currLearningRate *= 2;
crf.parameters.zero();
double beforeLikelihood = computeLikelihood(trainingSample);
double likelihoodChange = trainSample(trainingSample,
numIterations, currLearningRate)
- beforeLikelihood;
System.out.println("likelihood change = " + likelihoodChange
+ " for learningrate=" + currLearningRate);
if (likelihoodChange > bestLikelihoodChange) {
bestLikelihoodChange = likelihoodChange;
bestLearningRate = currLearningRate;
}
}
// reset the parameters
crf.parameters.zero();
// conservative estimate for learning rate
bestLearningRate /= 2;
System.out.println("Setting learning rate to " + bestLearningRate);
setLearningRate(bestLearningRate);
}
private double trainSample(InstanceList trainingSample, int numIterations,
double rate) {
double lambda = trainingSample.size();
double t = 1 / (lambda * rate);
double loglik = Double.NEGATIVE_INFINITY;
for (int i = 0; i < numIterations; i++) {
loglik = 0.0;
for (int j = 0; j < trainingSample.size(); j++) {
rate = 1 / (lambda * t);
loglik += trainIncrementalLikelihood(trainingSample.get(j),
rate);
t += 1.0;
}
}
return loglik;
}
private double computeLikelihood(InstanceList trainingSample) {
double loglik = 0.0;
for (int i = 0; i < trainingSample.size(); i++) {
Instance trainingInstance = trainingSample.get(i);
FeatureVectorSequence fvs = (FeatureVectorSequence) trainingInstance
.getData();
Sequence labelSequence = (Sequence) trainingInstance.getTarget();
loglik += new SumLatticeDefault(crf, fvs, labelSequence, null)
.getTotalWeight();
loglik -= new SumLatticeDefault(crf, fvs, null, null)
.getTotalWeight();
}
constraints.zero();
expectations.zero();
return loglik;
}
public void setLearningRate(double r) {
this.learningRate = r;
}
public double getLearningRate() {
return this.learningRate;
}
public boolean train(InstanceList trainingSet, int numIterations) {
return train(trainingSet, numIterations, 1);
}
public boolean train(InstanceList trainingSet, int numIterations,
int numIterationsBetweenEvaluation) {
assert (expectations.structureMatches(crf.parameters));
assert (constraints.structureMatches(crf.parameters));
lambda = 1.0 / trainingSet.size();
t = 1.0 / (lambda * learningRate);
converged = false;
ArrayList<Integer> trainingIndices = new ArrayList<Integer>();
for (int i = 0; i < trainingSet.size(); i++)
trainingIndices.add(i);
double oldLoglik = Double.NEGATIVE_INFINITY;
while (numIterations-- > 0) {
iterationCount++;
// shuffle the indices
Collections.shuffle(trainingIndices);
double loglik = 0.0;
for (int i = 0; i < trainingSet.size(); i++) {
learningRate = 1.0 / (lambda * t);
loglik += trainIncrementalLikelihood(trainingSet
.get(trainingIndices.get(i)));
t += 1.0;
}
System.out.println("loglikelihood[" + numIterations + "] = "
+ loglik);
if (Math.abs(loglik - oldLoglik) < 1e-3) {
converged = true;
break;
}
oldLoglik = loglik;
Runtime.getRuntime().gc();
if (iterationCount % numIterationsBetweenEvaluation == 0)
runEvaluators();
}
return converged;
}
// TODO Add some way to train by batches of instances, where the batch
// memberships are determined externally? Or provide some easy interface for
// creating batches.
public boolean trainIncremental(InstanceList trainingSet) {
this.train(trainingSet, 1);
return false;
}
public boolean trainIncremental(Instance trainingInstance) {
assert (expectations.structureMatches(crf.parameters));
trainIncrementalLikelihood(trainingInstance);
return false;
}
/**
* Adjust the parameters by default learning rate according to the gradient
* of this single Instance, and return the true label sequence likelihood.
*/
public double trainIncrementalLikelihood(Instance trainingInstance) {
return trainIncrementalLikelihood(trainingInstance, learningRate);
}
/**
* Adjust the parameters by learning rate according to the gradient of this
* single Instance, and return the true label sequence likelihood.
*/
public double trainIncrementalLikelihood(Instance trainingInstance,
double rate) {
double singleLoglik;
constraints.zero();
expectations.zero();
FeatureVectorSequence fvs = (FeatureVectorSequence) trainingInstance
.getData();
Sequence labelSequence = (Sequence) trainingInstance.getTarget();
singleLoglik = new SumLatticeDefault(crf, fvs, labelSequence,
constraints.new Incrementor()).getTotalWeight();
singleLoglik -= new SumLatticeDefault(crf, fvs, null,
expectations.new Incrementor()).getTotalWeight();
// Calculate parameter gradient given these instances: (constraints -
// expectations)
constraints.plusEquals(expectations, -1);
// Change the parameters a little by this difference, obeying
// weightsFrozen
crf.parameters.plusEquals(constraints, rate, true);
return singleLoglik;
}
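	// Editor's note: hedged usage sketch, not part of the original source.  "crf" and
	// "trainingData" are assumed to be an instantiated CRF (with its states already added)
	// and its training InstanceList; the learning rate and iteration count are illustrative.
	private static void exampleUsage (CRF crf, InstanceList trainingData)
	{
		CRFTrainerByStochasticGradient trainer =
			new CRFTrainerByStochasticGradient (crf, 0.0002);
		trainer.train (trainingData, 10); // up to 10 shuffled passes; stops early on convergence
	}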
}
| 7,237 | 29.669492 | 78 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/LabelDistributionEvaluator.java |
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.fst;
import java.util.logging.Logger;
import com.wcohen.secondstring.PrintfFormat;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
/**
* Prints predicted and true label distribution.
*
* Created: March 31st, 2009
*
* @author <A HREF="mailto:[email protected]">[email protected]</A>
*/
public class LabelDistributionEvaluator extends TransducerEvaluator {
private static final Logger logger = MalletLogger.getLogger (InstanceAccuracyEvaluator.class.getName());
public LabelDistributionEvaluator (InstanceList[] instanceLists, String[] descriptions) {
super (instanceLists, descriptions);
}
@Override
public void evaluateInstanceList(TransducerTrainer transducer,
InstanceList instances, String description) {
double[] predCounts = new double[instances.getTargetAlphabet().size()];
double[] trueCounts = new double[instances.getTargetAlphabet().size()];
int total = 0;
for (int i = 0; i < instances.size(); i++) {
Instance instance = instances.get(i);
Sequence trueOutput = (Sequence) instance.getTarget();
Sequence predOutput = (Sequence) transducer.getTransducer().transduce((Sequence)instance.getData());
for (int j = 0; j < predOutput.size(); j++) {
total++;
predCounts[instances.getTargetAlphabet().lookupIndex(predOutput.get(j))]++;
trueCounts[instances.getTargetAlphabet().lookupIndex(trueOutput.get(j))]++;
}
}
for (int li = 0; li < predCounts.length; li++) {
double ppred = predCounts[li] / total;
double ptrue = trueCounts[li] / total;
logger.info(description + " " + instances.getTargetAlphabet().lookupObject(li) + " predicted: " + round(ppred,4) + " - true: " + round(ptrue,4));
}
}
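	// Editor's note: hedged usage sketch, not part of the original source.  Compares the
	// predicted and true label distributions on a held-out set; NoopTransducerTrainer is
	// assumed to be the standard wrapper for an already-trained model.
	private static void exampleUsage (Transducer trainedModel, InstanceList testData)
	{
		LabelDistributionEvaluator evaluator = new LabelDistributionEvaluator (
				new InstanceList[] {testData}, new String[] {"test"});
		evaluator.evaluateInstanceList (new NoopTransducerTrainer (trainedModel), testData, "test");
	}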
private static String round(double val, int n) {
String format = "%." + n + "f";
return new PrintfFormat(format).sprintf(val);
}
}
| 2,413 | 36.138462 | 151 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SumLatticeBeam.java |
package cc.mallet.fst;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import cc.mallet.fst.Transducer.State;
import cc.mallet.fst.Transducer.TransitionIterator;
import cc.mallet.types.DenseVector;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelVector;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Sequence;
import cc.mallet.types.SequencePair;
import cc.mallet.util.MalletLogger;
//******************************************************************************
//CPAL - NEW "BEAM" Version of Forward Backward
//******************************************************************************
public class SumLatticeBeam implements SumLattice // CPAL - like Lattice but using max-product to get the viterbiPath
{
// CPAL - these worked well for nettalk
//private int beamWidth = 10;
//private double KLeps = .005;
boolean UseForwardBackwardBeam = false;
protected static int beamWidth = 3;
private double KLeps = 0;
private double Rmin = 0.1;
private double nstatesExpl[];
private int curIter = 0;
int tctIter = 0; // The number of times we have been called this iteration
private double curAvgNstatesExpl;
public int getBeamWidth ()
{
return beamWidth;
}
public void setBeamWidth (int beamWidth)
{
this.beamWidth = beamWidth;
}
public int getTctIter(){
return this.tctIter;
}
public void setCurIter (int curIter)
{
this.curIter = curIter;
this.tctIter = 0;
}
public void incIter ()
{
this.tctIter++;
}
public void setKLeps (double KLeps)
{
this.KLeps = KLeps;
}
public void setRmin (double Rmin) {
this.Rmin = Rmin;
}
public double[] getNstatesExpl()
{
return nstatesExpl;
}
public boolean getUseForwardBackwardBeam(){
return this.UseForwardBackwardBeam;
}
public void setUseForwardBackwardBeam (boolean state) {
this.UseForwardBackwardBeam = state;
}
private static Logger logger = MalletLogger.getLogger(SumLatticeBeam.class.getName());
// "ip" == "input position", "op" == "output position", "i" == "state index"
Transducer t;
double weight;
Sequence input, output;
LatticeNode[][] nodes; // indexed by ip,i
int latticeLength;
int curBeamWidth; // CPAL - can be adapted if maximizer is confused
// xxx Now that we are incrementing here directly, there isn't
// necessarily a need to save all these arrays...
// log(probability) of being in state "i" at input position "ip"
double[][] gammas; // indexed by ip,i
double[][][] xis; // indexed by ip,i,j; saved only if saveXis is true;
LabelVector labelings[]; // indexed by op, created only if "outputAlphabet" is non-null in constructor
private LatticeNode getLatticeNode (int ip, int stateIndex)
{
if (nodes[ip][stateIndex] == null)
nodes[ip][stateIndex] = new LatticeNode (ip, t.getState (stateIndex));
return nodes[ip][stateIndex];
}
// You may pass null for output, meaning that the lattice
// is not constrained to match the output
public SumLatticeBeam (Transducer t, Sequence input, Sequence output, Transducer.Incrementor incrementor)
{
this (t, input, output, incrementor, false, null);
}
// You may pass null for output, meaning that the lattice
// is not constrained to match the output
public SumLatticeBeam (Transducer t, Sequence input, Sequence output, Transducer.Incrementor incrementor, boolean saveXis)
{
this (t, input, output, incrementor, saveXis, null);
}
// If outputAlphabet is non-null, this will create a LabelVector
// for each position in the output sequence indicating the
// probability distribution over possible outputs at that time
// index
public SumLatticeBeam (Transducer t, Sequence input, Sequence output, Transducer.Incrementor incrementor, boolean saveXis, LabelAlphabet outputAlphabet)
{
this.t = t;
if (false && logger.isLoggable (Level.FINE)) {
logger.fine ("Starting Lattice");
logger.fine ("Input: ");
for (int ip = 0; ip < input.size(); ip++)
logger.fine (" " + input.get(ip));
logger.fine ("\nOutput: ");
if (output == null)
logger.fine ("null");
else
for (int op = 0; op < output.size(); op++)
logger.fine (" " + output.get(op));
logger.fine ("\n");
}
// Initialize some structures
this.input = input;
this.output = output;
// xxx Not very efficient when the lattice is actually sparse,
// especially when the number of states is large and the
// sequence is long.
latticeLength = input.size()+1;
int numStates = t.numStates();
nodes = new LatticeNode[latticeLength][numStates];
// xxx Yipes, this could get big; something sparse might be better?
gammas = new double[latticeLength][numStates];
if (saveXis) xis = new double[latticeLength][numStates][numStates];
double outputCounts[][] = null;
if (outputAlphabet != null)
outputCounts = new double[latticeLength][outputAlphabet.size()];
for (int i = 0; i < numStates; i++) {
for (int ip = 0; ip < latticeLength; ip++)
gammas[ip][i] = Transducer.IMPOSSIBLE_WEIGHT;
if (saveXis)
for (int j = 0; j < numStates; j++)
for (int ip = 0; ip < latticeLength; ip++)
xis[ip][i][j] = Transducer.IMPOSSIBLE_WEIGHT;
}
// Forward pass
logger.fine ("Starting Foward pass");
boolean atLeastOneInitialState = false;
for (int i = 0; i < numStates; i++) {
double initialWeight = t.getState(i).getInitialWeight();
//System.out.println ("Forward pass initialWeight = "+initialWeight);
if (initialWeight > Transducer.IMPOSSIBLE_WEIGHT) {
getLatticeNode(0, i).alpha = initialWeight;
//System.out.println ("nodes[0][i].alpha="+nodes[0][i].alpha);
atLeastOneInitialState = true;
}
}
if (atLeastOneInitialState == false)
logger.warning ("There are no starting states!");
// CPAL - a sorted list for our beam experiments
NBestSlist[] slists = new NBestSlist[latticeLength];
// CPAL - used for stats
nstatesExpl = new double[latticeLength];
// CPAL - used to adapt beam if optimizer is getting confused
// tctIter++;
if(curIter == 0) {
curBeamWidth = numStates;
} else if(tctIter > 1 && curIter != 0) {
//curBeamWidth = Math.min((int)Math.round(curAvgNstatesExpl*2),numStates);
//System.out.println ("Doubling Minimum Beam Size to: "+curBeamWidth);
curBeamWidth = beamWidth;
} else {
curBeamWidth = beamWidth;
}
// ************************************************************
for (int ip = 0; ip < latticeLength-1; ip++) {
// CPAL - add this to construct the beam
// ***************************************************
// CPAL - sets up the sorted list
slists[ip] = new NBestSlist(numStates);
// CPAL - set the
slists[ip].setKLMinE(curBeamWidth);
slists[ip].setKLeps(KLeps);
slists[ip].setRmin(Rmin);
for(int i = 0 ; i< numStates ; i++){
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
continue;
//State s = t.getState(i);
// CPAL - give the NB viterbi node the (Weight, position)
NBForBackNode cnode = new NBForBackNode(nodes[ip][i].alpha, i);
slists[ip].push(cnode);
}
// CPAL - unlike std. n-best beam we now filter the list based
// on a KL divergence like measure
// ***************************************************
// use method which computes the cumulative log sum and
// finds the point at which the sum is within KLeps
int KLMaxPos=1;
int RminPos=1;
if(KLeps > 0) {
KLMaxPos = slists[ip].getKLpos();
nstatesExpl[ip]=(double)KLMaxPos;
} else if(KLeps == 0) {
if(Rmin > 0) {
RminPos = slists[ip].getTHRpos();
} else {
slists[ip].setRmin(-Rmin);
RminPos = slists[ip].getTHRposSTRAWMAN();
}
nstatesExpl[ip]=(double)RminPos;
} else {
// Trick, negative values for KLeps mean use the max of KL an Rmin
slists[ip].setKLeps(-KLeps);
KLMaxPos = slists[ip].getKLpos();
//RminPos = slists[ip].getTHRpos();
if(Rmin > 0) {
RminPos = slists[ip].getTHRpos();
} else {
slists[ip].setRmin(-Rmin);
RminPos = slists[ip].getTHRposSTRAWMAN();
}
if(KLMaxPos > RminPos) {
nstatesExpl[ip]=(double)KLMaxPos;
} else {
nstatesExpl[ip]=(double)RminPos;
}
}
//System.out.println(nstatesExpl[ip] + " ");
// CPAL - contemplating setting values to something else
int tmppos;
for (int i = (int) nstatesExpl[ip]+1; i < slists[ip].size(); i++) {
tmppos = slists[ip].getPosByIndex(i);
nodes[ip][tmppos].alpha = Transducer.IMPOSSIBLE_WEIGHT;
nodes[ip][tmppos] = null; // Null is faster and seems to work the same
}
// - done contemplation
//for (int i = 0; i < numStates; i++) {
for(int jj=0 ; jj< nstatesExpl[ip]; jj++) {
int i = slists[ip].getPosByIndex(jj);
// CPAL - dont need this anymore
// should be taken care of in the lists
//if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
// xxx if we end up doing this a lot,
// we could save a list of the non-null ones
// continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
if (logger.isLoggable (Level.FINE))
logger.fine (" Starting Foward transition iteration from state "
+ s.getName() + " on input " + input.get(ip).toString()
+ " and output "
+ (output==null ? "(null)" : output.get(ip).toString()));
while (iter.hasNext()) {
State destination = iter.nextState();
if (logger.isLoggable (Level.FINE))
logger.fine ("Forward Lattice[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
LatticeNode destinationNode = getLatticeNode (ip+1, destination.getIndex());
destinationNode.output = iter.getOutput();
double transitionWeight = iter.getWeight();
if (logger.isLoggable (Level.FINE))
logger.fine ("transitionWeight="+transitionWeight
+" nodes["+ip+"]["+i+"].alpha="+nodes[ip][i].alpha
+" destinationNode.alpha="+destinationNode.alpha);
destinationNode.alpha = Transducer.sumLogProb (destinationNode.alpha,
nodes[ip][i].alpha + transitionWeight);
//System.out.println ("destinationNode.alpha <- "+destinationNode.alpha);
}
}
}
//System.out.println("Mean Nodes Explored: " + MatrixOps.mean(nstatesExpl));
curAvgNstatesExpl = MatrixOps.mean(nstatesExpl);
// Calculate total weight of Lattice. This is the normalizer
weight = Transducer.IMPOSSIBLE_WEIGHT;
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
// Note: actually we could sum at any ip index,
// the choice of latticeLength-1 is arbitrary
//System.out.println ("Ending alpha, state["+i+"] = "+nodes[latticeLength-1][i].alpha);
//System.out.println ("Ending beta, state["+i+"] = "+t.getState(i).finalWeight);
weight = Transducer.sumLogProb (weight,
(nodes[latticeLength-1][i].alpha + t.getState(i).getFinalWeight()));
}
// Weight is now an "unnormalized weight" of the entire Lattice
//assert (weight >= 0) : "weight = "+weight;
// If the sequence has -infinite weight, just return.
// Usefully this avoids calling any incrementX methods.
// It also relies on the fact that the gammas[][] and .alpha and .beta values
// are already initialized to values that reflect -infinite weight
// xxx Although perhaps not all (alphas,betas) exactly correctly reflecting?
if (weight == Transducer.IMPOSSIBLE_WEIGHT)
return;
// Backward pass
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
State s = t.getState(i);
nodes[latticeLength-1][i].beta = s.getFinalWeight();
gammas[latticeLength-1][i] =
nodes[latticeLength-1][i].alpha + nodes[latticeLength-1][i].beta - weight;
if (incrementor != null) {
double p = Math.exp(gammas[latticeLength-1][i]);
assert (p > Transducer.IMPOSSIBLE_WEIGHT && !Double.isNaN(p))
: "p="+p+" gamma="+gammas[latticeLength-1][i];
incrementor.incrementFinalState(s, p);
}
}
for (int ip = latticeLength-2; ip >= 0; ip--) {
for (int i = 0; i < numStates; i++) {
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
// Note that skipping here based on alpha means that beta values won't
// be correct, but since alpha is infinite anyway, it shouldn't matter.
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
while (iter.hasNext()) {
State destination = iter.nextState();
if (logger.isLoggable (Level.FINE))
logger.fine ("Backward Lattice[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
int j = destination.getIndex();
LatticeNode destinationNode = nodes[ip+1][j];
if (destinationNode != null) {
double transitionWeight = iter.getWeight();
assert (!Double.isNaN(transitionWeight));
// assert (transitionWeight >= 0); Not necessarily
double oldBeta = nodes[ip][i].beta;
assert (!Double.isNaN(nodes[ip][i].beta));
nodes[ip][i].beta = Transducer.sumLogProb (nodes[ip][i].beta,
destinationNode.beta + transitionWeight);
assert (!Double.isNaN(nodes[ip][i].beta))
: "dest.beta="+destinationNode.beta+" trans="+transitionWeight+" sum="+(destinationNode.beta+transitionWeight)
+ " oldBeta="+oldBeta;
double xi = nodes[ip][i].alpha + transitionWeight + nodes[ip+1][j].beta - weight;
if (saveXis) xis[ip][i][j] = xi;
assert (!Double.isNaN(nodes[ip][i].alpha));
assert (!Double.isNaN(transitionWeight));
assert (!Double.isNaN(nodes[ip+1][j].beta));
assert (!Double.isNaN(weight));
if (incrementor != null || outputAlphabet != null) {
double p = Math.exp(xi);
assert (p > Transducer.IMPOSSIBLE_WEIGHT && !Double.isNaN(p)) : "xis["+ip+"]["+i+"]["+j+"]="+xi;
if (incrementor != null)
incrementor.incrementTransition(iter, p);
if (outputAlphabet != null) {
int outputIndex = outputAlphabet.lookupIndex (iter.getOutput(), false);
assert (outputIndex >= 0);
// xxx This assumes that "ip" == "op"!
outputCounts[ip][outputIndex] += p;
//System.out.println ("CRF Lattice outputCounts["+ip+"]["+outputIndex+"]+="+p);
}
}
}
}
gammas[ip][i] = nodes[ip][i].alpha + nodes[ip][i].beta - weight;
}
if(true){
// CPAL - check the normalization
double checknorm = Transducer.IMPOSSIBLE_WEIGHT;
for (int i = 0; i < numStates; i++)
if (nodes[ip][i] != null) {
// Note: actually we could sum at any ip index,
// the choice of latticeLength-1 is arbitrary
//System.out.println ("Ending alpha, state["+i+"] = "+nodes[latticeLength-1][i].alpha);
//System.out.println ("Ending beta, state["+i+"] = "+t.getState(i).finalWeight);
checknorm = Transducer.sumLogProb (checknorm, gammas[ip][i]);
}
// System.out.println ("Check Gamma, sum="+checknorm);
// CPAL - done check of normalization
// CPAL - normalize
for (int i = 0; i < numStates; i++)
if (nodes[ip][i] != null) {
gammas[ip][i] = gammas[ip][i] - checknorm;
}
//System.out.println ("Check Gamma, sum="+checknorm);
// CPAL - normalization
}
}
if (incrementor != null)
for (int i = 0; i < numStates; i++) {
double p = Math.exp(gammas[0][i]);
assert (p > Transducer.IMPOSSIBLE_WEIGHT && !Double.isNaN(p));
incrementor.incrementInitialState(t.getState(i), p);
}
if (outputAlphabet != null) {
labelings = new LabelVector[latticeLength];
for (int ip = latticeLength-2; ip >= 0; ip--) {
assert (Math.abs(1.0-MatrixOps.sum (outputCounts[ip])) < 0.000001);
labelings[ip] = new LabelVector (outputAlphabet, outputCounts[ip]);
}
}
}
// CPAL - a simple node holding a weight and position of the state
private class NBForBackNode
{
double weight;
int pos;
NBForBackNode(double weight, int pos)
{
this.weight = weight;
this.pos = pos;
}
}
private class NBestSlist
{
ArrayList list = new ArrayList();
int MaxElements;
int KLMinElements;
int KLMaxPos;
double KLeps;
double Rmin;
NBestSlist(int MaxElements)
{
this.MaxElements = MaxElements;
}
boolean setKLMinE(int KLMinElements){
this.KLMinElements = KLMinElements;
return true;
}
int size()
{
return list.size();
}
boolean empty()
{
return list.isEmpty();
}
Object pop()
{
return list.remove(0);
}
int getPosByIndex(int ii){
NBForBackNode tn = (NBForBackNode)list.get(ii);
return tn.pos;
}
double getWeightByIndex(int ii){
NBForBackNode tn = (NBForBackNode)list.get(ii);
return tn.weight;
}
void setKLeps(double KLeps){
this.KLeps = KLeps;
}
void setRmin(double Rmin){
this.Rmin = Rmin;
}
int getTHRpos(){
NBForBackNode tn;
double lc1, lc2;
tn = (NBForBackNode)list.get(0);
lc1 = tn.weight;
tn = (NBForBackNode)list.get(list.size()-1);
lc2 = tn.weight;
double minc = lc1 - lc2;
double mincTHR = minc - minc*Rmin;
for(int i=1;i<list.size();i++){
tn = (NBForBackNode)list.get(i);
lc1 = tn.weight - lc2;
if(lc1 > mincTHR){
return i+1;
}
}
return list.size();
}
int getTHRposSTRAWMAN(){
NBForBackNode tn;
double lc1, lc2;
tn = (NBForBackNode)list.get(0);
lc1 = tn.weight;
double mincTHR = -lc1*Rmin;
//double minc = lc1 - lc2;
//double mincTHR = minc - minc*Rmin;
for(int i=1;i<list.size();i++){
tn = (NBForBackNode)list.get(i);
lc1 = -tn.weight;
if(lc1 < mincTHR){
return i+1;
}
}
return list.size();
}
int getKLpos(){
//double KLeps = 0.1;
double CSNLP[];
CSNLP = new double[MaxElements];
double worstc;
NBForBackNode tn;
tn = (NBForBackNode)list.get(list.size()-1);
worstc = tn.weight;
for(int i=0;i<list.size();i++){
tn = (NBForBackNode)list.get(i);
// NOTE: sometimes we can have positive numbers !
double lc = tn.weight;
//double lc = tn.weight-worstc;
//if(lc >0){
// int asdf=1;
//}
if (i==0) {
CSNLP[i] = lc;
} else {
CSNLP[i] = Transducer.sumLogProb(CSNLP[i-1], lc);
}
}
// normalize
for(int i=0;i<list.size();i++){
CSNLP[i]=CSNLP[i]-CSNLP[list.size()-1];
if(CSNLP[i] < KLeps){
KLMaxPos = i+1;
if(KLMaxPos >= KLMinElements) {
return KLMaxPos;
} else if(list.size() >= KLMinElements){
return KLMinElements;
}
}
}
KLMaxPos = list.size();
return KLMaxPos;
}
ArrayList push(NBForBackNode vn)
{
double tc = vn.weight;
boolean atEnd = true;
for(int i=0;i<list.size();i++){
NBForBackNode tn = (NBForBackNode)list.get(i);
double lc = tn.weight;
if(tc < lc){
list.add(i,vn);
atEnd = false;
break;
}
}
if(atEnd) {
list.add(vn);
}
// CPAL - if the list is too big,
// remove the first, largest weight element
if(list.size()>MaxElements) {
list.remove(MaxElements);
}
//double f = o.totalWeight[o.nextBestStateIndex];
//boolean atEnd = true;
//for(int i=0; i<list.size(); i++){
// ASearchNode_NBest tempNode = (ASearchNode_NBest)list.get(i);
// double f1 = tempNode.totalWeight[tempNode.nextBestStateIndex];
// if(f < f1) {
// list.add(i, o);
// atEnd = false;
// break;
// }
//}
//if(atEnd) list.add(o);
return list;
}
} // CPAL - end NBestSlist
// culotta: interface for constrained lattice
/**
Create a constrained lattice such that all paths pass through
the labeling of <code> requiredSegment </code> as indicated by
<code> constrainedSequence </code>
@param inputSequence input sequence
@param outputSequence output sequence
@param requiredSegment segment of sequence that must be labelled
@param constrainedSequence lattice must have labels of this
sequence from <code> requiredSegment.start </code> to <code>
requiredSegment.end </code> correctly
*/
SumLatticeBeam (Transducer t, Sequence inputSequence, Sequence outputSequence, Segment requiredSegment, Sequence constrainedSequence)
{
this (t, inputSequence, outputSequence, (Transducer.Incrementor)null, null,
makeConstraints(t, inputSequence, outputSequence, requiredSegment, constrainedSequence));
}
private static int[] makeConstraints (Transducer t, Sequence inputSequence, Sequence outputSequence, Segment requiredSegment, Sequence constrainedSequence) {
if (constrainedSequence.size () != inputSequence.size ())
throw new IllegalArgumentException ("constrainedSequence.size [" + constrainedSequence.size () + "] != inputSequence.size [" + inputSequence.size () + "]");
// constraints tells the lattice which states must emit which
// observations. positive values say all paths must pass through
// this state index, negative values say all paths must _not_
// pass through this state index. 0 means we don't
// care. initialize to 0. include 1 extra node for start state.
int [] constraints = new int [constrainedSequence.size() + 1];
for (int c = 0; c < constraints.length; c++)
constraints[c] = 0;
for (int i=requiredSegment.getStart (); i <= requiredSegment.getEnd(); i++) {
int si = t.stateIndexOfString ((String)constrainedSequence.get (i));
if (si == -1)
logger.warning ("Could not find state " + constrainedSequence.get (i) + ". Check that state labels match startTages and inTags, and that all labels are seen in training data.");
// throw new IllegalArgumentException ("Could not find state " + constrainedSequence.get(i) + ". Check that state labels match startTags and InTags.");
constraints[i+1] = si + 1;
}
// set additional negative constraint to ensure state after
// segment is not a continue tag
// xxx if segment length=1, this actually constrains the sequence
// to B-tag (B-tag)', instead of the intended constraint of B-tag
// (I-tag)'
// the fix below is unsafe, but will have to do for now.
// FIXED BELOW
/* String endTag = (String) constrainedSequence.get (requiredSegment.getEnd ());
if (requiredSegment.getEnd()+2 < constraints.length) {
if (requiredSegment.getStart() == requiredSegment.getEnd()) { // segment has length 1
if (endTag.startsWith ("B-")) {
endTag = "I" + endTag.substring (1, endTag.length());
}
else if (!(endTag.startsWith ("I-") || endTag.startsWith ("0")))
throw new IllegalArgumentException ("Constrained Lattice requires that states are tagged in B-I-O format.");
}
int statei = stateIndexOfString (endTag);
if (statei == -1) // no I- tag for this B- tag
statei = stateIndexOfString ((String)constrainedSequence.get (requiredSegment.getStart ()));
constraints[requiredSegment.getEnd() + 2] = - (statei + 1);
}
*/
if (requiredSegment.getEnd() + 2 < constraints.length) { // if
String endTag = requiredSegment.getInTag().toString();
int statei = t.stateIndexOfString (endTag);
if (statei == -1)
throw new IllegalArgumentException ("Could not find state " + endTag + ". Check that state labels match startTags and InTags.");
constraints[requiredSegment.getEnd() + 2] = - (statei + 1);
}
// printStates ();
logger.fine ("Segment:\n" + requiredSegment.sequenceToString () +
"\nconstrainedSequence:\n" + constrainedSequence +
"\nConstraints:\n");
for (int i=0; i < constraints.length; i++) {
logger.fine (constraints[i] + "\t");
}
logger.fine ("");
return constraints;
}
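	// Editor's note (added illustration, not part of the original source): for a 4-token
	// input whose tokens at positions 1..2 must take the state with index 3, and whose
	// next token must NOT take the state with index 5, the returned array (length
	// inputSize+1; entry 0 is the unconstrained start state) would be {0, 0, 4, 4, -6}:
	// positive entries are requiredStateIndex+1, negative entries are -(forbiddenStateIndex+1),
	// and 0 means unconstrained.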
// culotta: constructor for constrained lattice
/** Create a lattice that constrains its transitions such that the
* <position,label> pairs in "constraints" are adhered
* to. constraints is an array where each entry is the index of
* the required label at that position. An entry of 0 means there
* are no constraints on that <position, label>. Positive values
* mean the path must pass through that state. Negative values
* mean the path must _not_ pass through that state. NOTE -
* constraints.length must be equal to output.size() + 1. A
* lattice has one extra position for the initial
* state. Generally, this should be unconstrained, since it does
* not produce an observation.
*/
public SumLatticeBeam (Transducer t, Sequence input, Sequence output, Transducer.Incrementor incrementor, LabelAlphabet outputAlphabet, int [] constraints)
{
this.t = t;
if (false && logger.isLoggable (Level.FINE)) {
logger.fine ("Starting Lattice");
logger.fine ("Input: ");
for (int ip = 0; ip < input.size(); ip++)
logger.fine (" " + input.get(ip));
logger.fine ("\nOutput: ");
if (output == null)
logger.fine ("null");
else
for (int op = 0; op < output.size(); op++)
logger.fine (" " + output.get(op));
logger.fine ("\n");
}
// Initialize some structures
this.input = input;
this.output = output;
// xxx Not very efficient when the lattice is actually sparse,
// especially when the number of states is large and the
// sequence is long.
latticeLength = input.size()+1;
int numStates = t.numStates();
nodes = new LatticeNode[latticeLength][numStates];
// xxx Yipes, this could get big; something sparse might be better?
gammas = new double[latticeLength][numStates];
// xxx Move this to an ivar, so we can save it? But for what?
// Commenting this out, because it's a memory hog and not used right now.
// Uncomment and conditionalize under a flag if ever needed. -cas
// double xis[][][] = new double[latticeLength][numStates][numStates];
double outputCounts[][] = null;
if (outputAlphabet != null)
outputCounts = new double[latticeLength][outputAlphabet.size()];
for (int i = 0; i < numStates; i++) {
for (int ip = 0; ip < latticeLength; ip++)
gammas[ip][i] = Transducer.IMPOSSIBLE_WEIGHT;
/* Commenting out xis -cas
for (int j = 0; j < numStates; j++)
for (int ip = 0; ip < latticeLength; ip++)
xis[ip][i][j] = Transducer.IMPOSSIBLE_WEIGHT;
*/
}
// Forward pass
logger.fine ("Starting Constrained Foward pass");
// ensure that at least one state has initial weight less than Infinity
// so we can start from there
boolean atLeastOneInitialState = false;
for (int i = 0; i < numStates; i++) {
double initialWeight = t.getState(i).getInitialWeight();
//System.out.println ("Forward pass initialWeight = "+initialWeight);
if (initialWeight > Transducer.IMPOSSIBLE_WEIGHT) {
getLatticeNode(0, i).alpha = initialWeight;
//System.out.println ("nodes[0][i].alpha="+nodes[0][i].alpha);
atLeastOneInitialState = true;
}
}
if (atLeastOneInitialState == false)
logger.warning ("There are no starting states!");
for (int ip = 0; ip < latticeLength-1; ip++)
for (int i = 0; i < numStates; i++) {
logger.fine ("ip=" + ip+", i=" + i);
// check if this node is possible at this <position,
// label>. if not, skip it.
if (constraints[ip] > 0) { // must be in state indexed by constraints[ip] - 1
if (constraints[ip]-1 != i) {
logger.fine ("Current state does not match positive constraint. position="+ip+", constraint="+(constraints[ip]-1)+", currState="+i);
continue;
}
}
else if (constraints[ip] < 0) { // must _not_ be in state indexed by constraints[ip]
if (constraints[ip]+1 == -i) {
logger.fine ("Current state does not match negative constraint. position="+ip+", constraint="+(constraints[ip]+1)+", currState="+i);
continue;
}
}
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT) {
// xxx if we end up doing this a lot,
// we could save a list of the non-null ones
if (nodes[ip][i] == null) logger.fine ("nodes[ip][i] is NULL");
else if (nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT) logger.fine ("nodes[ip][i].alpha is Inf");
logger.fine ("-INFINITE weight or NULL...skipping");
continue;
}
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
if (logger.isLoggable (Level.FINE))
logger.fine (" Starting Forward transition iteration from state "
+ s.getName() + " on input " + input.get(ip).toString()
+ " and output "
+ (output==null ? "(null)" : output.get(ip).toString()));
while (iter.hasNext()) {
State destination = iter.nextState();
boolean legalTransition = true;
// check constraints to see if node at <ip,i> can transition to destination
if (ip+1 < constraints.length && constraints[ip+1] > 0 && ((constraints[ip+1]-1) != destination.getIndex())) {
logger.fine ("Destination state does not match positive constraint. Assigning -infinite weight. position="+(ip+1)+", constraint="+(constraints[ip+1]-1)+", source ="+i+", destination="+destination.getIndex());
legalTransition = false;
}
else if (((ip+1) < constraints.length) && constraints[ip+1] < 0 && (-(constraints[ip+1]+1) == destination.getIndex())) {
logger.fine ("Destination state does not match negative constraint. Assigning -infinite weight. position="+(ip+1)+", constraint="+(constraints[ip+1]+1)+", destination="+destination.getIndex());
legalTransition = false;
}
if (logger.isLoggable (Level.FINE))
logger.fine ("Forward Lattice[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
LatticeNode destinationNode = getLatticeNode (ip+1, destination.getIndex());
destinationNode.output = iter.getOutput();
double transitionWeight = iter.getWeight();
if (legalTransition) {
//if (logger.isLoggable (Level.FINE))
logger.fine ("transitionWeight="+transitionWeight
+" nodes["+ip+"]["+i+"].alpha="+nodes[ip][i].alpha
+" destinationNode.alpha="+destinationNode.alpha);
destinationNode.alpha = Transducer.sumLogProb (destinationNode.alpha,
nodes[ip][i].alpha + transitionWeight);
//System.out.println ("destinationNode.alpha <- "+destinationNode.alpha);
logger.fine ("Set alpha of latticeNode at ip = "+ (ip+1) + " stateIndex = " + destination.getIndex() + ", destinationNode.alpha = " + destinationNode.alpha);
}
else {
// this is an illegal transition according to our
// constraints, so set its prob to 0 . NO, alpha's are
// unnormalized weights...set to Inf //
// destinationNode.alpha = 0.0;
// destinationNode.alpha = Transducer.IMPOSSIBLE_WEIGHT;
logger.fine ("Illegal transition from state " + i + " to state " + destination.getIndex() + ". Setting alpha to Inf");
}
}
}
// Calculate total weight of Lattice. This is the normalizer
weight = Transducer.IMPOSSIBLE_WEIGHT;
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
// Note: actually we could sum at any ip index,
// the choice of latticeLength-1 is arbitrary
//System.out.println ("Ending alpha, state["+i+"] = "+nodes[latticeLength-1][i].alpha);
//System.out.println ("Ending beta, state["+i+"] = "+t.getState(i).finalWeight);
if (constraints[latticeLength-1] > 0 && i != constraints[latticeLength-1]-1)
continue;
if (constraints[latticeLength-1] < 0 && -i == constraints[latticeLength-1]+1)
continue;
logger.fine ("Summing final lattice weight. state="+i+", alpha="+nodes[latticeLength-1][i].alpha + ", final weight = "+t.getState(i).getFinalWeight());
weight = Transducer.sumLogProb (weight,
(nodes[latticeLength-1][i].alpha + t.getState(i).getFinalWeight()));
}
// Weight is now an "unnormalized weight" of the entire Lattice
//assert (weight >= 0) : "weight = "+weight;
// If the sequence has -infinite weight, just return.
// Usefully this avoids calling any incrementX methods.
// It also relies on the fact that the gammas[][] and .alpha and .beta values
// are already initialized to values that reflect -infinite weight
// xxx Although perhaps not all (alphas,betas) exactly correctly reflecting?
if (weight == Transducer.IMPOSSIBLE_WEIGHT)
return;
// Backward pass
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
State s = t.getState(i);
nodes[latticeLength-1][i].beta = s.getFinalWeight();
gammas[latticeLength-1][i] =
nodes[latticeLength-1][i].alpha + nodes[latticeLength-1][i].beta - weight;
if (incrementor != null) {
double p = Math.exp(gammas[latticeLength-1][i]);
assert (p >= 0 && p <= 1.0 && !Double.isNaN(p)) : "p="+p+" gamma="+gammas[latticeLength-1][i];
incrementor.incrementFinalState(s, p);
}
}
for (int ip = latticeLength-2; ip >= 0; ip--) {
for (int i = 0; i < numStates; i++) {
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
// Note that skipping here based on alpha means that beta values won't
// be correct, but since alpha is infinite anyway, it shouldn't matter.
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
while (iter.hasNext()) {
State destination = iter.nextState();
if (logger.isLoggable (Level.FINE))
logger.fine ("Backward Lattice[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
int j = destination.getIndex();
LatticeNode destinationNode = nodes[ip+1][j];
if (destinationNode != null) {
double transitionWeight = iter.getWeight();
assert (!Double.isNaN(transitionWeight));
// assert (transitionWeight >= 0); Not necessarily
double oldBeta = nodes[ip][i].beta;
assert (!Double.isNaN(nodes[ip][i].beta));
nodes[ip][i].beta = Transducer.sumLogProb (nodes[ip][i].beta,
destinationNode.beta + transitionWeight);
assert (!Double.isNaN(nodes[ip][i].beta))
: "dest.beta="+destinationNode.beta+" trans="+transitionWeight+" sum="+(destinationNode.beta+transitionWeight)
+ " oldBeta="+oldBeta;
// xis[ip][i][j] = nodes[ip][i].alpha + transitionWeight + nodes[ip+1][j].beta - weight;
assert (!Double.isNaN(nodes[ip][i].alpha));
assert (!Double.isNaN(transitionWeight));
assert (!Double.isNaN(nodes[ip+1][j].beta));
assert (!Double.isNaN(weight));
if (incrementor != null || outputAlphabet != null) {
double xi = nodes[ip][i].alpha + transitionWeight + nodes[ip+1][j].beta - weight;
double p = Math.exp(xi);
assert (p >= 0 && p <= 1.0 && !Double.isNaN(p)) : "xis["+ip+"]["+i+"]["+j+"]="+-xi;
if (incrementor != null)
incrementor.incrementTransition(iter, p);
if (outputAlphabet != null) {
int outputIndex = outputAlphabet.lookupIndex (iter.getOutput(), false);
assert (outputIndex >= 0);
// xxx This assumes that "ip" == "op"!
outputCounts[ip][outputIndex] += p;
//System.out.println ("CRF Lattice outputCounts["+ip+"]["+outputIndex+"]+="+p);
}
}
}
}
gammas[ip][i] = nodes[ip][i].alpha + nodes[ip][i].beta - weight;
}
}
if (incrementor != null)
for (int i = 0; i < numStates; i++) {
double p = Math.exp(gammas[0][i]);
assert (p >= 0.0 && p <= 1.0 && !Double.isNaN(p));
incrementor.incrementInitialState(t.getState(i), p);
}
if (outputAlphabet != null) {
labelings = new LabelVector[latticeLength];
for (int ip = latticeLength-2; ip >= 0; ip--) {
assert (Math.abs(1.0-MatrixOps.sum (outputCounts[ip])) < 0.000001);
labelings[ip] = new LabelVector (outputAlphabet, outputCounts[ip]);
}
}
}
public double getTotalWeight () {
assert (!Double.isNaN(weight));
return weight; }
// No, this.weight is an "unnormalized weight"
//public double getProbability () { return Math.exp (weight); }
public double getGammaWeight (int inputPosition, State s) {
return gammas[inputPosition][s.getIndex()]; }
public double getGammaProbability (int inputPosition, State s) {
return Math.exp (gammas[inputPosition][s.getIndex()]); }
public double[][][] getXis() {
return xis;
}
public double[][] getGammas () {
return gammas;
}
public double getXiProbability (int ip, State s1, State s2) {
if (xis == null)
throw new IllegalStateException ("xis were not saved.");
int i = s1.getIndex ();
int j = s2.getIndex ();
return Math.exp (xis[ip][i][j]);
}
public double getXiWeight (int ip, State s1, State s2)
{
if (xis == null)
throw new IllegalStateException ("xis were not saved.");
int i = s1.getIndex ();
int j = s2.getIndex ();
return xis[ip][i][j];
}
public int length () { return latticeLength; }
public double getAlpha (int ip, State s) {
LatticeNode node = getLatticeNode (ip, s.getIndex ());
return node.alpha;
}
public double getBeta (int ip, State s) {
LatticeNode node = getLatticeNode (ip, s.getIndex ());
return node.beta;
}
public LabelVector getLabelingAtPosition (int outputPosition) {
if (labelings != null)
return labelings[outputPosition];
return null;
}
public Transducer getTransducer ()
{
return t;
}
// A container for some information about a particular input position and state
private class LatticeNode
{
int inputPosition;
// outputPosition not really needed until we deal with asymmetric epsilon.
State state;
Object output;
double alpha = Transducer.IMPOSSIBLE_WEIGHT;
double beta = Transducer.IMPOSSIBLE_WEIGHT;
LatticeNode (int inputPosition, State state) {
this.inputPosition = inputPosition;
this.state = state;
assert (this.alpha == Transducer.IMPOSSIBLE_WEIGHT); // xxx Remove this check
}
}
public static class Factory extends SumLatticeFactory
{
int bw;
public Factory (int beamWidth) {
bw = beamWidth;
}
public SumLattice newSumLattice (Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor, boolean saveXis, LabelAlphabet outputAlphabet)
{
return new SumLatticeBeam (trans, input, output, incrementor, saveXis, outputAlphabet) {{ beamWidth = bw; }};
}
}
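	// Editor's note: hedged usage sketch, not part of the original source.  The beam lattice
	// is normally selected globally through the transducer's sum-lattice factory; the beam
	// width of 5 is illustrative only.
	private static void exampleUsage (Transducer crf)
	{
		crf.setSumLatticeFactory (new SumLatticeBeam.Factory (5));
		// any subsequent forward-backward computation requested from this transducer
		// will now use a beam-pruned SumLatticeBeam instead of SumLatticeDefault
	}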
}
| 38,215 | 33.710263 | 214 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/ShallowTransducerTrainer.java |
package cc.mallet.fst;
import cc.mallet.types.InstanceList;
/**
* Wraps around an already trained <tt>Transducer</tt> model. <p>
*
* Pass an instance of this class to <tt>*Evaluator.evaluateInstanceList</tt> when we
* don't have access to the *Trainer that was used to train the Transducer model.
*
* @author Gaurav Chandalia
* @deprecated Use <tt>NoopTransducerTrainer</tt> instead
*/
public class ShallowTransducerTrainer extends TransducerTrainer {
protected Transducer transducer;
public ShallowTransducerTrainer(Transducer transducer) {
this.transducer = transducer;
}
public int getIteration() { return 0; }
public Transducer getTransducer() { return transducer; }
public boolean isFinishedTraining() { return false; }
public boolean train(InstanceList trainingSet, int numIterations) {
throw new IllegalStateException("Cannot use this class for training");
}
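	// Editor's note: hedged usage sketch, not part of the original source.  The wrapper lets
	// evaluators that expect a TransducerTrainer run over a model trained elsewhere, e.g.:
	//
	//   TransducerEvaluator eval = ...;  // e.g. a TokenAccuracyEvaluator
	//   eval.evaluateInstanceList (new ShallowTransducerTrainer (trainedCrf), testData, "test");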
}
| 896 | 31.035714 | 82 | java | twitter_nlp | twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/Transducer.java |
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.logging.Logger;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.types.SequencePairAlignment;
import cc.mallet.pipe.Pipe;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Sequences;
/**
* A base class for all sequence models, analogous to {@link classify.Classifier}.
*/
public abstract class Transducer implements Serializable
{
// Variable name key:
// "ip" = "input position"
// "op" = "output position"
private static Logger logger = MalletLogger.getLogger(Transducer.class.getName());
//public static final double ZERO_COST = 0;
//public static final double INFINITE_COST = Double.POSITIVE_INFINITY;
public static final double CERTAIN_WEIGHT = Double.POSITIVE_INFINITY; // TODO Remove this because it should never be used; results in NaN's
public static final double IMPOSSIBLE_WEIGHT = Double.NEGATIVE_INFINITY;
// A factory instance from which we can ask for a newSumLattice(...)
SumLatticeFactory sumLatticeFactory;
// A factory instance from which we can ask for a newMaxLattice(...)
MaxLatticeFactory maxLatticeFactory;
/** A pipe that should produce a Sequence in the "data" slot, (and possibly one in the "target" slot also */
protected Pipe inputPipe;
/** A pipe that should expect the Transducer's output sequence in the "target" slot,
and should produce something printable in the "source" slot that
indicates the results of transduction. */
protected Pipe outputPipe;
/**
* Initializes default sum-product and max-product inference engines.
*/
public Transducer ()
{
sumLatticeFactory = new SumLatticeDefault.Factory();
maxLatticeFactory = new MaxLatticeDefault.Factory();
}
public Transducer (Pipe inputPipe, Pipe outputPipe)
{
this();
this.inputPipe = inputPipe;
this.outputPipe = outputPipe;
}
public Pipe getInputPipe () { return inputPipe; }
public Pipe getOutputPipe () { return outputPipe; }
public void setSumLatticeFactory (SumLatticeFactory fbf) { sumLatticeFactory = fbf; }
public void setMaxLatticeFactory (MaxLatticeFactory vf) { maxLatticeFactory = vf; }
public SumLatticeFactory getSumLatticeFactory () { return sumLatticeFactory; }
public MaxLatticeFactory getMaxLatticeFactory () { return maxLatticeFactory; }
/** Take input sequence from instance.data and put the output sequence in instance.target.
* Like transduce(Instance), but put best output sequence into instance.target rather than instance.data. */
// TODO Consider a different method name.
public Instance label (Instance instance)
{
if (inputPipe != null)
instance = inputPipe.instanceFrom(instance);
// TODO Use MaxLatticeFactory instead of hardcoding
instance.setTarget(new MaxLatticeDefault(this, (Sequence)instance.getData()).bestOutputSequence());
if (outputPipe != null)
instance = outputPipe.instanceFrom(instance);
return instance;
}
/** Take input sequence from instance.data and put the output sequence in instance.data. */
public Instance transduce (Instance instance)
{
if (inputPipe != null)
instance = inputPipe.instanceFrom(instance);
// TODO Use MaxLatticeFactory instead of hardcoding
instance.setData(new MaxLatticeDefault(this, (Sequence)instance.getData()).bestOutputSequence());
if (outputPipe != null)
instance = outputPipe.instanceFrom(instance);
return instance;
}
/**
* Converts the given sequence into another sequence according to this transducer.
* For example, a probabilistic transducer may do something like Viterbi decoding here.
* Subclasses of Transducer may specify that they only accept special kinds of sequences.
* @param input Input sequence
* @return Sequence output by this transducer
*/
public Sequence transduce (Sequence input)
{
return maxLatticeFactory.newMaxLattice(this, (Sequence)input).bestOutputSequence();
}
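// Illustrative sketch (an assumption, not original source) of the difference between
// label() and transduce(); "model" and "instance" are hypothetical variables.
//
// Transducer model = ...; // e.g., a trained CRF
// Instance labeled = model.label(instance); // best output sequence lands in the target slot
// Sequence best = model.transduce((Sequence) instance.getData()); // direct decoding of a Sequence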
public abstract int numStates ();
public abstract State getState (int index);
// Note that this method is allowed to return states with impossible (-infinity) initialWeights.
public abstract Iterator initialStateIterator ();
/** Some transducers are "generative", meaning that you can get a
sequence out of them without giving them an input sequence. In
this case State.transitionIterator() should return all available
transitions, but attempts to obtain the input and weight fields may
throw an exception. */
// TODO Why could obtaining "weight" be a problem???
public boolean canIterateAllTransitions () { return false; }
/** If true, this is a "generative transducer". In this case
State.transitionIterator() should return transitions that have
valid input and weight fields. True returned here should imply
that canIterateAllTransitions() is true. */
public boolean isGenerative () { return false; }
/**
* Runs inference across all the instances and returns the average token
* accuracy.
*/
public double averageTokenAccuracy (InstanceList ilist)
{
double accuracy = 0;
for (int i = 0; i < ilist.size(); i++) {
Instance instance = ilist.get(i);
Sequence input = (Sequence) instance.getData();
Sequence output = (Sequence) instance.getTarget();
assert (input.size() == output.size());
Sequence predicted = maxLatticeFactory.newMaxLattice(this, input).bestOutputSequence();
double pathAccuracy = Sequences.elementwiseAccuracy(output, predicted);
accuracy += pathAccuracy;
logger.fine ("Transducer path accuracy = "+pathAccuracy);
}
return accuracy/ilist.size();
}
// Treat the costs as if they are -log(probabilities); we will
// normalize them if necessary
public SequencePairAlignment generatePath ()
{
if (isGenerative() == false)
throw new IllegalStateException ("Transducer is not generative.");
ArrayList initialStates = new ArrayList ();
Iterator iter = initialStateIterator ();
while (iter.hasNext()) { initialStates.add (iter.next()); }
// xxx Not yet finished.
throw new UnsupportedOperationException ();
}
/**
* Returns the index of the input state name, returns -1 if name not found.
*/
public int stateIndexOfString (String s)
{
for (int i = 0; i < this.numStates(); i++) {
String state = this.getState (i).getName();
if (state.equals (s))
return i;
}
return -1;
}
private void printStates () {
for (int i = 0; i < this.numStates(); i++)
logger.fine (i + ":" + this.getState (i).getName());
}
public void print () {
logger.fine ("Transducer "+this);
printStates();
}
// Serialization of Transducer
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
// gsc: fixed serialization, writing/reading *LatticeFactory objects
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(inputPipe);
out.writeObject(outputPipe);
out.writeObject(sumLatticeFactory);
out.writeObject(maxLatticeFactory);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
inputPipe = (Pipe) in.readObject();
outputPipe = (Pipe) in.readObject();
sumLatticeFactory = (SumLatticeFactory) in.readObject();
maxLatticeFactory = (MaxLatticeFactory) in.readObject();
}
/**
* An abstract class used to represent the states of the transducer.
*/
public abstract static class State implements Serializable
{
public abstract String getName();
public abstract int getIndex ();
public abstract double getInitialWeight ();
public abstract void setInitialWeight (double c);
public abstract double getFinalWeight ();
public abstract void setFinalWeight (double c);
public abstract Transducer getTransducer ();
// Pass negative positions for a sequence to request "epsilon
// transitions" for either input or output. (-position-1) should be
// the position in the sequence after which we are trying to insert
// the epsilon transition (e.g., pass -3 to request an epsilon transition after position 2).
public abstract TransitionIterator transitionIterator
(Sequence input, int inputPosition, Sequence output, int outputPosition);
// Pass negative input position for a sequence to request "epsilon
// transitions". (-position-1) should be the position in the
// sequence after which we are trying to insert the epsilon transition.
public TransitionIterator transitionIterator (Sequence input, int inputPosition) {
return transitionIterator (input, inputPosition, null, 0);
}
// For generative transducers:
// Return all possible transitions, independent of input
public TransitionIterator transitionIterator () {
return transitionIterator (null, 0, null, 0);
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
}
}
/** Methods to be called by inference methods to indicate partial counts of sufficient statistics.
* That is, how much probability mass is falling on a transition, or in an initial state or a final state. */
public interface Incrementor {
public void incrementTransition (TransitionIterator ti, double count);
public void incrementInitialState (State s, double count);
public void incrementFinalState (State s, double count);
}
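// Minimal illustrative Incrementor (an assumption, not original source): it simply
// accumulates all probability mass reported by an inference lattice. The variables
// model, input and output are hypothetical.
//
// Transducer.Incrementor counter = new Transducer.Incrementor() {
// double mass = 0;
// public void incrementTransition (TransitionIterator ti, double count) { mass += count; }
// public void incrementInitialState (State s, double count) { mass += count; }
// public void incrementFinalState (State s, double count) { mass += count; }
// };
// new SumLatticeDefault (model, input, output, counter); // forward-backward fills in the counts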
/**
* An abstract class to iterate over the states of the transducer.
*/
public abstract static class TransitionIterator implements Iterator<State>, Serializable
{
public abstract boolean hasNext ();
@Deprecated // What is this method for? I've forgotten. -akm 11/2007
public int numberNext() { return -1;}
@Deprecated
public abstract State nextState (); // returns the destination state
public State next () { return nextState(); }
public void remove () {
throw new UnsupportedOperationException (); }
/** An implementation-specific index for this transition object,
can be used to index into arrays of per-transition parameters. */
public abstract int getIndex();
/** The input symbol or object appearing on this transition. */
public abstract Object getInput ();
/** The output symbol or object appearing on this transition. */
public abstract Object getOutput ();
/** The weight (between infinity and -infinity) associated with taking this transition with this input/output. */
public abstract double getWeight ();
/** The state we were in before taking this transition. */
public abstract State getSourceState ();
/** The state we are in after taking this transition. */
public abstract State getDestinationState ();
/** The number of input positions that this transition consumes.
* This allows for transitions that consume variable amounts of the sequences. */
public int getInputPositionIncrement () { return 1; }
/** The number of output positions that this transition produces.
* This allows for transitions that produce variable amounts of the sequences. */
public int getOutputPositionIncrement () { return 1; }
public Transducer getTransducer () { return getSourceState().getTransducer(); }
// I hate that I need this; there's really no other way -cas
public String describeTransition (double cutoff) { return ""; }
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
@SuppressWarnings("unused")
int version = in.readInt ();
}
}
/* sumLogProb()
We need to be able to sum probabilities that are represented as
weights (which are log(probabilities)). Naively, we would just
convert them into probabilities, sum them, and then convert them
back into weights. This would be:
double sumLogProb (double a, double b) {
return Math.log (Math.exp(a) + Math.exp(b));
}
But this would fail when a or b is too negative. The machine would have the
resolution to represent the final weight, but not the resolution to
represent the intermediate exponentiated weights, and we
would get negative infinity as our answer.
What we want is a method for getting the sum while only exponentiating
numbers of small magnitude. We can do this with the following.
Starting with the equation above, then:
sumProb = log (exp(a) + exp(b))
exp(sumProb) = exp(a) + exp(b)
exp(sumProb)/exp(a) = 1 + exp(b)/exp(a)
exp(sumProb-a) = 1 + exp(b-a)
sumProb - a = log(1 + exp(b-a))
sumProb = a + log(1 + exp(b-a))
We want to make sure that "b-a" is negative or a small positive
number. We can assure this by noticing that we could have
equivalently derived
sumProb = b + log (1 + exp(a-b)),
and we can simply select among the two alternative equations the
one that would have the smallest (or most negative) exponent.
*/
public static double no_longer_needed_sumNegLogProb (double a, double b)
{
if (a == Double.POSITIVE_INFINITY && b == Double.POSITIVE_INFINITY)
return Double.POSITIVE_INFINITY;
else if (a > b)
return b - Math.log (1 + Math.exp(b-a));
else
return a - Math.log (1 + Math.exp(a-b));
}
/**
* Returns <tt>Math.log(Math.exp(a) + Math.exp(b))</tt>.
* <p>
* <tt>a, b</tt> represent weights.
*/
public static double sumLogProb (double a, double b)
{
if (a == Double.NEGATIVE_INFINITY) {
if (b == Double.NEGATIVE_INFINITY)
return Double.NEGATIVE_INFINITY;
return b;
}
else if (b == Double.NEGATIVE_INFINITY)
return a;
else if (a > b)
return a + Math.log (1 + Math.exp(b-a));
else
return b + Math.log (1 + Math.exp(a-b));
}
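// Worked example (illustrative, not original source): with a = -1000 and b = -1001 the naive
// Math.log(Math.exp(a) + Math.exp(b)) underflows to Math.log(0) = -infinity,
// while sumLogProb(a, b) = a + Math.log(1 + Math.exp(b - a))
// = -1000 + Math.log(1 + Math.exp(-1)) ~ -999.687.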
public static double less_efficient_sumLogProb (double a, double b)
{
if (a == Double.NEGATIVE_INFINITY && b == Double.NEGATIVE_INFINITY)
return Double.NEGATIVE_INFINITY;
else if (a > b)
return a + Math.log (1 + Math.exp(b-a));
else
return b + Math.log (1 + Math.exp(a-b));
}
}
| 14,943 | 35.627451 | 140 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/HMMTrainerByLikelihood.java
|
package cc.mallet.fst;
import java.util.logging.Logger;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.util.MalletLogger;
public class HMMTrainerByLikelihood extends TransducerTrainer {
private static Logger logger = MalletLogger
.getLogger(HMMTrainerByLikelihood.class.getName());
HMM hmm;
InstanceList trainingSet, unlabeledSet;
int iterationCount = 0;
boolean converged = false;
public HMMTrainerByLikelihood(HMM hmm) {
this.hmm = hmm;
}
@Override
public Transducer getTransducer() {
return hmm;
}
@Override
public int getIteration() {
return iterationCount;
}
@Override
public boolean isFinishedTraining() {
return converged;
}
@Override
public boolean train(InstanceList trainingSet, int numIterations) {
return train(trainingSet, null, numIterations);
}
public boolean train(InstanceList trainingSet, InstanceList unlabeledSet,
int numIterations) {
if (hmm.emissionEstimator == null)
hmm.reset();
converged = false;
double threshold = 0.001;
double logLikelihood = Double.NEGATIVE_INFINITY, prevLogLikelihood;
for (int iter = 0; iter < numIterations; iter++) {
prevLogLikelihood = logLikelihood;
logLikelihood = 0;
for (Instance inst : trainingSet) {
FeatureSequence input = (FeatureSequence) inst.getData();
FeatureSequence output = (FeatureSequence) inst.getTarget();
double obsLikelihood = new SumLatticeDefault(hmm, input,
output, hmm.new Incrementor()).getTotalWeight();
logLikelihood += obsLikelihood;
}
logger.info("getValue() (observed log-likelihood) = "
+ logLikelihood);
if (unlabeledSet != null) {
int numEx = 0;
for (Instance inst : unlabeledSet) {
numEx++;
if (numEx % 100 == 0) {
System.err.print(numEx + ". ");
System.err.flush();
}
FeatureSequence input = (FeatureSequence) inst.getData();
double hiddenLikelihood = new SumLatticeDefault(hmm, input,
null, hmm.new Incrementor()).getTotalWeight();
logLikelihood += hiddenLikelihood;
}
System.err.println();
}
logger.info("getValue() (log-likelihood) = " + logLikelihood);
hmm.estimate();
iterationCount++;
logger.info("HMM finished one iteration of maximizer, i=" + iter);
runEvaluators();
if (Math.abs(logLikelihood - prevLogLikelihood) < threshold) {
converged = true;
logger.info("HMM training has converged, i=" + iter);
break;
}
}
return converged;
}
}
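// Illustrative usage sketch (an assumption, not original source); hmm, labeledData and
// unlabeledData are hypothetical variables.
//
// HMMTrainerByLikelihood trainer = new HMMTrainerByLikelihood(hmm);
// trainer.train(labeledData, 100); // supervised estimation, stops early on convergence
// trainer.train(labeledData, unlabeledData, 100); // optionally also use unlabeled sequences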
| 2,526 | 25.051546 | 74 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/InstanceAccuracyEvaluator.java
|
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.fst;
import java.util.HashMap;
import java.util.logging.Logger;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
/**
* Reports the percentage of instances for which the entire predicted sequence was
* correct.
*
* Created: May 12, 2004
*
* @author <A HREF="mailto:[email protected]">[email protected]</A>
* @version $Id: InstanceAccuracyEvaluator.java,v 1.1 2007/10/22 21:37:48 mccallum Exp $
*/
public class InstanceAccuracyEvaluator extends TransducerEvaluator {
private static final Logger logger = MalletLogger.getLogger (InstanceAccuracyEvaluator.class.getName());
private HashMap<String,Double> accuracy = new HashMap<String,Double>();
public void evaluateInstanceList (TransducerTrainer tt, InstanceList data, String description)
{
int correct = 0;
for (int i = 0; i < data.size(); i++) {
Instance instance = data.get(i);
Sequence input = (Sequence) instance.getData();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = tt.getTransducer().transduce (input);
assert (predOutput.size() == trueOutput.size());
if (sequencesMatch (trueOutput, predOutput))
correct++;
}
double acc = ((double)correct) / data.size();
accuracy.put(description, acc);
logger.info (description+" Num instances = "+data.size()+" Num correct = "+correct+" Per-instance accuracy = "+acc);
}
public double getAccuracy(String description) {
return accuracy.get(description).doubleValue();
}
private boolean sequencesMatch (Sequence trueOutput, Sequence predOutput)
{
for (int j = 0; j < trueOutput.size(); j++) {
Object tru = trueOutput.get(j);
Object pred = predOutput.get(j);
if (!tru.toString().equals (pred.toString())) {
return false;
}
}
return true;
}
}
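// Illustrative sketch (an assumption, not original source); trainer and testData are hypothetical.
//
// InstanceAccuracyEvaluator eval = new InstanceAccuracyEvaluator();
// eval.evaluateInstanceList(trainer, testData, "test");
// double acc = eval.getAccuracy("test"); // fraction of instances whose whole sequence was correct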
| 2,407 | 34.411765 | 121 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/MaxLatticeDefault.java
|
/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Fernando Pereira <a href="mailto:[email protected]">[email protected]</a>
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintWriter;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import cc.mallet.types.ArraySequence;
import cc.mallet.types.Sequence;
import cc.mallet.types.SequencePairAlignment;
import cc.mallet.fst.Transducer.State;
import cc.mallet.fst.Transducer.TransitionIterator;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.search.AStar;
import cc.mallet.util.search.AStarState;
import cc.mallet.util.search.SearchNode;
import cc.mallet.util.search.SearchState;
/** Default, full dynamic programming version of the Viterbi "Max-(Product)-Lattice" algorithm.
*
* @author Fernando Pereira
* @author Andrew McCallum
*/
public class MaxLatticeDefault implements MaxLattice
{
private static Logger logger = MalletLogger.getLogger(MaxLatticeDefault.class.getName());
//{ logger.setLevel(Level.INFO); }
private Transducer t;
private Sequence<Object> input, providedOutput;
private int latticeLength;
private ViterbiNode[][] lattice;
private WeightCache first, last;
private WeightCache[] caches;
private int numCaches, maxCaches;
public Transducer getTransducer () { return t; }
public Sequence getInput() { return input; }
public Sequence getProvidedOutput() { return providedOutput; }
private class ViterbiNode implements AStarState {
int inputPosition; // Position of input used to enter this node
State state; // Transducer state from which this node entered
Object output; // Transducer output produced on entering this node
double delta = Transducer.IMPOSSIBLE_WEIGHT;
ViterbiNode maxWeightPredecessor = null;
ViterbiNode (int inputPosition, State state) {
this.inputPosition = inputPosition;
this.state = state;
}
// The one method required by AStarState
public double completionCost () { return -delta; }
public boolean isFinal() {
return inputPosition == 0 && state.getInitialWeight() > Transducer.IMPOSSIBLE_WEIGHT;
}
private class PreviousStateIterator extends AStarState.NextStateIterator {
private int prev;
private boolean found;
private double weight;
private double[] weights;
private PreviousStateIterator() {
prev = 0;
if (inputPosition > 0) {
int j = state.getIndex();
weights = new double[t.numStates()];
WeightCache c = getCache(inputPosition-1);
for (int s = 0; s < t.numStates(); s++)
weights[s] = c.weight[s][j];
}
}
private void lookAhead() {
if (weights != null && !found) {
for (; prev < t.numStates(); prev++)
if (weights[prev] > Transducer.IMPOSSIBLE_WEIGHT) {
found = true;
return;
}
}
}
public boolean hasNext() {
lookAhead();
return weights != null && prev < t.numStates();
}
public SearchState nextState() {
lookAhead();
weight = weights[prev++];
found = false;
return getViterbiNode(inputPosition-1, prev-1);
}
// Required by SearchState, super-interface of AStarState
public double cost() {
return -weight;
}
public double weight() {
return weight;
}
}
public NextStateIterator getNextStates() {
return new PreviousStateIterator();
}
}
private class WeightCache {
private WeightCache prev, next;
private double weight[][];
private int position;
private WeightCache(int position) {
weight = new double[t.numStates()][t.numStates()];
init(position);
}
private void init(int position) {
this.position = position;
for (int i = 0; i < t.numStates(); i++)
for (int j = 0; j < t.numStates(); j++)
weight[i][j] = Transducer.IMPOSSIBLE_WEIGHT;
}
}
private WeightCache getCache(int position) {
WeightCache cache = caches[position];
if (cache == null) { // No cache for this position
// System.out.println("cache " + numCaches + "/" + maxCaches);
if (numCaches < maxCaches) { // Create another cache
cache = new WeightCache(position);
if (numCaches++ == 0)
first = last = cache;
}
else { // Steal least used cache
cache = last;
caches[cache.position] = null;
cache.init(position);
}
for (int i = 0; i < t.numStates(); i++) {
if (lattice[position][i] == null || lattice[position][i].delta == Transducer.IMPOSSIBLE_WEIGHT)
continue;
State s = t.getState(i);
TransitionIterator iter =
s.transitionIterator (input, position, providedOutput, position);
while (iter.hasNext()) {
State d = iter.next();
cache.weight[i][d.getIndex()] = iter.getWeight();
}
}
caches[position] = cache;
}
if (cache != first) { // Move to front
if (cache == last)
last = cache.prev;
if (cache.prev != null)
cache.prev.next = cache.next;
cache.next = first;
cache.prev = null;
first.prev = cache;
first = cache;
}
return cache;
}
protected ViterbiNode getViterbiNode (int ip, int stateIndex)
{
if (lattice[ip][stateIndex] == null)
lattice[ip][stateIndex] = new ViterbiNode (ip, t.getState (stateIndex));
return lattice[ip][stateIndex];
}
public MaxLatticeDefault (Transducer t, Sequence inputSequence)
{
this (t, inputSequence, null, 100000);
}
public MaxLatticeDefault (Transducer t, Sequence inputSequence, Sequence outputSequence)
{
this (t, inputSequence, outputSequence, 100000);
}
/** Initiate Viterbi decoding of the inputSequence, constrained to match non-null parts of the outputSequence.
* maxCaches indicates how much state information to memoize in n-best decoding. */
public MaxLatticeDefault (Transducer t, Sequence inputSequence, Sequence outputSequence, int maxCaches)
{
// This method initializes the forward path, but does not yet do the backward pass.
this.t = t;
if (maxCaches < 1)
maxCaches = 1;
this.maxCaches = maxCaches;
assert (inputSequence != null);
if (logger.isLoggable (Level.FINE)) {
logger.fine ("Starting ViterbiLattice");
logger.fine ("Input: ");
for (int ip = 0; ip < inputSequence.size(); ip++)
logger.fine (" " + inputSequence.get(ip));
logger.fine ("\nOutput: ");
if (outputSequence == null)
logger.fine ("null");
else
for (int op = 0; op < outputSequence.size(); op++)
logger.fine (" " + outputSequence.get(op));
logger.fine ("\n");
}
this.input = inputSequence;
this.providedOutput = outputSequence;
latticeLength = input.size()+1;
int numStates = t.numStates();
lattice = new ViterbiNode[latticeLength][numStates];
caches = new WeightCache[latticeLength-1];
// Viterbi Forward
logger.fine ("Starting Viterbi");
boolean anyInitialState = false;
for (int i = 0; i < numStates; i++) {
double initialWeight = t.getState(i).getInitialWeight();
if (initialWeight > Transducer.IMPOSSIBLE_WEIGHT) {
ViterbiNode n = getViterbiNode (0, i);
n.delta = initialWeight;
anyInitialState = true;
}
}
if (!anyInitialState) {
logger.warning ("Viterbi: No initial states!");
}
for (int ip = 0; ip < latticeLength-1; ip++)
for (int i = 0; i < numStates; i++) {
if (lattice[ip][i] == null || lattice[ip][i].delta == Transducer.IMPOSSIBLE_WEIGHT)
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, providedOutput, ip);
if (logger.isLoggable (Level.FINE))
logger.fine (" Starting Viterbi transition iteration from state "
+ s.getName() + " on input " + input.get(ip));
while (iter.hasNext()) {
State destination = iter.next();
if (logger.isLoggable (Level.FINE))
logger.fine ("Viterbi[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
ViterbiNode destinationNode = getViterbiNode (ip+1, destination.getIndex());
destinationNode.output = iter.getOutput();
double weight = lattice[ip][i].delta + iter.getWeight();
if (ip == latticeLength-2) {
weight += destination.getFinalWeight();
}
if (weight > destinationNode.delta) {
if (logger.isLoggable (Level.FINE))
logger.fine ("Viterbi[inputPos="+ip
+"][source][dest="+destination.getName()
+"] weight increased to "+weight+" by source="+
s.getName());
destinationNode.delta = weight;
destinationNode.maxWeightPredecessor = lattice[ip][i];
}
}
}
}
public double getDelta (int ip, int stateIndex) {
if (lattice != null) {
return getViterbiNode (ip, stateIndex).delta;
}
throw new RuntimeException ("Attempt to called getDelta() when lattice not stored.");
}
private List<SequencePairAlignment<Object,ViterbiNode>> viterbiNodeAlignmentCache = null;
/**
* Perform the backward pass of Viterbi, returning the n-best sequences of
* ViterbiNodes. Each ViterbiNode contains the state, output symbol, and other
* information. Note that the length of each ViterbiNode Sequence is
* inputLength+1, because the first element of the sequence is the start
* state, and the first input/output symbols occur on the transition from a
* start-state to the next state. These first input/output symbols are stored
* in the second ViterbiNode in the sequence. The last ViterbiNode in the
* sequence corresponds to the final state and has the last input/output
* symbols.
*/
public List<SequencePairAlignment<Object,ViterbiNode>> bestViterbiNodeSequences (int n) {
if (viterbiNodeAlignmentCache != null && viterbiNodeAlignmentCache.size() >= n)
return viterbiNodeAlignmentCache;
int numFinal = 0;
for (int i = 0; i < t.numStates(); i++) {
if (lattice[latticeLength-1][i] != null && lattice[latticeLength-1][i].delta > Transducer.IMPOSSIBLE_WEIGHT)
numFinal++;
}
ViterbiNode[] finalNodes = new ViterbiNode[numFinal];
int f = 0;
for (int i = 0; i < t.numStates(); i++) {
if (lattice[latticeLength-1][i] != null && lattice[latticeLength-1][i].delta > Transducer.IMPOSSIBLE_WEIGHT)
finalNodes[f++] = lattice[latticeLength-1][i];
}
AStar search = new AStar(finalNodes, latticeLength * t.numStates());
List<SequencePairAlignment<Object,ViterbiNode>> outputs = new ArrayList<SequencePairAlignment<Object,ViterbiNode>>(n);
for (int i = 0; i < n && search.hasNext(); i++) {
// gsc: removing unnecessary cast
SearchNode ans = search.next();
double weight = -ans.getCost();
ViterbiNode[] seq = new ViterbiNode[latticeLength];
// Commented out so we get the start state ViterbiNode -akm 12/2007
//ans = ans.getParent(); // ans now corresponds to the Viterbi node after the first transition
for (int j = 0; j < latticeLength; j++) {
ViterbiNode v = (ViterbiNode)ans.getState();
assert(v.inputPosition == j); // was == j+1
seq[j] = v;
ans = ans.getParent();
}
outputs.add(new SequencePairAlignment<Object,ViterbiNode>(input, new ArraySequence<ViterbiNode>(seq), weight));
}
viterbiNodeAlignmentCache = outputs;
return outputs;
}
private List<SequencePairAlignment<Object,State>> stateAlignmentCache = null;
/**
* Perform the backward pass of Viterbi, returning the n-best sequences of
* States. Note that the length of each State Sequence is inputLength+1,
* because the first element of the sequence is the start state, and the first
* input/output symbols occur on the transition from a start state to the next
* state. The last State in the sequence corresponds to the final state.
*/
public List<SequencePairAlignment<Object,State>> bestStateAlignments (int n) {
if (stateAlignmentCache != null && stateAlignmentCache.size() >= n)
return stateAlignmentCache;
bestViterbiNodeSequences(n); // ensure that viterbiNodeAlignmentCache has at least size n
ArrayList<SequencePairAlignment<Object,State>> ret = new ArrayList<SequencePairAlignment<Object,State>>(n);
for (int i = 0; i < n; i++) {
State[] ss = new State[latticeLength];
Sequence<ViterbiNode> vs = viterbiNodeAlignmentCache.get(i).output();
for (int j = 0; j < latticeLength; j++)
ss[j] = vs.get(j).state; // Here is where we grab the state from the ViterbiNode
ret.add(new SequencePairAlignment<Object,State>(input, new ArraySequence<State>(ss), viterbiNodeAlignmentCache.get(i).getWeight()));
}
stateAlignmentCache = ret;
return ret;
}
public SequencePairAlignment<Object,State> bestStateAlignment () {
return bestStateAlignments(1).get(0);
}
public List<Sequence<State>> bestStateSequences(int n) {
List<SequencePairAlignment<Object,State>> a = bestStateAlignments(n);
ArrayList<Sequence<State>> ret = new ArrayList<Sequence<State>>(n);
for (int i = 0; i < n; i++)
ret.add (a.get(i).output());
return ret;
}
public Sequence<State> bestStateSequence() {
return bestStateAlignments(1).get(0).output();
}
private List<SequencePairAlignment<Object,Object>> outputAlignmentCache = null;
public List<SequencePairAlignment<Object,Object>> bestOutputAlignments (int n) {
if (outputAlignmentCache != null && outputAlignmentCache.size() >= n)
return outputAlignmentCache;
bestViterbiNodeSequences(n); // ensure that viterbiNodeAlignmentCache has at least size n
ArrayList<SequencePairAlignment<Object,Object>> ret = new ArrayList<SequencePairAlignment<Object,Object>>(n);
for (int i = 0; i < n; i++) {
Object[] ss = new Object[latticeLength-1];
Sequence<ViterbiNode> vs = viterbiNodeAlignmentCache.get(i).output();
for (int j = 0; j < latticeLength-1; j++)
ss[j] = vs.get(j+1).output; // Here is where we grab the output from the ViterbiNode destination
ret.add(new SequencePairAlignment<Object,Object>(input, new ArraySequence<Object>(ss), viterbiNodeAlignmentCache.get(i).getWeight()));
}
outputAlignmentCache = ret;
return ret;
}
public SequencePairAlignment<Object,Object> bestOutputAlignment () {
return bestOutputAlignments(1).get(0);
}
public List<Sequence<Object>> bestOutputSequences (int n) {
bestOutputAlignments(n); // ensure that outputAlignmentCache has at least size n
ArrayList<Sequence<Object>> ret = new ArrayList<Sequence<Object>>(n);
for (int i = 0; i < n; i++)
ret.add (outputAlignmentCache.get(i).output());
return ret;
// TODO consider caching this result
}
public Sequence<Object> bestOutputSequence () {
return bestOutputAlignments(1).get(0).output();
}
public double bestWeight() {
return bestOutputAlignments(1).get(0).getWeight();
}
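// Illustrative n-best usage sketch (an assumption, not original source); "model" and
// "inputSequence" are hypothetical variables.
//
// MaxLatticeDefault lattice = new MaxLatticeDefault(model, inputSequence);
// for (SequencePairAlignment<Object,Object> a : lattice.bestOutputAlignments(5))
// System.out.println(a.getWeight() + " : " + a.output()); // the 5 highest-weight decodings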
/** Increment states and transitions with a count of 1.0 along the best state sequence.
* This provides for a so-called "Viterbi training" approximation. */
public void incrementTransducer (Transducer.Incrementor incrementor)
{
// We are only going to increment along the single best path ".get(0)" below.
// We could consider having a version of this method:
// incrementTransducer(Transducer.Incrementor incrementor, double[] counts)
// where the number of n-best paths to increment would be determined by counts.length
SequencePairAlignment<Object,ViterbiNode> viterbiNodeAlignment = this.bestViterbiNodeSequences(1).get(0);
int sequenceLength = viterbiNodeAlignment.output().size();
assert (sequenceLength == viterbiNodeAlignment.input().size()); // Not sure this works for unequal input/output lengths
// Increment the initial state
incrementor.incrementInitialState(viterbiNodeAlignment.output().get(0).state, 1.0);
// Increment the final state
incrementor.incrementFinalState(viterbiNodeAlignment.output().get(sequenceLength-1).state, 1.0);
for (int ip = 0; ip < viterbiNodeAlignment.input().size()-1; ip++) {
TransitionIterator iter =
viterbiNodeAlignment.output().get(ip).state.transitionIterator (input, ip, providedOutput, ip);
// xxx This assumes that a transition is completely
// identified, and made unique by its destination state and
// output. This may not be true!
int numIncrements = 0;
while (iter.hasNext()) {
if (iter.next().equals (viterbiNodeAlignment.output().get(ip+1).state)
&& iter.getOutput().equals (viterbiNodeAlignment.output().get(ip).output)) {
incrementor.incrementTransition(iter, 1.0);
numIncrements++;
}
}
if (numIncrements > 1)
throw new IllegalStateException ("More than one satisfying transition found.");
if (numIncrements == 0)
throw new IllegalStateException ("No satisfying transition found.");
}
}
public double elementwiseAccuracy (Sequence referenceOutput)
{
int accuracy = 0;
Sequence output = bestOutputSequence();
assert (referenceOutput.size() == output.size());
for (int i = 0; i < output.size(); i++) {
//logger.fine("tokenAccuracy: ref: "+referenceOutput.get(i)+" viterbi: "+output.get(i));
if (referenceOutput.get(i).toString().equals (output.get(i).toString())) {
accuracy++;
}
}
logger.info ("Number correct: " + accuracy + " out of " + output.size());
return ((double)accuracy)/output.size();
}
public double tokenAccuracy (Sequence referenceOutput, PrintWriter out)
{
Sequence output = bestOutputSequence();
int accuracy = 0;
String testString;
assert (referenceOutput.size() == output.size());
for (int i = 0; i < output.size(); i++) {
//logger.fine("tokenAccuracy: ref: "+referenceOutput.get(i)+" viterbi: "+output.get(i));
testString = output.get(i).toString();
if (out != null) {
out.println(testString);
}
if (referenceOutput.get(i).toString().equals (testString)) {
accuracy++;
}
}
logger.info ("Number correct: " + accuracy + " out of " + output.size());
return ((double)accuracy)/output.size();
}
public static class Factory extends MaxLatticeFactory implements Serializable
{
public MaxLattice newMaxLattice (Transducer trans, Sequence inputSequence, Sequence outputSequence)
{
return new MaxLatticeDefault (trans, inputSequence, outputSequence);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt();
}
}
}
| 18,893 | 36.047059 | 137 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SumLatticeScaling.java
|
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.logging.Level;
import java.util.logging.Logger;
import cc.mallet.fst.Transducer.State;
import cc.mallet.fst.Transducer.TransitionIterator;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelVector;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
public class SumLatticeScaling implements SumLattice {
private static Logger logger = MalletLogger
.getLogger(SumLatticeScaling.class.getName());
protected static boolean saveXis = false;
// "ip" == "input position", "op" == "output position", "i" == "state index"
@SuppressWarnings("unchecked")
Sequence input, output;
Transducer t;
double totalWeight;
LatticeNode[][] nodes; // indexed by ip,i
double[] alphaLogScaling, betaLogScaling;
double zLogScaling;
int latticeLength;
double[][] gammas; // indexed by ip,i
double[][][] xis; // indexed by ip,i,j; saved only if saveXis is true;
// Ensure that instances cannot easily be created by a zero arg constructor.
protected SumLatticeScaling() {
}
protected LatticeNode getLatticeNode(int ip, int stateIndex) {
if (nodes[ip][stateIndex] == null)
nodes[ip][stateIndex] = new LatticeNode(ip, t.getState(stateIndex));
return nodes[ip][stateIndex];
}
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input) {
this(trans, input, null, (Transducer.Incrementor) null, saveXis, null);
}
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input, boolean saveXis) {
this(trans, input, null, (Transducer.Incrementor) null, saveXis, null);
}
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input,
Transducer.Incrementor incrementor) {
this(trans, input, null, incrementor, saveXis, null);
}
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input, Sequence output) {
this(trans, input, output, (Transducer.Incrementor) null, saveXis, null);
}
// You may pass null for output, meaning that the lattice
// is not constrained to match the output
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor) {
this(trans, input, output, incrementor, saveXis, null);
}
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor, LabelAlphabet outputAlphabet) {
this(trans, input, output, incrementor, saveXis, outputAlphabet);
}
// You may pass null for output, meaning that the lattice
// is not constrained to match the output
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor, boolean saveXis) {
this(trans, input, output, incrementor, saveXis, null);
}
@SuppressWarnings("unchecked")
public SumLatticeScaling(Transducer trans, Sequence input, Sequence output,
Transducer.Incrementor incrementor, boolean saveXis,
LabelAlphabet outputAlphabet) {
assert (output == null || input.size() == output.size());
// Initialize some structures
this.t = trans;
this.input = input;
this.output = output;
latticeLength = input.size() + 1;
int numStates = t.numStates();
nodes = new LatticeNode[latticeLength][numStates];
alphaLogScaling = new double[latticeLength];
betaLogScaling = new double[latticeLength];
gammas = new double[latticeLength][numStates];
if (saveXis)
xis = new double[latticeLength][numStates][numStates];
double outputCounts[][] = null;
if (outputAlphabet != null)
outputCounts = new double[latticeLength][outputAlphabet.size()];
for (int ip = 0; ip < latticeLength; ip++) {
alphaLogScaling[ip] = 0.0;
betaLogScaling[ip] = 0.0;
for (int i = 0; i < numStates; i++) {
gammas[ip][i] = Transducer.IMPOSSIBLE_WEIGHT;
if (saveXis)
for (int j = 0; j < numStates; j++)
xis[ip][i][j] = Transducer.IMPOSSIBLE_WEIGHT;
}
}
// Forward pass
logger.fine("Starting Foward pass");
boolean atLeastOneInitialState = false;
for (int i = 0; i < numStates; i++) {
double initialWeight = t.getState(i).getInitialWeight();
if (initialWeight > Transducer.IMPOSSIBLE_WEIGHT) {
getLatticeNode(0, i).alpha = Math.exp(initialWeight);
atLeastOneInitialState = true;
}
}
rescaleAlphas(0);
if (atLeastOneInitialState == false)
logger.warning("There are no starting states!");
for (int ip = 0; ip < latticeLength - 1; ip++) {
for (int i = 0; i < numStates; i++) {
if (isInvalidNode(ip, i))
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator(input, ip,
output, ip);
while (iter.hasNext()) {
State destination = iter.next();
LatticeNode destinationNode = getLatticeNode(ip + 1,
destination.getIndex());
if (Double.isNaN(destinationNode.alpha))
destinationNode.alpha = 0;
destinationNode.output = iter.getOutput();
double transitionWeight = iter.getWeight();
destinationNode.alpha += nodes[ip][i].alpha
* Math.exp(transitionWeight);
}
}
// re-scale alphas so that \sum_i \alpha[ip][i] = 1
rescaleAlphas(ip + 1);
}
// Calculate total weight of Lattice. This is the normalizer
double Z = Double.NaN;
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength - 1][i] != null) {
if (Double.isNaN(Z))
Z = 0;
Z += nodes[latticeLength - 1][i].alpha
* Math.exp(t.getState(i).getFinalWeight());
}
zLogScaling = alphaLogScaling[latticeLength - 1];
if (Double.isNaN(Z)) {
totalWeight = Transducer.IMPOSSIBLE_WEIGHT;
return;
} else
totalWeight = Math.log(Z) + zLogScaling;
// Backward pass
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength - 1][i] != null) {
State s = t.getState(i);
nodes[latticeLength - 1][i].beta = Math.exp(s.getFinalWeight());
double gamma = nodes[latticeLength - 1][i].alpha
* nodes[latticeLength - 1][i].beta / Z;
gammas[latticeLength - 1][i] = Math.log(gamma);
if (incrementor != null) {
double p = gamma;
assert (p >= 0.0 && p <= 1.0 + 1e-6) : "p=" + p
+ ", gamma=" + gammas[latticeLength - 1][i];
incrementor.incrementFinalState(s, p);
}
}
rescaleBetas(latticeLength - 1);
for (int ip = latticeLength - 2; ip >= 0; ip--) {
for (int i = 0; i < numStates; i++) {
if (isInvalidNode(ip, i))
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator(input, ip,
output, ip);
double logScaling = alphaLogScaling[ip]
+ betaLogScaling[ip + 1] - zLogScaling;
double pscaling = Math.exp(logScaling);
while (iter.hasNext()) {
State destination = iter.next();
int j = destination.getIndex();
LatticeNode destinationNode = nodes[ip + 1][j];
if (destinationNode != null) {
double transitionWeight = iter.getWeight();
if (Double.isNaN(nodes[ip][i].beta))
nodes[ip][i].beta = 0;
double transitionProb = Math.exp(transitionWeight);
nodes[ip][i].beta += destinationNode.beta
* transitionProb;
double xi = nodes[ip][i].alpha * transitionProb
* nodes[ip + 1][j].beta / Z;
if (saveXis)
xis[ip][i][j] = Math.log(xi) + logScaling;
if (incrementor != null || outputAlphabet != null) {
double p = xi * pscaling;
assert (p >= 0.0 && p <= 1.0 + 1e-6) : "p=" + p
+ ", xis[" + ip + "][" + i + "][" + j
+ "]=" + xi;
if (incrementor != null)
incrementor.incrementTransition(iter, p);
if (outputAlphabet != null) {
int outputIndex = outputAlphabet.lookupIndex(
iter.getOutput(), false);
assert (outputIndex >= 0);
outputCounts[ip][outputIndex] += p;
}
}
}
}
gammas[ip][i] = Math.log(nodes[ip][i].alpha * nodes[ip][i].beta
/ Z)
+ logScaling;
}
// re-scale betas so that they are normalized
rescaleBetas(ip);
}
if (incrementor != null)
for (int i = 0; i < numStates; i++) {
double p = Math.exp(gammas[0][i]);
assert (p >= 0.0 && p <= 1.0 + 1e-6) : "p=" + p;
incrementor.incrementInitialState(t.getState(i), p);
}
}
private boolean isInvalidNode(int ip, int i) {
return nodes[ip][i] == null || Double.isNaN(nodes[ip][i].alpha);
}
private void rescaleAlphas(int ip) {
double sumAlpha = 0;
for (int i = 0; i < t.numStates(); i++) {
if (!isInvalidNode(ip, i))
sumAlpha += nodes[ip][i].alpha;
}
assert sumAlpha > 0 : "Invalid sum over alphas for ip=" + ip;
alphaLogScaling[ip] = Math.log(sumAlpha)
+ (ip == 0 ? 0 : alphaLogScaling[ip - 1]);
for (int i = 0; i < t.numStates(); i++) {
if (!isInvalidNode(ip, i))
nodes[ip][i].alpha /= sumAlpha;
}
}
private void rescaleBetas(int ip) {
double sumBeta = 0;
for (int i = 0; i < t.numStates(); i++) {
if (!isInvalidNode(ip, i))
sumBeta += nodes[ip][i].beta;
}
assert sumBeta > 0 : "Invalid sum over betas for ip=" + ip;
betaLogScaling[ip] = Math.log(sumBeta)
+ (ip == latticeLength - 1 ? 0 : betaLogScaling[ip + 1]);
for (int i = 0; i < t.numStates(); i++) {
if (!isInvalidNode(ip, i))
nodes[ip][i].beta /= sumBeta;
}
}
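// Informal note (added for clarity, not original source): after rescaling, the stored
// alphas and betas each sum to 1 at every input position, and the unscaled values are
// recovered as
// alpha_true[ip][i] = nodes[ip][i].alpha * Math.exp(alphaLogScaling[ip])
// beta_true[ip][i] = nodes[ip][i].beta * Math.exp(betaLogScaling[ip])
// which is exactly what getAlpha() and getBeta() below return.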
public double[][][] getXis() {
return xis;
}
public double[][] getGammas() {
return gammas;
}
public double getTotalWeight() {
return totalWeight;
}
public double getGammaWeight(int inputPosition, State s) {
return gammas[inputPosition][s.getIndex()];
}
public double getGammaWeight(int inputPosition, int stateIndex) {
return gammas[inputPosition][stateIndex];
}
public double getGammaProbability(int inputPosition, State s) {
return Math.exp(gammas[inputPosition][s.getIndex()]);
}
public double getGammaProbability(int inputPosition, int stateIndex) {
return getGammaProbability(inputPosition, t.getState(stateIndex));
}
public double getXiProbability(int ip, State s1, State s2) {
return Math.exp(getXiWeight(ip, s1, s2));
}
public double getXiWeight(int ip, State s1, State s2) {
if (xis == null)
throw new IllegalStateException("xis were not saved.");
int i = s1.getIndex();
int j = s2.getIndex();
return xis[ip][i][j];
}
public int length() {
return latticeLength;
}
public double getAlpha(int ip, State s) {
LatticeNode node = getLatticeNode(ip, s.getIndex());
return node.alpha * Math.exp(alphaLogScaling[ip]);
}
public double getBeta(int ip, State s) {
LatticeNode node = getLatticeNode(ip, s.getIndex());
return node.beta * Math.exp(betaLogScaling[ip]);
}
public LabelVector getLabelingAtPosition(int outputPosition) {
throw new RuntimeException("Not implemented for SumLatticeScaling!");
}
public Transducer getTransducer() {
return t;
}
protected class LatticeNode {
int inputPosition;
State state;
Object output;
double alpha = Double.NaN;
double beta = Double.NaN;
LatticeNode(int inputPosition, State state) {
this.inputPosition = inputPosition;
this.state = state;
}
}
public static class Factory extends SumLatticeFactory implements
Serializable {
@SuppressWarnings("unchecked")
public SumLattice newSumLattice(Transducer trans, Sequence input,
Sequence output, Transducer.Incrementor incrementor,
boolean saveXis, LabelAlphabet outputAlphabet) {
return new SumLatticeScaling(trans, input, output, incrementor,
saveXis, outputAlphabet);
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
@SuppressWarnings("unused")
int version = in.readInt();
}
}
}
| 12,029 | 30.492147 | 78 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFTrainerByL1LabelLikelihood.java
|
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import cc.mallet.optimize.Optimizer;
import cc.mallet.optimize.OrthantWiseLimitedMemoryBFGS;
import cc.mallet.types.InstanceList;
/**
* CRF trainer that implements L1-regularization.
*
* @author Kedar Bellare
*/
public class CRFTrainerByL1LabelLikelihood extends CRFTrainerByLabelLikelihood {
static final double SPARSE_PRIOR = 0.0;
double l1Weight = SPARSE_PRIOR;
public CRFTrainerByL1LabelLikelihood(CRF crf) {
this(crf, SPARSE_PRIOR);
}
/**
* Constructor for CRF trainer.
*
* @param crf
* CRF to train.
* @param l1Weight
* Weight of L1 term in objective (l1Weight*|w|). Higher L1
* weight means sparser solutions.
*/
public CRFTrainerByL1LabelLikelihood(CRF crf, double l1Weight) {
super(crf);
this.l1Weight = l1Weight;
}
public void setL1RegularizationWeight(double l1Weight) {
this.l1Weight = l1Weight;
}
public Optimizer getOptimizer(InstanceList trainingSet) {
getOptimizableCRF(trainingSet);
if (opt == null || ocrf != opt.getOptimizable())
opt = new OrthantWiseLimitedMemoryBFGS(ocrf, l1Weight);
return opt;
}
// Serialization
private static final long serialVersionUID = 1L;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeDouble(l1Weight);
}
private void readObject(ObjectInputStream in) throws IOException {
in.readInt(); // version
l1Weight = in.readDouble();
}
}
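// Illustrative usage sketch (an assumption, not original source); the CRF construction and
// the variable trainingData are hypothetical.
//
// CRF crf = new CRF(trainingData.getPipe(), (Pipe) null);
// crf.addStatesForLabelsConnectedAsIn(trainingData); // assumed state-construction call
// CRFTrainerByL1LabelLikelihood trainer = new CRFTrainerByL1LabelLikelihood(crf, 0.1);
// trainer.train(trainingData, 250); // a larger l1Weight gives a sparser solution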
| 1,629 | 24.076923 | 80 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFCacheStaleIndicator.java
|
package cc.mallet.fst;
/**
* Indicates when the value/gradient becomes stale based on updates to CRF's
* parameters.
*
* @author Gaurav Chandalia
*/
public class CRFCacheStaleIndicator implements CacheStaleIndicator {
protected CRF crf;
protected int cachedValueChangeStamp = -1;
protected int cachedGradientChangeStamp = -1;
public CRFCacheStaleIndicator(CRF crf) {
this.crf = crf;
cachedValueChangeStamp = -1;
cachedGradientChangeStamp = -1;
}
/**
* Returns true if the value is stale, also updates the cacheValueStamp.
*/
public boolean isValueStale() {
if (crf.weightsValueChangeStamp != cachedValueChangeStamp) {
cachedValueChangeStamp = crf.weightsValueChangeStamp;
return true;
}
return false;
}
/**
* Returns true if the gradient is stale, also updates the cacheGradientStamp.
*/
public boolean isGradientStale() {
if (crf.weightsValueChangeStamp != cachedGradientChangeStamp) {
cachedGradientChangeStamp = crf.weightsValueChangeStamp;
return true;
}
return false;
}
}
| 1,041 | 22.681818 | 79 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SimpleTaggerStdin.java
|
/* Copyright (C) 2003 University of Pennsylvania.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.fst;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Reader;
import java.io.StringReader;
import java.util.Scanner;
import java.util.ArrayList;
import java.util.Random;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import cc.mallet.types.Alphabet;
import cc.mallet.types.AugmentableFeatureVector;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelSequence;
import cc.mallet.types.Sequence;
import cc.mallet.fst.confidence.ConstrainedForwardBackwardConfidenceEstimator;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.util.CommandOption;
import cc.mallet.util.MalletLogger;
/**
* This class's main method trains, tests, or runs a generic CRF-based
* sequence tagger.
* <p>
* Training and test files consist of blocks of lines, one block for each instance,
* separated by blank lines. Each block of lines should have the first form
* specified for the input of {@link SimpleTaggerSentence2FeatureVectorSequence}.
* A variety of command line options control the operation of the main program, as
* described in the comments for {@link #main main}.
*
* @author Fernando Pereira <a href="mailto:[email protected]">[email protected]</a>
* @version 1.0
*/
public class SimpleTaggerStdin
{
private static Logger logger =
MalletLogger.getLogger(SimpleTaggerStdin.class.getName());
/**
* No <code>SimpleTaggerStdin</code> objects allowed.
*/
private SimpleTaggerStdin()
{
}
/**
* Converts an external encoding of a sequence of elements with binary
* features to a {@link FeatureVectorSequence}. If target processing
* is on (training or labeled test data), it extracts element labels
* from the external encoding to create a target {@link LabelSequence}.
* Two external encodings are supported:
* <ol>
* <li> A {@link String} containing lines of whitespace-separated tokens.</li>
* <li> a {@link String}<code>[][]</code>.</li>
* </ol>
*
* Both represent rows of tokens. When target processing is on, the last token
* in each row is the label of the sequence element represented by
* this row. All other tokens in the row, or all tokens in the row if
* not target processing, are the names of features that are on for
* the sequence element described by the row.
*
*/
public static class SimpleTaggerSentence2FeatureVectorSequence extends Pipe
{
// gdruck
// Previously, there was no serialVersionUID. This is the ID that would
// have been automatically generated by the compiler. Therefore,
// other changes should not break serialization.
private static final long serialVersionUID = -2059308802200728625L;
/**
* Creates a new
* <code>SimpleTaggerSentence2FeatureVectorSequence</code> instance.
*/
public SimpleTaggerSentence2FeatureVectorSequence ()
{
super (new Alphabet(), new LabelAlphabet());
}
/**
* Parses a string representing a sequence of rows of tokens into an
* array of arrays of tokens.
*
* @param sentence a <code>String</code>
* @return the corresponding array of arrays of tokens.
*/
private String[][] parseSentence(String sentence)
{
String[] lines = sentence.split("\n");
String[][] tokens = new String[lines.length][];
for (int i = 0; i < lines.length; i++)
tokens[i] = lines[i].split(" ");
return tokens;
}
public Instance pipe (Instance carrier)
{
Object inputData = carrier.getData();
Alphabet features = getDataAlphabet();
LabelAlphabet labels;
LabelSequence target = null;
String [][] tokens;
if (inputData instanceof String)
tokens = parseSentence((String)inputData);
else if (inputData instanceof String[][])
tokens = (String[][])inputData;
else
throw new IllegalArgumentException("Not a String or String[][]; got "+inputData);
FeatureVector[] fvs = new FeatureVector[tokens.length];
if (isTargetProcessing())
{
labels = (LabelAlphabet)getTargetAlphabet();
target = new LabelSequence (labels, tokens.length);
}
for (int l = 0; l < tokens.length; l++) {
int nFeatures;
if (isTargetProcessing())
{
if (tokens[l].length < 1)
throw new IllegalStateException ("Missing label at line " + l + " instance "+carrier.getName ());
nFeatures = tokens[l].length - 1;
target.add(tokens[l][nFeatures]);
}
else nFeatures = tokens[l].length;
ArrayList<Integer> featureIndices = new ArrayList<Integer>();
for (int f = 0; f < nFeatures; f++) {
int featureIndex = features.lookupIndex(tokens[l][f]);
// gdruck
// If the data alphabet's growth is stopped, featureIndex
// will be -1. Ignore these features.
if (featureIndex >= 0) {
featureIndices.add(featureIndex);
}
}
int[] featureIndicesArr = new int[featureIndices.size()];
for (int index = 0; index < featureIndices.size(); index++) {
featureIndicesArr[index] = featureIndices.get(index);
}
fvs[l] = featureInductionOption.value ? new AugmentableFeatureVector(features, featureIndicesArr, null, featureIndicesArr.length) :
new FeatureVector(features, featureIndicesArr);
}
carrier.setData(new FeatureVectorSequence(fvs));
if (isTargetProcessing())
carrier.setTarget(target);
else
carrier.setTarget(new LabelSequence(getTargetAlphabet()));
return carrier;
}
}
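// Illustrative input block (hypothetical feature and label names, not original source),
// in the format this pipe expects: each line describes one sequence element as its
// feature names followed by its label (when target processing is on); instances are
// separated by blank lines.
//
// Bill CAPITALIZED SUFFIX-ll B-PER
// slept LOWERCASE O
// here LOWERCASE O
//
// (blank line, then the next instance)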
private static final CommandOption.Double gaussianVarianceOption = new CommandOption.Double
(SimpleTaggerStdin.class, "gaussian-variance", "DECIMAL", true, 10.0,
"The gaussian prior variance used for training.", null);
private static final CommandOption.Boolean trainOption = new CommandOption.Boolean
(SimpleTaggerStdin.class, "train", "true|false", true, false,
"Whether to train", null);
private static final CommandOption.String testOption = new CommandOption.String
(SimpleTaggerStdin.class, "test", "lab or seg=start-1.continue-1,...,start-n.continue-n",
true, null,
"Test measuring labeling or segmentation (start-i, continue-i) accuracy", null);
private static final CommandOption.String stdinOption = new CommandOption.String
(SimpleTaggerStdin.class, "stdin", "true",
true, null,
"Read data from stdin (seperated by double newlines)", null);
private static final CommandOption.File modelOption = new CommandOption.File
(SimpleTaggerStdin.class, "model-file", "FILENAME", true, null,
"The filename for reading (train/run) or saving (train) the model.", null);
private static final CommandOption.Double trainingFractionOption = new CommandOption.Double
(SimpleTaggerStdin.class, "training-proportion", "DECIMAL", true, 0.5,
"Fraction of data to use for training in a random split.", null);
private static final CommandOption.Integer randomSeedOption = new CommandOption.Integer
(SimpleTaggerStdin.class, "random-seed", "INTEGER", true, 0,
"The random seed for randomly selecting a proportion of the instance list for training", null);
private static final CommandOption.IntegerArray ordersOption = new CommandOption.IntegerArray
(SimpleTaggerStdin.class, "orders", "COMMA-SEP-DECIMALS", true, new int[]{1},
"List of label Markov orders (main and backoff) ", null);
private static final CommandOption.String forbiddenOption = new CommandOption.String(
SimpleTaggerStdin.class, "forbidden", "REGEXP", true,
"\\s", "label1,label2 transition forbidden if it matches this", null);
private static final CommandOption.String allowedOption = new CommandOption.String(
SimpleTaggerStdin.class, "allowed", "REGEXP", true,
".*", "label1,label2 transition allowed only if it matches this", null);
private static final CommandOption.String defaultOption = new CommandOption.String(
SimpleTaggerStdin.class, "default-label", "STRING", true, "O",
"Label for initial context and uninteresting tokens", null);
private static final CommandOption.Integer iterationsOption = new CommandOption.Integer(
SimpleTaggerStdin.class, "iterations", "INTEGER", true, 500,
"Number of training iterations", null);
private static final CommandOption.Boolean viterbiOutputOption = new CommandOption.Boolean(
SimpleTaggerStdin.class, "viterbi-output", "true|false", true, false,
"Print Viterbi periodically during training", null);
private static final CommandOption.Boolean connectedOption = new CommandOption.Boolean(
SimpleTaggerStdin.class, "fully-connected", "true|false", true, true,
"Include all allowed transitions, even those not in training data", null);
private static final CommandOption.String weightsOption = new CommandOption.String(
SimpleTaggerStdin.class, "weights", "sparse|some-dense|dense", true, "some-dense",
"Use sparse, some dense (using a heuristic), or dense features on transitions.", null);
private static final CommandOption.Boolean continueTrainingOption = new CommandOption.Boolean(
SimpleTaggerStdin.class, "continue-training", "true|false", false, false,
"Continue training from model specified by --model-file", null);
private static final CommandOption.Integer nBestOption = new CommandOption.Integer(
SimpleTaggerStdin.class, "n-best", "INTEGER", true, 1,
"How many answers to output", null);
private static final CommandOption.Integer cacheSizeOption = new CommandOption.Integer(
SimpleTaggerStdin.class, "cache-size", "INTEGER", true, 100000,
"How much state information to memoize in n-best decoding", null);
private static final CommandOption.Boolean includeInputOption = new CommandOption.Boolean(
SimpleTaggerStdin.class, "include-input", "true|false", true, false,
"Whether to include the input features when printing decoding output", null);
private static final CommandOption.Boolean featureInductionOption = new CommandOption.Boolean(
SimpleTaggerStdin.class, "feature-induction", "true|false", true, false,
"Whether to perform feature induction during training", null);
private static final CommandOption.Integer numThreads = new CommandOption.Integer(
SimpleTaggerStdin.class, "threads", "INTEGER", true, 1,
"Number of threads to use for CRF training.", null);
private static final CommandOption.List commandOptions =
new CommandOption.List (
"Training, testing and running a generic tagger.",
new CommandOption[] {
gaussianVarianceOption,
trainOption,
iterationsOption,
testOption,
stdinOption,
trainingFractionOption,
modelOption,
randomSeedOption,
ordersOption,
forbiddenOption,
allowedOption,
defaultOption,
viterbiOutputOption,
connectedOption,
weightsOption,
continueTrainingOption,
nBestOption,
cacheSizeOption,
includeInputOption,
featureInductionOption,
numThreads
});
/**
* Create and train a CRF model from the given training data,
* optionally testing it on the given test data.
*
* @param training training data
* @param testing test data (possibly <code>null</code>)
* @param eval accuracy evaluator (possibly <code>null</code>)
* @param orders label Markov orders (main and backoff)
* @param defaultLabel default label
* @param forbidden regular expression specifying impossible label
* transitions <em>current</em><code>,</code><em>next</em>
* (<code>null</code> indicates no forbidden transitions)
* @param allowed regular expression specifying allowed label transitions
* (<code>null</code> indicates everything is allowed that is not forbidden)
* @param connected whether to include even transitions not
* occurring in the training data.
* @param iterations number of training iterations
   * @param var Gaussian prior variance
   * @param crf a <code>CRF</code> to continue training, or <code>null</code> to create a new one
   * @return the trained model
*/
public static CRF train(InstanceList training, InstanceList testing,
TransducerEvaluator eval, int[] orders,
String defaultLabel,
String forbidden, String allowed,
boolean connected, int iterations, double var, CRF crf)
{
Pattern forbiddenPat = Pattern.compile(forbidden);
Pattern allowedPat = Pattern.compile(allowed);
if (crf == null) {
crf = new CRF(training.getPipe(), (Pipe)null);
String startName =
crf.addOrderNStates(training, orders, null,
defaultLabel, forbiddenPat, allowedPat,
connected);
for (int i = 0; i < crf.numStates(); i++)
crf.getState(i).setInitialWeight (Transducer.IMPOSSIBLE_WEIGHT);
crf.getState(startName).setInitialWeight(0.0);
}
logger.info("Training on " + training.size() + " instances");
if (testing != null)
logger.info("Testing on " + testing.size() + " instances");
assert(numThreads.value > 0);
if (numThreads.value > 1) {
CRFTrainerByThreadedLabelLikelihood crft = new CRFTrainerByThreadedLabelLikelihood (crf,numThreads.value);
crft.setGaussianPriorVariance(var);
if (weightsOption.value.equals("dense")) {
crft.setUseSparseWeights(false);
crft.setUseSomeUnsupportedTrick(false);
}
else if (weightsOption.value.equals("some-dense")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(true);
}
else if (weightsOption.value.equals("sparse")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(false);
}
else {
throw new RuntimeException("Unknown weights option: " + weightsOption.value);
}
if (featureInductionOption.value) {
throw new IllegalArgumentException("Multi-threaded feature induction is not yet supported.");
} else {
boolean converged;
for (int i = 1; i <= iterations; i++) {
converged = crft.train (training, 1);
if (i % 1 == 0 && eval != null) // Change the 1 to higher integer to evaluate less often
eval.evaluate(crft);
if (viterbiOutputOption.value && i % 10 == 0)
new ViterbiWriter("", new InstanceList[] {training, testing}, new String[] {"training", "testing"}).evaluate(crft);
if (converged)
break;
}
}
crft.shutdown();
}
else {
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf);
crft.setGaussianPriorVariance(var);
if (weightsOption.value.equals("dense")) {
crft.setUseSparseWeights(false);
crft.setUseSomeUnsupportedTrick(false);
}
else if (weightsOption.value.equals("some-dense")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(true);
}
else if (weightsOption.value.equals("sparse")) {
crft.setUseSparseWeights(true);
crft.setUseSomeUnsupportedTrick(false);
}
else {
throw new RuntimeException("Unknown weights option: " + weightsOption.value);
}
if (featureInductionOption.value) {
crft.trainWithFeatureInduction(training, null, testing, eval, iterations, 10, 20, 500, 0.5, false, null);
} else {
boolean converged;
for (int i = 1; i <= iterations; i++) {
converged = crft.train (training, 1);
if (i % 1 == 0 && eval != null) // Change the 1 to higher integer to evaluate less often
eval.evaluate(crft);
if (viterbiOutputOption.value && i % 10 == 0)
new ViterbiWriter("", new InstanceList[] {training, testing}, new String[] {"training", "testing"}).evaluate(crft);
if (converged)
break;
}
}
}
return crf;
}
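  // Illustrative usage of train() (a sketch, not part of the original source). The instance
  // lists, evaluator, and output filename are assumed to be set up as in main() below; the
  // argument values mirror this class's command-line defaults.
  //
  //   CRF crf = train(trainingData, testData, eval,
  //       new int[] {1},            // first-order label dependencies
  //       "O", "\\s", ".*",         // default label; forbidden/allowed transition patterns
  //       true, 500, 10.0, null);   // fully connected, 500 iterations, variance 10, new CRF
  //   ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream("tagger.model"));
  //   out.writeObject(crf);
  //   out.close();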
/**
* Test a transducer on the given test data, evaluating accuracy
   * with the given evaluator.
   *
   * @param tt a <code>TransducerTrainer</code> wrapping the transducer to evaluate
* @param eval accuracy evaluator
* @param testing test data
*/
public static void test(TransducerTrainer tt, TransducerEvaluator eval,
InstanceList testing)
{
eval.evaluateInstanceList(tt, testing, "Testing");
}
/**
* Apply a transducer to an input sequence to produce the k highest-scoring
* output sequences.
*
* @param model the <code>Transducer</code>
* @param input the input sequence
* @param k the number of answers to return
* @return array of the k highest-scoring output sequences
*/
public static Sequence[] apply(Transducer model, Sequence input, int k)
{
Sequence[] answers;
if (k == 1) {
answers = new Sequence[1];
answers[0] = model.transduce (input);
}
else {
MaxLatticeDefault lattice =
new MaxLatticeDefault (model, input, null, cacheSizeOption.value());
answers = lattice.bestOutputSequences(k).toArray(new Sequence[0]);
}
return answers;
}
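  // Illustrative n-best decoding sketch (assumes a trained CRF and an instance that has
  // already been run through the model's pipe, as in main() below):
  //
  //   Sequence input = (Sequence) instance.getData();
  //   Sequence[] best = apply(crf, input, 3);   // three highest-scoring label sequences
  //   for (int a = 0; a < best.length; a++)
  //     System.out.println(best[a]);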
/**
* Command-line wrapper to train, test, or run a generic CRF-based tagger.
*
* @param args the command line arguments. Options (shell and Java quoting should be added as needed):
*<dl>
*<dt><code>--help</code> <em>boolean</em></dt>
*<dd>Print this command line option usage information. Give <code>true</code> for longer documentation. Default is <code>false</code>.</dd>
*<dt><code>--prefix-code</code> <em>Java-code</em></dt>
*<dd>Java code you want run before any other interpreted code. Note that the text is interpreted without modification, so unlike some other Java code options, you need to include any necessary 'new's. Default is null.</dd>
*<dt><code>--gaussian-variance</code> <em>positive-number</em></dt>
*<dd>The Gaussian prior variance used for training. Default is 10.0.</dd>
*<dt><code>--train</code> <em>boolean</em></dt>
*<dd>Whether to train. Default is <code>false</code>.</dd>
*<dt><code>--iterations</code> <em>positive-integer</em></dt>
*<dd>Number of training iterations. Default is 500.</dd>
*<dt><code>--test</code> <code>lab</code> or <code>seg=</code><em>start-1</em><code>.</code><em>continue-1</em><code>,</code>...<code>,</code><em>start-n</em><code>.</code><em>continue-n</em></dt>
*<dd>Test measuring labeling or segmentation (<em>start-i</em>, <em>continue-i</em>) accuracy. Default is no testing.</dd>
*<dt><code>--training-proportion</code> <em>number-between-0-and-1</em></dt>
*<dd>Fraction of data to use for training in a random split. Default is 0.5.</dd>
*<dt><code>--model-file</code> <em>filename</em></dt>
*<dd>The filename for reading (train/run) or saving (train) the model. Default is null.</dd>
*<dt><code>--random-seed</code> <em>integer</em></dt>
 *<dd>The random seed for randomly selecting a proportion of the instance list for training. Default is 0.</dd>
*<dt><code>--orders</code> <em>comma-separated-integers</em></dt>
 *<dd>List of label Markov orders (main and backoff). Default is 1.</dd>
*<dt><code>--forbidden</code> <em>regular-expression</em></dt>
*<dd>If <em>label-1</em><code>,</code><em>label-2</em> matches the expression, the corresponding transition is forbidden. Default is <code>\\s</code> (nothing forbidden).</dd>
*<dt><code>--allowed</code> <em>regular-expression</em></dt>
 *<dd>If <em>label-1</em><code>,</code><em>label-2</em> does not match the expression, the corresponding transition is forbidden. Default is <code>.*</code> (everything allowed).</dd>
*<dt><code>--default-label</code> <em>string</em></dt>
*<dd>Label for initial context and uninteresting tokens. Default is <code>O</code>.</dd>
*<dt><code>--viterbi-output</code> <em>boolean</em></dt>
*<dd>Print Viterbi periodically during training. Default is <code>false</code>.</dd>
*<dt><code>--fully-connected</code> <em>boolean</em></dt>
*<dd>Include all allowed transitions, even those not in training data. Default is <code>true</code>.</dd>
*<dt><code>--weights</code> <em>sparse|some-dense|dense</em></dt>
*<dd>Create sparse, some dense (using a heuristic), or dense features on transitions. Default is <code>some-dense</code>.</dd>
*<dt><code>--n-best</code> <em>positive-integer</em></dt>
*<dd>Number of answers to output when applying model. Default is 1.</dd>
*<dt><code>--include-input</code> <em>boolean</em></dt>
*<dd>Whether to include input features when printing decoding output. Default is <code>false</code>.</dd>
*<dt><code>--threads</code> <em>positive-integer</em></dt>
*<dd>Number of threads for CRF training. Default is 1.</dd>
*</dl>
* Remaining arguments:
*<ul>
*<li><em>training-data-file</em> if training </li>
*<li><em>training-and-test-data-file</em>, if training and testing with random split</li>
*<li><em>training-data-file</em> <em>test-data-file</em> if training and testing from separate files</li>
*<li><em>test-data-file</em> if testing</li>
*<li><em>input-data-file</em> if applying to new data (unlabeled)</li>
*</ul>
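 *<p>
 * A hypothetical training invocation (the package path and file names below are illustrative,
 * not taken from this source):
 *<pre>
 * java cc.mallet.fst.SimpleTaggerStdin --train true --model-file tagger.model \
 *      --iterations 200 --threads 2 training.txt
 *</pre>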
* @exception Exception if an error occurs
*/
public static void main (String[] args) throws Exception
{
Reader trainingFile = null, testFile = null;
InstanceList trainingData = null, testData = null;
int numEvaluations = 0;
int iterationsBetweenEvals = 16;
int restArgs = commandOptions.processOptions(args);
Pipe p = null;
CRF crf = null;
TransducerEvaluator eval = null;
if (continueTrainingOption.value || !trainOption.value) {
if (modelOption.value == null)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Missing model file option");
}
ObjectInputStream s =
new ObjectInputStream(new FileInputStream(modelOption.value));
crf = (CRF) s.readObject();
s.close();
p = crf.getInputPipe();
}
else {
p = new SimpleTaggerSentence2FeatureVectorSequence();
p.getTargetAlphabet().lookupIndex(defaultOption.value);
}
if (trainOption.value)
{
p.setTargetProcessing(true);
trainingData = new InstanceList(p);
trainingData.addThruPipe(
new LineGroupIterator(trainingFile,
Pattern.compile("^\\s*$"), true));
logger.info
("Number of features in training data: "+p.getDataAlphabet().size());
if (testOption.value != null)
{
if (testFile != null)
{
testData = new InstanceList(p);
testData.addThruPipe(
new LineGroupIterator(testFile,
Pattern.compile("^\\s*$"), true));
}
else
{
Random r = new Random (randomSeedOption.value);
InstanceList[] trainingLists =
trainingData.split(
r, new double[] {trainingFractionOption.value,
1-trainingFractionOption.value});
trainingData = trainingLists[0];
testData = trainingLists[1];
}
}
} else if (testOption.value != null)
{
p.setTargetProcessing(true);
testData = new InstanceList(p);
testData.addThruPipe(
new LineGroupIterator(testFile,
Pattern.compile("^\\s*$"), true));
} else
{
p.setTargetProcessing(false);
testData = new InstanceList(p);
//testData.addThruPipe(
// new LineGroupIterator(testFile,
// Pattern.compile("^\\s*$"), true));
}
//logger.info ("Number of predicates: "+p.getDataAlphabet().size());
if (testOption.value != null)
{
if (testOption.value.startsWith("lab"))
eval = new TokenAccuracyEvaluator(new InstanceList[] {trainingData, testData}, new String[] {"Training", "Testing"});
else if (testOption.value.startsWith("seg="))
{
String[] pairs = testOption.value.substring(4).split(",");
if (pairs.length < 1)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException(
"Missing segment start/continue labels: " + testOption.value);
}
String startTags[] = new String[pairs.length];
String continueTags[] = new String[pairs.length];
for (int i = 0; i < pairs.length; i++)
{
String[] pair = pairs[i].split("\\.");
if (pair.length != 2)
{
commandOptions.printUsage(true);
throw new
IllegalArgumentException(
"Incorrectly-specified segment start and end labels: " +
pairs[i]);
}
startTags[i] = pair[0];
continueTags[i] = pair[1];
}
eval = new MultiSegmentationEvaluator(new InstanceList[] {trainingData, testData}, new String[] {"Training", "Testing"},
startTags, continueTags);
}
else
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Invalid test option: " +
testOption.value);
}
}
if (p.isTargetProcessing())
{
Alphabet targets = p.getTargetAlphabet();
StringBuffer buf = new StringBuffer("Labels:");
for (int i = 0; i < targets.size(); i++)
buf.append(" ").append(targets.lookupObject(i).toString());
logger.info(buf.toString());
}
if (trainOption.value)
{
crf = train(trainingData, testData, eval,
ordersOption.value, defaultOption.value,
forbiddenOption.value, allowedOption.value,
connectedOption.value, iterationsOption.value,
gaussianVarianceOption.value, crf);
if (modelOption.value != null)
{
ObjectOutputStream s =
new ObjectOutputStream(new FileOutputStream(modelOption.value));
s.writeObject(crf);
s.close();
}
}
else
{
if (crf == null)
{
if (modelOption.value == null)
{
commandOptions.printUsage(true);
throw new IllegalArgumentException("Missing model file option");
}
ObjectInputStream s =
new ObjectInputStream(new FileInputStream(modelOption.value));
crf = (CRF) s.readObject();
s.close();
}
if (eval != null)
test(new NoopTransducerTrainer(crf), eval, testData);
else
{
boolean includeInput = includeInputOption.value();
Scanner scanner = new Scanner(System.in);
Pattern pattern = Pattern.compile("^\\s*$");
int nLines = 0;
while (scanner.hasNextLine()) {
String line = scanner.nextLine();
line = line.replace('\t', '\n');
testData = new InstanceList(p);
testData.addThruPipe(new LineGroupIterator(new StringReader(line),
pattern, true));
for (int i = 0; i < testData.size(); i++) {
Sequence input = (Sequence)testData.get(i).getData();
Sequence[] outputs = apply(crf, input, nBestOption.value);
int k = outputs.length;
boolean error = false;
for (int a = 0; a < k; a++) {
if (outputs[a].size() != input.size()) {
logger.info("Failed to decode input sequence " + i + ", answer " + a);
error = true;
}
}
if (!error) {
ConstrainedForwardBackwardConfidenceEstimator cfb = new ConstrainedForwardBackwardConfidenceEstimator(crf);
SumLatticeDefault lattice = new SumLatticeDefault (cfb.getTransducer(), input);
double conf = 0;
for (int j = 0; j < input.size(); j++) {
StringBuffer buf = new StringBuffer();
for (int a = 0; a < k; a++) {
String tag = outputs[a].get(j).toString();
              // Let's get the confidence if this is an entity
if(tag.startsWith("B-")) {
int endTagIdx = j+1;
while(endTagIdx < input.size() && outputs[a].get(endTagIdx).toString().startsWith("I-")) {
endTagIdx++;
}
endTagIdx -= 1;
Segment s = new Segment(input, outputs[a], outputs[a], j, endTagIdx, tag, outputs[a].get(endTagIdx));
conf = cfb.estimateConfidenceFor(s, lattice);
}
if(!tag.equals("O")) {
tag = tag + ":" + Double.toString(conf);
}
buf.append(tag).append(" ");
}
if (includeInput) {
FeatureVector fv = (FeatureVector)input.get(j);
buf.append(fv.toString(true));
}
System.out.println(buf.toString());
}
//System.out.println();
}
}
testData = null;
//nLines++;
//if(nLines % 1000 == 0) {
//System.gc();
//}
}
}
}
}
}
| 30,337 | 40.615912 | 226 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/MEMMTrainer.java
|
package cc.mallet.fst;
import java.util.BitSet;
import java.util.logging.Logger;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.fst.MEMM.State;
import cc.mallet.fst.MEMM.TransitionIterator;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.Optimizer;
import cc.mallet.util.MalletLogger;
/**
* Trains and evaluates a {@link MEMM}.
*/
public class MEMMTrainer extends TransducerTrainer
{
private static Logger logger = MalletLogger.getLogger(MEMMTrainer.class.getName());
MEMM memm;
private boolean gatheringTrainingData = false;
// After training sets have been gathered in the states, record which
	// InstanceList we've gathered them for, so we don't double-count instances.
private InstanceList trainingGatheredFor;
// gsc: user is supposed to set the weights manually, so this flag is not needed
// boolean useSparseWeights = true;
MEMMOptimizableByLabelLikelihood omemm;
public MEMMTrainer (MEMM memm) {
this.memm = memm;
}
public MEMMOptimizableByLabelLikelihood getOptimizableMEMM (InstanceList trainingSet) {
return new MEMMOptimizableByLabelLikelihood (memm, trainingSet);
}
// public MEMMTrainer setUseSparseWeights (boolean f) { useSparseWeights = f; return this; }
/**
* Trains a MEMM until convergence.
*/
public boolean train (InstanceList training) {
return train (training, Integer.MAX_VALUE);
}
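	// Illustrative usage (a sketch; pipe and instance-list construction are assumed to happen
	// elsewhere, the MEMM constructor/state-setup calls shown mirror those of CRF, and the
	// weight dimensions must be set by the caller, as noted above):
	//
	//   MEMM memm = new MEMM(pipe, null);
	//   memm.addFullyConnectedStatesForLabels();
	//   memm.setWeightsDimensionAsIn(trainingData, false);
	//   MEMMTrainer trainer = new MEMMTrainer(memm);
	//   boolean converged = trainer.train(trainingData, 100);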
/**
	 * Trains a MEMM for the specified number of iterations or until convergence, whichever
	 * occurs first; returns true if training converged within the specified number of iterations.
*/
public boolean train (InstanceList training, int numIterations)
{
if (numIterations <= 0)
return false;
assert (training.size() > 0);
// Allocate space for the parameters, and place transition FeatureVectors in
// per-source-state InstanceLists.
// Here, gatheringTrainingSets will be true, and these methods will result
		// in new InstanceLists being created in each source state, and the FeatureVectors
// of their outgoing transitions to be added to them as the data field in the Instances.
if (trainingGatheredFor != training) {
gatherTrainingSets (training);
}
// gsc: the user has to set the weights manually
// if (useSparseWeights) {
// memm.setWeightsDimensionAsIn (training, false);
// } else {
// memm.setWeightsDimensionDensely ();
// }
/*
if (false) {
// Expectation-based placement of training data would go here.
for (int i = 0; i < training.size(); i++) {
Instance instance = training.get(i);
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
// Do it for the paths consistent with the labels...
gatheringConstraints = true;
new SumLatticeDefault (this, input, output, true);
// ...and also do it for the paths selected by the current model (so we will get some negative weights)
gatheringConstraints = false;
if (this.someTrainingDone)
// (do this once some training is done)
new SumLatticeDefault (this, input, null, true);
}
gatheringWeightsPresent = false;
SparseVector[] newWeights = new SparseVector[weights.length];
for (int i = 0; i < weights.length; i++) {
int numLocations = weightsPresent[i].cardinality ();
logger.info ("CRF weights["+weightAlphabet.lookupObject(i)+"] num features = "+numLocations);
int[] indices = new int[numLocations];
for (int j = 0; j < numLocations; j++) {
indices[j] = weightsPresent[i].nextSetBit (j == 0 ? 0 : indices[j-1]+1);
//System.out.println ("CRF4 has index "+indices[j]);
}
newWeights[i] = new IndexedSparseVector (indices, new double[numLocations],
numLocations, numLocations, false, false, false);
newWeights[i].plusEqualsSparse (weights[i]);
}
weights = newWeights;
}
*/
omemm = new MEMMOptimizableByLabelLikelihood (memm, training);
// Gather the constraints
omemm.gatherExpectationsOrConstraints (true);
Optimizer maximizer = new LimitedMemoryBFGS(omemm);
int i;
// boolean continueTraining = true;
boolean converged = false;
logger.info ("CRF about to train with "+numIterations+" iterations");
for (i = 0; i < numIterations; i++) {
try {
converged = maximizer.optimize (1);
logger.info ("CRF finished one iteration of maximizer, i="+i);
runEvaluators();
} catch (IllegalArgumentException e) {
e.printStackTrace();
logger.info ("Catching exception; saying converged.");
converged = true;
}
if (converged) {
logger.info ("CRF training has converged, i="+i);
break;
}
}
logger.info ("About to setTrainable(false)");
return converged;
}
void gatherTrainingSets (InstanceList training)
{
if (trainingGatheredFor != null) {
// It would be easy enough to support this, just go through all the states and set trainingSet to null.
throw new UnsupportedOperationException ("Training with multiple sets not supported.");
}
trainingGatheredFor = training;
for (int i = 0; i < training.size(); i++) {
Instance instance = training.get(i);
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
// Do it for the paths consistent with the labels...
new SumLatticeDefault (memm, input, output, new Transducer.Incrementor() {
public void incrementFinalState(Transducer.State s, double count) { }
public void incrementInitialState(Transducer.State s, double count) { }
public void incrementTransition(Transducer.TransitionIterator ti, double count) {
MEMM.State source = (MEMM.State) ti.getSourceState();
if (count != 0) {
// Create the source state's trainingSet if it doesn't exist yet.
if (source.trainingSet == null)
// New InstanceList with a null pipe, because it doesn't do any processing of input.
source.trainingSet = new InstanceList (null);
						// TODO We should make sure we don't add duplicates (through a second call to setWeightsDimension...)!
// TODO Note that when the training data still allows ambiguous outgoing transitions
// this will add the same FV more than once to the source state's trainingSet, each
// with >1.0 weight. Not incorrect, but inefficient.
// System.out.println ("From: "+source.getName()+" ---> "+getOutput()+" : "+getInput());
source.trainingSet.add (new Instance(ti.getInput (), ti.getOutput (), null, null), count);
}
}
});
}
}
/**
* Not implemented yet.
*
* @throws UnsupportedOperationException
*/
public boolean train (InstanceList training, InstanceList validation, InstanceList testing,
TransducerEvaluator eval, int numIterations,
int numIterationsPerProportion,
double[] trainingProportions)
{
throw new UnsupportedOperationException();
}
/**
* Not implemented yet.
*
* @throws UnsupportedOperationException
*/
public boolean trainWithFeatureInduction (InstanceList trainingData,
InstanceList validationData, InstanceList testingData,
TransducerEvaluator eval, int numIterations,
int numIterationsBetweenFeatureInductions,
int numFeatureInductions,
int numFeaturesPerFeatureInduction,
double trueLabelProbThreshold,
boolean clusteredFeatureInduction,
double[] trainingProportions,
String gainName)
{
throw new UnsupportedOperationException();
}
public void printInstanceLists ()
{
for (int i = 0; i < memm.numStates(); i++) {
State state = (State) memm.getState (i);
InstanceList training = state.trainingSet;
System.out.println ("State "+i+" : "+state.getName());
if (training == null) {
System.out.println ("No data");
continue;
}
for (int j = 0; j < training.size(); j++) {
Instance inst = training.get (j);
System.out.println ("From : "+state.getName()+" To : "+inst.getTarget());
System.out.println ("Instance "+j);
System.out.println (inst.getTarget());
System.out.println (inst.getData());
}
}
}
/**
* Represents the terms in the objective function.
* <p>
* The weights are trained by matching the expectations of the model to the observations gathered from the data.
*/
@SuppressWarnings("serial")
public class MEMMOptimizableByLabelLikelihood extends CRFOptimizableByLabelLikelihood implements Optimizable.ByGradientValue
{
BitSet infiniteValues = null;
protected MEMMOptimizableByLabelLikelihood (MEMM memm, InstanceList trainingData)
{
super (memm, trainingData);
expectations = new CRF.Factors (memm);
constraints = new CRF.Factors (memm);
}
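		// Note (editorial, a sketch of the idea rather than a guarantee about this code):
		// as in standard label-likelihood training of log-linear models, the quantity being
		// optimized is the penalized log-likelihood, and the gradient for each weight is
		// roughly "constraint minus expectation" (empirical feature count minus model feature
		// count) plus the Gaussian prior term; the two Factors objects allocated above hold
		// those two sets of counts.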
// if constraints=false, return log probability of the training labels
protected double gatherExpectationsOrConstraints (boolean gatherConstraints)
{
// Instance values must either always or never be included in
// the total values; we can't just sometimes skip a value
// because it is infinite, this throws off the total values.
boolean initializingInfiniteValues = false;
CRF.Factors factors = gatherConstraints ? constraints : expectations;
CRF.Factors.Incrementor factorIncrementor = factors.new Incrementor ();
if (infiniteValues == null) {
infiniteValues = new BitSet ();
initializingInfiniteValues = true;
}
double labelLogProb = 0;
for (int i = 0; i < memm.numStates(); i++) {
MEMM.State s = (State) memm.getState (i);
if (s.trainingSet == null) {
System.out.println ("Empty training set for state "+s.name);
continue;
}
for (int j = 0; j < s.trainingSet.size(); j++) {
Instance instance = s.trainingSet.get (j);
double instWeight = s.trainingSet.getInstanceWeight (j);
FeatureVector fv = (FeatureVector) instance.getData ();
String labelString = (String) instance.getTarget ();
TransitionIterator iter = new TransitionIterator (s, fv, gatherConstraints?labelString:null, memm);
while (iter.hasNext ()) {
// gsc
iter.nextState(); // advance the iterator
// State destination = (MEMM.State) iter.nextState(); // Just to advance the iterator
double weight = iter.getWeight();
factorIncrementor.incrementTransition(iter, Math.exp(weight) * instWeight);
//iter.incrementCount (Math.exp(weight) * instWeight);
if (!gatherConstraints && iter.getOutput() == labelString) {
if (!Double.isInfinite (weight))
labelLogProb += instWeight * weight; // xxx ?????
else {
logger.warning ("State "+i+" transition "+j+" has infinite cost; skipping.");
if (initializingInfiniteValues)
throw new IllegalStateException ("Infinite-cost transitions not yet supported"); //infiniteValues.set (j);
else if (!infiniteValues.get(j))
throw new IllegalStateException ("Instance i used to have non-infinite value, "
+"but now it has infinite value.");
}
}
}
}
}
// Force initial & final weight parameters to 0 by making sure that
// whether factor refers to expectation or constraint, they have the same value.
for (int i = 0; i < memm.numStates(); i++) {
factors.initialWeights[i] = 0.0;
factors.finalWeights[i] = 0.0;
}
return labelLogProb;
}
// log probability of the training sequence labels, and fill in expectations[]
protected double getExpectationValue ()
{
return gatherExpectationsOrConstraints (false);
}
}
@Override
public int getIteration() {
// TODO Auto-generated method stub
return 0;
}
@Override
public Transducer getTransducer() {
return memm;
}
@Override
public boolean isFinishedTraining() {
// TODO Auto-generated method stub
return false;
}
}
| 11,863 | 34.100592 | 126 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFTrainerByLabelLikelihood.java
|
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.Random;
import java.util.logging.Logger;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizer;
import cc.mallet.types.ExpGain;
import cc.mallet.types.FeatureInducer;
import cc.mallet.types.FeatureSelection;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.GradientGain;
import cc.mallet.types.InfoGain;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelSequence;
import cc.mallet.types.LabelVector;
import cc.mallet.types.RankedFeatureVector;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
/**
* Unlike ClassifierTrainer, TransducerTrainer is not "stateless" between calls
* to train. A TransducerTrainer is constructed paired with a specific
* Transducer, and can only train that Transducer. CRF stores and has methods
* for FeatureSelection and weight freezing. CRFTrainer stores and has methods
* for determining the contents/dimensions/sparsity/FeatureInduction of the
* CRF's weights as determined by training data.
* <p>
* <b>Note:</b> In the future this class may go away in favor of some default
* version of CRFTrainerByValueGradients.
*/
public class CRFTrainerByLabelLikelihood extends TransducerTrainer implements TransducerTrainer.ByOptimization {
private static Logger logger = MalletLogger.getLogger(CRFTrainerByLabelLikelihood.class.getName());
static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1.0;
static final double DEFAULT_HYPERBOLIC_PRIOR_SLOPE = 0.2;
static final double DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS = 10.0;
CRF crf;
//OptimizableCRF ocrf;
CRFOptimizableByLabelLikelihood ocrf;
Optimizer opt;
int iterationCount = 0;
boolean converged;
boolean usingHyperbolicPrior = false;
double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
double hyperbolicPriorSlope = DEFAULT_HYPERBOLIC_PRIOR_SLOPE;
double hyperbolicPriorSharpness = DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS;
boolean useSparseWeights = true;
boolean useNoWeights = false; // TODO remove this; it is just for debugging
private transient boolean useSomeUnsupportedTrick = true;
// Various values from CRF acting as indicators of when we need to ...
private int cachedValueWeightsStamp = -1; // ... re-calculate expectations and values to getValue() because weights' values changed
private int cachedGradientWeightsStamp = -1; // ... re-calculate to getValueGradient() because weights' values changed
private int cachedWeightsStructureStamp = -1; // ... re-allocate crf.weights, expectations & constraints because new states, transitions
// Use mcrf.trainingSet to see when we need to re-allocate crf.weights, expectations & constraints because we are using a different TrainingList than last time
// xxx temporary hack. This is quite useful to have, though!! -cas
public boolean printGradient = false;
public CRFTrainerByLabelLikelihood (CRF crf) {
this.crf = crf;
}
public Transducer getTransducer() { return crf; }
public CRF getCRF () { return crf; }
public Optimizer getOptimizer() { return opt; }
public boolean isConverged() { return converged; }
public boolean isFinishedTraining() { return converged; }
public int getIteration () { return iterationCount; }
/**
* Use this method to specify whether or not factors
* are added to the CRF by this trainer. If you have
* already setup the factors in your CRF, you may
* not want the trainer to add additional factors.
*
* @param flag If true, this trainer adds no factors to the CRF.
*/
public void setAddNoFactors(boolean flag) {
this.useNoWeights = flag;
}
public CRFOptimizableByLabelLikelihood getOptimizableCRF (InstanceList trainingSet) {
if (cachedWeightsStructureStamp != crf.weightsStructureChangeStamp) {
if (!useNoWeights) {
if (useSparseWeights)
crf.setWeightsDimensionAsIn (trainingSet, useSomeUnsupportedTrick);
else
crf.setWeightsDimensionDensely ();
}
//reallocateSufficientStatistics(); // Not necessary here because it is done in the constructor for OptimizableCRF
ocrf = null;
cachedWeightsStructureStamp = crf.weightsStructureChangeStamp;
}
if (ocrf == null || ocrf.trainingSet != trainingSet) {
//ocrf = new OptimizableCRF (crf, trainingSet);
ocrf = new CRFOptimizableByLabelLikelihood(crf, trainingSet);
ocrf.setGaussianPriorVariance(gaussianPriorVariance);
ocrf.setHyperbolicPriorSharpness(hyperbolicPriorSharpness);
ocrf.setHyperbolicPriorSlope(hyperbolicPriorSlope);
ocrf.setUseHyperbolicPrior(usingHyperbolicPrior);
opt = null;
}
return ocrf;
}
public Optimizer getOptimizer (InstanceList trainingSet) {
getOptimizableCRF(trainingSet); // this will set this.mcrf if necessary
if (opt == null || ocrf != opt.getOptimizable())
opt = new LimitedMemoryBFGS(ocrf); // Alternative: opt = new ConjugateGradient (0.001);
return opt;
}
// Java question:
// If I make a non-static inner class CRF.Trainer,
// can that class by subclassed in another .java file,
// and can that subclass still have access to all the CRF's
// instance variables?
// ANSWER: Yes and yes, but you have to use special syntax in the subclass ctor (see mallet-dev archive) -cas
public boolean trainIncremental (InstanceList training)
{
return train (training, Integer.MAX_VALUE);
}
public boolean train (InstanceList trainingSet, int numIterations) {
if (numIterations <= 0)
return false;
assert (trainingSet.size() > 0);
getOptimizableCRF(trainingSet); // This will set this.mcrf if necessary
getOptimizer(trainingSet); // This will set this.opt if necessary
boolean converged = false;
logger.info ("CRF about to train with "+numIterations+" iterations");
for (int i = 0; i < numIterations; i++) {
try {
converged = opt.optimize (1);
iterationCount++;
logger.info ("CRF finished one iteration of maximizer, i="+i);
runEvaluators();
} catch (IllegalArgumentException e) {
e.printStackTrace();
logger.info ("Catching exception; saying converged.");
converged = true;
} catch (Exception e) {
e.printStackTrace();
logger.info("Catching exception; saying converged.");
converged = true;
}
if (converged) {
logger.info ("CRF training has converged, i="+i);
break;
}
}
return converged;
}
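	// Illustrative usage (a sketch; the CRF and training data are assumed to be built
	// elsewhere, e.g. in SimpleTagger-style code):
	//
	//   CRFTrainerByLabelLikelihood trainer = new CRFTrainerByLabelLikelihood(crf);
	//   trainer.setGaussianPriorVariance(10.0);
	//   trainer.setUseSparseWeights(true);
	//   boolean converged = trainer.train(trainingData, 500);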
/**
* Train a CRF on various-sized subsets of the data. This method is typically used to accelerate training by
	 * quickly getting to reasonable parameters on only a subset of the data first, then on progressively more data.
* @param training The training Instances.
* @param numIterationsPerProportion Maximum number of Maximizer iterations per training proportion.
* @param trainingProportions If non-null, train on increasingly
	 * larger portions of the data, e.g. new double[] {0.2, 0.5, 1.0}. This can sometimes speed up convergence.
* Be sure to end in 1.0 if you want to train on all the data in the end.
* @return True if training has converged.
*/
public boolean train (InstanceList training, int numIterationsPerProportion, double[] trainingProportions)
{
int trainingIteration = 0;
assert (trainingProportions.length > 0);
boolean converged = false;
for (int i = 0; i < trainingProportions.length; i++) {
assert (trainingProportions[i] <= 1.0);
logger.info ("Training on "+trainingProportions[i]+"% of the data this round.");
if (trainingProportions[i] == 1.0)
converged = this.train (training, numIterationsPerProportion);
else
converged = this.train (training.split (new Random(1),
new double[] {trainingProportions[i], 1-trainingProportions[i]})[0], numIterationsPerProportion);
trainingIteration += numIterationsPerProportion;
}
return converged;
}
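	// Illustrative call (a sketch): spend up to 20 optimizer iterations each on 20%, then 50%,
	// then all of the training data.
	//
	//   trainer.train(trainingData, 20, new double[] {0.2, 0.5, 1.0});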
public boolean trainWithFeatureInduction (InstanceList trainingData,
InstanceList validationData, InstanceList testingData,
TransducerEvaluator eval, int numIterations,
int numIterationsBetweenFeatureInductions,
int numFeatureInductions,
int numFeaturesPerFeatureInduction,
double trueLabelProbThreshold,
boolean clusteredFeatureInduction,
double[] trainingProportions)
{
return trainWithFeatureInduction (trainingData, validationData, testingData,
eval, numIterations, numIterationsBetweenFeatureInductions,
numFeatureInductions, numFeaturesPerFeatureInduction,
trueLabelProbThreshold, clusteredFeatureInduction,
trainingProportions, "exp");
}
/**
* Train a CRF using feature induction to generate conjunctions of
* features. Feature induction is run periodically during
* training. The features are added to improve performance on the
* mislabeled instances, with the specific scoring criterion given
* by the {@link FeatureInducer} specified by <code>gainName</code>
*
* @param training The training Instances.
* @param validation The validation Instances.
* @param testing The testing instances.
* @param eval For evaluation during training.
* @param numIterations Maximum number of Maximizer iterations.
* @param numIterationsBetweenFeatureInductions Number of maximizer
* iterations between each call to the Feature Inducer.
* @param numFeatureInductions Maximum number of rounds of feature
* induction.
* @param numFeaturesPerFeatureInduction Maximum number of features
* to induce at each round of induction.
* @param trueLabelProbThreshold If the model's probability of the
* true Label of an Instance is less than this value, it is added as
* an error instance to the {@link FeatureInducer}.
* @param clusteredFeatureInduction If true, a separate {@link
* FeatureInducer} is constructed for each label pair. This can
* avoid inducing a disproportionate number of features for a single
* label.
* @param trainingProportions If non-null, train on increasingly
	 * larger portions of the data (e.g. [0.2, 0.5, 1.0]). This can
	 * sometimes speed up convergence.
* @param gainName The type of {@link FeatureInducer} to use. One of
* "exp", "grad", or "info" for {@link ExpGain}, {@link
* GradientGain}, or {@link InfoGain}.
* @return True if training has converged.
*/
public boolean trainWithFeatureInduction (InstanceList trainingData,
InstanceList validationData, InstanceList testingData,
TransducerEvaluator eval, int numIterations,
int numIterationsBetweenFeatureInductions,
int numFeatureInductions,
int numFeaturesPerFeatureInduction,
double trueLabelProbThreshold,
boolean clusteredFeatureInduction,
double[] trainingProportions,
String gainName)
{
int trainingIteration = 0;
int numLabels = crf.outputAlphabet.size();
crf.globalFeatureSelection = trainingData.getFeatureSelection();
if (crf.globalFeatureSelection == null) {
// Mask out all features; some will be added later by FeatureInducer.induceFeaturesFor(.)
crf.globalFeatureSelection = new FeatureSelection (trainingData.getDataAlphabet());
trainingData.setFeatureSelection (crf.globalFeatureSelection);
}
// TODO Careful! If validationData and testingData get removed as arguments to this method
// then the next two lines of work will have to be done somewhere.
if (validationData != null) validationData.setFeatureSelection (crf.globalFeatureSelection);
if (testingData != null) testingData.setFeatureSelection (crf.globalFeatureSelection);
for (int featureInductionIteration = 0;
featureInductionIteration < numFeatureInductions;
featureInductionIteration++)
{
// Print out some feature information
logger.info ("Feature induction iteration "+featureInductionIteration);
// Train the CRF
InstanceList theTrainingData = trainingData;
if (trainingProportions != null && featureInductionIteration < trainingProportions.length) {
logger.info ("Training on "+trainingProportions[featureInductionIteration]+"% of the data this round.");
InstanceList[] sampledTrainingData = trainingData.split (new Random(1),
new double[] {trainingProportions[featureInductionIteration],
1-trainingProportions[featureInductionIteration]});
theTrainingData = sampledTrainingData[0];
theTrainingData.setFeatureSelection (crf.globalFeatureSelection); // xxx necessary?
logger.info (" which is "+theTrainingData.size()+" instances");
}
boolean converged = false;
if (featureInductionIteration != 0)
// Don't train until we have added some features
converged = this.train (theTrainingData, numIterationsBetweenFeatureInductions);
trainingIteration += numIterationsBetweenFeatureInductions;
logger.info ("Starting feature induction with "+crf.inputAlphabet.size()+" features.");
// Create the list of error tokens, for both unclustered and clustered feature induction
InstanceList errorInstances = new InstanceList (trainingData.getDataAlphabet(),
trainingData.getTargetAlphabet());
// This errorInstances.featureSelection will get examined by FeatureInducer,
// so it can know how to add "new" singleton features
errorInstances.setFeatureSelection (crf.globalFeatureSelection);
ArrayList errorLabelVectors = new ArrayList();
InstanceList clusteredErrorInstances[][] = new InstanceList[numLabels][numLabels];
ArrayList clusteredErrorLabelVectors[][] = new ArrayList[numLabels][numLabels];
for (int i = 0; i < numLabels; i++)
for (int j = 0; j < numLabels; j++) {
clusteredErrorInstances[i][j] = new InstanceList (trainingData.getDataAlphabet(),
trainingData.getTargetAlphabet());
clusteredErrorInstances[i][j].setFeatureSelection (crf.globalFeatureSelection);
clusteredErrorLabelVectors[i][j] = new ArrayList();
}
for (int i = 0; i < theTrainingData.size(); i++) {
logger.info ("instance="+i);
Instance instance = theTrainingData.get(i);
Sequence input = (Sequence) instance.getData();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
SumLattice lattice =
crf.sumLatticeFactory.newSumLattice (crf, input, (Sequence)null, (Transducer.Incrementor)null,
(LabelAlphabet)theTrainingData.getTargetAlphabet());
int prevLabelIndex = 0; // This will put extra error instances in this cluster
for (int j = 0; j < trueOutput.size(); j++) {
Label label = (Label) ((LabelSequence)trueOutput).getLabelAtPosition(j);
assert (label != null);
//System.out.println ("Instance="+i+" position="+j+" fv="+lattice.getLabelingAtPosition(j).toString(true));
LabelVector latticeLabeling = lattice.getLabelingAtPosition(j);
double trueLabelProb = latticeLabeling.value(label.getIndex());
int labelIndex = latticeLabeling.getBestIndex();
//System.out.println ("position="+j+" trueLabelProb="+trueLabelProb);
if (trueLabelProb < trueLabelProbThreshold) {
logger.info ("Adding error: instance="+i+" position="+j+" prtrue="+trueLabelProb+
(label == latticeLabeling.getBestLabel() ? " " : " *")+
" truelabel="+label+
" predlabel="+latticeLabeling.getBestLabel()+
" fv="+((FeatureVector)input.get(j)).toString(true));
errorInstances.add (input.get(j), label, null, null);
errorLabelVectors.add (latticeLabeling);
clusteredErrorInstances[prevLabelIndex][labelIndex].add (input.get(j), label, null, null);
clusteredErrorLabelVectors[prevLabelIndex][labelIndex].add (latticeLabeling);
}
prevLabelIndex = labelIndex;
}
}
logger.info ("Error instance list size = "+errorInstances.size());
if (clusteredFeatureInduction) {
FeatureInducer[][] klfi = new FeatureInducer[numLabels][numLabels];
for (int i = 0; i < numLabels; i++) {
for (int j = 0; j < numLabels; j++) {
// Note that we may see some "impossible" transitions here (like O->I in a OIB model)
// because we are using lattice gammas to get the predicted label, not Viterbi.
// I don't believe this does any harm, and may do some good.
logger.info ("Doing feature induction for "+
crf.outputAlphabet.lookupObject(i)+" -> "+crf.outputAlphabet.lookupObject(j)+
" with "+clusteredErrorInstances[i][j].size()+" instances");
if (clusteredErrorInstances[i][j].size() < 20) {
logger.info ("..skipping because only "+clusteredErrorInstances[i][j].size()+" instances.");
continue;
}
int s = clusteredErrorLabelVectors[i][j].size();
LabelVector[] lvs = new LabelVector[s];
for (int k = 0; k < s; k++)
lvs[k] = (LabelVector) clusteredErrorLabelVectors[i][j].get(k);
RankedFeatureVector.Factory gainFactory = null;
if (gainName.equals ("exp"))
gainFactory = new ExpGain.Factory (lvs, gaussianPriorVariance);
else if (gainName.equals("grad"))
gainFactory = new GradientGain.Factory (lvs);
else if (gainName.equals("info"))
gainFactory = new InfoGain.Factory ();
klfi[i][j] = new FeatureInducer (gainFactory,
clusteredErrorInstances[i][j],
numFeaturesPerFeatureInduction,
2*numFeaturesPerFeatureInduction,
2*numFeaturesPerFeatureInduction);
crf.featureInducers.add(klfi[i][j]);
}
}
for (int i = 0; i < numLabels; i++) {
for (int j = 0; j < numLabels; j++) {
logger.info ("Adding new induced features for "+
crf.outputAlphabet.lookupObject(i)+" -> "+crf.outputAlphabet.lookupObject(j));
if (klfi[i][j] == null) {
logger.info ("...skipping because no features induced.");
continue;
}
// Note that this adds features globally, but not on a per-transition basis
klfi[i][j].induceFeaturesFor (trainingData, false, false);
if (testingData != null) klfi[i][j].induceFeaturesFor (testingData, false, false);
}
}
klfi = null;
} else {
int s = errorLabelVectors.size();
LabelVector[] lvs = new LabelVector[s];
for (int i = 0; i < s; i++)
lvs[i] = (LabelVector) errorLabelVectors.get(i);
RankedFeatureVector.Factory gainFactory = null;
if (gainName.equals ("exp"))
gainFactory = new ExpGain.Factory (lvs, gaussianPriorVariance);
else if (gainName.equals("grad"))
gainFactory = new GradientGain.Factory (lvs);
else if (gainName.equals("info"))
gainFactory = new InfoGain.Factory ();
FeatureInducer klfi =
new FeatureInducer (gainFactory,
errorInstances,
numFeaturesPerFeatureInduction,
2*numFeaturesPerFeatureInduction,
2*numFeaturesPerFeatureInduction);
crf.featureInducers.add(klfi);
// Note that this adds features globally, but not on a per-transition basis
klfi.induceFeaturesFor (trainingData, false, false);
if (testingData != null) klfi.induceFeaturesFor (testingData, false, false);
logger.info ("CRF4 FeatureSelection now includes "+crf.globalFeatureSelection.cardinality()+" features");
klfi = null;
}
// This is done in CRF4.train() anyway
//this.setWeightsDimensionAsIn (trainingData);
////this.growWeightsDimensionToInputAlphabet ();
}
return this.train (trainingData, numIterations - trainingIteration);
}
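	// Illustrative call (a sketch; the argument values mirror those used elsewhere in this
	// package and are not prescriptive):
	//
	//   trainer.trainWithFeatureInduction(trainingData, null, testData, eval,
	//       500,    // numIterations
	//       10,     // numIterationsBetweenFeatureInductions
	//       20,     // numFeatureInductions
	//       500,    // numFeaturesPerFeatureInduction
	//       0.5,    // trueLabelProbThreshold
	//       false,  // clusteredFeatureInduction
	//       null,   // trainingProportions
	//       "exp"); // gain function: ExpGain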
public void setUseHyperbolicPrior (boolean f) { usingHyperbolicPrior = f; }
public void setHyperbolicPriorSlope (double p) { hyperbolicPriorSlope = p; }
public void setHyperbolicPriorSharpness (double p) { hyperbolicPriorSharpness = p; }
public double getUseHyperbolicPriorSlope () { return hyperbolicPriorSlope; }
public double getUseHyperbolicPriorSharpness () { return hyperbolicPriorSharpness; }
public void setGaussianPriorVariance (double p) { gaussianPriorVariance = p; }
public double getGaussianPriorVariance () { return gaussianPriorVariance; }
//public int getDefaultFeatureIndex () { return defaultFeatureIndex;}
public void setUseSparseWeights (boolean b) { useSparseWeights = b; }
public boolean getUseSparseWeights () { return useSparseWeights; }
	/** Sets whether to use the 'some unsupported trick.' The trick is, when training a CRF
	 * with sparse weights after some training has already been done, to add a few weights
	 * for features that do not occur in the training data.
* <p>
* This generally leads to better accuracy at only a small memory cost.
*
* @param b Whether to use the trick
*/
public void setUseSomeUnsupportedTrick (boolean b) { useSomeUnsupportedTrick = b; }
// Serialization for CRFTrainerByLikelihood
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
static final int NULL_INTEGER = -1;
/* Need to check for null pointers. */
private void writeObject (ObjectOutputStream out) throws IOException {
int i, size;
out.writeInt (CURRENT_SERIAL_VERSION);
//out.writeInt(defaultFeatureIndex);
out.writeBoolean(usingHyperbolicPrior);
out.writeDouble(gaussianPriorVariance);
out.writeDouble(hyperbolicPriorSlope);
out.writeDouble(hyperbolicPriorSharpness);
out.writeInt(cachedGradientWeightsStamp);
out.writeInt(cachedValueWeightsStamp);
out.writeInt(cachedWeightsStructureStamp);
out.writeBoolean(printGradient);
out.writeBoolean (useSparseWeights);
throw new IllegalStateException("Implementation not yet complete.");
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int size, i;
int version = in.readInt ();
//defaultFeatureIndex = in.readInt();
usingHyperbolicPrior = in.readBoolean();
gaussianPriorVariance = in.readDouble();
hyperbolicPriorSlope = in.readDouble();
hyperbolicPriorSharpness = in.readDouble();
printGradient = in.readBoolean();
useSparseWeights = in.readBoolean();
throw new IllegalStateException("Implementation not yet complete.");
}
}
| 22,580 | 43.714851 | 160 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/NoopTransducerTrainer.java
|
package cc.mallet.fst;
import cc.mallet.types.InstanceList;
/** A TransducerTrainer that does no training, but simply acts as a container for a Transducer;
* for use in situations that require a TransducerTrainer, such as the TransducerEvaluator methods. */
public class NoopTransducerTrainer extends TransducerTrainer {
Transducer transducer;
public NoopTransducerTrainer (Transducer transducer) {
this.transducer = transducer;
}
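	// Illustrative usage (a sketch): wrap an already-trained transducer so it can be handed
	// to code that expects a TransducerTrainer, e.g. a TransducerEvaluator:
	//
	//   NoopTransducerTrainer noop = new NoopTransducerTrainer(crf);
	//   eval.evaluateInstanceList(noop, testData, "Testing");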
@Override
public int getIteration() {
return -1;
}
@Override
public Transducer getTransducer() {
return transducer;
}
@Override
public boolean isFinishedTraining() {
return true;
}
@Override
public boolean train(InstanceList trainingSet) {
return true;
}
@Override
public boolean train(InstanceList trainingSet, int numIterations) {
return true;
}
}
| 824 | 19.121951 | 102 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/FeatureTransducer.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.logging.Logger;
import cc.mallet.types.Alphabet;
import cc.mallet.types.Multinomial;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
public class FeatureTransducer extends Transducer
{
private static Logger logger = MalletLogger.getLogger(FeatureTransducer.class.getName());
// These next two dictionaries may be the same
Alphabet inputAlphabet;
Alphabet outputAlphabet;
ArrayList<State> states = new ArrayList<State> ();
ArrayList<State> initialStates = new ArrayList<State> ();
HashMap<String,State> name2state = new HashMap<String,State> ();
Multinomial.Estimator initialStateCounts;
Multinomial.Estimator finalStateCounts;
boolean trainable = false;
public FeatureTransducer (Alphabet inputAlphabet,
Alphabet outputAlphabet)
{
this.inputAlphabet = inputAlphabet;
this.outputAlphabet = outputAlphabet;
// xxx When should these be frozen?
}
public FeatureTransducer (Alphabet dictionary)
{
this (dictionary, dictionary);
}
public FeatureTransducer ()
{
this (new Alphabet ());
}
public Alphabet getInputAlphabet () { return inputAlphabet; }
public Alphabet getOutputAlphabet () { return outputAlphabet; }
public void addState (String name, double initialWeight, double finalWeight,
int[] inputs, int[] outputs, double[] weights,
String[] destinationNames)
{
if (name2state.get(name) != null)
throw new IllegalArgumentException ("State with name `"+name+"' already exists.");
State s = new State (name, states.size(), initialWeight, finalWeight,
inputs, outputs, weights, destinationNames, this);
states.add (s);
if (initialWeight < IMPOSSIBLE_WEIGHT)
initialStates.add (s);
name2state.put (name, s);
setTrainable (false);
}
public void addState (String name, double initialWeight, double finalWeight,
Object[] inputs, Object[] outputs, double[] weights,
String[] destinationNames)
{
this.addState (name, initialWeight, finalWeight,
inputAlphabet.lookupIndices (inputs, true),
outputAlphabet.lookupIndices (outputs, true),
weights, destinationNames);
}
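	// Illustrative construction sketch (state names, symbols, and weights are invented; the
	// weights follow the convention used elsewhere in this package, where 0.0 is a "free"
	// log-weight and Transducer.IMPOSSIBLE_WEIGHT marks an impossible choice):
	//
	//   FeatureTransducer ft = new FeatureTransducer();
	//   ft.addState("start", 0.0, Transducer.IMPOSSIBLE_WEIGHT,
	//       new Object[] {"a", "b"}, new Object[] {"X", "Y"},
	//       new double[] {0.0, 0.0}, new String[] {"end", "end"});
	//   ft.addState("end", Transducer.IMPOSSIBLE_WEIGHT, 0.0,
	//       new Object[0], new Object[0], new double[0], new String[0]);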
public int numStates () { return states.size(); }
public Transducer.State getState (int index) {
return states.get(index); }
public Iterator<State> initialStateIterator () { return initialStates.iterator (); }
public boolean isTrainable () { return trainable; }
public void setTrainable (boolean f)
{
trainable = f;
if (f) {
// This wipes away any previous counts we had.
			// It also potentially allocates an estimator of a new size if
// the number of states has increased.
initialStateCounts = new Multinomial.LaplaceEstimator (states.size());
finalStateCounts = new Multinomial.LaplaceEstimator (states.size());
} else {
initialStateCounts = null;
finalStateCounts = null;
}
for (int i = 0; i < numStates(); i++)
((State)getState(i)).setTrainable(f);
}
public void reset ()
{
if (trainable) {
initialStateCounts.reset ();
finalStateCounts.reset ();
for (int i = 0; i < numStates(); i++)
((State)getState(i)).reset ();
}
}
public void estimate ()
{
if (initialStateCounts == null || finalStateCounts == null)
throw new IllegalStateException ("This transducer not currently trainable.");
Multinomial initialStateDistribution = initialStateCounts.estimate ();
Multinomial finalStateDistribution = finalStateCounts.estimate ();
for (int i = 0; i < states.size(); i++) {
State s = states.get (i);
s.initialWeight = initialStateDistribution.logProbability (i);
s.finalWeight = finalStateDistribution.logProbability (i);
s.estimate ();
}
}
// Note that this is a non-static inner class, so we have access to all of
// FeatureTransducer's instance variables.
public class State extends Transducer.State
{
String name;
int index;
double initialWeight, finalWeight;
Transition[] transitions;
gnu.trove.TIntObjectHashMap input2transitions;
Multinomial.Estimator transitionCounts;
FeatureTransducer transducer;
// Note that you cannot add transitions to a state once it is created.
protected State (String name, int index, double initialWeight, double finalWeight,
int[] inputs, int[] outputs, double[] weights,
String[] destinationNames, FeatureTransducer transducer)
{
assert (inputs.length == outputs.length
&& inputs.length == weights.length
&& inputs.length == destinationNames.length);
this.transducer = transducer;
this.name = name;
this.index = index;
this.initialWeight = initialWeight;
this.finalWeight = finalWeight;
this.transitions = new Transition[inputs.length];
this.input2transitions = new gnu.trove.TIntObjectHashMap ();
transitionCounts = null;
for (int i = 0; i < inputs.length; i++) {
				// This constructor places the transition into this.input2transitions
transitions[i] = new Transition (inputs[i], outputs[i],
weights[i], this, destinationNames[i]);
transitions[i].index = i;
}
}
public Transducer getTransducer () { return transducer; }
public double getInitialWeight () { return initialWeight; }
public double getFinalWeight () { return finalWeight; }
public void setInitialWeight (double v) { initialWeight = v; }
public void setFinalWeight (double v) { finalWeight = v; }
private void setTrainable (boolean f)
{
if (f)
transitionCounts = new Multinomial.LaplaceEstimator (transitions.length);
else
transitionCounts = null;
}
// Temporarily here for debugging
public Multinomial.Estimator getTransitionEstimator()
{
return transitionCounts;
}
private void reset ()
{
if (transitionCounts != null)
transitionCounts.reset();
}
public int getIndex () { return index; }
public Transducer.TransitionIterator transitionIterator (Sequence input,
int inputPosition,
Sequence output,
int outputPosition)
{
if (inputPosition < 0 || outputPosition < 0 || output != null)
throw new UnsupportedOperationException ("Not yet implemented.");
if (input == null)
return transitionIterator ();
return transitionIterator (input, inputPosition);
}
public Transducer.TransitionIterator transitionIterator (Sequence inputSequence,
int inputPosition)
{
int inputIndex = inputAlphabet.lookupIndex (inputSequence.get(inputPosition), false);
if (inputIndex == -1)
throw new IllegalArgumentException ("Input not in dictionary.");
return transitionIterator (inputIndex);
}
public Transducer.TransitionIterator transitionIterator (Object o)
{
int inputIndex = inputAlphabet.lookupIndex (o, false);
if (inputIndex == -1)
throw new IllegalArgumentException ("Input not in dictionary.");
return transitionIterator (inputIndex);
}
public Transducer.TransitionIterator transitionIterator (int input)
{
return new TransitionIterator (this, input);
}
public Transducer.TransitionIterator transitionIterator ()
{
return new TransitionIterator (this);
}
public String getName ()
{
return name;
}
public void incrementInitialCount (double count)
{
if (initialStateCounts == null)
throw new IllegalStateException ("Transducer is not currently trainable.");
initialStateCounts.increment (index, count);
}
public void incrementFinalCount (double count)
{
if (finalStateCounts == null)
throw new IllegalStateException ("Transducer is not currently trainable.");
finalStateCounts.increment (index, count);
}
private void estimate ()
{
if (transitionCounts == null)
throw new IllegalStateException ("Transducer is not currently trainable.");
Multinomial transitionDistribution = transitionCounts.estimate ();
for (int i = 0; i < transitions.length; i++)
transitions[i].weight = transitionDistribution.logProbability (i);
}
private static final long serialVersionUID = 1;
}
@SuppressWarnings("serial")
protected class TransitionIterator extends Transducer.TransitionIterator
{
// If "index" is >= -1 we are going through all FeatureState.transitions[] by index.
// If "index" is -2, we are following the chain of FeatureTransition.nextWithSameInput,
// and "transition" is already initialized to the first transition.
// If "index" is -3, we are following the chain of FeatureTransition.nextWithSameInput,
// and the next transition should be found by following the chain.
int index;
Transition transition;
State source;
int input;
// Iterate through all transitions, independent of input
public TransitionIterator (State source)
{
//System.out.println ("FeatureTransitionIterator over all");
this.source = source;
this.input = -1;
this.index = -1;
this.transition = null;
}
public TransitionIterator (State source, int input)
{
//System.out.println ("SymbolTransitionIterator over "+input);
this.source = source;
this.input = input;
this.index = -2;
this.transition = (Transition) source.input2transitions.get (input);
}
public boolean hasNext ()
{
if (index >= -1) {
//System.out.println ("hasNext index " + index);
return (index < source.transitions.length-1);
}
return (index == -2 ? transition != null : transition.nextWithSameInput != null);
}
public Transducer.State nextState ()
{
if (index >= -1)
transition = source.transitions[++index];
else if (index == -2)
index = -3;
else
transition = transition.nextWithSameInput;
return transition.getDestinationState();
}
public int getIndex () { return index; }
public Object getInput () { return inputAlphabet.lookupObject(transition.input); }
public Object getOutput () { return outputAlphabet.lookupObject(transition.output); }
public double getWeight () { return transition.weight; }
public Transducer.State getSourceState () { return source; }
public Transducer.State getDestinationState () {
return transition.getDestinationState (); }
public void incrementCount (double count) {
logger.info ("FeatureTransducer incrementCount "+count);
source.transitionCounts.increment (transition.index, count); }
}
// Note: this class has a natural ordering that is inconsistent with equals.
protected class Transition
{
int input, output;
double weight;
int index;
String destinationName;
State destination = null;
Transition nextWithSameInput;
public Transition (int input, int output, double weight,
State sourceState, String destinationName)
{
this.input = input;
this.output = output;
this.weight = weight;
this.nextWithSameInput = (Transition) sourceState.input2transitions.get (input);
sourceState.input2transitions.put (input, this);
// this.index is set by the caller of this constructor
this.destinationName = destinationName;
}
public State getDestinationState ()
{
if (destination == null) {
destination = name2state.get (destinationName);
assert (destination != null);
}
return destination;
}
}
private static final long serialVersionUID = 1;
}
// File: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/TransducerTrainer.java
package cc.mallet.fst;
import java.util.ArrayList;
import java.util.Collection;
import cc.mallet.optimize.Optimizer;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
/**
* An abstract class to train and evaluate a transducer model.
*/
public abstract class TransducerTrainer {
// The list of evaluators to be run every once in a while
ArrayList<TransducerEvaluator> evaluators = new ArrayList<TransducerEvaluator>();
public abstract Transducer getTransducer();
public abstract int getIteration();
public abstract boolean isFinishedTraining();
public boolean train (InstanceList trainingSet) {
return train (trainingSet, Integer.MAX_VALUE);
}
/** Train the transducer associated with this TransducerTrainer.
* You should be able to call this method with different trainingSet objects.
* Whether this causes the TransducerTrainer to combine both trainingSets or
* to view the second as a new alternative is at the discretion of the particular
* TransducerTrainer subclass involved. */
public abstract boolean train (InstanceList trainingSet, int numIterations);
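// A minimal usage sketch (assuming a CRF `crf` and InstanceLists `training`
// and `testing` already exist; the concrete trainer class is one choice among
// several):
//
//   TransducerTrainer trainer = new CRFTrainerByLabelLikelihood(crf);
//   trainer.addEvaluator(new TokenAccuracyEvaluator(testing, "testing"));
//   while (!trainer.isFinishedTraining())
//     trainer.train(training, 10);  // 10 more iterations per call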
// TODO Consider adding or removing these
//public abstract boolean train ();
//public abstract boolean train (int numIterations);
// Management of evaluators
public TransducerTrainer addEvaluator (TransducerEvaluator te) { evaluators.add(te); return this; }
public TransducerTrainer addEvaluators (Collection<TransducerEvaluator> tes) { evaluators.addAll(tes); return this; }
public TransducerTrainer removeEvaluator (TransducerEvaluator te) { evaluators.remove(te); return this; }
/** This method should be called by subclasses whenever evaluators should be run.
* Do not worry too much about them being run too often, because the evaluators
* themselves can control/limit when they actually do their work with TransducerEvaluator.precondition(). */
protected void runEvaluators () {
for (TransducerEvaluator te : evaluators)
te.evaluate(this);
}
public interface ByOptimization {
public Optimizer getOptimizer ();
// Remove the above, and only have public Optimizer getOptimizer (InstanceList trainingSet);
}
// Implied above; can always make a per-instance training method use a batch instance list
//public interface ByBatch {}
// TODO Consider making this an interface also, like ByOptimization
public static abstract class ByIncrements extends TransducerTrainer {
public abstract boolean trainIncremental (InstanceList incrementalTrainingSet);
}
public static abstract class ByInstanceIncrements extends ByIncrements {
public abstract boolean trainIncremental (Instance trainingInstance);
}
}
// File: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/TokenAccuracyEvaluator.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.util.HashMap;
import java.util.logging.Logger;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
/**
* Evaluates a transducer model based on predictions of individual tokens.
*/
public class TokenAccuracyEvaluator extends TransducerEvaluator
{
private static Logger logger = MalletLogger.getLogger(TokenAccuracyEvaluator.class.getName());
private HashMap<String,Double> accuracy = new HashMap<String,Double>();
public TokenAccuracyEvaluator (InstanceList[] instanceLists, String[] descriptions) {
super (instanceLists, descriptions);
}
public TokenAccuracyEvaluator (InstanceList instanceList1, String description1) {
this (new InstanceList[] {instanceList1}, new String[] {description1});
}
public TokenAccuracyEvaluator (InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2) {
this (new InstanceList[] {instanceList1, instanceList2}, new String[] {description1, description2});
}
public TokenAccuracyEvaluator (InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2,
InstanceList instanceList3, String description3) {
this (new InstanceList[] {instanceList1, instanceList2, instanceList3}, new String[] {description1, description2, description3});
}
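// A minimal usage sketch (assuming a TransducerTrainer `trainer` and an
// InstanceList `testing` already exist):
//
//   TokenAccuracyEvaluator eval = new TokenAccuracyEvaluator(testing, "testing");
//   eval.evaluate(trainer);            // or attach via trainer.addEvaluator(eval)
//   double acc = eval.getAccuracy("testing");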
public void evaluateInstanceList (TransducerTrainer trainer, InstanceList instances, String description)
{
int numCorrectTokens;
int totalTokens;
Transducer transducer = trainer.getTransducer();
totalTokens = numCorrectTokens = 0;
for (int i = 0; i < instances.size(); i++) {
Instance instance = instances.get(i);
Sequence input = (Sequence) instance.getData();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
//System.err.println ("TokenAccuracyEvaluator "+i+" length="+input.size());
Sequence predOutput = transducer.transduce (input);
assert (predOutput.size() == trueOutput.size());
for (int j = 0; j < trueOutput.size(); j++) {
totalTokens++;
if (trueOutput.get(j).equals(predOutput.get(j)))
numCorrectTokens++;
}
//System.err.println ("TokenAccuracyEvaluator "+i+" numCorrectTokens="+numCorrectTokens+" totalTokens="+totalTokens+" accuracy="+((double)numCorrectTokens)/totalTokens);
}
double acc = ((double)numCorrectTokens)/totalTokens;
//System.err.println ("TokenAccuracyEvaluator accuracy="+acc);
accuracy.put(description, acc);
logger.info (description +" accuracy="+acc);
}
/**
* Returns the accuracy from the most recent evaluation of the instance list
* registered under the given description.
* @param description the description given to the instance list at construction
* @return the token accuracy for that instance list
*/
public double getAccuracy (String description)
{
Double ret = accuracy.get(description);
if (ret != null)
return ret.doubleValue();
throw new IllegalArgumentException ("No accuracy available for instance list \""+description+"\"");
}
}
// File: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/MaxLatticeFactory.java
package cc.mallet.fst;
import java.io.Serializable;
import cc.mallet.types.Sequence;
public abstract class MaxLatticeFactory implements Serializable {
public MaxLattice newMaxLattice (Transducer trans, Sequence inputSequence)
{
return newMaxLattice (trans, inputSequence, null);
}
// You may pass null for output
public abstract MaxLattice newMaxLattice (Transducer trans, Sequence inputSequence, Sequence outputSequence);
}
// File: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/PerClassAccuracyEvaluator.java
/* Copyright (C) 2003 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.fst;
import java.util.logging.Logger;
import java.util.Arrays;
import java.io.PrintStream;
import java.text.DecimalFormat;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
* Determines the precision, recall and F1 on a per-class basis.
*
* @author Charles Sutton
* @version $Id: PerClassAccuracyEvaluator.java,v 1.1 2007/10/22 21:37:48 mccallum Exp $
*/
public class PerClassAccuracyEvaluator extends TransducerEvaluator {
private static Logger logger = MalletLogger.getLogger(TokenAccuracyEvaluator.class.getName());
public PerClassAccuracyEvaluator (InstanceList[] instanceLists, String[] descriptions) {
super (instanceLists, descriptions);
}
public PerClassAccuracyEvaluator (InstanceList i1, String d1) {
this (new InstanceList[] {i1}, new String[] {d1});
}
public PerClassAccuracyEvaluator (InstanceList i1, String d1, InstanceList i2, String d2) {
this (new InstanceList[] {i1, i2}, new String[] {d1, d2});
}
public void evaluateInstanceList (TransducerTrainer tt, InstanceList data, String description)
{
Transducer model = tt.getTransducer();
Alphabet dict = model.getInputPipe().getTargetAlphabet();
int numLabels = dict.size();
int[] numCorrectTokens = new int [numLabels];
int[] numPredTokens = new int [numLabels];
int[] numTrueTokens = new int [numLabels];
logger.info("Per-token results for " + description);
for (int i = 0; i < data.size(); i++) {
Instance instance = data.get(i);
Sequence input = (Sequence) instance.getData();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = model.transduce (input);
assert (predOutput.size() == trueOutput.size());
for (int j = 0; j < trueOutput.size(); j++) {
int idx = dict.lookupIndex(trueOutput.get(j));
numTrueTokens[idx]++;
numPredTokens[dict.lookupIndex(predOutput.get(j))]++;
if (trueOutput.get(j).equals(predOutput.get(j)))
numCorrectTokens[idx]++;
}
}
DecimalFormat f = new DecimalFormat ("0.####");
double[] allf = new double [numLabels];
for (int i = 0; i < numLabels; i++) {
Object label = dict.lookupObject(i);
double precision = ((double) numCorrectTokens[i]) / numPredTokens[i];
double recall = ((double) numCorrectTokens[i]) / numTrueTokens[i];
double f1 = (2 * precision * recall) / (precision + recall);
if (!Double.isNaN (f1)) allf [i] = f1;
logger.info(description +" label " + label + " P " + f.format (precision)
+ " R " + f.format(recall) + " F1 "+ f.format (f1));
}
logger.info ("Macro-average F1 "+f.format (MatrixOps.mean (allf)));
}
}
// File: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/Segment.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.util.ArrayList;
import cc.mallet.types.ArraySequence;
import cc.mallet.types.Sequence;
/**
* Represents a labelled chunk of a {@link Sequence} segmented by a
* {@link Transducer}, usually corresponding to some object extracted
* from an input {@link Sequence}.
*/
public class Segment implements Comparable
{
Sequence input, pred, truth; // input, predicted, and true sequences
int start, end; // offsets for this segment in the sequence
Object startTag, inTag; // label for the beginning and inside of this Segment
double confidence; // confidence score for this extracted segment
boolean correct;
// Ending prematurely is a tough case because, technically, everything
// inside the segment is tagged correctly.
boolean endsPrematurely; // e.g. truth: B I I O O
                         //      pred:  B I O O O
/**
* Initializes the segment.
*
* @param input entire input sequence
* @param pred predicted sequence
* @param truth true (gold-standard) sequence
* @param start starting position of extracted segment
* @param end ending position of extracted segment
* @param startTag label marking the beginning of a segment
* @param inTag label marking the inside of a segment
*/
public Segment (Sequence input, Sequence pred, Sequence truth, int start, int end,
Object startTag, Object inTag )
{
this.input = input;
this.pred = pred;
this.truth = truth;
this.start = start;
this.startTag = startTag;
this.inTag = inTag;
this.end = end;
this.confidence = -1;
this.correct = true;
this.endsPrematurely = false;
for (int i=start; i <= end; i++) {
if (!pred.get(i).equals (truth.get(i))) {
this.correct = false;
break;
}
}
// segment can also be incorrect if it ends prematurely
if (truth != null) {
if (correct && end+1 < truth.size() && truth.get (end+1).equals (inTag)) {
this.correct = false;
this.endsPrematurely = true;
}
}
}
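// A minimal construction sketch (hypothetical BIO-style labels; `tokens`,
// `predicted`, and `truth` are assumed to be parallel Sequences): a segment
// covering positions 2..3 that was predicted as a person name.
//
//   Segment seg = new Segment(tokens, predicted, truth, 2, 3, "B-PER", "I-PER");
//   boolean ok = seg.correct();  // true only if every position in [2,3] matches
//                                // and the true segment does not continue past 3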
public void setCorrect (boolean b) { this.correct = b; }
public int size() { return this.end - this.start + 1; }
public Object getTruth (int i) { return this.truth.get( i ); }
public Sequence getTruth () { return this.truth; }
public Object getPredicted (int i) { return this.pred.get( i ); }
public Sequence getPredicted () { return this.pred; }
public void setPredicted (Sequence predicted) { this.pred = predicted; }
public Sequence getInput () { return this.input; }
public int getStart () { return this.start; }
public int getEnd () { return this.end; }
public Object getStartTag () { return this.startTag; }
public Object getInTag () { return this.inTag; }
public double getConfidence () {return this.confidence; }
public void setConfidence (double c) {this.confidence = c; }
public boolean correct () { return this.correct; }
public boolean endsPrematurely () { return this.endsPrematurely; }
public boolean indexInSegment (int index) {
return (index >= this.start && index <= this.end);
}
public Sequence getSegmentInputSequence () {
ArrayList ret = new ArrayList ();
for (int i=start; i <= end; i++)
ret.add( input.get( i ) );
return new ArraySequence( ret );
}
public int compareTo (Object o) {
Segment s = (Segment) o;
if (s.confidence == -1 || this.confidence == -1) {
throw new IllegalArgumentException ("attempting to compare confidences that have not been set yet..");
}
if (this.confidence > s.confidence)
return 1;
else if (this.confidence < s.confidence)
return -1;
else return 0;
}
public String sequenceToString () {
String ret = "";
for (int i=0; i < input.size(); i++) {
if (i <= end && i >= start) // part of segment
ret += pred.get(i).toString() + "[" + truth.get (i) + "][" + confidence + "]\t";
else
ret += "-[" + truth.get (i) + "]\t";
}
return ret;
}
public String toString () {
String ret = "";
ret += "start: " + start + " end: " + end + " confidence: " + confidence + "\n";
for (int i=start; i <= end; i++) {
ret += pred.get (i).toString() + "[" + truth.get (i) + "]\t";
}
return ret;
}
public boolean equals (Object o) {
Segment s = (Segment) o;
if (start == s.getStart() &&
end == s.getEnd() &&
correct == s.correct() &&
input.size() == s.getInput().size()) {
for (int i=start; i <= end; i++) {
if (!pred.get( i ).equals( s.getPredicted( i ) ) ||
!truth.get( i ).equals( s.getTruth( i ) ) )
return false;
}
return true;
}
return false;
}
}
// File: twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRF.java
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.BitSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import java.text.DecimalFormat;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureInducer;
import cc.mallet.types.FeatureSelection;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.IndexedSparseVector;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.RankedFeatureVector;
import cc.mallet.types.Sequence;
import cc.mallet.types.SparseVector;
import cc.mallet.pipe.Noop;
import cc.mallet.pipe.Pipe;
import cc.mallet.util.ArrayUtils;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Maths;
/* There are several different kinds of numeric values:
"weights" range from -Inf to Inf. High weights make a path more
likely. These don't appear directly in Transducer.java, but appear
as parameters to many subclasses, such as CRFs. Weights are also
often summed, or combined in a dot product with feature vectors.
"unnormalized costs" range from -Inf to Inf. High costs make a
path less likely. Unnormalized costs can be obtained from negated
weights or negated sums of weights. These are often returned by a
TransitionIterator's getValue() method. The LatticeNode.alpha
values are unnormalized costs.
"normalized costs" range from 0 to Inf. High costs make a path
less likely. Normalized costs can safely be considered as the
-log(probability) of some event. They can be obtained by
subtracting a (negative) normalizer from unnormalized costs, for
example, subtracting the total cost of a lattice. Typically
initialCosts and finalCosts are examples of normalized costs, but
they are also allowed to be unnormalized costs. The gammas[][],
stateGammas[], and transitionXis[][] are all normalized costs, as
well as the return value of Lattice.getValue().
"probabilities" range from 0 to 1. High probabilities make a path
more likely. They are obtained from normalized costs by taking the
log and negating.
"sums of probabilities" range from 0 to positive numbers. They are
the sum of several probabilities. These are passed to the
incrementCount() methods.
*/
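/* Concretely, under the conventions above: a normalized cost c and a
   probability p are related by c = -log(p) and p = exp(-c); an unnormalized
   cost is the negation of a weight (or of a sum / dot product of weights). */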
/**
* Represents a CRF model.
*/
public class CRF extends Transducer implements Serializable
{
private static Logger logger = MalletLogger.getLogger(CRF.class.getName());
static final String LABEL_SEPARATOR = ",";
protected Alphabet inputAlphabet;
protected Alphabet outputAlphabet;
protected ArrayList<State> states = new ArrayList<State> ();
protected ArrayList<State> initialStates = new ArrayList<State> ();
protected HashMap<String,State> name2state = new HashMap<String,State> ();
protected Factors parameters = new Factors ();
//SparseVector[] weights;
//double[] defaultWeights; // parameters for default feature
//Alphabet weightAlphabet = new Alphabet ();
//boolean[] weightsFrozen;
// FeatureInduction can fill this in
protected FeatureSelection globalFeatureSelection;
// "featureSelections" is on a per- weights[i] basis, and over-rides
// (permanently disabling) FeatureInducer's and
// setWeightsDimensionsAsIn() from using these features on these transitions
protected FeatureSelection[] featureSelections;
// Store here the induced feature conjunctions so that these conjunctions can be added to test instances before transduction
protected ArrayList<FeatureInducer> featureInducers = new ArrayList<FeatureInducer>();
// An integer index that gets incremented each time this CRFs parameters get changed
protected int weightsValueChangeStamp = 0;
// An integer index that gets incremented each time this CRFs parameters' structure get changed
protected int weightsStructureChangeStamp = 0;
protected int cachedNumParametersStamp = -1; // A copy of weightsStructureChangeStamp the last time numParameters was calculated
protected int numParameters;
/** A simple, transparent container to hold the parameters or sufficient statistics for the CRF. */
public static class Factors implements Serializable {
public Alphabet weightAlphabet;
public SparseVector[] weights; // parameters on transitions, indexed by "weight index"
public double[] defaultWeights;// parameters for default features, indexed by "weight index"
public boolean[] weightsFrozen; // flag, if true indicating that the weights of this "weight index" should not be changed by learning, indexed by "weight index"
public double [] initialWeights; // indexed by state index
public double [] finalWeights; // indexed by state index
/** Construct a new empty Factors with a new empty weightsAlphabet, 0-length initialWeights and finalWeights, and the other arrays null. */
public Factors () {
weightAlphabet = new Alphabet();
initialWeights = new double[0];
finalWeights = new double[0];
// Leave the rest as null. They will get set later by addState() and addWeight()
// Alternatively, we could create zero-length arrays
}
/** Construct new Factors by mimicking the structure of the other one, but with zero values.
* Always simply point to the other's Alphabet; do not clone it. */
public Factors (Factors other) {
weightAlphabet = other.weightAlphabet;
weights = new SparseVector[other.weights.length];
for (int i = 0; i < weights.length; i++)
weights[i] = (SparseVector) other.weights[i].cloneMatrixZeroed();
defaultWeights = new double[other.defaultWeights.length];
weightsFrozen = other.weightsFrozen; // We don't copy here because we want "expectation" and "constraint" factors to get changes to a CRF.parameters factor. Alternatively we declare freezing to be a change of structure, and force reallocation of "expectations", etc.
initialWeights = new double[other.initialWeights.length];
finalWeights = new double[other.finalWeights.length];
}
/** Construct new Factors by copying the other one. */
public Factors (Factors other, boolean cloneAlphabet) {
weightAlphabet = cloneAlphabet ? (Alphabet) other.weightAlphabet.clone() : other.weightAlphabet;
weights = new SparseVector[other.weights.length];
for (int i = 0; i < weights.length; i++)
weights[i] = (SparseVector) other.weights[i].cloneMatrix();
defaultWeights = other.defaultWeights.clone();
weightsFrozen = other.weightsFrozen;
initialWeights = other.initialWeights.clone();
finalWeights = other.finalWeights.clone();
}
/** Construct a new Factors with the same structure as the parameters of 'crf', but with values initialized to zero.
* This method is typically used to allocate storage for sufficient statistics, expectations, constraints, etc. */
public Factors (CRF crf) {
// TODO Change this implementation to this(crf.parameters)
weightAlphabet = crf.parameters.weightAlphabet; // TODO consider cloning this instead
weights = new SparseVector[crf.parameters.weights.length];
for (int i = 0; i < weights.length; i++)
weights[i] = (SparseVector) crf.parameters.weights[i].cloneMatrixZeroed();
defaultWeights = new double[crf.parameters.weights.length];
weightsFrozen = crf.parameters.weightsFrozen;
assert (crf.numStates() == crf.parameters.initialWeights.length);
assert (crf.parameters.initialWeights.length == crf.parameters.finalWeights.length);
initialWeights = new double[crf.parameters.initialWeights.length];
finalWeights = new double[crf.parameters.finalWeights.length];
}
public int getNumFactors () {
assert (initialWeights.length == finalWeights.length);
assert (defaultWeights.length == weights.length);
int ret = initialWeights.length + finalWeights.length + defaultWeights.length;
for (int i = 0; i < weights.length; i++)
ret += weights[i].numLocations();
return ret;
}
public void zero () {
for (int i = 0; i < weights.length; i++)
weights[i].setAll(0);
Arrays.fill(defaultWeights, 0);
Arrays.fill(initialWeights, 0);
Arrays.fill(finalWeights, 0);
}
public boolean structureMatches (Factors other) {
if (weightAlphabet.size() != other.weightAlphabet.size()) return false;
if (weights.length != other.weights.length) return false;
// gsc: checking each SparseVector's size within weights.
for (int i = 0; i < weights.length; i++)
if (weights[i].numLocations() != other.weights[i].numLocations()) return false;
// Note that we are not checking the indices of the SparseVectors in weights
if (defaultWeights.length != other.defaultWeights.length) return false;
assert (initialWeights.length == finalWeights.length);
if (initialWeights.length != other.initialWeights.length) return false;
return true;
}
public void assertNotNaN () {
for (int i = 0; i < weights.length; i++)
assert (!weights[i].isNaN());
assert (!MatrixOps.isNaN(defaultWeights));
assert (!MatrixOps.isNaN(initialWeights));
assert (!MatrixOps.isNaN(finalWeights));
}
// gsc: checks all weights to make sure there are no NaN or Infinite values,
// this method can be called for checking the weights of constraints and
// expectations but not for crf.parameters since it can have infinite
// weights associated with states that are not likely.
public void assertNotNaNOrInfinite () {
for (int i = 0; i < weights.length; i++)
assert (!weights[i].isNaNOrInfinite());
assert (!MatrixOps.isNaNOrInfinite(defaultWeights));
assert (!MatrixOps.isNaNOrInfinite(initialWeights));
assert (!MatrixOps.isNaNOrInfinite(finalWeights));
}
public void plusEquals (Factors other, double factor) {
plusEquals(other, factor, false);
}
public void plusEquals (Factors other, double factor, boolean obeyWeightsFrozen) {
for (int i = 0; i < weights.length; i++) {
if (obeyWeightsFrozen && weightsFrozen[i]) continue;
this.weights[i].plusEqualsSparse(other.weights[i], factor);
this.defaultWeights[i] += other.defaultWeights[i] * factor;
}
for (int i = 0; i < initialWeights.length; i++) {
this.initialWeights[i] += other.initialWeights[i] * factor;
this.finalWeights[i] += other.finalWeights[i] * factor;
}
}
/** Return the log(p(parameters)) according to a zero-mean Gaussian with given variance. */
public double gaussianPrior (double variance) {
double value = 0;
double priorDenom = 2 * variance;
assert (initialWeights.length == finalWeights.length);
for (int i = 0; i < initialWeights.length; i++) {
if (!Double.isInfinite(initialWeights[i])) value -= initialWeights[i] * initialWeights[i] / priorDenom;
if (!Double.isInfinite(finalWeights[i])) value -= finalWeights[i] * finalWeights[i] / priorDenom;
}
double w;
for (int i = 0; i < weights.length; i++) {
if (!Double.isInfinite(defaultWeights[i])) value -= defaultWeights[i] * defaultWeights[i] / priorDenom;
for (int j = 0; j < weights[i].numLocations(); j++) {
w = weights[i].valueAtLocation (j);
if (!Double.isInfinite(w)) value -= w * w / priorDenom;
}
}
return value;
}
public void plusEqualsGaussianPriorGradient (Factors other, double variance) {
assert (initialWeights.length == finalWeights.length);
for (int i = 0; i < initialWeights.length; i++) {
// gsc: checking initial/final weights of crf.parameters as well since we could
// have a state machine where some states have infinite initial and/or final weight
if (!Double.isInfinite(initialWeights[i]) && !Double.isInfinite(other.initialWeights[i]))
initialWeights[i] -= other.initialWeights[i] / variance;
if (!Double.isInfinite(finalWeights[i]) && !Double.isInfinite(other.finalWeights[i]))
finalWeights[i] -= other.finalWeights[i] / variance;
}
double w, ow;
for (int i = 0; i < weights.length; i++) {
if (weightsFrozen[i]) continue;
// TODO Note that there doesn't seem to be a way to freeze the initialWeights and finalWeights
// TODO Should we also obey FeatureSelection here? No need; it is enforced by the creation of the weights.
if (!Double.isInfinite(defaultWeights[i])) defaultWeights[i] -= other.defaultWeights[i] / variance;
for (int j = 0; j < weights[i].numLocations(); j++) {
w = weights[i].valueAtLocation (j);
ow = other.weights[i].valueAtLocation (j);
if (!Double.isInfinite(w)) weights[i].setValueAtLocation(j, w - (ow/variance));
}
}
}
/** Return the log(p(parameters)) according to a hyperbolic curve that is a smooth approximation to an L1 prior. */
public double hyberbolicPrior (double slope, double sharpness) {
double value = 0;
assert (initialWeights.length == finalWeights.length);
for (int i = 0; i < initialWeights.length; i++) {
if (!Double.isInfinite(initialWeights[i]))
value -= (slope / sharpness * Math.log (Maths.cosh (sharpness * -initialWeights[i])));
if (!Double.isInfinite(finalWeights[i]))
value -= (slope / sharpness * Math.log (Maths.cosh (sharpness * -finalWeights[i])));
}
double w;
for (int i = 0; i < weights.length; i++) {
value -= (slope / sharpness * Math.log (Maths.cosh (sharpness * defaultWeights[i])));
for (int j = 0; j < weights[i].numLocations(); j++) {
w = weights[i].valueAtLocation(j);
if (!Double.isInfinite(w))
value -= (slope / sharpness * Math.log (Maths.cosh (sharpness * w)));
}
}
return value;
}
public void plusEqualsHyperbolicPriorGradient (Factors other, double slope, double sharpness) {
// TODO This method could use some careful checking over, especially for flipped negations
assert (initialWeights.length == finalWeights.length);
double ss = slope * sharpness;
for (int i = 0; i < initialWeights.length; i++) {
// gsc: checking initial/final weights of crf.parameters as well since we could
// have a state machine where some states have infinite initial and/or final weight
if (!Double.isInfinite(initialWeights[i]) && !Double.isInfinite(other.initialWeights[i]))
initialWeights[i] += ss * Maths.tanh (-other.initialWeights[i]);
if (!Double.isInfinite(finalWeights[i]) && !Double.isInfinite(other.finalWeights[i]))
finalWeights[i] += ss * Maths.tanh (-other.finalWeights[i]);
}
double w, ow;
for (int i = 0; i < weights.length; i++) {
if (weightsFrozen[i]) continue;
// TODO Note that there doesn't seem to be a way to freeze the initialWeights and finalWeights
// TODO Should we also obey FeatureSelection here? No need; it is enforced by the creation of the weights.
if (!Double.isInfinite(defaultWeights[i])) defaultWeights[i] += ss * Maths.tanh(-other.defaultWeights[i]);
for (int j = 0; j < weights[i].numLocations(); j++) {
w = weights[i].valueAtLocation (j);
ow = other.weights[i].valueAtLocation (j);
if (!Double.isInfinite(w)) weights[i].setValueAtLocation(j, w + (ss * Maths.tanh(-ow)));
}
}
}
/** Instances of this inner class can be passed to various inference methods, which can then
* gather/increment sufficient statistics counts into the containing Factor instance. */
public class Incrementor implements Transducer.Incrementor {
public void incrementFinalState(Transducer.State s, double count) {
finalWeights[s.getIndex()] += count;
}
public void incrementInitialState(Transducer.State s, double count) {
initialWeights[s.getIndex()] += count;
}
public void incrementTransition(Transducer.TransitionIterator ti, double count) {
int index = ti.getIndex();
CRF.State source = (CRF.State)ti.getSourceState();
int nwi = source.weightsIndices[index].length;
int weightsIndex;
for (int wi = 0; wi < nwi; wi++) {
weightsIndex = source.weightsIndices[index][wi];
// For frozen weights, don't even gather their sufficient statistics; this is how we ensure that the gradient for these will be zero
if (weightsFrozen[weightsIndex]) continue;
// TODO Should we also obey FeatureSelection here? No need; it is enforced by the creation of the weights.
weights[weightsIndex].plusEqualsSparse ((FeatureVector)ti.getInput(), count);
defaultWeights[weightsIndex] += count;
}
}
}
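// A minimal usage sketch (assuming a CRF `crf` and input/output Sequences
// `input` and `output` already exist): expected sufficient statistics can be
// gathered into a zero-initialized Factors by handing its Incrementor to a
// sum-product lattice, e.g.
//
//   CRF.Factors expectations = new CRF.Factors(crf);
//   new SumLatticeDefault(crf, input, output, expectations.new Incrementor());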
public double getParametersAbsNorm ()
{
double ret = 0;
for (int i = 0; i < initialWeights.length; i++) {
if (initialWeights[i] > Transducer.IMPOSSIBLE_WEIGHT)
ret += Math.abs(initialWeights[i]);
if (finalWeights[i] > Transducer.IMPOSSIBLE_WEIGHT)
ret += Math.abs(finalWeights[i]);
}
for (int i = 0; i < weights.length; i++) {
ret += Math.abs(defaultWeights[i]);
int nl = weights[i].numLocations();
for (int j = 0; j < nl; j++)
ret += Math.abs(weights[i].valueAtLocation(j));
}
return ret;
}
public class WeightedIncrementor implements Transducer.Incrementor {
double instanceWeight = 1.0;
public WeightedIncrementor (double instanceWeight) {
this.instanceWeight = instanceWeight;
}
public void incrementFinalState(Transducer.State s, double count) {
finalWeights[s.getIndex()] += count * instanceWeight;
}
public void incrementInitialState(Transducer.State s, double count) {
initialWeights[s.getIndex()] += count * instanceWeight;
}
public void incrementTransition(Transducer.TransitionIterator ti, double count) {
int index = ti.getIndex();
CRF.State source = (CRF.State)ti.getSourceState();
int nwi = source.weightsIndices[index].length;
int weightsIndex;
count *= instanceWeight;
for (int wi = 0; wi < nwi; wi++) {
weightsIndex = source.weightsIndices[index][wi];
// For frozen weights, don't even gather their sufficient statistics; this is how we ensure that the gradient for these will be zero
if (weightsFrozen[weightsIndex]) continue;
// TODO Should we also obey FeatureSelection here? No need; it is enforced by the creation of the weights.
weights[weightsIndex].plusEqualsSparse ((FeatureVector)ti.getInput(), count);
defaultWeights[weightsIndex] += count;
}
}
}
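// Flattened parameter layout used by getParameters/setParameters and
// getParameter/setParameter below: for each state i, initialWeights[i] then
// finalWeights[i]; then, for each weight set i, defaultWeights[i] followed by
// the value at each of weights[i]'s sparse locations.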
public void getParameters (double[] buffer)
{
if (buffer.length != getNumFactors ())
throw new IllegalArgumentException ("Expected size of buffer: " + getNumFactors() + ", actual size: " + buffer.length);
int pi = 0;
for (int i = 0; i < initialWeights.length; i++) {
buffer[pi++] = initialWeights[i];
buffer[pi++] = finalWeights[i];
}
for (int i = 0; i < weights.length; i++) {
buffer[pi++] = defaultWeights[i];
int nl = weights[i].numLocations();
for (int j = 0; j < nl; j++)
buffer[pi++] = weights[i].valueAtLocation(j);
}
}
public double getParameter (int index) {
int numStateParms = 2 * initialWeights.length;
if (index < numStateParms) {
if (index % 2 == 0)
return initialWeights[index/2];
return finalWeights[index/2];
}
index -= numStateParms;
for (int i = 0; i < weights.length; i++) {
if (index == 0)
return this.defaultWeights[i];
index--;
if (index < weights[i].numLocations())
return weights[i].valueAtLocation (index);
index -= weights[i].numLocations();
}
throw new IllegalArgumentException ("index too high = "+index);
}
public void setParameters (double [] buff) {
assert (buff.length == getNumFactors());
int pi = 0;
for (int i = 0; i < initialWeights.length; i++) {
initialWeights[i] = buff[pi++];
finalWeights[i] = buff[pi++];
}
for (int i = 0; i < weights.length; i++) {
this.defaultWeights[i] = buff[pi++];
int nl = weights[i].numLocations();
for (int j = 0; j < nl; j++)
weights[i].setValueAtLocation (j, buff[pi++]);
}
}
public void setParameter (int index, double value) {
int numStateParms = 2 * initialWeights.length;
if (index < numStateParms) {
if (index % 2 == 0)
initialWeights[index/2] = value;
else
finalWeights[index/2] = value;
} else {
index -= numStateParms;
for (int i = 0; i < weights.length; i++) {
if (index == 0) {
defaultWeights[i] = value;
return;
}
index--;
if (index < weights[i].numLocations()) {
weights[i].setValueAtLocation (index, value);
return;
} else {
index -= weights[i].numLocations();
}
}
throw new IllegalArgumentException ("index too high = "+index);
}
}
// gsc: Serialization for Factors
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (weightAlphabet);
out.writeObject (weights);
out.writeObject (defaultWeights);
out.writeObject (weightsFrozen);
out.writeObject (initialWeights);
out.writeObject (finalWeights);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
weightAlphabet = (Alphabet) in.readObject ();
weights = (SparseVector[]) in.readObject ();
defaultWeights = (double[]) in.readObject ();
weightsFrozen = (boolean[]) in.readObject ();
initialWeights = (double[]) in.readObject ();
finalWeights = (double[]) in.readObject ();
}
}
public CRF (Pipe inputPipe, Pipe outputPipe)
{
super (inputPipe, outputPipe);
this.inputAlphabet = inputPipe.getDataAlphabet();
this.outputAlphabet = inputPipe.getTargetAlphabet();
//inputAlphabet.stopGrowth();
}
public CRF (Alphabet inputAlphabet, Alphabet outputAlphabet)
{
super (new Noop(inputAlphabet, outputAlphabet), null);
inputAlphabet.stopGrowth();
logger.info ("CRF input dictionary size = "+inputAlphabet.size());
//xxx outputAlphabet.stopGrowth();
this.inputAlphabet = inputAlphabet;
this.outputAlphabet = outputAlphabet;
}
/** Create a CRF whose states and weights are a copy of those from another CRF. */
public CRF (CRF other)
{
// This assumes that "other" has non-null inputPipe and outputPipe. We'd need to add another constructor to handle this if not.
this (other.getInputPipe (), other.getOutputPipe ());
copyStatesAndWeightsFrom (other);
assertWeightsLength ();
}
private void copyStatesAndWeightsFrom (CRF initialCRF)
{
this.parameters = new Factors (initialCRF.parameters, true); // This will copy all the transition weights
this.parameters.weightAlphabet = (Alphabet) initialCRF.parameters.weightAlphabet.clone();
//weightAlphabet = (Alphabet) initialCRF.weightAlphabet.clone ();
//weights = new SparseVector [initialCRF.weights.length];
states.clear ();
// Clear these, because they will be filled by this.addState()
this.parameters.initialWeights = new double[0];
this.parameters.finalWeights = new double[0];
for (int i = 0; i < initialCRF.states.size(); i++) {
State s = (State) initialCRF.getState (i);
String[][] weightNames = new String[s.weightsIndices.length][];
for (int j = 0; j < weightNames.length; j++) {
int[] thisW = s.weightsIndices[j];
weightNames[j] = (String[]) initialCRF.parameters.weightAlphabet.lookupObjects(thisW, new String [s.weightsIndices[j].length]);
}
addState (s.name, initialCRF.parameters.initialWeights[i], initialCRF.parameters.finalWeights[i],
s.destinationNames, s.labels, weightNames);
}
featureSelections = initialCRF.featureSelections.clone ();
// yyy weightsFrozen = (boolean[]) initialCRF.weightsFrozen.clone();
}
public Alphabet getInputAlphabet () { return inputAlphabet; }
public Alphabet getOutputAlphabet () { return outputAlphabet; }
/** This method should be called whenever the CRFs weights (parameters) have their structure/arity/number changed. */
public void weightsStructureChanged () {
weightsStructureChangeStamp++;
weightsValueChangeStamp++;
}
/** This method should be called whenever the CRFs weights (parameters) are changed. */
public void weightsValueChanged () {
weightsValueChangeStamp++;
}
// This method can be over-ridden in subclasses of CRF to return subclasses of CRF.State
protected CRF.State newState (String name, int index,
double initialWeight, double finalWeight,
String[] destinationNames,
String[] labelNames,
String[][] weightNames,
CRF crf)
{
return new State (name, index, initialWeight, finalWeight,
destinationNames, labelNames, weightNames, crf);
}
public void addState (String name, double initialWeight, double finalWeight,
String[] destinationNames,
String[] labelNames,
String[][] weightNames)
{
assert (weightNames.length == destinationNames.length);
assert (labelNames.length == destinationNames.length);
weightsStructureChanged();
if (name2state.get(name) != null)
throw new IllegalArgumentException ("State with name `"+name+"' already exists.");
parameters.initialWeights = MatrixOps.append(parameters.initialWeights, initialWeight);
parameters.finalWeights = MatrixOps.append(parameters.finalWeights, finalWeight);
State s = newState (name, states.size(), initialWeight, finalWeight,
destinationNames, labelNames, weightNames, this);
s.print ();
states.add (s);
if (initialWeight > IMPOSSIBLE_WEIGHT)
initialStates.add (s);
name2state.put (name, s);
}
public void addState (String name, double initialWeight, double finalWeight,
String[] destinationNames,
String[] labelNames,
String[] weightNames)
{
String[][] newWeightNames = new String[weightNames.length][1];
for (int i = 0; i < weightNames.length; i++)
newWeightNames[i][0] = weightNames[i];
this.addState (name, initialWeight, finalWeight, destinationNames, labelNames, newWeightNames);
}
/** Default gives separate parameters to each transition. */
public void addState (String name, double initialWeight, double finalWeight,
String[] destinationNames,
String[] labelNames)
{
assert (destinationNames.length == labelNames.length);
String[] weightNames = new String[labelNames.length];
for (int i = 0; i < labelNames.length; i++)
weightNames[i] = name + "->" + destinationNames[i] + ":" + labelNames[i];
this.addState (name, initialWeight, finalWeight, destinationNames, labelNames, weightNames);
}
/** Add a state with parameters equal to zero, and labels on out-going arcs
the same as their destination state names. */
public void addState (String name, String[] destinationNames)
{
this.addState (name, 0, 0, destinationNames, destinationNames);
}
/** Add a group of states that are fully connected with each other,
* with parameters equal to zero, and labels on their out-going arcs
* the same as their destination state names. */
public void addFullyConnectedStates (String[] stateNames)
{
for (int i = 0; i < stateNames.length; i++)
addState (stateNames[i], stateNames);
}
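// A minimal usage sketch (assuming a Pipe `trainingPipe` whose target alphabet
// holds the label strings): build a fully-connected first-order model, or add
// states explicitly with hypothetical label names.
//
//   CRF crf = new CRF(trainingPipe, null);
//   crf.addFullyConnectedStatesForLabels();            // one state per label
//   // or, by hand:
//   // crf.addState("B", new String[] {"B", "I", "O"});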
public void addFullyConnectedStatesForLabels ()
{
String[] labels = new String[outputAlphabet.size()];
// This assumes that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
logger.info ("CRF: outputAlphabet.lookup class = "+
outputAlphabet.lookupObject(i).getClass().getName());
labels[i] = (String) outputAlphabet.lookupObject(i);
}
addFullyConnectedStates (labels);
}
public void addStartState ()
{
addStartState ("<START>");
}
public void addStartState (String name)
{
for (int i = 0; i < numStates (); i++)
parameters.initialWeights[i] = IMPOSSIBLE_WEIGHT;
String[] dests = new String [numStates()];
for (int i = 0; i < dests.length; i++)
dests[i] = getState(i).getName();
addState (name, 0, 0.0, dests, dests); // initialWeight of 0.0
}
public void setAsStartState (State state)
{
for (int i = 0; i < numStates(); i++) {
Transducer.State other = getState (i);
if (other == state) {
other.setInitialWeight (0);
} else {
other.setInitialWeight (IMPOSSIBLE_WEIGHT);
}
}
weightsValueChanged();
}
private boolean[][] labelConnectionsIn (InstanceList trainingSet)
{
return labelConnectionsIn (trainingSet, null);
}
private boolean[][] labelConnectionsIn (InstanceList trainingSet, String start)
{
int numLabels = outputAlphabet.size();
boolean[][] connections = new boolean[numLabels][numLabels];
for (int i = 0; i < trainingSet.size(); i++) {
Instance instance = trainingSet.get(i);
FeatureSequence output = (FeatureSequence) instance.getTarget();
for (int j = 1; j < output.size(); j++) {
int sourceIndex = outputAlphabet.lookupIndex (output.get(j-1));
int destIndex = outputAlphabet.lookupIndex (output.get(j));
assert (sourceIndex >= 0 && destIndex >= 0);
connections[sourceIndex][destIndex] = true;
}
}
// Handle start state
if (start != null) {
int startIndex = outputAlphabet.lookupIndex (start);
for (int j = 0; j < outputAlphabet.size(); j++) {
connections[startIndex][j] = true;
}
}
return connections;
}
/**
* Add states to create a first-order Markov model on labels, adding only
* those transitions that occur in the given trainingSet.
*/
public void addStatesForLabelsConnectedAsIn (InstanceList trainingSet)
{
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn (trainingSet);
for (int i = 0; i < numLabels; i++) {
int numDestinations = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j]) numDestinations++;
String[] destinationNames = new String[numDestinations];
int destinationIndex = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
destinationNames[destinationIndex++] = (String)outputAlphabet.lookupObject(j);
addState ((String)outputAlphabet.lookupObject(i), destinationNames);
}
}
/**
* Add as many states as there are labels, but don't create separate weights
* for each source-destination pair of states. Instead have all the incoming
* transitions to a state share the same weights.
*/
public void addStatesForHalfLabelsConnectedAsIn (InstanceList trainingSet)
{
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn (trainingSet);
for (int i = 0; i < numLabels; i++) {
int numDestinations = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j]) numDestinations++;
String[] destinationNames = new String[numDestinations];
int destinationIndex = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
destinationNames[destinationIndex++] = (String)outputAlphabet.lookupObject(j);
addState ((String)outputAlphabet.lookupObject(i), 0.0, 0.0,
destinationNames, destinationNames, destinationNames);
}
}
/**
* Add as many states as there are labels, but don't create separate
* observational-test-weights for each source-destination pair of
* states---instead have all the incoming transitions to a state share the
* same observational-feature-test weights. However, do create a separate
* default feature for each transition (which acts as an HMM-style transition
* probability).
*/
public void addStatesForThreeQuarterLabelsConnectedAsIn (InstanceList trainingSet)
{
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn (trainingSet);
for (int i = 0; i < numLabels; i++) {
int numDestinations = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j]) numDestinations++;
String[] destinationNames = new String[numDestinations];
String[][] weightNames = new String[numDestinations][];
int destinationIndex = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j]) {
String labelName = (String)outputAlphabet.lookupObject(j);
destinationNames[destinationIndex] = labelName;
weightNames[destinationIndex] = new String[2];
// The "half-labels" will include all observed tests
weightNames[destinationIndex][0] = labelName;
// The "transition" weights will include only the default feature
String wn = (String)outputAlphabet.lookupObject(i) + "->" + (String)outputAlphabet.lookupObject(j);
weightNames[destinationIndex][1] = wn;
int wi = getWeightsIndex (wn);
// A new empty FeatureSelection won't allow any features here, so we only
// get the default feature for transitions
featureSelections[wi] = new FeatureSelection(trainingSet.getDataAlphabet());
destinationIndex++;
}
addState ((String)outputAlphabet.lookupObject(i), 0.0, 0.0,
destinationNames, destinationNames, weightNames);
}
}
public void addFullyConnectedStatesForThreeQuarterLabels (InstanceList trainingSet)
{
int numLabels = outputAlphabet.size();
for (int i = 0; i < numLabels; i++) {
String[] destinationNames = new String[numLabels];
String[][] weightNames = new String[numLabels][];
for (int j = 0; j < numLabels; j++) {
String labelName = (String)outputAlphabet.lookupObject(j);
destinationNames[j] = labelName;
weightNames[j] = new String[2];
// The "half-labels" will include all observational tests
weightNames[j][0] = labelName;
// The "transition" weights will include only the default feature
String wn = (String)outputAlphabet.lookupObject(i) + "->" + (String)outputAlphabet.lookupObject(j);
weightNames[j][1] = wn;
int wi = getWeightsIndex (wn);
// A new empty FeatureSelection won't allow any features here, so we only
// get the default feature for transitions
featureSelections[wi] = new FeatureSelection(trainingSet.getDataAlphabet());
}
addState ((String)outputAlphabet.lookupObject(i), 0.0, 0.0,
destinationNames, destinationNames, weightNames);
}
}
public void addFullyConnectedStatesForBiLabels ()
{
String[] labels = new String[outputAlphabet.size()];
// This assumes that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
logger.info ("CRF: outputAlphabet.lookup class = "+
outputAlphabet.lookupObject(i).getClass().getName());
labels[i] = (String) outputAlphabet.lookupObject(i);
}
for (int i = 0; i < labels.length; i++) {
for (int j = 0; j < labels.length; j++) {
String[] destinationNames = new String[labels.length];
for (int k = 0; k < labels.length; k++)
destinationNames[k] = labels[j]+LABEL_SEPARATOR+labels[k];
addState (labels[i]+LABEL_SEPARATOR+labels[j], 0.0, 0.0,
destinationNames, labels);
}
}
}
/**
* Add states to create a second-order Markov model on labels, adding only
* those transitions that occur in the given trainingSet.
*/
public void addStatesForBiLabelsConnectedAsIn (InstanceList trainingSet)
{
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn (trainingSet);
for (int i = 0; i < numLabels; i++) {
for (int j = 0; j < numLabels; j++) {
if (!connections[i][j])
continue;
int numDestinations = 0;
for (int k = 0; k < numLabels; k++)
if (connections[j][k]) numDestinations++;
String[] destinationNames = new String[numDestinations];
String[] labels = new String[numDestinations];
int destinationIndex = 0;
for (int k = 0; k < numLabels; k++)
if (connections[j][k]) {
destinationNames[destinationIndex] =
(String)outputAlphabet.lookupObject(j)+LABEL_SEPARATOR+(String)outputAlphabet.lookupObject(k);
labels[destinationIndex] = (String)outputAlphabet.lookupObject(k);
destinationIndex++;
}
addState ((String)outputAlphabet.lookupObject(i)+LABEL_SEPARATOR+
(String)outputAlphabet.lookupObject(j), 0.0, 0.0,
destinationNames, labels);
}
}
}
public void addFullyConnectedStatesForTriLabels ()
{
String[] labels = new String[outputAlphabet.size()];
// This assumes that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
logger.info ("CRF: outputAlphabet.lookup class = "+
outputAlphabet.lookupObject(i).getClass().getName());
labels[i] = (String) outputAlphabet.lookupObject(i);
}
for (int i = 0; i < labels.length; i++) {
for (int j = 0; j < labels.length; j++) {
for (int k = 0; k < labels.length; k++) {
String[] destinationNames = new String[labels.length];
for (int l = 0; l < labels.length; l++)
destinationNames[l] = labels[j]+LABEL_SEPARATOR+labels[k]+LABEL_SEPARATOR+labels[l];
addState (labels[i]+LABEL_SEPARATOR+labels[j]+LABEL_SEPARATOR+labels[k], 0.0, 0.0,
destinationNames, labels);
}
}
}
}
public void addSelfTransitioningStateForAllLabels (String name)
{
String[] labels = new String[outputAlphabet.size()];
String[] destinationNames = new String[outputAlphabet.size()];
// This assumes that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
logger.info ("CRF: outputAlphabet.lookup class = "+
outputAlphabet.lookupObject(i).getClass().getName());
labels[i] = (String) outputAlphabet.lookupObject(i);
destinationNames[i] = name;
}
addState (name, 0.0, 0.0, destinationNames, labels);
}
private String concatLabels(String[] labels)
{
String sep = "";
StringBuffer buf = new StringBuffer();
for (int i = 0; i < labels.length; i++)
{
buf.append(sep).append(labels[i]);
sep = LABEL_SEPARATOR;
}
return buf.toString();
}
private String nextKGram(String[] history, int k, String next)
{
String sep = "";
StringBuffer buf = new StringBuffer();
int start = history.length + 1 - k;
for (int i = start; i < history.length; i++)
{
buf.append(sep).append(history[i]);
sep = LABEL_SEPARATOR;
}
buf.append(sep).append(next);
return buf.toString();
}
private boolean allowedTransition(String prev, String curr,
Pattern no, Pattern yes)
{
String pair = concatLabels(new String[]{prev, curr});
if (no != null && no.matcher(pair).matches())
return false;
if (yes != null && !yes.matcher(pair).matches())
return false;
return true;
}
private boolean allowedHistory(String[] history, Pattern no, Pattern yes) {
for (int i = 1; i < history.length; i++)
if (!allowedTransition(history[i-1], history[i], no, yes))
return false;
return true;
}
/**
* Assumes that the CRF's output alphabet contains
* <code>String</code>s. Creates an order-<em>n</em> CRF with input
* predicates and output labels given by <code>trainingSet</code>
* and order, connectivity, and weights given by the remaining
* arguments.
*
* @param trainingSet the training instances
* @param orders an array of increasing non-negative numbers giving
* the orders of the features for this CRF. The largest number
* <em>n</em> is the Markov order of the CRF. States are
* <em>n</em>-tuples of output labels. Each of the other numbers
* <em>k</em> in <code>orders</code> represents a weight set shared
* by all destination states whose last (most recent) <em>k</em>
* labels agree. If <code>orders</code> is <code>null</code>, an
* order-0 CRF is built.
* @param defaults If non-null, it must be the same length as
* <code>orders</code>, with <code>true</code> positions indicating
* that the weight set for the corresponding order contains only the
* weight for a default feature; otherwise, the weight set has
* weights for all features built from input predicates.
* @param start The label that represents the context of the start of
* a sequence. It may also be used as a sequence label. If no label of
* this name exists, one will be added. Connections will be added between
* the start label and all other labels, even if <tt>fullyConnected</tt> is
* <tt>false</tt>. This argument may be null, in which case no special
* start state is added.
* @param forbidden If non-null, specifies what pairs of successive
* labels are not allowed, both for constructing order-<em>n</em>
* states and for transitions. A label pair (<em>u</em>,<em>v</em>)
* is not allowed if <em>u</em> + "," + <em>v</em> matches
* <code>forbidden</code>.
* @param allowed If non-null, specifies what pairs of successive
* labels are allowed, both for constructing order-<em>n</em>
* states and for transitions. A label pair (<em>u</em>,<em>v</em>)
* is allowed only if <em>u</em> + "," + <em>v</em> matches
* <code>allowed</code>.
* @param fullyConnected Whether to include all allowed transitions,
	 * even those not occurring in <code>trainingSet</code>.
* @return The name of the start state.
*
*/
public String addOrderNStates(InstanceList trainingSet, int[] orders,
boolean[] defaults, String start,
Pattern forbidden, Pattern allowed,
boolean fullyConnected)
{
boolean[][] connections = null;
if (start != null)
outputAlphabet.lookupIndex (start);
if (!fullyConnected)
connections = labelConnectionsIn (trainingSet, start);
int order = -1;
		if (orders != null && defaults != null && defaults.length != orders.length)
throw new IllegalArgumentException("Defaults must be null or match orders");
if (orders == null)
order = 0;
else
{
for (int i = 0; i < orders.length; i++) {
if (orders[i] <= order)
throw new IllegalArgumentException("Orders must be non-negative and in ascending order");
order = orders[i];
}
if (order < 0) order = 0;
}
if (order > 0)
{
int[] historyIndexes = new int[order];
String[] history = new String[order];
String label0 = (String)outputAlphabet.lookupObject(0);
for (int i = 0; i < order; i++)
history[i] = label0;
int numLabels = outputAlphabet.size();
while (historyIndexes[0] < numLabels)
{
logger.info("Preparing " + concatLabels(history));
if (allowedHistory(history, forbidden, allowed))
{
String stateName = concatLabels(history);
int nt = 0;
String[] destNames = new String[numLabels];
String[] labelNames = new String[numLabels];
String[][] weightNames = new String[numLabels][orders.length];
for (int nextIndex = 0; nextIndex < numLabels; nextIndex++)
{
String next = (String)outputAlphabet.lookupObject(nextIndex);
if (allowedTransition(history[order-1], next, forbidden, allowed)
&& (fullyConnected ||
connections[historyIndexes[order-1]][nextIndex]))
{
destNames[nt] = nextKGram(history, order, next);
labelNames[nt] = next;
for (int i = 0; i < orders.length; i++)
{
weightNames[nt][i] = nextKGram(history, orders[i]+1, next);
if (defaults != null && defaults[i]) {
int wi = getWeightsIndex (weightNames[nt][i]);
// Using empty feature selection gives us only the
// default features
featureSelections[wi] =
new FeatureSelection(trainingSet.getDataAlphabet());
}
}
nt++;
}
}
if (nt < numLabels)
{
String[] newDestNames = new String[nt];
String[] newLabelNames = new String[nt];
String[][] newWeightNames = new String[nt][];
for (int t = 0; t < nt; t++)
{
newDestNames[t] = destNames[t];
newLabelNames[t] = labelNames[t];
newWeightNames[t] = weightNames[t];
}
destNames = newDestNames;
labelNames = newLabelNames;
weightNames = newWeightNames;
}
for (int i = 0; i < destNames.length; i++)
{
StringBuffer b = new StringBuffer();
for (int j = 0; j < orders.length; j++)
b.append(" ").append(weightNames[i][j]);
logger.info(stateName + "->" + destNames[i] +
"(" + labelNames[i] + ")" + b.toString());
}
addState (stateName, 0.0, 0.0, destNames, labelNames, weightNames);
}
for (int o = order-1; o >= 0; o--)
if (++historyIndexes[o] < numLabels)
{
history[o] = (String)outputAlphabet.lookupObject(historyIndexes[o]);
break;
} else if (o > 0)
{
historyIndexes[o] = 0;
history[o] = label0;
}
}
for (int i = 0; i < order; i++)
history[i] = start;
return concatLabels(history);
}
String[] stateNames = new String[outputAlphabet.size()];
for (int s = 0; s < outputAlphabet.size(); s++)
stateNames[s] = (String)outputAlphabet.lookupObject(s);
for (int s = 0; s < outputAlphabet.size(); s++)
addState(stateNames[s], 0.0, 0.0, stateNames, stateNames, stateNames);
return start;
}
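	/* Usage sketch (hypothetical label names): building an order-2 model in the style
	 * of SimpleTagger, where states are label pairs and an order-1 weight set is
	 * shared across states that agree on the most recent label. The "O" start label
	 * and the forbidden pattern below are illustrative only.
	 *
	 *   int[] orders = new int[] {1, 2};
	 *   Pattern forbidden = Pattern.compile("O,I-.*");
	 *   String startName = crf.addOrderNStates(trainingSet, orders, null,
	 *                                          "O", forbidden, null, true);
	 *   for (int i = 0; i < crf.numStates(); i++)
	 *     crf.getState(i).setInitialWeight(Transducer.IMPOSSIBLE_WEIGHT);
	 *   crf.getState(startName).setInitialWeight(0.0);
	 */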
public State getState (String name)
{
return name2state.get(name);
}
public void setWeights (int weightsIndex, SparseVector transitionWeights)
{
weightsStructureChanged();
if (weightsIndex >= parameters.weights.length || weightsIndex < 0)
throw new IllegalArgumentException ("weightsIndex "+weightsIndex+" is out of bounds");
parameters.weights[weightsIndex] = transitionWeights;
}
public void setWeights (String weightName, SparseVector transitionWeights)
{
setWeights (getWeightsIndex (weightName), transitionWeights);
}
public String getWeightsName (int weightIndex)
{
return (String) parameters.weightAlphabet.lookupObject (weightIndex);
}
public SparseVector getWeights (String weightName)
{
return parameters.weights[getWeightsIndex (weightName)];
}
public SparseVector getWeights (int weightIndex)
{
return parameters.weights[weightIndex];
}
public double[] getDefaultWeights () {
return parameters.defaultWeights;
}
public SparseVector[] getWeights () {
return parameters.weights;
}
public void setWeights (SparseVector[] m) {
weightsStructureChanged();
parameters.weights = m;
}
public void setDefaultWeights (double[] w) {
weightsStructureChanged();
parameters.defaultWeights = w;
}
public void setDefaultWeight (int widx, double val) {
weightsValueChanged();
parameters.defaultWeights[widx] = val;
}
// Support for making cc.mallet.optimize.Optimizable CRFs
public boolean isWeightsFrozen (int weightsIndex)
{
return parameters.weightsFrozen [weightsIndex];
}
/**
* Freezes a set of weights to their current values.
* Frozen weights are used for labeling sequences (as in <tt>transduce</tt>),
	 * but are not modified by the <tt>train</tt> methods.
* @param weightsIndex Index of weight set to freeze.
*/
public void freezeWeights (int weightsIndex)
{
parameters.weightsFrozen [weightsIndex] = true;
}
/**
* Freezes a set of weights to their current values.
* Frozen weights are used for labeling sequences (as in <tt>transduce</tt>),
	 * but are not modified by the <tt>train</tt> methods.
* @param weightsName Name of weight set to freeze.
*/
public void freezeWeights (String weightsName)
{
int widx = getWeightsIndex (weightsName);
freezeWeights (widx);
}
/**
* Unfreezes a set of weights.
* Frozen weights are used for labeling sequences (as in <tt>transduce</tt>),
	 * but are not modified by the <tt>train</tt> methods.
* @param weightsName Name of weight set to unfreeze.
*/
public void unfreezeWeights (String weightsName)
{
int widx = getWeightsIndex (weightsName);
parameters.weightsFrozen[widx] = false;
}
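	/* Usage sketch (hypothetical weight-set name): freeze one weight set so that
	 * subsequent training leaves it at its current values, then unfreeze it again.
	 * The name depends on how the states and their weight sets were added.
	 *
	 *   crf.freezeWeights("O,B-PER");
	 *   // ... train ...
	 *   crf.unfreezeWeights("O,B-PER");
	 */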
public void setFeatureSelection (int weightIdx, FeatureSelection fs)
{
featureSelections [weightIdx] = fs;
weightsStructureChanged(); // Is this necessary? -akm 11/2007
}
public void setWeightsDimensionAsIn (InstanceList trainingData) {
setWeightsDimensionAsIn(trainingData, false);
}
// gsc: changing this to consider the case when trainingData is a mix of labeled and unlabeled data,
// and we want to use the unlabeled data as well to set some weights (while using the unsupported trick)
// *note*: 'target' sequence of an unlabeled instance is either null or is of size zero.
public void setWeightsDimensionAsIn (InstanceList trainingData, boolean useSomeUnsupportedTrick)
{
final BitSet[] weightsPresent;
int numWeights = 0;
// The value doesn't actually change, because the "new" parameters will have zero value
// but the gradient changes because the parameters now have different layout.
weightsStructureChanged();
weightsPresent = new BitSet[parameters.weights.length];
for (int i = 0; i < parameters.weights.length; i++)
weightsPresent[i] = new BitSet();
// Put in the weights that are already there
for (int i = 0; i < parameters.weights.length; i++)
for (int j = parameters.weights[i].numLocations()-1; j >= 0; j--)
weightsPresent[i].set (parameters.weights[i].indexAtLocation(j));
// Put in the weights in the training set
for (int i = 0; i < trainingData.size(); i++) {
Instance instance = trainingData.get(i);
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
// gsc: trainingData can have unlabeled instances as well
if (output != null && output.size() > 0) {
// Do it for the paths consistent with the labels...
sumLatticeFactory.newSumLattice (this, input, output, new Transducer.Incrementor() {
public void incrementTransition (Transducer.TransitionIterator ti, double count) {
State source = (CRF.State)ti.getSourceState();
FeatureVector input = (FeatureVector)ti.getInput();
int index = ti.getIndex();
int nwi = source.weightsIndices[index].length;
for (int wi = 0; wi < nwi; wi++) {
int weightsIndex = source.weightsIndices[index][wi];
for (int i = 0; i < input.numLocations(); i++) {
int featureIndex = input.indexAtLocation(i);
if ((globalFeatureSelection == null || globalFeatureSelection.contains(featureIndex))
&& (featureSelections == null
|| featureSelections[weightsIndex] == null
|| featureSelections[weightsIndex].contains(featureIndex)))
weightsPresent[weightsIndex].set (featureIndex);
}
}
}
public void incrementInitialState (Transducer.State s, double count) { }
public void incrementFinalState (Transducer.State s, double count) { }
});
}
// ...and also do it for the paths selected by the current model (so we will get some negative weights)
if (useSomeUnsupportedTrick && this.getParametersAbsNorm() > 0) {
if (i == 0)
logger.info ("CRF: Incremental training detected. Adding weights for some unsupported features...");
// (do this once some training is done)
sumLatticeFactory.newSumLattice (this, input, null, new Transducer.Incrementor() {
public void incrementTransition (Transducer.TransitionIterator ti, double count) {
if (count < 0.2) // Only create features for transitions with probability above 0.2
return; // This 0.2 is somewhat arbitrary -akm
State source = (CRF.State)ti.getSourceState();
FeatureVector input = (FeatureVector)ti.getInput();
int index = ti.getIndex();
int nwi = source.weightsIndices[index].length;
for (int wi = 0; wi < nwi; wi++) {
int weightsIndex = source.weightsIndices[index][wi];
for (int i = 0; i < input.numLocations(); i++) {
int featureIndex = input.indexAtLocation(i);
if ((globalFeatureSelection == null || globalFeatureSelection.contains(featureIndex))
&& (featureSelections == null
|| featureSelections[weightsIndex] == null
|| featureSelections[weightsIndex].contains(featureIndex)))
weightsPresent[weightsIndex].set (featureIndex);
}
}
}
public void incrementInitialState (Transducer.State s, double count) { }
public void incrementFinalState (Transducer.State s, double count) { }
});
}
}
SparseVector[] newWeights = new SparseVector[parameters.weights.length];
for (int i = 0; i < parameters.weights.length; i++) {
int numLocations = weightsPresent[i].cardinality ();
logger.info ("CRF weights["+parameters.weightAlphabet.lookupObject(i)+"] num features = "+numLocations);
int[] indices = new int[numLocations];
for (int j = 0; j < numLocations; j++) {
indices[j] = weightsPresent[i].nextSetBit (j == 0 ? 0 : indices[j-1]+1);
//System.out.println ("CRF4 has index "+indices[j]);
}
newWeights[i] = new IndexedSparseVector (indices, new double[numLocations],
numLocations, numLocations, false, false, false);
newWeights[i].plusEqualsSparse (parameters.weights[i]); // Put in the previous weights
numWeights += (numLocations + 1);
}
logger.info("Number of weights = "+numWeights);
parameters.weights = newWeights;
}
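	/* Usage sketch: the typical setup is to allocate the sparse weight structure from
	 * the training data before handing the CRF to a trainer, e.g.
	 *
	 *   crf.setWeightsDimensionAsIn(trainingSet, false);  // supported features only
	 *   // or, when continuing training and also wanting weights for some
	 *   // unsupported features on probable transitions:
	 *   // crf.setWeightsDimensionAsIn(trainingSet, true);
	 */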
public void setWeightsDimensionDensely ()
{
weightsStructureChanged();
SparseVector[] newWeights = new SparseVector [parameters.weights.length];
int max = inputAlphabet.size();
int numWeights = 0;
logger.info ("CRF using dense weights, num input features = "+max);
for (int i = 0; i < parameters.weights.length; i++) {
int nfeatures;
if (featureSelections[i] == null) {
nfeatures = max;
newWeights [i] = new SparseVector (null, new double [max],
max, max, false, false, false);
} else {
// Respect the featureSelection
FeatureSelection fs = featureSelections[i];
nfeatures = fs.getBitSet ().cardinality ();
int[] idxs = new int [nfeatures];
int j = 0, thisIdx = -1;
while ((thisIdx = fs.nextSelectedIndex (thisIdx + 1)) >= 0) {
idxs[j++] = thisIdx;
}
newWeights[i] = new SparseVector (idxs, new double [nfeatures], nfeatures, nfeatures, false, false, false);
}
newWeights [i].plusEqualsSparse (parameters.weights [i]);
numWeights += (nfeatures + 1);
}
logger.info("Number of weights = "+numWeights);
parameters.weights = newWeights;
}
// Create a new weight Vector if weightName is new.
public int getWeightsIndex (String weightName)
{
int wi = parameters.weightAlphabet.lookupIndex (weightName);
if (wi == -1)
throw new IllegalArgumentException ("Alphabet frozen, and no weight with name "+ weightName);
if (parameters.weights == null) {
assert (wi == 0);
parameters.weights = new SparseVector[1];
parameters.defaultWeights = new double[1];
featureSelections = new FeatureSelection[1];
parameters.weightsFrozen = new boolean [1];
// Use initial capacity of 8
parameters.weights[0] = new IndexedSparseVector ();
parameters.defaultWeights[0] = 0;
featureSelections[0] = null;
weightsStructureChanged();
} else if (wi == parameters.weights.length) {
SparseVector[] newWeights = new SparseVector[parameters.weights.length+1];
double[] newDefaultWeights = new double[parameters.weights.length+1];
FeatureSelection[] newFeatureSelections = new FeatureSelection[parameters.weights.length+1];
for (int i = 0; i < parameters.weights.length; i++) {
newWeights[i] = parameters.weights[i];
newDefaultWeights[i] = parameters.defaultWeights[i];
newFeatureSelections[i] = featureSelections[i];
}
newWeights[wi] = new IndexedSparseVector ();
newDefaultWeights[wi] = 0;
newFeatureSelections[wi] = null;
parameters.weights = newWeights;
parameters.defaultWeights = newDefaultWeights;
featureSelections = newFeatureSelections;
parameters.weightsFrozen = ArrayUtils.append (parameters.weightsFrozen, false);
weightsStructureChanged();
}
//setTrainable (false);
return wi;
}
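	/* Usage sketch (hypothetical weight-set name): look up a weight set by name,
	 * creating it if the alphabet is not frozen, and bias its default feature.
	 *
	 *   int wi = crf.getWeightsIndex("O,B-PER");
	 *   crf.setDefaultWeight(wi, -1.0);
	 */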
private void assertWeightsLength ()
{
if (parameters.weights != null) {
assert parameters.defaultWeights != null;
assert featureSelections != null;
assert parameters.weightsFrozen != null;
int n = parameters.weights.length;
assert parameters.defaultWeights.length == n;
assert featureSelections.length == n;
assert parameters.weightsFrozen.length == n;
}
}
public int numStates () { return states.size(); }
public Transducer.State getState (int index) {
return states.get(index); }
public Iterator initialStateIterator () {
return initialStates.iterator (); }
public boolean isTrainable () { return true; }
// gsc: accessor methods
public int getWeightsValueChangeStamp() {
return weightsValueChangeStamp;
}
// kedar: access structure stamp method
public int getWeightsStructureChangeStamp() {
return weightsStructureChangeStamp;
}
public Factors getParameters ()
{
return parameters;
}
// gsc
public double getParametersAbsNorm ()
{
double ret = 0;
for (int i = 0; i < numStates(); i++) {
ret += Math.abs (parameters.initialWeights[i]);
ret += Math.abs (parameters.finalWeights[i]);
}
for (int i = 0; i < parameters.weights.length; i++) {
ret += Math.abs (parameters.defaultWeights[i]);
ret += parameters.weights[i].absNorm();
}
return ret;
}
/** Only sets the parameter from the first group of parameters. */
public void setParameter (int sourceStateIndex, int destStateIndex, int featureIndex, double value)
{
setParameter(sourceStateIndex, destStateIndex, featureIndex, 0, value);
}
public void setParameter (int sourceStateIndex, int destStateIndex, int featureIndex, int weightIndex, double value)
{
weightsValueChanged();
State source = (State)getState(sourceStateIndex);
State dest = (State) getState(destStateIndex);
int rowIndex;
for (rowIndex = 0; rowIndex < source.destinationNames.length; rowIndex++)
if (source.destinationNames[rowIndex].equals (dest.name))
break;
if (rowIndex == source.destinationNames.length)
			throw new IllegalArgumentException ("No transition from state "+sourceStateIndex+" to state "+destStateIndex+".");
int weightsIndex = source.weightsIndices[rowIndex][weightIndex];
if (featureIndex < 0)
parameters.defaultWeights[weightsIndex] = value;
else {
parameters.weights[weightsIndex].setValue (featureIndex, value);
}
}
/** Only gets the parameter from the first group of parameters. */
public double getParameter (int sourceStateIndex, int destStateIndex, int featureIndex)
{
return getParameter(sourceStateIndex,destStateIndex,featureIndex,0);
}
public double getParameter (int sourceStateIndex, int destStateIndex, int featureIndex, int weightIndex)
{
State source = (State)getState(sourceStateIndex);
State dest = (State) getState(destStateIndex);
int rowIndex;
for (rowIndex = 0; rowIndex < source.destinationNames.length; rowIndex++)
if (source.destinationNames[rowIndex].equals (dest.name))
break;
if (rowIndex == source.destinationNames.length)
			throw new IllegalArgumentException ("No transition from state "+sourceStateIndex+" to state "+destStateIndex+".");
int weightsIndex = source.weightsIndices[rowIndex][weightIndex];
if (featureIndex < 0)
return parameters.defaultWeights[weightsIndex];
return parameters.weights[weightsIndex].value (featureIndex);
}
public int getNumParameters () {
if (cachedNumParametersStamp != weightsStructureChangeStamp) {
this.numParameters = 2 * this.numStates() + this.parameters.defaultWeights.length;
for (int i = 0; i < parameters.weights.length; i++)
numParameters += parameters.weights[i].numLocations();
}
return this.numParameters;
}
/** This method is deprecated. */
	// But it is here as a reminder to do something about induceFeaturesFor().
@Deprecated
public Sequence[] predict (InstanceList testing) {
testing.setFeatureSelection(this.globalFeatureSelection);
for (int i = 0; i < featureInducers.size(); i++) {
FeatureInducer klfi = (FeatureInducer)featureInducers.get(i);
klfi.induceFeaturesFor (testing, false, false);
}
Sequence[] ret = new Sequence[testing.size()];
for (int i = 0; i < testing.size(); i++) {
Instance instance = testing.get(i);
Sequence input = (Sequence) instance.getData();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = new MaxLatticeDefault(this, input).bestOutputSequence();
assert (predOutput.size() == trueOutput.size());
ret[i] = predOutput;
}
return ret;
}
/** This method is deprecated. */
@Deprecated
public void evaluate (TransducerEvaluator eval, InstanceList testing) {
throw new IllegalStateException ("This method is no longer usable. Use CRF.induceFeaturesFor() instead.");
/*
testing.setFeatureSelection(this.globalFeatureSelection);
for (int i = 0; i < featureInducers.size(); i++) {
FeatureInducer klfi = (FeatureInducer)featureInducers.get(i);
klfi.induceFeaturesFor (testing, false, false);
}
eval.evaluate (this, true, 0, true, 0.0, null, null, testing);
*/
}
/** When the CRF has done feature induction, these new feature conjunctions must be
* created in the test or validation data in order for them to take effect. */
public void induceFeaturesFor (InstanceList instances) {
instances.setFeatureSelection(this.globalFeatureSelection);
for (int i = 0; i < featureInducers.size(); i++) {
FeatureInducer klfi = featureInducers.get(i);
klfi.induceFeaturesFor (instances, false, false);
}
}
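	/* Usage sketch (hypothetical test set): after training with feature induction,
	 * materialize the induced conjunctions in held-out data before decoding, e.g.
	 *
	 *   crf.induceFeaturesFor(testInstances);
	 *   Sequence out = crf.transduce((Sequence) testInstances.get(0).getData());
	 */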
// TODO Put support to Optimizable here, including getValue(InstanceList)??
public void print ()
{
print (new PrintWriter (new OutputStreamWriter (System.out), true));
}
public void print (PrintWriter out)
{
out.println ("*** CRF STATES ***");
for (int i = 0; i < numStates (); i++) {
State s = (State) getState (i);
out.print ("STATE NAME=\"");
out.print (s.name); out.print ("\" ("); out.print (s.destinations.length); out.print (" outgoing transitions)\n");
out.print (" "); out.print ("initialWeight = "); out.print (parameters.initialWeights[i]); out.print ('\n');
out.print (" "); out.print ("finalWeight = "); out.print (parameters.finalWeights[i]); out.print ('\n');
out.println (" transitions:");
for (int j = 0; j < s.destinations.length; j++) {
out.print (" "); out.print (s.name); out.print (" -> "); out.println (s.getDestinationState (j).getName ());
for (int k = 0; k < s.weightsIndices[j].length; k++) {
out.print (" WEIGHTS = \"");
int widx = s.weightsIndices[j][k];
out.print (parameters.weightAlphabet.lookupObject (widx).toString ());
out.print ("\"\n");
}
}
out.println ();
}
if (parameters.weights == null)
out.println ("\n\n*** NO WEIGHTS ***");
else {
out.println ("\n\n*** CRF WEIGHTS ***");
for (int widx = 0; widx < parameters.weights.length; widx++) {
out.println ("WEIGHTS NAME = " + parameters.weightAlphabet.lookupObject (widx));
out.print (": <DEFAULT_FEATURE> = "); out.print (parameters.defaultWeights[widx]); out.print ('\n');
SparseVector transitionWeights = parameters.weights[widx];
if (transitionWeights.numLocations () == 0)
continue;
RankedFeatureVector rfv = new RankedFeatureVector (inputAlphabet, transitionWeights);
for (int m = 0; m < rfv.numLocations (); m++) {
double v = rfv.getValueAtRank (m);
//int index = rfv.indexAtLocation (rfv.getIndexAtRank (m)); // This doesn't make any sense. How did this ever work? -akm 12/2007
int index = rfv.getIndexAtRank (m);
Object feature = inputAlphabet.lookupObject (index);
if (v != 0) {
out.print (": "); out.print (feature); out.print (" = "); out.println (v);
}
}
}
}
out.flush ();
}
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f));
oos.writeObject(this);
oos.close();
}
catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
// gsc: Serialization for CRF class
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (inputAlphabet);
out.writeObject (outputAlphabet);
out.writeObject (states);
out.writeObject (initialStates);
out.writeObject (name2state);
out.writeObject (parameters);
out.writeObject (globalFeatureSelection);
out.writeObject (featureSelections);
out.writeObject (featureInducers);
out.writeInt (weightsValueChangeStamp);
out.writeInt (weightsStructureChangeStamp);
out.writeInt (cachedNumParametersStamp);
out.writeInt (numParameters);
}
@SuppressWarnings("unchecked")
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt ();
inputAlphabet = (Alphabet) in.readObject ();
outputAlphabet = (Alphabet) in.readObject ();
states = (ArrayList<State>) in.readObject ();
initialStates = (ArrayList<State>) in.readObject ();
name2state = (HashMap) in.readObject ();
parameters = (Factors) in.readObject ();
globalFeatureSelection = (FeatureSelection) in.readObject ();
featureSelections = (FeatureSelection[]) in.readObject ();
featureInducers = (ArrayList<FeatureInducer>) in.readObject ();
weightsValueChangeStamp = in.readInt ();
weightsStructureChangeStamp = in.readInt ();
cachedNumParametersStamp = in.readInt ();
numParameters = in.readInt ();
}
// Why is this "static"? Couldn't it be a non-static inner class? (In Transducer also) -akm 12/2007
public static class State extends Transducer.State implements Serializable
{
// Parameters indexed by destination state, feature index
String name;
int index;
String[] destinationNames;
State[] destinations; // N.B. elements are null until getDestinationState(int) is called
int[][] weightsIndices; // contains indices into CRF.weights[],
String[] labels;
CRF crf;
// No arg constructor so serialization works
protected State() {
super ();
}
protected State (String name, int index,
double initialWeight, double finalWeight,
String[] destinationNames,
String[] labelNames,
String[][] weightNames,
CRF crf)
{
super ();
assert (destinationNames.length == labelNames.length);
assert (destinationNames.length == weightNames.length);
this.name = name;
this.index = index;
// Note: setting these parameters here is actually redundant; they were set already in CRF.addState(...)
// I'm considering removing initialWeight and finalWeight as arguments to this constructor, but need to think more -akm 12/2007
// If CRF.State were non-static, then this constructor could add the state to the list of states, and put it in the name2state also.
crf.parameters.initialWeights[index] = initialWeight;
crf.parameters.finalWeights[index] = finalWeight;
this.destinationNames = new String[destinationNames.length];
this.destinations = new State[labelNames.length];
this.weightsIndices = new int[labelNames.length][];
this.labels = new String[labelNames.length];
this.crf = crf;
for (int i = 0; i < labelNames.length; i++) {
// Make sure this label appears in our output Alphabet
crf.outputAlphabet.lookupIndex (labelNames[i]);
this.destinationNames[i] = destinationNames[i];
this.labels[i] = labelNames[i];
this.weightsIndices[i] = new int[weightNames[i].length];
for (int j = 0; j < weightNames[i].length; j++)
this.weightsIndices[i][j] = crf.getWeightsIndex (weightNames[i][j]);
}
crf.weightsStructureChanged();
}
public Transducer getTransducer () { return crf; }
public double getInitialWeight () { return crf.parameters.initialWeights[index]; }
public void setInitialWeight (double c) { crf.parameters.initialWeights[index]= c; }
public double getFinalWeight () { return crf.parameters.finalWeights[index]; }
public void setFinalWeight (double c) { crf.parameters.finalWeights[index] = c; }
public void print ()
{
System.out.println ("State #"+index+" \""+name+"\"");
System.out.println ("initialWeight="+crf.parameters.initialWeights[index]+", finalWeight="+crf.parameters.finalWeights[index]);
System.out.println ("#destinations="+destinations.length);
for (int i = 0; i < destinations.length; i++)
System.out.println ("-> "+destinationNames[i]);
}
public int numDestinations () { return destinations.length;}
public String[] getWeightNames (int index) {
int[] indices = this.weightsIndices[index];
String[] ret = new String[indices.length];
for (int i=0; i < ret.length; i++)
ret[i] = crf.parameters.weightAlphabet.lookupObject(indices[i]).toString();
return ret;
}
public void addWeight (int didx, String weightName) {
int widx = crf.getWeightsIndex (weightName);
weightsIndices[didx] = ArrayUtils.append (weightsIndices[didx], widx);
}
public String getLabelName (int index) {
return labels [index];
}
public State getDestinationState (int index)
{
State ret;
if ((ret = destinations[index]) == null) {
ret = destinations[index] = crf.name2state.get (destinationNames[index]);
if (ret == null)
throw new IllegalArgumentException ("this.name="+this.name+" index="+index+" destinationNames[index]="+destinationNames[index]+" name2state.size()="+ crf.name2state.size());
}
return ret;
}
public Transducer.TransitionIterator transitionIterator (Sequence inputSequence, int inputPosition,
Sequence outputSequence, int outputPosition)
{
if (inputPosition < 0 || outputPosition < 0)
throw new UnsupportedOperationException ("Epsilon transitions not implemented.");
if (inputSequence == null)
throw new UnsupportedOperationException ("CRFs are not generative models; must have an input sequence.");
return new TransitionIterator (this, (FeatureVectorSequence)inputSequence, inputPosition,
(outputSequence == null ? null : (String)outputSequence.get(outputPosition)), crf);
}
public Transducer.TransitionIterator transitionIterator (FeatureVector fv, String output)
{
return new TransitionIterator (this, fv, output, crf);
}
public String getName () { return name; }
// "final" to make it efficient inside incrementTransition
public final int getIndex () { return index; }
// Serialization
// For class State
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(name);
out.writeInt(index);
out.writeObject(destinationNames);
out.writeObject(destinations);
out.writeObject(weightsIndices);
out.writeObject(labels);
out.writeObject(crf);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt ();
name = (String) in.readObject();
index = in.readInt();
destinationNames = (String[]) in.readObject();
destinations = (CRF.State[]) in.readObject();
weightsIndices = (int[][]) in.readObject();
labels = (String[]) in.readObject();
crf = (CRF) in.readObject();
}
}
protected static class TransitionIterator extends Transducer.TransitionIterator implements Serializable
{
State source;
int index, nextIndex;
protected double[] weights;
FeatureVector input;
CRF crf;
public TransitionIterator (State source,
FeatureVectorSequence inputSeq,
int inputPosition,
String output, CRF crf)
{
this (source, inputSeq.get(inputPosition), output, crf);
}
protected TransitionIterator (State source,
FeatureVector fv,
String output, CRF crf)
{
this.source = source;
this.crf = crf;
this.input = fv;
this.weights = new double[source.destinations.length];
int nwi, swi;
for (int transIndex = 0; transIndex < source.destinations.length; transIndex++) {
// xxx Or do we want output.equals(...) here?
if (output == null || output.equals(source.labels[transIndex])) {
// Here is the dot product of the feature weights with the lambda weights
// for one transition
weights[transIndex] = 0;
nwi = source.weightsIndices[transIndex].length;
for (int wi = 0; wi < nwi; wi++) {
swi = source.weightsIndices[transIndex][wi];
weights[transIndex] += (crf.parameters.weights[swi].dotProduct (fv)
						// include the default feature, which has an implicit value of 1.0
+ crf.parameters.defaultWeights[swi]);
}
assert (!Double.isNaN(weights[transIndex]));
assert (weights[transIndex] != Double.POSITIVE_INFINITY);
}
else
weights[transIndex] = IMPOSSIBLE_WEIGHT;
}
// Prepare nextIndex, pointing at the next non-impossible transition
nextIndex = 0;
while (nextIndex < source.destinations.length && weights[nextIndex] == IMPOSSIBLE_WEIGHT)
nextIndex++;
}
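		/* Note on the computation above: in log space the weight of a transition is
		 *
		 *   weight(t) = sum over weight sets w attached to t of
		 *               ( parameters.weights[w] . input + parameters.defaultWeights[w] )
		 *
		 * i.e. an unnormalized log score; the sum-product and max-product lattices
		 * later combine and normalize these scores.
		 */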
public boolean hasNext () { return nextIndex < source.destinations.length; }
public Transducer.State nextState ()
{
assert (nextIndex < source.destinations.length);
index = nextIndex;
nextIndex++;
while (nextIndex < source.destinations.length && weights[nextIndex] == IMPOSSIBLE_WEIGHT)
nextIndex++;
return source.getDestinationState (index);
}
// These "final"s are just to try to make this more efficient. Perhaps some of them will have to go away
public final int getIndex () { return index; }
public final Object getInput () { return input; }
public final Object getOutput () { return source.labels[index]; }
public final double getWeight () { return weights[index]; }
public final Transducer.State getSourceState () { return source; }
public final Transducer.State getDestinationState () { return source.getDestinationState (index); }
// Serialization
// TransitionIterator
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (source);
out.writeInt (index);
out.writeInt (nextIndex);
out.writeObject(weights);
out.writeObject (input);
out.writeObject(crf);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt ();
source = (State) in.readObject();
index = in.readInt ();
nextIndex = in.readInt ();
weights = (double[]) in.readObject();
input = (FeatureVector) in.readObject();
crf = (CRF) in.readObject();
}
public String describeTransition (double cutoff)
{
DecimalFormat f = new DecimalFormat ("0.###");
StringBuffer buf = new StringBuffer ();
buf.append ("Value: " + f.format (-getWeight ()) + " <br />\n");
try {
int[] theseWeights = source.weightsIndices[index];
for (int i = 0; i < theseWeights.length; i++) {
int wi = theseWeights[i];
SparseVector w = crf.parameters.weights[wi];
buf.append ("WEIGHTS <br />\n" + crf.parameters.weightAlphabet.lookupObject (wi) + "<br />\n");
buf.append (" d.p. = "+f.format (w.dotProduct (input))+"<br />\n");
double[] vals = new double[input.numLocations ()];
double[] absVals = new double[input.numLocations ()];
for (int k = 0; k < vals.length; k++) {
int index = input.indexAtLocation (k);
vals[k] = w.value (index) * input.value (index);
absVals[k] = Math.abs (vals[k]);
}
buf.append ("DEFAULT " + f.format (crf.parameters.defaultWeights[wi]) + "<br />\n");
RankedFeatureVector rfv = new RankedFeatureVector (crf.inputAlphabet, input.getIndices (), absVals);
for (int rank = 0; rank < absVals.length; rank++) {
int fidx = rfv.getIndexAtRank (rank);
Object fname = crf.inputAlphabet.lookupObject (input.indexAtLocation (fidx));
if (absVals[fidx] < cutoff) break; // Break looping over features
if (vals[fidx] != 0) {
buf.append (fname + " " + f.format (vals[fidx]) + "<br />\n");
}
}
}
} catch (Exception e) {
System.err.println ("Error writing transition descriptions.");
e.printStackTrace ();
buf.append ("ERROR WHILE WRITING OUTPUT...\n");
}
return buf.toString ();
}
}
}
| 76,573 | 37.949135 | 270 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SumLatticeConstrained.java
|
package cc.mallet.fst;
import java.util.logging.Level;
import java.util.logging.Logger;
import cc.mallet.fst.SumLatticeDefault.LatticeNode;
import cc.mallet.fst.Transducer.State;
import cc.mallet.fst.Transducer.TransitionIterator;
import cc.mallet.types.DenseVector;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelVector;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Sequence;
import cc.mallet.util.MalletLogger;
public class SumLatticeConstrained extends SumLatticeDefault {
private static Logger logger = MalletLogger.getLogger(SumLatticeConstrained.class.getName());
public SumLatticeConstrained (Transducer t, Sequence input, Sequence output, Segment requiredSegment, Sequence constrainedSequence) {
this (t, input, output, (Transducer.Incrementor)null, null, makeConstraints(t, input, output, requiredSegment, constrainedSequence));
}
private static int[] makeConstraints (Transducer t, Sequence inputSequence, Sequence outputSequence, Segment requiredSegment, Sequence constrainedSequence) {
if (constrainedSequence.size () != inputSequence.size ())
throw new IllegalArgumentException ("constrainedSequence.size [" + constrainedSequence.size () + "] != inputSequence.size [" + inputSequence.size () + "]");
// constraints tells the lattice which states must emit which
// observations. positive values say all paths must pass through
// this state index, negative values say all paths must _not_
// pass through this state index. 0 means we don't
// care. initialize to 0. include 1 extra node for start state.
int [] constraints = new int [constrainedSequence.size() + 1];
for (int c = 0; c < constraints.length; c++)
constraints[c] = 0;
for (int i=requiredSegment.getStart (); i <= requiredSegment.getEnd(); i++) {
int si = t.stateIndexOfString ((String)constrainedSequence.get (i));
if (si == -1)
logger.warning ("Could not find state " + constrainedSequence.get (i) + ". Check that state labels match startTages and inTags, and that all labels are seen in training data.");
// throw new IllegalArgumentException ("Could not find state " + constrainedSequence.get(i) + ". Check that state labels match startTags and InTags.");
constraints[i+1] = si + 1;
}
// set additional negative constraint to ensure state after
// segment is not a continue tag
// xxx if segment length=1, this actually constrains the sequence
// to B-tag (B-tag)', instead of the intended constraint of B-tag
// (I-tag)'
// the fix below is unsafe, but will have to do for now.
// FIXED BELOW
/* String endTag = (String) constrainedSequence.get (requiredSegment.getEnd ());
if (requiredSegment.getEnd()+2 < constraints.length) {
if (requiredSegment.getStart() == requiredSegment.getEnd()) { // segment has length 1
if (endTag.startsWith ("B-")) {
endTag = "I" + endTag.substring (1, endTag.length());
}
else if (!(endTag.startsWith ("I-") || endTag.startsWith ("0")))
throw new IllegalArgumentException ("Constrained Lattice requires that states are tagged in B-I-O format.");
}
int statei = stateIndexOfString (endTag);
if (statei == -1) // no I- tag for this B- tag
statei = stateIndexOfString ((String)constrainedSequence.get (requiredSegment.getStart ()));
constraints[requiredSegment.getEnd() + 2] = - (statei + 1);
}
*/
if (requiredSegment.getEnd() + 2 < constraints.length) { // if
String endTag = requiredSegment.getInTag().toString();
int statei = t.stateIndexOfString (endTag);
if (statei == -1)
throw new IllegalArgumentException ("Could not find state " + endTag + ". Check that state labels match startTags and InTags.");
constraints[requiredSegment.getEnd() + 2] = - (statei + 1);
}
// printStates ();
logger.fine ("Segment:\n" + requiredSegment.sequenceToString () +
"\nconstrainedSequence:\n" + constrainedSequence +
"\nConstraints:\n");
for (int i=0; i < constraints.length; i++) {
logger.fine (constraints[i] + "\t");
}
logger.fine ("");
return constraints;
}
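	/* Worked example (hypothetical indices): for a 4-token input where tokens 1 and 2
	 * must be emitted by states 3 and 4 respectively and everything else is
	 * unconstrained, the array built above (length = input size + 1, entry 0 being
	 * the unconstrained start position) would be
	 *
	 *   // position:          start  t0   t1    t2    t3
	 *   int[] constraints = {  0,     0,  3+1,  4+1,  0 };
	 *
	 * A negative entry such as -(5+1) would instead forbid state 5 at that position.
	 */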
// culotta: constructor for constrained lattice
/** Create a lattice that constrains its transitions such that the
* <position,label> pairs in "constraints" are adhered
* to. constraints is an array where each entry is the index of
* the required label at that position. An entry of 0 means there
* are no constraints on that <position, label>. Positive values
* mean the path must pass through that state. Negative values
* mean the path must _not_ pass through that state. NOTE -
* constraints.length must be equal to output.size() + 1. A
* lattice has one extra position for the initial
* state. Generally, this should be unconstrained, since it does
* not produce an observation.
*/
public SumLatticeConstrained (Transducer trans, Sequence input, Sequence output, Transducer.Incrementor incrementor, LabelAlphabet outputAlphabet, int [] constraints)
{
if (false && logger.isLoggable (Level.FINE)) {
logger.fine ("Starting Lattice");
logger.fine ("Input: ");
for (int ip = 0; ip < input.size(); ip++)
logger.fine (" " + input.get(ip));
logger.fine ("\nOutput: ");
if (output == null)
logger.fine ("null");
else
for (int op = 0; op < output.size(); op++)
logger.fine (" " + output.get(op));
logger.fine ("\n");
}
// Initialize some structures
this.t = trans;
this.input = input;
this.output = output;
// xxx Not very efficient when the lattice is actually sparse,
// especially when the number of states is large and the
// sequence is long.
latticeLength = input.size()+1;
int numStates = t.numStates();
nodes = new LatticeNode[latticeLength][numStates];
// xxx Yipes, this could get big; something sparse might be better?
gammas = new double[latticeLength][numStates];
// xxx Move this to an ivar, so we can save it? But for what?
// Commenting this out, because it's a memory hog and not used right now.
// Uncomment and conditionalize under a flag if ever needed. -cas
// double xis[][][] = new double[latticeLength][numStates][numStates];
double outputCounts[][] = null;
if (outputAlphabet != null)
outputCounts = new double[latticeLength][outputAlphabet.size()];
for (int i = 0; i < numStates; i++) {
for (int ip = 0; ip < latticeLength; ip++)
gammas[ip][i] = Transducer.IMPOSSIBLE_WEIGHT;
/* Commenting out xis -cas
for (int j = 0; j < numStates; j++)
for (int ip = 0; ip < latticeLength; ip++)
xis[ip][i][j] = IMPOSSIBLE_WEIGHT;
*/
}
// Forward pass
logger.fine ("Starting Constrained Foward pass");
// ensure that at least one state has initial weight greater than -Infinity
// so we can start from there
boolean atLeastOneInitialState = false;
for (int i = 0; i < numStates; i++) {
double initialWeight = t.getState(i).getInitialWeight();
//System.out.println ("Forward pass initialWeight = "+initialWeight);
if (initialWeight > Transducer.IMPOSSIBLE_WEIGHT) {
getLatticeNode(0, i).alpha = initialWeight;
//System.out.println ("nodes[0][i].alpha="+nodes[0][i].alpha);
atLeastOneInitialState = true;
}
}
if (atLeastOneInitialState == false)
logger.warning ("There are no starting states!");
for (int ip = 0; ip < latticeLength-1; ip++)
for (int i = 0; i < numStates; i++) {
logger.fine ("ip=" + ip+", i=" + i);
// check if this node is possible at this <position,
// label>. if not, skip it.
if (constraints[ip] > 0) { // must be in state indexed by constraints[ip] - 1
if (constraints[ip]-1 != i) {
logger.fine ("Current state does not match positive constraint. position="+ip+", constraint="+(constraints[ip]-1)+", currState="+i);
continue;
}
}
else if (constraints[ip] < 0) { // must _not_ be in state indexed by constraints[ip]
if (constraints[ip]+1 == -i) {
logger.fine ("Current state does not match negative constraint. position="+ip+", constraint="+(constraints[ip]+1)+", currState="+i);
continue;
}
}
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT) {
// xxx if we end up doing this a lot,
// we could save a list of the non-null ones
if (nodes[ip][i] == null) logger.fine ("nodes[ip][i] is NULL");
else if (nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT) logger.fine ("nodes[ip][i].alpha is -Inf");
logger.fine ("-INFINITE weight or NULL...skipping");
continue;
}
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
if (logger.isLoggable (Level.FINE))
logger.fine (" Starting Forward transition iteration from state "
+ s.getName() + " on input " + input.get(ip).toString()
+ " and output "
+ (output==null ? "(null)" : output.get(ip).toString()));
while (iter.hasNext()) {
State destination = iter.nextState();
boolean legalTransition = true;
// check constraints to see if node at <ip,i> can transition to destination
if (ip+1 < constraints.length && constraints[ip+1] > 0 && ((constraints[ip+1]-1) != destination.getIndex())) {
logger.fine ("Destination state does not match positive constraint. Assigning -infinite weight. position="+(ip+1)+", constraint="+(constraints[ip+1]-1)+", source ="+i+", destination="+destination.getIndex());
legalTransition = false;
}
else if (((ip+1) < constraints.length) && constraints[ip+1] < 0 && (-(constraints[ip+1]+1) == destination.getIndex())) {
logger.fine ("Destination state does not match negative constraint. Assigning -infinite weight. position="+(ip+1)+", constraint="+(constraints[ip+1]+1)+", destination="+destination.getIndex());
legalTransition = false;
}
if (logger.isLoggable (Level.FINE))
logger.fine ("Forward Lattice[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
LatticeNode destinationNode = getLatticeNode (ip+1, destination.getIndex());
destinationNode.output = iter.getOutput();
double transitionWeight = iter.getWeight();
if (legalTransition) {
//if (logger.isLoggable (Level.FINE))
logger.fine ("transitionWeight="+transitionWeight
+" nodes["+ip+"]["+i+"].alpha="+nodes[ip][i].alpha
+" destinationNode.alpha="+destinationNode.alpha);
destinationNode.alpha = Transducer.sumLogProb (destinationNode.alpha,
nodes[ip][i].alpha + transitionWeight);
//System.out.println ("destinationNode.alpha <- "+destinationNode.alpha);
logger.fine ("Set alpha of latticeNode at ip = "+ (ip+1) + " stateIndex = " + destination.getIndex() + ", destinationNode.alpha = " + destinationNode.alpha);
}
else {
// this is an illegal transition according to our
// constraints, so set its prob to 0 . NO, alpha's are
// unnormalized weights...set to -Inf //
// destinationNode.alpha = 0.0;
// destinationNode.alpha = IMPOSSIBLE_WEIGHT;
logger.fine ("Illegal transition from state " + i + " to state " + destination.getIndex() + ". Setting alpha to -Inf");
}
}
}
// Calculate total weight of Lattice. This is the normalizer
totalWeight = Transducer.IMPOSSIBLE_WEIGHT;
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
// Note: actually we could sum at any ip index,
// the choice of latticeLength-1 is arbitrary
//System.out.println ("Ending alpha, state["+i+"] = "+nodes[latticeLength-1][i].alpha);
//System.out.println ("Ending beta, state["+i+"] = "+getState(i).finalWeight);
if (constraints[latticeLength-1] > 0 && i != constraints[latticeLength-1]-1)
continue;
if (constraints[latticeLength-1] < 0 && -i == constraints[latticeLength-1]+1)
continue;
logger.fine ("Summing final lattice weight. state="+i+", alpha="+nodes[latticeLength-1][i].alpha + ", final weight = "+t.getState(i).getFinalWeight());
totalWeight = Transducer.sumLogProb (totalWeight,
(nodes[latticeLength-1][i].alpha + t.getState(i).getFinalWeight()));
}
// Weight is now an "unnormalized weight" of the entire Lattice
//assert (weight >= 0) : "weight = "+weight;
// If the sequence has -infinite weight, just return.
// Usefully this avoids calling any incrementX methods.
// It also relies on the fact that the gammas[][] and .alpha and .beta values
// are already initialized to values that reflect -infinite weight
// xxx Although perhaps not all (alphas,betas) exactly correctly reflecting?
if (totalWeight == Transducer.IMPOSSIBLE_WEIGHT)
return;
// Backward pass
for (int i = 0; i < numStates; i++)
if (nodes[latticeLength-1][i] != null) {
State s = t.getState(i);
nodes[latticeLength-1][i].beta = s.getFinalWeight();
gammas[latticeLength-1][i] =
nodes[latticeLength-1][i].alpha + nodes[latticeLength-1][i].beta - totalWeight;
if (incrementor != null) {
double p = Math.exp(gammas[latticeLength-1][i]);
assert (p >= 0.0 && p <= 1.0 && !Double.isNaN(p)) : "p="+p+" gamma="+gammas[latticeLength-1][i];
incrementor.incrementFinalState(s, p);
}
}
for (int ip = latticeLength-2; ip >= 0; ip--) {
for (int i = 0; i < numStates; i++) {
if (nodes[ip][i] == null || nodes[ip][i].alpha == Transducer.IMPOSSIBLE_WEIGHT)
// Note that skipping here based on alpha means that beta values won't
// be correct, but since alpha is -infinite anyway, it shouldn't matter.
continue;
State s = t.getState(i);
TransitionIterator iter = s.transitionIterator (input, ip, output, ip);
while (iter.hasNext()) {
State destination = iter.nextState();
if (logger.isLoggable (Level.FINE))
logger.fine ("Backward Lattice[inputPos="+ip
+"][source="+s.getName()
+"][dest="+destination.getName()+"]");
int j = destination.getIndex();
LatticeNode destinationNode = nodes[ip+1][j];
if (destinationNode != null) {
double transitionWeight = iter.getWeight();
assert (!Double.isNaN(transitionWeight));
// assert (transitionWeight >= 0); Not necessarily
double oldBeta = nodes[ip][i].beta;
assert (!Double.isNaN(nodes[ip][i].beta));
nodes[ip][i].beta = Transducer.sumLogProb (nodes[ip][i].beta,
destinationNode.beta + transitionWeight);
assert (!Double.isNaN(nodes[ip][i].beta))
: "dest.beta="+destinationNode.beta+" trans="+transitionWeight+" sum="+(destinationNode.beta+transitionWeight)
+ " oldBeta="+oldBeta;
// xis[ip][i][j] = nodes[ip][i].alpha + transitionWeight + nodes[ip+1][j].beta - weight;
assert (!Double.isNaN(nodes[ip][i].alpha));
assert (!Double.isNaN(transitionWeight));
assert (!Double.isNaN(nodes[ip+1][j].beta));
assert (!Double.isNaN(totalWeight));
if (incrementor != null || outputAlphabet != null) {
double xi = nodes[ip][i].alpha + transitionWeight + nodes[ip+1][j].beta - totalWeight;
double p = Math.exp(xi);
assert (p > Transducer.IMPOSSIBLE_WEIGHT && !Double.isNaN(p)) : "xis["+ip+"]["+i+"]["+j+"]="+xi;
if (incrementor != null)
incrementor.incrementTransition(iter, p);
if (outputAlphabet != null) {
int outputIndex = outputAlphabet.lookupIndex (iter.getOutput(), false);
assert (outputIndex >= 0);
// xxx This assumes that "ip" == "op"!
outputCounts[ip][outputIndex] += p;
//System.out.println ("CRF Lattice outputCounts["+ip+"]["+outputIndex+"]+="+p);
}
}
}
}
gammas[ip][i] = nodes[ip][i].alpha + nodes[ip][i].beta - totalWeight;
}
}
if (incrementor != null)
for (int i = 0; i < numStates; i++) {
double p = Math.exp(gammas[0][i]);
assert (p > Transducer.IMPOSSIBLE_WEIGHT && !Double.isNaN(p));
incrementor.incrementInitialState(t.getState(i), p);
}
if (outputAlphabet != null) {
labelings = new LabelVector[latticeLength];
for (int ip = latticeLength-2; ip >= 0; ip--) {
				assert (Math.abs(1.0-MatrixOps.sum (outputCounts[ip])) < 0.000001);
labelings[ip] = new LabelVector (outputAlphabet, outputCounts[ip]);
}
}
}
// The following used to be in fst.Transducer.
// Does it still apply? Does it still need addressing?
// -akm
// culotta: interface for constrained lattice
/**
	 Create constrained lattice such that all paths pass through the
	 labeling of <code> requiredSegment </code> as indicated by
<code> constrainedSequence </code>
@param inputSequence input sequence
@param outputSequence output sequence
@param requiredSegment segment of sequence that must be labelled
@param constrainedSequence lattice must have labels of this
sequence from <code> requiredSegment.start </code> to <code>
requiredSegment.end </code> correctly
*//*
public Lattice forwardBackward (Sequence inputSequence,
Sequence outputSequence,
Segment requiredSegment,
Sequence constrainedSequence) {
if (constrainedSequence.size () != inputSequence.size ())
throw new IllegalArgumentException ("constrainedSequence.size [" + constrainedSequence.size () + "] != inputSequence.size [" + inputSequence.size () + "]");
// constraints tells the lattice which states must emit which
// observations. positive values say all paths must pass through
// this state index, negative values say all paths must _not_
// pass through this state index. 0 means we don't
// care. initialize to 0. include 1 extra node for start state.
int [] constraints = new int [constrainedSequence.size() + 1];
for (int c = 0; c < constraints.length; c++)
constraints[c] = 0;
for (int i=requiredSegment.getStart (); i <= requiredSegment.getEnd(); i++) {
int si = stateIndexOfString ((String)constrainedSequence.get (i));
if (si == -1)
logger.warning ("Could not find state " + constrainedSequence.get (i) + ". Check that state labels match startTages and inTags, and that all labels are seen in training data.");
// throw new IllegalArgumentException ("Could not find state " + constrainedSequence.get(i) + ". Check that state labels match startTags and InTags.");
constraints[i+1] = si + 1;
}
// set additional negative constraint to ensure state after
// segment is not a continue tag
// xxx if segment length=1, this actually constrains the sequence
// to B-tag (B-tag)', instead of the intended constraint of B-tag
// (I-tag)'
// the fix below is unsafe, but will have to do for now.
// FIXED BELOW
/* String endTag = (String) constrainedSequence.get (requiredSegment.getEnd ());
if (requiredSegment.getEnd()+2 < constraints.length) {
if (requiredSegment.getStart() == requiredSegment.getEnd()) { // segment has length 1
if (endTag.startsWith ("B-")) {
endTag = "I" + endTag.substring (1, endTag.length());
}
else if (!(endTag.startsWith ("I-") || endTag.startsWith ("0")))
throw new IllegalArgumentException ("Constrained Lattice requires that states are tagged in B-I-O format.");
}
int statei = stateIndexOfString (endTag);
if (statei == -1) // no I- tag for this B- tag
statei = stateIndexOfString ((String)constrainedSequence.get (requiredSegment.getStart ()));
constraints[requiredSegment.getEnd() + 2] = - (statei + 1);
}
*//*
if (requiredSegment.getEnd() + 2 < constraints.length) { // if
String endTag = requiredSegment.getInTag().toString();
int statei = stateIndexOfString (endTag);
if (statei == -1)
logger.fine ("Could not find state " + endTag + ". Check that state labels match startTags and InTags.");
else
constraints[requiredSegment.getEnd() + 2] = - (statei + 1);
}
logger.fine ("Segment:\n" + requiredSegment.sequenceToString () +
"\nconstrainedSequence:\n" + constrainedSequence +
"\nConstraints:\n");
for (int i=0; i < constraints.length; i++) {
logger.fine (constraints[i] + "\t");
}
logger.fine ("");
return forwardBackward (inputSequence, outputSequence, constraints);
}
*/
}
| 20,145 | 46.739336 | 214 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/ViterbiWriter.java
|
package cc.mallet.fst;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.types.TokenSequence;
/**
* Prints the input instances along with the features and the true and
* predicted labels to a file.
* <p>
 * To control how often output is printed, override the
 * {@link cc.mallet.fst.TransducerEvaluator#precondition} method.
* <p>
* The name of the output file is <tt>filename_prefix + description + iteration_number + '.viterbi'</tt>.
*/
public class ViterbiWriter extends TransducerEvaluator {
String filenamePrefix;
String outputEncoding = "UTF-8";
public ViterbiWriter (String filenamePrefix, InstanceList[] instanceLists, String[] descriptions) {
super (instanceLists, descriptions);
this.filenamePrefix = filenamePrefix;
}
public ViterbiWriter (String filenamePrefix, InstanceList instanceList1, String description1) {
this (filenamePrefix, new InstanceList[] {instanceList1}, new String[] {description1});
}
public ViterbiWriter (String filenamePrefix,
InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2) {
this (filenamePrefix, new InstanceList[] {instanceList1, instanceList2}, new String[] {description1, description2});
}
public ViterbiWriter (String filenamePrefix,
InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2,
InstanceList instanceList3, String description3) {
this (filenamePrefix, new InstanceList[] {instanceList1, instanceList2, instanceList3},
new String[] {description1, description2, description3});
}
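	/* Usage sketch (hypothetical prefix and trainer): attach a ViterbiWriter to a
	 * trainer as an evaluator; with prefix "myrun." and description "train" this
	 * writes e.g. "myrun.train10.viterbi" after iteration 10.
	 *
	 *   CRFTrainerByLabelLikelihood trainer = new CRFTrainerByLabelLikelihood(crf);
	 *   trainer.addEvaluator(new ViterbiWriter("myrun.", trainingData, "train"));
	 *   trainer.train(trainingData, 100);
	 */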
protected void preamble (TransducerTrainer tt) {
// We don't want to print iteration number and cost, so here we override this behavior in the superclass.
}
@SuppressWarnings("unchecked")
@Override
public void evaluateInstanceList(TransducerTrainer transducerTrainer, InstanceList instances, String description) {
int iteration = transducerTrainer.getIteration();
String viterbiFilename = filenamePrefix + description + iteration + ".viterbi";
PrintStream viterbiOutputStream;
try {
FileOutputStream fos = new FileOutputStream (viterbiFilename);
if (outputEncoding == null)
viterbiOutputStream = new PrintStream (fos);
else
viterbiOutputStream = new PrintStream (fos, true, outputEncoding);
//((CRF)model).write (new File(viterbiOutputFilePrefix + "."+description + iteration+".model"));
} catch (IOException e) {
System.err.println ("Couldn't open Viterbi output file '"+viterbiFilename+"'; continuing without Viterbi output trace.");
return;
}
for (int i = 0; i < instances.size(); i++) {
if (viterbiOutputStream != null)
viterbiOutputStream.println ("Viterbi path for "+description+" instance #"+i);
Instance instance = instances.get(i);
Sequence input = (Sequence) instance.getData();
TokenSequence sourceTokenSequence = null;
if (instance.getSource() instanceof TokenSequence)
sourceTokenSequence = (TokenSequence) instance.getSource();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = transducerTrainer.getTransducer().transduce (input);
assert (predOutput.size() == trueOutput.size());
for (int j = 0; j < trueOutput.size(); j++) {
FeatureVector fv = (FeatureVector) input.get(j);
//viterbiOutputStream.println (tokens.charAt(j)+" "+trueOutput.get(j).toString()+
//'/'+predOutput.get(j).toString()+" "+ fv.toString(true));
if (sourceTokenSequence != null)
viterbiOutputStream.print (sourceTokenSequence.get(j).getText()+": ");
viterbiOutputStream.println (trueOutput.get(j).toString()+
'/'+predOutput.get(j).toString()+" "+ fv.toString(true));
}
}
}
}
| 4,064 | 40.060606 | 127 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFTrainerByValueGradients.java
|
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.BitSet;
import java.util.Random;
import java.util.logging.Logger;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.OptimizationException;
import cc.mallet.optimize.Optimizer;
import cc.mallet.util.MalletLogger;
/**
* A CRF trainer that can combine multiple objective functions, each represented
 * by an Optimizable.ByGradientValue.
*/
public class CRFTrainerByValueGradients extends TransducerTrainer implements TransducerTrainer.ByOptimization {
	private static Logger logger = MalletLogger.getLogger(CRFTrainerByValueGradients.class.getName());
CRF crf;
	// gsc: keep objects instead of classnames; this gives the user more flexibility
	// to set up new CRFOptimizable* objects and pass them directly to the constructor,
	// so the OptimizableCRF inner class no longer creates CRFOptimizable* objects
Optimizable.ByGradientValue[] optimizableByValueGradientObjects;
// Class[] optimizableByValueGradientClasses;
OptimizableCRF ocrf;
Optimizer opt;
int iterationCount = 0;
boolean converged;
// gsc: removing these options, the user ought to set the weights before
// creating the trainer object
// boolean useSparseWeights = true;
// // gsc
// boolean useUnsupportedTrick = false;
// Various values from CRF acting as indicators of when we need to ...
private int cachedValueWeightsStamp = -1; // ... re-calculate expectations and values to getValue() because weights' values changed
private int cachedGradientWeightsStamp = -1; // ... re-calculate to getValueGradient() because weights' values changed
// gsc: removing this because the user will call setWeightsDimensionsAsIn
// private int cachedWeightsStructureStamp = -1; // ... re-allocate crf.weights, expectations & constraints because new states, transitions
// Use mcrf.trainingSet to see when we need to re-allocate crf.weights, expectations & constraints because we are using a different TrainingList than last time
// gsc: number of times to reset (the optimizer), and continue training when the "could not step in
// current direction" exception occurs
public static final int DEFAULT_MAX_RESETS = 3;
int maxResets = DEFAULT_MAX_RESETS;
public CRFTrainerByValueGradients (CRF crf, Optimizable.ByGradientValue[] optimizableByValueGradientObjects) {
this.crf = crf;
this.optimizableByValueGradientObjects = optimizableByValueGradientObjects;
}
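	// Hedged usage sketch (not part of the original source; "crf" and "training"
	// are assumed to be a CRF and an InstanceList built elsewhere). It shows the
	// intended workflow: fix the weight dimensions first, wrap one or more
	// Optimizable.ByGradientValue objectives, then train until convergence.
	//
	//   crf.setWeightsDimensionAsIn(training, false);
	//   Optimizable.ByGradientValue[] opts = new Optimizable.ByGradientValue[] {
	//       new CRFOptimizableByLabelLikelihood(crf, training) };
	//   CRFTrainerByValueGradients trainer = new CRFTrainerByValueGradients(crf, opts);
	//   boolean converged = trainer.train(training, 100);  // 100 iterations, illustrative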
public Transducer getTransducer() { return crf; }
public CRF getCRF () { return crf; }
public Optimizer getOptimizer() { return opt; }
/** Returns true if training converged, false otherwise. */
public boolean isConverged() { return converged; }
/** Returns true if training converged, false otherwise. */
public boolean isFinishedTraining() { return converged; }
public int getIteration () { return iterationCount; }
// gsc
public Optimizable.ByGradientValue[] getOptimizableByGradientValueObjects() {
return optimizableByValueGradientObjects;
}
/**
* Returns an optimizable CRF that contains a collection of objective functions.
* <p>
* If one doesn't exist then creates one and sets the optimizer to null.
*/
public OptimizableCRF getOptimizableCRF (InstanceList trainingSet) {
// gsc: user should call setWeightsDimensionsAsIn before the optimizable and
// trainer objects are created
// if (cachedWeightsStructureStamp != crf.weightsStructureChangeStamp) {
// if (useSparseWeights)
// crf.setWeightsDimensionAsIn (trainingSet, useUnsupportedTrick);
// else
// crf.setWeightsDimensionDensely ();
// ocrf = null;
// cachedWeightsStructureStamp = crf.weightsStructureChangeStamp;
// }
if (ocrf == null || ocrf.trainingSet != trainingSet) {
ocrf = new OptimizableCRF (crf, trainingSet);
opt = null;
}
return ocrf;
}
/**
* Returns a L-BFGS optimizer, creating if one doesn't exist.
* <p>
* Also creates an optimizable CRF if required.
*/
public Optimizer getOptimizer (InstanceList trainingSet) {
		getOptimizableCRF(trainingSet); // this will set this.ocrf if necessary
if (opt == null || ocrf != opt.getOptimizable())
opt = new LimitedMemoryBFGS(ocrf); // Alternative: opt = new ConjugateGradient (0.001);
return opt;
}
/** Trains a CRF until convergence. */
public boolean trainIncremental (InstanceList training)
{
return train (training, Integer.MAX_VALUE);
}
/**
* Trains a CRF until convergence or specified number of iterations, whichever is earlier.
* <p>
* Also creates an optimizable CRF and an optmizer if required.
*/
public boolean train (InstanceList trainingSet, int numIterations) {
if (numIterations <= 0)
return false;
assert (trainingSet.size() > 0);
		getOptimizableCRF(trainingSet); // This will set this.ocrf if necessary
getOptimizer(trainingSet); // This will set this.opt if necessary
int numResets = 0;
boolean converged = false;
logger.info ("CRF about to train with "+numIterations+" iterations");
for (int i = 0; i < numIterations; i++) {
try {
// gsc: timing each iteration
long startTime = System.currentTimeMillis();
converged = opt.optimize (1);
logger.info ("CRF finished one iteration of maximizer, i="+i+", "+
+(System.currentTimeMillis()-startTime)/1000 + " secs.");
iterationCount++;
runEvaluators();
} catch (OptimizationException e) {
// gsc: resetting the optimizer for specified number of times
e.printStackTrace();
logger.info ("Catching exception.");
if (numResets < maxResets) {
// reset the optimizer and get a new one
logger.info("Resetting optimizer.");
++numResets;
opt = null;
getOptimizer(trainingSet);
// logger.info ("Catching exception; saying converged.");
// converged = true;
} else {
logger.info("Saying converged.");
converged = true;
}
}
if (converged) {
logger.info ("CRF training has converged, i="+i);
break;
}
}
return converged;
}
/**
* Train a CRF on various-sized subsets of the data. This method is typically used to accelerate training by
* quickly getting to reasonable parameters on only a subset of the parameters first, then on progressively more data.
* @param training The training Instances.
* @param numIterationsPerProportion Maximum number of Maximizer iterations per training proportion.
* @param trainingProportions If non-null, train on increasingly
* larger portions of the data, e.g. new double[] {0.2, 0.5, 1.0}. This can sometimes speedup convergence.
* Be sure to end in 1.0 if you want to train on all the data in the end.
* @return True if training has converged.
*/
public boolean train (InstanceList training, int numIterationsPerProportion, double[] trainingProportions)
{
int trainingIteration = 0;
assert (trainingProportions.length > 0);
boolean converged = false;
for (int i = 0; i < trainingProportions.length; i++) {
assert (trainingProportions[i] <= 1.0);
logger.info ("Training on "+trainingProportions[i]+"% of the data this round.");
if (trainingProportions[i] == 1.0)
converged = this.train (training, numIterationsPerProportion);
else
converged = this.train (training.split (new Random(1),
new double[] {trainingProportions[i], 1-trainingProportions[i]})[0], numIterationsPerProportion);
trainingIteration += numIterationsPerProportion;
}
return converged;
}
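	// Hypothetical call illustrating the staged training above: optimize on 20% of
	// the data, then 50%, then all of it, with at most 50 optimizer iterations per
	// stage (the proportions and iteration count are illustrative only).
	//
	//   trainer.train(training, 50, new double[] {0.2, 0.5, 1.0});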
// gsc: see comment in getOptimizableCRF
// public void setUseSparseWeights (boolean b) { useSparseWeights = b; }
// public boolean getUseSparseWeights () { return useSparseWeights; }
//
// // gsc
// public void setUseUnsupportedTrick (boolean b) { useUnsupportedTrick = b; }
// public boolean getUseUnsupportedTrick () { return useUnsupportedTrick; }
// gsc: change max. number of times the optimizer can be reset before
// throwing the "could not step in current direction" exception
/**
* Sets the max. number of times the optimizer can be reset before throwing
* an exception.
* <p>
* Default value: <tt>DEFAULT_MAX_RESETS</tt>.
*/
public void setMaxResets(int maxResets) { this.maxResets = maxResets; }
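	// Illustrative only: allow a couple of extra optimizer restarts before giving
	// up when "could not step in current direction" exceptions are frequent.
	//
	//   trainer.setMaxResets(5);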
/** An optimizable CRF that contains a collection of objective functions. */
public class OptimizableCRF implements Optimizable.ByGradientValue, Serializable
{
InstanceList trainingSet;
double cachedValue = -123456789;
double[] cachedGradie;
BitSet infiniteValues = null;
CRF crf;
Optimizable.ByGradientValue[] opts;
protected OptimizableCRF (CRF crf, InstanceList ilist)
{
// Set up
this.crf = crf;
this.trainingSet = ilist;
this.opts = optimizableByValueGradientObjects;
cachedGradie = new double[crf.parameters.getNumFactors()];
cachedValueWeightsStamp = -1;
cachedGradientWeightsStamp = -1;
}
// protected OptimizableCRF (CRF crf, InstanceList ilist)
// {
// // Set up
// this.crf = crf;
// this.trainingSet = ilist;
// cachedGradie = new double[crf.parameters.getNumFactors()];
// Class[] parameterTypes = new Class[] {CRF.class, InstanceList.class};
// for (int i = 0; i < optimizableByValueGradientClasses.length; i++) {
// try {
// Constructor c = optimizableByValueGradientClasses[i].getConstructor(parameterTypes);
// opts[i] = (Optimizable.ByGradientValue) c.newInstance(crf, ilist);
//				} catch (Exception e) { throw new IllegalStateException ("Couldn't construct Optimizable.ByGradientValue"); }
// }
// cachedValueWeightsStamp = -1;
// cachedGradientWeightsStamp = -1;
// }
// TODO Move these implementations into CRF.java, and put here stubs that call them!
public int getNumParameters () {
return crf.parameters.getNumFactors();
}
public void getParameters (double[] buffer) {
crf.parameters.getParameters(buffer);
}
public double getParameter (int index) {
return crf.parameters.getParameter(index);
}
public void setParameters (double [] buff) {
crf.parameters.setParameters(buff);
crf.weightsValueChanged();
}
public void setParameter (int index, double value) {
crf.parameters.setParameter(index, value);
crf.weightsValueChanged();
}
/** Returns the log probability of the training sequence labels and the prior over parameters. */
public double getValue ()
{
if (crf.weightsValueChangeStamp != cachedValueWeightsStamp) {
// The cached value is not up to date; it was calculated for a different set of CRF weights.
long startingTime = System.currentTimeMillis();
cachedValue = 0;
for (int i = 0; i < opts.length; i++)
cachedValue += opts[i].getValue();
cachedValueWeightsStamp = crf.weightsValueChangeStamp; // cachedValue is now no longer stale
logger.info ("getValue() (loglikelihood) = "+cachedValue);
logger.fine ("Inference milliseconds = "+(System.currentTimeMillis() - startingTime));
}
return cachedValue;
}
public void getValueGradient (double [] buffer)
{
// PriorGradient is -parameter/gaussianPriorVariance
// Gradient is (constraint - expectation + PriorGradient)
// == -(expectation - constraint - PriorGradient).
// Gradient points "up-hill", i.e. in the direction of higher value
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
getValue (); // This will fill in the this.expectation, updating it if necessary
MatrixOps.setAll(cachedGradie, 0);
double[] b2 = new double[buffer.length];
for (int i = 0; i < opts.length; i++) {
MatrixOps.setAll(b2, 0);
opts[i].getValueGradient(b2);
MatrixOps.plusEquals(cachedGradie, b2);
}
cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
}
System.arraycopy(cachedGradie, 0, buffer, 0, cachedGradie.length);
}
//Serialization of MaximizableCRF
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(trainingSet);
out.writeDouble(cachedValue);
out.writeObject(cachedGradie);
out.writeObject(infiniteValues);
out.writeObject(crf);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt ();
trainingSet = (InstanceList) in.readObject();
cachedValue = in.readDouble();
cachedGradie = (double[]) in.readObject();
infiniteValues = (BitSet) in.readObject();
crf = (CRF)in.readObject();
}
}
// Serialization for CRFTrainerByValueGradient
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
static final int NULL_INTEGER = -1;
/* Need to check for null pointers. */
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
//out.writeInt(defaultFeatureIndex);
out.writeInt(cachedGradientWeightsStamp);
out.writeInt(cachedValueWeightsStamp);
// out.writeInt(cachedWeightsStructureStamp);
// out.writeBoolean (useSparseWeights);
throw new IllegalStateException("Implementation not yet complete.");
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
in.readInt ();
//defaultFeatureIndex = in.readInt();
// useSparseWeights = in.readBoolean();
throw new IllegalStateException("Implementation not yet complete.");
}
}
| 13,626 | 36.334247 | 160 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/MultiSegmentationEvaluator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
Evaluate segmentation f1 for several different tags (marked in OIB format).
For example, tags might be B-PERSON I-PERSON O B-LOCATION I-LOCATION O...
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.io.PrintStream;
import java.util.List;
import java.util.logging.Logger;
import java.text.DecimalFormat;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.types.TokenSequence;
import cc.mallet.util.MalletLogger;
/**
* Evaluates a transducer model, computes the precision, recall and F1 scores;
* considers segments that span across multiple tokens.
*/
public class MultiSegmentationEvaluator extends TransducerEvaluator
{
	private static Logger logger = MalletLogger.getLogger(MultiSegmentationEvaluator.class.getName());
// equals() is called on these objects to determine if this token is the start or continuation of a segment.
	// A tag not equal to any of these is an "other" (i.e. not part of any segment).
Object[] segmentStartTags;
Object[] segmentContinueTags;
Object[] segmentStartOrContinueTags;
public MultiSegmentationEvaluator (InstanceList[] instanceLists, String[] instanceListDescriptions,
Object[] segmentStartTags, Object[] segmentContinueTags)
{
super (instanceLists, instanceListDescriptions);
this.segmentStartTags = segmentStartTags;
this.segmentContinueTags = segmentContinueTags;
assert (segmentStartTags.length == segmentContinueTags.length);
}
public MultiSegmentationEvaluator (InstanceList instanceList1, String description1,
Object[] segmentStartTags, Object[] segmentContinueTags)
{
this (new InstanceList[] {instanceList1}, new String[] {description1},
segmentStartTags, segmentContinueTags);
}
public MultiSegmentationEvaluator (InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2,
Object[] segmentStartTags, Object[] segmentContinueTags)
{
this (new InstanceList[] {instanceList1, instanceList2}, new String[] {description1, description2},
segmentStartTags, segmentContinueTags);
}
public MultiSegmentationEvaluator (InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2,
InstanceList instanceList3, String description3,
Object[] segmentStartTags, Object[] segmentContinueTags)
{
this (new InstanceList[] {instanceList1, instanceList2, instanceList3}, new String[] {description1, description2, description3},
segmentStartTags, segmentContinueTags);
}
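	// Hedged usage sketch (not in the original source). The label strings below are
	// hypothetical OIB tags; any objects whose equals() matches the start and
	// continuation labels of each segment type will work.
	//
	//   MultiSegmentationEvaluator eval = new MultiSegmentationEvaluator(
	//       testData, "test",
	//       new String[] {"B-PERSON", "B-LOCATION"},   // segment start tags
	//       new String[] {"I-PERSON", "I-LOCATION"});  // segment continuation tags
	//   eval.evaluate(trainer);  // or register it on a TransducerTrainer's evaluator list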
public void evaluateInstanceList (TransducerTrainer tt, InstanceList data, String description)
{
Transducer model = tt.getTransducer();
int numCorrectTokens, totalTokens;
int[] numTrueSegments, numPredictedSegments, numCorrectSegments;
int allIndex = segmentStartTags.length;
int anyTypeIndex = segmentStartTags.length+1;
numTrueSegments = new int[allIndex+2];
numPredictedSegments = new int[allIndex+2];
numCorrectSegments = new int[allIndex+2];
totalTokens = numCorrectTokens = 0;
for (int n = 0; n < numTrueSegments.length; n++)
numTrueSegments[n] = numPredictedSegments[n] = numCorrectSegments[n] = 0;
for (int i = 0; i < data.size(); i++) {
Instance instance = data.get(i);
Sequence input = (Sequence) instance.getData();
//String tokens = null;
//if (instance.getSource() != null)
//tokens = (String) instance.getSource().toString();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = model.transduce (input);
assert (predOutput.size() == trueOutput.size());
int trueStart, predStart; // -1 for non-start, otherwise index into segmentStartTag
for (int j = 0; j < trueOutput.size(); j++) {
totalTokens++;
if (trueOutput.get(j).equals(predOutput.get(j)))
numCorrectTokens++;
trueStart = predStart = -1;
// Count true segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(trueOutput.get(j))) {
numTrueSegments[n]++;
numTrueSegments[allIndex]++;
numTrueSegments[anyTypeIndex]++;
trueStart = n;
break;
}
}
// Count predicted segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(predOutput.get(j))) {
numPredictedSegments[n]++;
numPredictedSegments[allIndex]++;
numPredictedSegments[anyTypeIndex]++;
predStart = n;
}
}
//if (trueStart != -1 && trueStart == predStart) {
if (trueStart != -1 && predStart != -1) {
// Truth and Prediction both agree that the same segment tag-type is starting now
int m;
boolean trueContinue = false;
boolean predContinue = false;
for (m = j+1; m < trueOutput.size(); m++) {
trueContinue = segmentContinueTags[predStart].equals (trueOutput.get(m));
predContinue = segmentContinueTags[predStart].equals (predOutput.get(m));
if (!trueContinue || !predContinue) {
if (trueContinue == predContinue) {
							// They agree that a segment is ending somehow
numCorrectSegments[anyTypeIndex]++;
if(trueStart == predStart) {
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
}
break;
}
}
// for the case of the end of the sequence
if (m == trueOutput.size()) {
if (trueContinue == predContinue) {
numCorrectSegments[anyTypeIndex]++;
if(trueStart == predStart) {
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
}
}
}
}
}
DecimalFormat f = new DecimalFormat ("0.####");
logger.info (description +" tokenaccuracy="+f.format(((double)numCorrectTokens)/totalTokens));
for (int n = 0; n < numCorrectSegments.length; n++) {
logger.info ((n < allIndex ? segmentStartTags[n].toString() : "OVERALL") +' ');
double precision = numPredictedSegments[n] == 0 ? 1 : ((double)numCorrectSegments[n]) / numPredictedSegments[n];
double recall = numTrueSegments[n] == 0 ? 1 : ((double)numCorrectSegments[n]) / numTrueSegments[n];
double f1 = recall+precision == 0.0 ? 0.0 : (2.0 * recall * precision) / (recall + precision);
logger.info (" "+description+" segments true="+numTrueSegments[n]+" pred="+numPredictedSegments[n]+" correct="+numCorrectSegments[n]+
" misses="+(numTrueSegments[n]-numCorrectSegments[n])+" alarms="+(numPredictedSegments[n]-numCorrectSegments[n]));
logger.info (" "+description+" precision="+f.format(precision)+" recall="+f.format(recall)+" f1="+f.format(f1));
}
}
/**
* Returns the number of incorrect segments in <code>predOutput</code>
*
* @param trueOutput truth
* @param predOutput predicted
* @return number of incorrect segments
*/
public int numIncorrectSegments (Sequence trueOutput, Sequence predOutput) {
int numCorrectTokens, totalTokens;
int[] numTrueSegments, numPredictedSegments, numCorrectSegments;
int allIndex = segmentStartTags.length;
numTrueSegments = new int[allIndex+1];
numPredictedSegments = new int[allIndex+1];
numCorrectSegments = new int[allIndex+1];
totalTokens = numCorrectTokens = 0;
for (int n = 0; n < numTrueSegments.length; n++)
numTrueSegments[n] = numPredictedSegments[n] = numCorrectSegments[n] = 0;
assert (predOutput.size() == trueOutput.size());
// -1 for non-start, otherwise index into segmentStartTag
int trueStart, predStart;
for (int j = 0; j < trueOutput.size(); j++) {
totalTokens++;
if (trueOutput.get(j).equals(predOutput.get(j)))
numCorrectTokens++;
trueStart = predStart = -1;
// Count true segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(trueOutput.get(j))) {
numTrueSegments[n]++;
numTrueSegments[allIndex]++;
trueStart = n;
break;
}
}
// Count predicted segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(predOutput.get(j))) {
numPredictedSegments[n]++;
numPredictedSegments[allIndex]++;
predStart = n;
}
}
if (trueStart != -1 && trueStart == predStart) {
// Truth and Prediction both agree that the same segment tag-type is starting now
int m;
boolean trueContinue = false;
boolean predContinue = false;
for (m = j+1; m < trueOutput.size(); m++) {
trueContinue = segmentContinueTags[predStart].equals (trueOutput.get(m));
predContinue = segmentContinueTags[predStart].equals (predOutput.get(m));
if (!trueContinue || !predContinue) {
if (trueContinue == predContinue) {
							// They agree that a segment is ending somehow
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
break;
}
}
// for the case of the end of the sequence
if (m == trueOutput.size()) {
if (trueContinue == predContinue) {
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
}
}
}
int wrong = 0;
for (int n=0; n < numCorrectSegments.length; n++) {
// incorrect segment is either false pos or false neg.
wrong += numTrueSegments[n] - numCorrectSegments[n];
}
return wrong;
}
/**
* Tests segmentation using an ArrayList of predicted Sequences instead of a
* {@link Transducer}. If predictedSequence is null, don't include in stats
* (useful for error analysis).
*
* @param data list of instances to be segmented
* @param predictedSequences predictions
* @param description description of trial
* @param viterbiOutputStream where to print the Viterbi paths
*/
public void batchTest(InstanceList data, List<Sequence> predictedSequences,
String description, PrintStream viterbiOutputStream)
{
int numCorrectTokens, totalTokens;
int[] numTrueSegments, numPredictedSegments, numCorrectSegments;
int allIndex = segmentStartTags.length;
numTrueSegments = new int[allIndex+1];
numPredictedSegments = new int[allIndex+1];
numCorrectSegments = new int[allIndex+1];
TokenSequence sourceTokenSequence = null;
totalTokens = numCorrectTokens = 0;
for (int n = 0; n < numTrueSegments.length; n++)
numTrueSegments[n] = numPredictedSegments[n] = numCorrectSegments[n] = 0;
for (int i = 0; i < data.size(); i++) {
if (viterbiOutputStream != null)
viterbiOutputStream.println ("Viterbi path for "+description+" instance #"+i);
Instance instance = data.get(i);
Sequence input = (Sequence) instance.getData();
//String tokens = null;
//if (instance.getSource() != null)
//tokens = (String) instance.getSource().toString();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = (Sequence) predictedSequences.get (i);
if (predOutput == null) // skip this instance
continue;
assert (predOutput.size() == trueOutput.size());
int trueStart, predStart; // -1 for non-start, otherwise index into segmentStartTag
for (int j = 0; j < trueOutput.size(); j++) {
totalTokens++;
if (trueOutput.get(j).equals(predOutput.get(j)))
numCorrectTokens++;
trueStart = predStart = -1;
// Count true segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(trueOutput.get(j))) {
numTrueSegments[n]++;
numTrueSegments[allIndex]++;
trueStart = n;
break;
}
}
// Count predicted segment starts
for (int n = 0; n < segmentStartTags.length; n++) {
if (segmentStartTags[n].equals(predOutput.get(j))) {
numPredictedSegments[n]++;
numPredictedSegments[allIndex]++;
predStart = n;
}
}
if (trueStart != -1 && trueStart == predStart) {
// Truth and Prediction both agree that the same segment tag-type is starting now
int m;
boolean trueContinue = false;
boolean predContinue = false;
for (m = j+1; m < trueOutput.size(); m++) {
trueContinue = segmentContinueTags[predStart].equals (trueOutput.get(m));
predContinue = segmentContinueTags[predStart].equals (predOutput.get(m));
if (!trueContinue || !predContinue) {
if (trueContinue == predContinue) {
							// They agree that a segment is ending somehow
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
break;
}
}
// for the case of the end of the sequence
if (m == trueOutput.size()) {
if (trueContinue == predContinue) {
numCorrectSegments[predStart]++;
numCorrectSegments[allIndex]++;
}
}
}
if (viterbiOutputStream != null) {
FeatureVector fv = (FeatureVector) input.get(j);
//viterbiOutputStream.println (tokens.charAt(j)+" "+trueOutput.get(j).toString()+
//'/'+predOutput.get(j).toString()+" "+ fv.toString(true));
if (sourceTokenSequence != null)
viterbiOutputStream.print (sourceTokenSequence.get(j).getText()+": ");
viterbiOutputStream.println (trueOutput.get(j).toString()+
'/'+predOutput.get(j).toString()+" "+ fv.toString(true));
}
}
}
DecimalFormat f = new DecimalFormat ("0.####");
logger.info (description +" tokenaccuracy="+f.format(((double)numCorrectTokens)/totalTokens));
for (int n = 0; n < numCorrectSegments.length; n++) {
logger.info ((n < allIndex ? segmentStartTags[n].toString() : "OVERALL") +' ');
double precision = numPredictedSegments[n] == 0 ? 1 : ((double)numCorrectSegments[n]) / numPredictedSegments[n];
double recall = numTrueSegments[n] == 0 ? 1 : ((double)numCorrectSegments[n]) / numTrueSegments[n];
double f1 = recall+precision == 0.0 ? 0.0 : (2.0 * recall * precision) / (recall + precision);
logger.info (" segments true="+numTrueSegments[n]+" pred="+numPredictedSegments[n]+" correct="+numCorrectSegments[n]+
" misses="+(numTrueSegments[n]-numCorrectSegments[n])+" alarms="+(numPredictedSegments[n]-numCorrectSegments[n]));
logger.info (" precision="+f.format(precision)+" recall="+f.format(recall)+" f1="+f.format(f1));
}
}
}
| 15,536 | 40.991892 | 139 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/TransducerEvaluator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.util.logging.Logger;
import cc.mallet.types.InstanceList;
import cc.mallet.optimize.Optimizable;
import cc.mallet.util.MalletLogger;
/**
* An abstract class to evaluate a transducer model.
*/
public abstract class TransducerEvaluator
{
private static Logger logger = MalletLogger.getLogger(TransducerEvaluator.class.getName());
// TODO consider storing the TransducerTrainer here also? Methods like precondition() will be shorter and easier.
protected InstanceList[] instanceLists;
protected String[] instanceListDescriptions;
public TransducerEvaluator () {
instanceLists = new InstanceList[0];
instanceListDescriptions = new String[0];
}
public TransducerEvaluator (InstanceList[] instanceLists, String[] instanceListDescriptions) {
this.instanceLists = instanceLists;
this.instanceListDescriptions = instanceListDescriptions;
}
/**
* Evaluates a TransducerTrainer and its Transducer on the instance lists specified in the constructor. .
* <P>
* The default implementation calls the evaluator's <TT>evaluateInstanceList</TT> on each instance list.
*
* @param tt The TransducerTrainer to evaluate.
*/
public void evaluate (TransducerTrainer tt) {
if (!precondition(tt))
return;
this.preamble(tt);
for (int k = 0; k < instanceLists.length; k++)
if (instanceLists[k] != null)
evaluateInstanceList (tt, instanceLists[k], instanceListDescriptions[k]);
}
protected void preamble (TransducerTrainer tt) {
int iteration = tt.getIteration();
Optimizable opt;
if (tt instanceof TransducerTrainer.ByOptimization
&& (opt = ((TransducerTrainer.ByOptimization)tt).getOptimizer().getOptimizable()) instanceof Optimizable.ByValue)
logger.info ("Evaluator iteration="+iteration+" cost="+((Optimizable.ByValue)opt).getValue());
else
logger.info ("Evaluator iteration="+iteration+" cost=NA (not Optimizable.ByValue)");
}
/** If this returns false, then the body of the evaluate(TransducerTrainer) method will not run.
* Use this method to implement behaviors such as only evaluating every 5 iterations with
* <code>
	 * new TokenAccuracyEvaluator (crft) { public boolean precondition (TransducerTrainer tt) { return tt.getIteration() % 5 == 0; } };
* </code>*/
public boolean precondition (TransducerTrainer tt) {
return true;
}
public abstract void evaluateInstanceList (TransducerTrainer transducer, InstanceList instances, String description);
}
| 3,015 | 34.482353 | 130 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/ThreadedOptimizable.java
|
package cc.mallet.fst;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.logging.Logger;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.optimize.Optimizable;
import cc.mallet.util.MalletLogger;
/**
* An adaptor for optimizables based on batch values/gradients.
* <p>
* Computes values, gradients for each batch in multiple threads and combines
* them in the end.
*
* @author Gaurav Chandalia
* @see CRFOptimizableByBatchLabelLikelihood
*/
public class ThreadedOptimizable implements Optimizable.ByGradientValue {
private static Logger logger = MalletLogger.getLogger(ThreadedOptimizable.class.getName());
/** Data */
protected InstanceList trainingSet;
/** optimizable to be parallelized */
protected Optimizable.ByCombiningBatchGradient optimizable;
/** Value obtained from the optimizable for each batch */
protected double[] batchCachedValue;
/** Gradient obtained from the optimizable for each batch */
protected List<double[]> batchCachedGradient;
// determine when value/gradient become stale
protected CacheStaleIndicator cacheIndicator;
// tasks to be executed in individual threads, each task is instantiated only
// once but executed in every iteration
private transient Collection<Callable<Double>> valueTasks;
private transient Collection<Callable<Boolean>> gradientTasks;
// thread pool to compute value/gradient for one batch of data
private transient ThreadPoolExecutor executor;
// milliseconds
public static final int SLEEP_TIME = 100;
/**
* Initializes the optimizable and starts new threads.
*
* @param optimizable Optimizable to be parallelized
* @param numFactors Number of factors in model's parameters, used to
* initialize the gradient
* @param cacheIndicator Determines when value/gradient become stale
*/
public ThreadedOptimizable(Optimizable.ByCombiningBatchGradient optimizable,
InstanceList trainingSet, int numFactors,
CacheStaleIndicator cacheIndicator) {
// set up
this.trainingSet = trainingSet;
this.optimizable = optimizable;
int numBatches = optimizable.getNumBatches();
assert(numBatches > 0) : "Invalid number of batches: " + numBatches;
batchCachedValue = new double[numBatches];
batchCachedGradient = new ArrayList<double[]>(numBatches);
for (int i = 0; i < numBatches; ++i) {
batchCachedGradient.add(new double[numFactors]);
}
this.cacheIndicator = cacheIndicator;
logger.info("Creating " + numBatches + " threads for updating gradient...");
executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(numBatches);
this.createTasks();
}
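	// Hedged usage sketch (not part of the original source), mirroring how
	// CRFTrainerByThreadedLabelLikelihood wires things together: a batch-based
	// optimizable is wrapped so each batch's value and gradient are computed in a
	// separate thread. "crf" and "training" are assumed to exist.
	//
	//   CRFOptimizableByBatchLabelLikelihood batchOpt =
	//       new CRFOptimizableByBatchLabelLikelihood(crf, training, numThreads);
	//   ThreadedOptimizable threadedOpt = new ThreadedOptimizable(
	//       batchOpt, training, crf.getParameters().getNumFactors(),
	//       new CRFCacheStaleIndicator(crf));
	//   Optimizer optimizer = new LimitedMemoryBFGS(threadedOpt);
	//   optimizer.optimize();
	//   threadedOpt.shutdown();  // recommended for a clean exit of the worker threads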
public Optimizable.ByCombiningBatchGradient getOptimizable() {
return optimizable;
}
/**
* Shuts down the executor used to start and run threads to compute values
* and gradients.
* <p>
* *Note*: For a clean exit of all the threads, it is recommended to call
* this method after training finishes.
*/
public void shutdown() {
// fix submitted by Mark Dredze ([email protected])
executor.shutdown();
try {
executor.awaitTermination(30, TimeUnit.SECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
assert(executor.shutdownNow().size() == 0) : "All tasks didn't finish";
}
public double getValue () {
if (cacheIndicator.isValueStale()) {
// compute values again
try {
// run all threads and wait for them to finish
List<Future<Double>> results = executor.invokeAll(valueTasks);
// compute final log probability
int batch = 0;
for (Future<Double> f : results) {
try {
batchCachedValue[batch++] = f.get();
} catch (ExecutionException ee) {
ee.printStackTrace();
}
}
} catch (InterruptedException ie) {
ie.printStackTrace();
}
double cachedValue = MatrixOps.sum(batchCachedValue);
logger.info("getValue() (loglikelihood, optimizable by label likelihood) =" + cachedValue);
return cachedValue;
}
return MatrixOps.sum(batchCachedValue);
}
/**
* Returns the gradient, re-computes if gradient is stale. <p>
*
* *Note*: Assumes that <tt>buffer</tt> is already initialized.
*/
public void getValueGradient (double[] buffer) {
if (cacheIndicator.isGradientStale()) {
// compute values again if required
this.getValue();
// compute gradients again
try {
// run all threads and wait for them to finish
executor.invokeAll(gradientTasks);
} catch (InterruptedException ie) {
ie.printStackTrace();
}
}
optimizable.combineGradients(batchCachedGradient, buffer);
}
/**
* Creates tasks to be executed in parallel, each task looks at a batch of
* data.
*/
protected void createTasks() {
int numBatches = optimizable.getNumBatches();
valueTasks = new ArrayList<Callable<Double>>(numBatches);
gradientTasks = new ArrayList<Callable<Boolean>>(numBatches);
// number of instances per batch
int numBatchInstances = trainingSet.size() / numBatches;
// batch assignments
int start = -1, end = -1;
for (int i = 0; i < numBatches; ++i) {
// get the indices of batch
if (i == 0) {
start = 0;
end = start + numBatchInstances;
} else if (i == numBatches-1) {
start = end;
end = trainingSet.size();
} else {
start = end;
end = start + numBatchInstances;
}
valueTasks.add(new ValueHandler(i, new int[]{start, end}));
gradientTasks.add(new GradientHandler(i, new int[]{start, end}));
}
}
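	// Worked example of the batch split above (illustrative numbers): with
	// trainingSet.size() == 10 and numBatches == 3, numBatchInstances == 3 and the
	// batches cover the index ranges [0,3), [3,6) and [6,10); the last batch
	// absorbs the remainder.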
public int getNumParameters () { return optimizable.getNumParameters(); }
public void getParameters (double[] buffer) {
optimizable.getParameters(buffer);
}
public double getParameter (int index) {
return optimizable.getParameter(index);
}
public void setParameters (double [] buff) {
optimizable.setParameters(buff);
}
public void setParameter (int index, double value) {
optimizable.setParameter(index, value);
}
/**
* Computes value in a separate thread for a batch of data.
*/
private class ValueHandler implements Callable<Double> {
private int batchIndex;
private int[] batchAssignments;
public ValueHandler(int batchIndex, int[] batchAssignments) {
this.batchIndex = batchIndex;
this.batchAssignments = batchAssignments;
}
/**
* Returns the value for a batch.
*/
public Double call() {
return optimizable.getBatchValue(batchIndex, batchAssignments);
}
}
/**
* Computes gradient in a separate thread for a batch of data.
*/
private class GradientHandler implements Callable<Boolean> {
private int batchIndex;
private int[] batchAssignments;
public GradientHandler(int batchIndex, int[] batchAssignments) {
this.batchIndex = batchIndex;
this.batchAssignments = batchAssignments;
}
/**
* Computes the gradient for a batch, always returns true.
*/
public Boolean call() {
optimizable.getBatchValueGradient(batchCachedGradient.get(batchIndex),
batchIndex, batchAssignments);
return true;
}
}
}
| 7,238 | 28.307692 | 94 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFTrainerByThreadedLabelLikelihood.java
|
package cc.mallet.fst;
import java.util.Random;
import java.util.logging.Logger;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizer;
import cc.mallet.types.InstanceList;
import cc.mallet.util.MalletLogger;
/**
* @author Gregory Druck [email protected]
*
* Multi-threaded version of CRF trainer. Note that multi-threaded feature induction
* and hyperbolic prior are not supported by this code.
*/
public class CRFTrainerByThreadedLabelLikelihood extends TransducerTrainer implements TransducerTrainer.ByOptimization {
private static Logger logger = MalletLogger.getLogger(CRFTrainerByThreadedLabelLikelihood.class.getName());
static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1.0;
private boolean useSparseWeights;
private boolean useNoWeights;
private transient boolean useSomeUnsupportedTrick;
private boolean converged;
private int numThreads;
private int iterationCount;
private double gaussianPriorVariance;
private CRF crf;
private CRFOptimizableByBatchLabelLikelihood optimizable;
private ThreadedOptimizable threadedOptimizable;
private Optimizer optimizer;
private int cachedWeightsStructureStamp;
public CRFTrainerByThreadedLabelLikelihood (CRF crf, int numThreads) {
this.crf = crf;
this.useSparseWeights = true;
this.useNoWeights = false;
this.useSomeUnsupportedTrick = true;
this.converged = false;
this.numThreads = numThreads;
this.iterationCount = 0;
this.gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
this.cachedWeightsStructureStamp = -1;
}
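	// Hedged usage sketch (not part of the original source; "crf" and "training"
	// are assumed to exist). Call shutdown() after training so the worker threads
	// exit cleanly.
	//
	//   CRFTrainerByThreadedLabelLikelihood trainer =
	//       new CRFTrainerByThreadedLabelLikelihood(crf, 4);  // 4 threads, illustrative
	//   trainer.train(training, 100);
	//   trainer.shutdown();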
public Transducer getTransducer() { return crf; }
public CRF getCRF () { return crf; }
public Optimizer getOptimizer() { return optimizer; }
public boolean isConverged() { return converged; }
public boolean isFinishedTraining() { return converged; }
public int getIteration () { return iterationCount; }
public void setGaussianPriorVariance (double p) { gaussianPriorVariance = p; }
public double getGaussianPriorVariance () { return gaussianPriorVariance; }
public void setUseSparseWeights (boolean b) { useSparseWeights = b; }
public boolean getUseSparseWeights () { return useSparseWeights; }
/** Sets whether to use the 'some unsupported trick.' This trick is, if training a CRF
* where some training has been done and sparse weights are used, to add a few weights
	 * for features that do not occur in the training data.
* <p>
* This generally leads to better accuracy at only a small memory cost.
*
* @param b Whether to use the trick
*/
public void setUseSomeUnsupportedTrick (boolean b) { useSomeUnsupportedTrick = b; }
/**
* Use this method to specify whether or not factors
* are added to the CRF by this trainer. If you have
* already setup the factors in your CRF, you may
* not want the trainer to add additional factors.
*
* @param flag If true, this trainer adds no factors to the CRF.
*/
public void setAddNoFactors(boolean flag) {
this.useNoWeights = flag;
}
public void shutdown() {
threadedOptimizable.shutdown();
}
public CRFOptimizableByBatchLabelLikelihood getOptimizableCRF (InstanceList trainingSet) {
if (cachedWeightsStructureStamp != crf.weightsStructureChangeStamp) {
if (!useNoWeights) {
if (useSparseWeights) {
crf.setWeightsDimensionAsIn (trainingSet, useSomeUnsupportedTrick);
}
else {
crf.setWeightsDimensionDensely ();
}
}
optimizable = null;
cachedWeightsStructureStamp = crf.weightsStructureChangeStamp;
}
if (optimizable == null || optimizable.trainingSet != trainingSet) {
optimizable = new CRFOptimizableByBatchLabelLikelihood(crf, trainingSet, numThreads);
optimizable.setGaussianPriorVariance(gaussianPriorVariance);
threadedOptimizable = new ThreadedOptimizable(optimizable, trainingSet, crf.getParameters().getNumFactors(),
new CRFCacheStaleIndicator(crf));
optimizer = null;
}
return optimizable;
}
public Optimizer getOptimizer (InstanceList trainingSet) {
getOptimizableCRF(trainingSet);
if (optimizer == null || optimizable != optimizer.getOptimizable()) {
optimizer = new LimitedMemoryBFGS(threadedOptimizable);
}
return optimizer;
}
public boolean trainIncremental (InstanceList training) {
return train (training, Integer.MAX_VALUE);
}
public boolean train (InstanceList trainingSet, int numIterations) {
if (numIterations <= 0) {
return false;
}
assert (trainingSet.size() > 0);
		getOptimizableCRF(trainingSet); // This will set this.optimizable if necessary
getOptimizer(trainingSet); // This will set this.opt if necessary
boolean converged = false;
logger.info ("CRF about to train with "+numIterations+" iterations");
for (int i = 0; i < numIterations; i++) {
try {
converged = optimizer.optimize (1);
iterationCount++;
logger.info ("CRF finished one iteration of maximizer, i="+i);
runEvaluators();
} catch (IllegalArgumentException e) {
e.printStackTrace();
logger.info ("Catching exception; saying converged.");
converged = true;
} catch (Exception e) {
e.printStackTrace();
logger.info("Catching exception; saying converged.");
converged = true;
}
if (converged) {
logger.info ("CRF training has converged, i="+i);
break;
}
}
return converged;
}
/**
* Train a CRF on various-sized subsets of the data. This method is typically used to accelerate training by
* quickly getting to reasonable parameters on only a subset of the parameters first, then on progressively more data.
* @param training The training Instances.
* @param numIterationsPerProportion Maximum number of Maximizer iterations per training proportion.
* @param trainingProportions If non-null, train on increasingly
* larger portions of the data, e.g. new double[] {0.2, 0.5, 1.0}. This can sometimes speedup convergence.
* Be sure to end in 1.0 if you want to train on all the data in the end.
* @return True if training has converged.
*/
public boolean train (InstanceList training, int numIterationsPerProportion, double[] trainingProportions)
{
int trainingIteration = 0;
assert (trainingProportions.length > 0);
boolean converged = false;
for (int i = 0; i < trainingProportions.length; i++) {
assert (trainingProportions[i] <= 1.0);
logger.info ("Training on "+trainingProportions[i]+"% of the data this round.");
if (trainingProportions[i] == 1.0) {
converged = this.train (training, numIterationsPerProportion);
}
else {
converged = this.train (training.split (new Random(1),
new double[] {trainingProportions[i], 1-trainingProportions[i]})[0], numIterationsPerProportion);
}
trainingIteration += numIterationsPerProportion;
}
return converged;
}
}
| 6,748 | 35.879781 | 120 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFOptimizableByGradientValues.java
|
package cc.mallet.fst;
import java.util.logging.Logger;
import cc.mallet.optimize.Optimizable;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.MalletLogger;
/**
* A CRF objective function that is the sum of multiple
* objective functions that implement Optimizable.ByGradientValue.
*
* @author Gregory Druck
* @author Gaurav Chandalia
*/
public class CRFOptimizableByGradientValues implements Optimizable.ByGradientValue {
private static Logger logger = MalletLogger.getLogger(CRFOptimizableByGradientValues.class.getName());
private int cachedValueWeightsStamp;
private int cachedGradientWeightsStamp;
private double cachedValue = Double.NEGATIVE_INFINITY;
private double[] cachedGradient;
private Optimizable.ByGradientValue[] optimizables;
private CRF crf;
/**
* @param crf CRF whose parameters we wish to estimate.
* @param opts Optimizable.ByGradientValue objective functions.
*
* Parameters are estimated by maximizing the sum of the individual
* objective functions.
*/
public CRFOptimizableByGradientValues (CRF crf, Optimizable.ByGradientValue[] opts) {
this.crf = crf;
this.optimizables = opts;
this.cachedGradient = new double[crf.parameters.getNumFactors()];
this.cachedValueWeightsStamp = -1;
this.cachedGradientWeightsStamp = -1;
}
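	// Hedged sketch (not in the original source): summing two hypothetical
	// objectives ("likelihoodTerm" and "regularizerTerm") into a single
	// Optimizable that a gradient-based optimizer can maximize.
	//
	//   CRFOptimizableByGradientValues objective = new CRFOptimizableByGradientValues(
	//       crf, new Optimizable.ByGradientValue[] {likelihoodTerm, regularizerTerm});
	//   new LimitedMemoryBFGS(objective).optimize();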
public int getNumParameters () {
return crf.parameters.getNumFactors();
}
public void getParameters (double[] buffer) {
crf.parameters.getParameters(buffer);
}
public double getParameter (int index) {
return crf.parameters.getParameter(index);
}
public void setParameters (double [] buff) {
crf.parameters.setParameters(buff);
crf.weightsValueChanged();
}
public void setParameter (int index, double value) {
crf.parameters.setParameter(index, value);
crf.weightsValueChanged();
}
/** Returns the log probability of the training sequence labels and the prior over parameters. */
public double getValue () {
if (crf.weightsValueChangeStamp != cachedValueWeightsStamp) {
// The cached value is not up to date; it was calculated for a different set of CRF weights.
cachedValue = 0;
for (int i = 0; i < optimizables.length; i++)
cachedValue += optimizables[i].getValue();
cachedValueWeightsStamp = crf.weightsValueChangeStamp; // cachedValue is now no longer stale
logger.info ("getValue() = "+cachedValue);
}
return cachedValue;
}
public void getValueGradient (double [] buffer) {
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
getValue ();
MatrixOps.setAll(cachedGradient, 0);
double[] b2 = new double[buffer.length];
for (int i = 0; i < optimizables.length; i++) {
MatrixOps.setAll(b2, 0);
optimizables[i].getValueGradient(b2);
MatrixOps.plusEquals(cachedGradient, b2);
}
cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
}
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
}
| 2,935 | 30.234043 | 104 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/CRFOptimizableByLabelLikelihood.java
|
package cc.mallet.fst;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.BitSet;
import java.util.logging.Logger;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.optimize.Optimizable;
import cc.mallet.util.MalletLogger;
/** An objective function for CRFs that is the label likelihood plus a Gaussian or hyperbolic prior on parameters. */
public class CRFOptimizableByLabelLikelihood implements Optimizable.ByGradientValue, Serializable
{
private static Logger logger = MalletLogger.getLogger(CRFOptimizableByLabelLikelihood.class.getName());
static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1.0;
static final double DEFAULT_HYPERBOLIC_PRIOR_SLOPE = 0.2;
static final double DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS = 10.0;
// gsc: changing field access to make this class extensible
protected InstanceList trainingSet;
protected double cachedValue = -123456789;
protected double[] cachedGradient;
protected BitSet infiniteValues = null;
protected CRF crf;
protected CRF.Factors constraints, expectations;
// Various values from CRF acting as indicators of when we need to ...
private int cachedValueWeightsStamp = -1; // ... re-calculate expectations and values to getValue() because weights' values changed
private int cachedGradientWeightsStamp = -1; // ... re-calculate to getValueGradient() because weights' values changed
boolean usingHyperbolicPrior = false;
double gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
double hyperbolicPriorSlope = DEFAULT_HYPERBOLIC_PRIOR_SLOPE;
double hyperbolicPriorSharpness = DEFAULT_HYPERBOLIC_PRIOR_SHARPNESS;
public CRFOptimizableByLabelLikelihood (CRF crf, InstanceList ilist)
{
// Set up
this.crf = crf;
this.trainingSet = ilist;
//cachedGradient = new DenseVector (numParameters);
cachedGradient = new double[crf.parameters.getNumFactors()];
constraints = new CRF.Factors(crf.parameters);
expectations = new CRF.Factors(crf.parameters);
// This resets and values that may have been in expectations and constraints
//reallocateSufficientStatistics();
// This is unfortunately necessary, b/c cachedValue & cachedValueStale not in same place!
cachedValueWeightsStamp = -1;
cachedGradientWeightsStamp = -1;
gatherConstraints (ilist);
}
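	// Hedged usage sketch (not part of the original source): this objective can be
	// handed directly to a gradient-based optimizer, or combined with others via
	// the trainers in this package. "crf" and "training" are assumed to exist.
	//
	//   CRFOptimizableByLabelLikelihood objective =
	//       new CRFOptimizableByLabelLikelihood(crf, training);
	//   objective.setGaussianPriorVariance(10.0);  // illustrative value
	//   boolean converged = new LimitedMemoryBFGS(objective).optimize();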
protected void gatherConstraints (InstanceList ilist)
{
// Set the constraints by running forward-backward with the *output
// label sequence provided*, thus restricting it to only those
// paths that agree with the label sequence.
// Zero the constraints[]
// Reset constraints[] to zero before we fill them again
assert (constraints.structureMatches(crf.parameters));
constraints.zero();
for (Instance instance : ilist) {
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
double instanceWeight = ilist.getInstanceWeight(instance);
//System.out.println ("Constraint-gathering on instance "+i+" of "+ilist.size());
Transducer.Incrementor incrementor = instanceWeight == 1.0 ? constraints.new Incrementor() : constraints.new WeightedIncrementor(instanceWeight);
new SumLatticeDefault (this.crf, input, output, incrementor);
}
// System.out.println ("testing Value and Gradient");
// TestOptimizable.testValueAndGradientCurrentParameters (this);
}
// TODO Move these implementations into CRF.java, and put here stubs that call them!
public int getNumParameters () {return crf.parameters.getNumFactors();}
public void getParameters (double[] buffer) {
crf.parameters.getParameters(buffer);
}
public double getParameter (int index) {
return crf.parameters.getParameter(index);
}
public void setParameters (double [] buff) {
crf.parameters.setParameters(buff);
crf.weightsValueChanged();
}
public void setParameter (int index, double value) {
crf.parameters.setParameter(index, value);
crf.weightsValueChanged();
}
// log probability of the training sequence labels, and fill in expectations[]
protected double getExpectationValue ()
{
// Instance values must either always or never be included in
// the total values; we can't just sometimes skip a value
// because it is infinite, this throws off the total values.
boolean initializingInfiniteValues = false;
double value = 0;
if (infiniteValues == null) {
infiniteValues = new BitSet ();
initializingInfiniteValues = true;
}
// Reset expectations to zero before we fill them again
assert (expectations.structureMatches(crf.parameters));
expectations.zero();
// count the number of instances that have infinite weight
int numInfLabeledWeight = 0;
int numInfUnlabeledWeight = 0;
int numInfWeight = 0;
// Calculate the value of each instance, and also fill in expectations
double unlabeledWeight, labeledWeight, weight;
for (int ii = 0; ii < trainingSet.size(); ii++) {
Instance instance = trainingSet.get(ii);
double instanceWeight = trainingSet.getInstanceWeight(instance);
FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
labeledWeight = new SumLatticeDefault (this.crf, input, output, (Transducer.Incrementor)null).getTotalWeight();
String instanceName = instance.getName() == null ? "instance#"+ii : instance.getName().toString();
//System.out.println ("labeledWeight = "+labeledWeight);
if (Double.isInfinite (labeledWeight)) {
++numInfLabeledWeight;
logger.warning (instanceName + " has -infinite labeled weight.\n"+(instance.getSource() != null ? instance.getSource() : ""));
}
Transducer.Incrementor incrementor = instanceWeight == 1.0 ? expectations.new Incrementor() : expectations.new WeightedIncrementor (instanceWeight);
unlabeledWeight = new SumLatticeDefault (this.crf, input, null, incrementor).getTotalWeight();
//System.out.println ("unlabeledWeight = "+unlabeledWeight);
if (Double.isInfinite (unlabeledWeight)) {
++numInfUnlabeledWeight;
				logger.warning (instanceName + " has -infinite unlabeled weight.\n"+(instance.getSource() != null ? instance.getSource() : ""));
}
// Here weight is log(conditional probability correct label sequence)
weight = labeledWeight - unlabeledWeight;
//System.out.println ("Instance "+ii+" CRF.MaximizableCRF.getWeight = "+weight);
if (Double.isInfinite(weight)) {
++numInfWeight;
logger.warning (instanceName + " has -infinite weight; skipping.");
if (initializingInfiniteValues)
infiniteValues.set (ii);
else if (!infiniteValues.get(ii))
throw new IllegalStateException ("Instance i used to have non-infinite value, but now it has infinite value.");
continue;
}
// Weights are log probabilities, and we want to return a log probability
value += weight * instanceWeight;
}
if (numInfLabeledWeight > 0 || numInfUnlabeledWeight > 0 || numInfWeight > 0) {
logger.warning("Number of instances with:\n" +
"\t -infinite labeled weight: " + numInfLabeledWeight + "\n" +
"\t -infinite unlabeled weight: " + numInfUnlabeledWeight + "\n" +
"\t -infinite weight: " + numInfWeight);
}
return value;
}
/** Returns the log probability of the training sequence labels and the prior over parameters. */
public double getValue ()
{
if (crf.weightsValueChangeStamp != cachedValueWeightsStamp) {
// The cached value is not up to date; it was calculated for a different set of CRF weights.
cachedValueWeightsStamp = crf.weightsValueChangeStamp; // cachedValue will soon no longer be stale
long startingTime = System.currentTimeMillis();
//crf.print();
// Get the value of all the all the true labels, also filling in expectations at the same time.
cachedValue = getExpectationValue ();
// Incorporate prior on parameters
if (usingHyperbolicPrior) // Hyperbolic prior
cachedValue += crf.parameters.hyberbolicPrior(hyperbolicPriorSlope, hyperbolicPriorSharpness);
else // Gaussian prior
cachedValue += crf.parameters.gaussianPrior(gaussianPriorVariance);
// gsc: make sure the prior gives a correct value
assert(!(Double.isNaN(cachedValue) || Double.isInfinite(cachedValue))) : "Label likelihood is NaN/Infinite";
logger.info ("getValue() (loglikelihood, optimizable by label likelihood) = "+cachedValue);
long endingTime = System.currentTimeMillis();
logger.fine ("Inference milliseconds = "+(endingTime - startingTime));
}
return cachedValue;
}
// gsc: changing method from assertNotNaN to assertNotNaNOrInfinite
private void assertNotNaNOrInfinite ()
{
// crf.parameters are allowed to have infinite values
crf.parameters.assertNotNaN();
expectations.assertNotNaNOrInfinite();
constraints.assertNotNaNOrInfinite();
}
public void getValueGradient (double [] buffer)
{
// PriorGradient is -parameter/gaussianPriorVariance
// Gradient is (constraint - expectation + PriorGradient)
// == -(expectation - constraint - PriorGradient).
// Gradient points "up-hill", i.e. in the direction of higher value
if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
cachedGradientWeightsStamp = crf.weightsValueChangeStamp; // cachedGradient will soon no longer be stale
// This will fill in the this.expectation, updating it if necessary
getValue ();
assertNotNaNOrInfinite();
// Gradient is constraints - expectations + prior. We do this by -(expectations - constraints - prior).
expectations.plusEquals(constraints, -1.0);
if (usingHyperbolicPrior)
expectations.plusEqualsHyperbolicPriorGradient(crf.parameters, -hyperbolicPriorSlope, hyperbolicPriorSharpness);
else
expectations.plusEqualsGaussianPriorGradient(crf.parameters, -gaussianPriorVariance);
expectations.assertNotNaNOrInfinite();
expectations.getParameters(cachedGradient);
MatrixOps.timesEquals (cachedGradient, -1.0); // This implements the -(...) in the above comment
// xxx Show the feature with maximum gradient
// TODO Is something like this negation still necessary?????
// up to now we've been calculating the weightGradient.
// take the opposite to get the valueGradient
//cachedGradient.timesEquals (-1.0); // point uphill
}
// What the heck was this!?: if (buffer.length != this.numParameters) buffer = new double[this.numParameters];
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
//Arrays.fill (buffer, 0.0);
//System.arraycopy(cachedGradie, 0, buffer, 0, 2*crf.parameters.initialWeights.length); // TODO For now, just copy the state inital/final weights
}
// gsc: adding these get/set methods for the prior
public void setUseHyperbolicPrior (boolean f) { usingHyperbolicPrior = f; }
public void setHyperbolicPriorSlope (double p) { hyperbolicPriorSlope = p; }
public void setHyperbolicPriorSharpness (double p) { hyperbolicPriorSharpness = p; }
public double getUseHyperbolicPriorSlope () { return hyperbolicPriorSlope; }
public double getUseHyperbolicPriorSharpness () { return hyperbolicPriorSharpness; }
public void setGaussianPriorVariance (double p) { gaussianPriorVariance = p; }
public double getGaussianPriorVariance () { return gaussianPriorVariance; }
//Serialization of MaximizableCRF
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject(trainingSet);
out.writeDouble(cachedValue);
out.writeObject(cachedGradient);
out.writeObject(infiniteValues);
out.writeObject(crf);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
trainingSet = (InstanceList) in.readObject();
cachedValue = in.readDouble();
cachedGradient = (double[]) in.readObject();
infiniteValues = (BitSet) in.readObject();
crf = (CRF)in.readObject();
}
public static class Factory {
public Optimizable.ByGradientValue newCRFOptimizable (CRF crf, InstanceList trainingData) {
return new CRFOptimizableByLabelLikelihood (crf, trainingData);
}
}
}
| 12,452 | 41.501706 | 151 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/SegmentationEvaluator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Sequence;
import cc.mallet.types.TokenSequence;
import cc.mallet.util.MalletLogger;
public class SegmentationEvaluator extends TransducerEvaluator
{
private static Logger logger = MalletLogger.getLogger(SegmentationEvaluator.class.getName());
// equals() is called on these objects to determine if this token is the start (end) of a segment
// "segmentEndTag" should return "true" for the token *after* the end of the segment (i.e. that token
// is not part of the segment).
static Pattern startRegex = Pattern.compile ("^B.*");
//static Pattern endRegex = Pattern.compile ("^O.*");
Object segmentStartTag = new Object () { public boolean equals (Object o) { return startRegex.matcher(o.toString()).matches(); } };
Object segmentEndTag = new Object () { public boolean equals (Object o) { return false; } };
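	/*
	 * A minimal customization sketch (an assumption, not part of the original
	 * source): the setters defined below let callers redefine segment
	 * boundaries, e.g. for BIO tags, ending a segment at the first following
	 * token whose tag does not start with "I".
	 *
	 *   SegmentationEvaluator eval = new SegmentationEvaluator(testData, "test");
	 *   eval.setSegmentEndTag(new Object() {
	 *     public boolean equals(Object o) { return !o.toString().startsWith("I"); }
	 *   });
	 */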
public SegmentationEvaluator (InstanceList[] instanceLists, String[] descriptions) {
super (instanceLists, descriptions);
}
public SegmentationEvaluator (InstanceList instanceList1, String description1) {
this (new InstanceList[] {instanceList1}, new String[] {description1});
}
public SegmentationEvaluator (InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2) {
this (new InstanceList[] {instanceList1, instanceList2}, new String[] {description1, description2});
}
public SegmentationEvaluator (InstanceList instanceList1, String description1,
InstanceList instanceList2, String description2,
InstanceList instanceList3, String description3) {
this (new InstanceList[] {instanceList1, instanceList2, instanceList3}, new String[] {description1, description2, description3});
}
public SegmentationEvaluator setSegmentStartTag (Object o) { this.segmentStartTag = o; return this; }
public SegmentationEvaluator setSegmentEndTag (Object o) { this.segmentEndTag = o; return this; }
public void evaluateInstanceList (TransducerTrainer tt, InstanceList data, String description)
{
Transducer model = tt.getTransducer();
int numCorrectTokens, totalTokens;
int numTrueSegments, numPredictedSegments, numCorrectSegments;
int numCorrectSegmentsInAlphabet, numCorrectSegmentsOOV;
int numIncorrectSegmentsInAlphabet, numIncorrectSegmentsOOV;
TokenSequence sourceTokenSequence = null;
totalTokens = numCorrectTokens = 0;
numTrueSegments = numPredictedSegments = numCorrectSegments = 0;
numCorrectSegmentsInAlphabet = numCorrectSegmentsOOV = 0;
numIncorrectSegmentsInAlphabet = numIncorrectSegmentsOOV = 0;
for (int i = 0; i < data.size(); i++) {
Instance instance = data.get(i);
Sequence input = (Sequence) instance.getData();
//String tokens = null;
//if (instance.getSource() != null)
//tokens = (String) instance.getSource().toString();
Sequence trueOutput = (Sequence) instance.getTarget();
assert (input.size() == trueOutput.size());
Sequence predOutput = model.transduce (input);
assert (predOutput.size() == trueOutput.size());
boolean trueStart, predStart;
for (int j = 0; j < trueOutput.size(); j++) {
totalTokens++;
trueStart = predStart = false;
if (segmentStartTag.equals(trueOutput.get(j))) {
numTrueSegments++;
trueStart = true;
}
if (segmentStartTag.equals(predOutput.get(j))) {
predStart = true;
numPredictedSegments++;
}
if (trueStart && predStart) {
int m;
//StringBuffer sb = new StringBuffer();
//sb.append (tokens.charAt(j));
for (m = j+1; m < trueOutput.size(); m++) {
trueStart = predStart = false; // Here, these actually mean "end", not "start"
if (segmentEndTag.equals(trueOutput.get(m)))
trueStart = true;
if (segmentEndTag.equals(predOutput.get(m)))
predStart = true;
if (trueStart || predStart) {
if (trueStart && predStart) {
// It is a correct segment
numCorrectSegments++;
//if (HashFile.allLexicons.contains(sb.toString()))
//numCorrectSegmentsInAlphabet++;
//else
//numCorrectSegmentsOOV++;
} else {
// It is an incorrect segment; let's find out if it was in the lexicon
//for (int mm = m; mm < trueOutput.size(); mm++) {
//if (segmentEndTag.equals(predOutput.get(mm)))
//break;
//sb.append (tokens.charAt(mm));
//}
//if (HashFile.allLexicons.contains(sb.toString()))
//numIncorrectSegmentsInAlphabet++;
//else
//numIncorrectSegmentsOOV++;
}
break;
}
//sb.append (tokens.charAt(m));
}
// for the case of the end of the sequence
if(m==trueOutput.size()) {
if (trueStart==predStart) {
numCorrectSegments++;
//if (HashFile.allLexicons.contains(sb.toString()))
//numCorrectSegmentsInAlphabet++;
//else
//numCorrectSegmentsOOV++;
} else {
//if (HashFile.allLexicons.contains(sb.toString()))
//numIncorrectSegmentsInAlphabet++;
//else
//numIncorrectSegmentsOOV++;
}
}
} else if (predStart) {
// Here is an incorrect predicted start, find out if the word is in the lexicon
//StringBuffer sb = new StringBuffer();
//sb.append (tokens.charAt(j));
//for (int mm = j+1; mm < trueOutput.size(); mm++) {
//if (segmentEndTag.equals(predOutput.get(mm)))
//break;
//sb.append (tokens.charAt(mm));
//}
//if (HashFile.allLexicons.contains(sb.toString()))
//numIncorrectSegmentsInAlphabet++;
//else
//numIncorrectSegmentsOOV++;
}
if (trueOutput.get(j).equals(predOutput.get(j)))
numCorrectTokens++;
}
}
logger.info (description +" accuracy="+((double)numCorrectTokens)/totalTokens);
double precision = numPredictedSegments == 0 ? 1 : ((double)numCorrectSegments) / numPredictedSegments;
double recall = numTrueSegments == 0 ? 1 : ((double)numCorrectSegments) / numTrueSegments;
double f1 = recall+precision == 0.0 ? 0.0 : (2.0 * recall * precision) / (recall + precision);
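		// Worked example (illustrative): 8 true segments, 10 predicted, 6 correct
		// gives precision = 6/10 = 0.6, recall = 6/8 = 0.75, and f1 = 0.9/1.35 ~= 0.667.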
logger.info (" precision="+precision+" recall="+recall+" f1="+f1);
logger.info ("segments true="+numTrueSegments+" pred="+numPredictedSegments+" correct="+numCorrectSegments+" misses="+(numTrueSegments-numCorrectSegments)+" alarms="+(numPredictedSegments-numCorrectSegments));
//System.out.println ("correct segments OOV="+numCorrectSegmentsOOV+" IV="+numCorrectSegmentsInAlphabet);
//System.out.println ("incorrect segments OOV="+numIncorrectSegmentsOOV+" IV="+numIncorrectSegmentsInAlphabet);
}
}
| 7,733 | 43.705202 | 213 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/HMM.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Random;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Multinomial;
import cc.mallet.types.Sequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.util.MalletLogger;
/** A Hidden Markov Model. */
public class HMM extends Transducer implements Serializable {
private static Logger logger = MalletLogger.getLogger(HMM.class.getName());
static final String LABEL_SEPARATOR = ",";
Alphabet inputAlphabet;
Alphabet outputAlphabet;
ArrayList<State> states = new ArrayList<State>();
ArrayList<State> initialStates = new ArrayList<State>();
HashMap<String, State> name2state = new HashMap<String, State>();
Multinomial.Estimator[] transitionEstimator;
Multinomial.Estimator[] emissionEstimator;
Multinomial.Estimator initialEstimator;
Multinomial[] transitionMultinomial;
Multinomial[] emissionMultinomial;
Multinomial initialMultinomial;
public HMM(Pipe inputPipe, Pipe outputPipe) {
this.inputPipe = inputPipe;
this.outputPipe = outputPipe;
this.inputAlphabet = inputPipe.getDataAlphabet();
this.outputAlphabet = inputPipe.getTargetAlphabet();
}
public HMM(Alphabet inputAlphabet, Alphabet outputAlphabet) {
inputAlphabet.stopGrowth();
logger.info("HMM input dictionary size = " + inputAlphabet.size());
this.inputAlphabet = inputAlphabet;
this.outputAlphabet = outputAlphabet;
}
public Alphabet getInputAlphabet() {
return inputAlphabet;
}
public Alphabet getOutputAlphabet() {
return outputAlphabet;
}
public void print() {
StringBuffer sb = new StringBuffer();
for (int i = 0; i < numStates(); i++) {
State s = (State) getState(i);
sb.append("STATE NAME=\"");
sb.append(s.name);
sb.append("\" (");
sb.append(s.destinations.length);
sb.append(" outgoing transitions)\n");
sb.append(" ");
sb.append("initialWeight= ");
sb.append(s.initialWeight);
sb.append('\n');
sb.append(" ");
sb.append("finalWeight= ");
sb.append(s.finalWeight);
sb.append('\n');
sb.append("Emission distribution:\n" + emissionMultinomial[i]
+ "\n\n");
sb.append("Transition distribution:\n"
+ transitionMultinomial[i].toString());
}
System.out.println(sb.toString());
}
public void addState(String name, double initialWeight, double finalWeight,
String[] destinationNames, String[] labelNames) {
assert (labelNames.length == destinationNames.length);
if (name2state.get(name) != null)
throw new IllegalArgumentException("State with name `" + name
+ "' already exists.");
State s = new State(name, states.size(), initialWeight, finalWeight,
destinationNames, labelNames, this);
s.print();
states.add(s);
if (initialWeight > IMPOSSIBLE_WEIGHT)
initialStates.add(s);
name2state.put(name, s);
}
/**
	 * Add a state with parameters equal to zero, and with labels on outgoing
	 * arcs matching their destination state names.
*/
public void addState(String name, String[] destinationNames) {
this.addState(name, 0, 0, destinationNames, destinationNames);
}
/**
	 * Add a group of states that are fully connected with each other, with
	 * parameters equal to zero, and with labels on their outgoing arcs matching
	 * their destination state names.
*/
public void addFullyConnectedStates(String[] stateNames) {
for (int i = 0; i < stateNames.length; i++)
addState(stateNames[i], stateNames);
}
public void addFullyConnectedStatesForLabels() {
String[] labels = new String[outputAlphabet.size()];
		// This is assuming that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
labels[i] = (String) outputAlphabet.lookupObject(i);
}
addFullyConnectedStates(labels);
}
private boolean[][] labelConnectionsIn(InstanceList trainingSet) {
int numLabels = outputAlphabet.size();
boolean[][] connections = new boolean[numLabels][numLabels];
for (Instance instance : trainingSet) {
FeatureSequence output = (FeatureSequence) instance.getTarget();
for (int j = 1; j < output.size(); j++) {
int sourceIndex = outputAlphabet.lookupIndex(output.get(j - 1));
int destIndex = outputAlphabet.lookupIndex(output.get(j));
assert (sourceIndex >= 0 && destIndex >= 0);
connections[sourceIndex][destIndex] = true;
}
}
return connections;
}
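	// Illustrative example: a single target sequence [O, B, I, I, O] marks
	// connections[O][B], connections[B][I], connections[I][I] and connections[I][O]
	// as true (indices written as label names for readability).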
/**
* Add states to create a first-order Markov model on labels, adding only
	 * those transitions that occur in the given trainingSet.
*/
public void addStatesForLabelsConnectedAsIn(InstanceList trainingSet) {
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn(trainingSet);
for (int i = 0; i < numLabels; i++) {
int numDestinations = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
numDestinations++;
String[] destinationNames = new String[numDestinations];
int destinationIndex = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
destinationNames[destinationIndex++] = (String) outputAlphabet
.lookupObject(j);
addState((String) outputAlphabet.lookupObject(i), destinationNames);
}
}
/**
* Add as many states as there are labels, but don't create separate weights
* for each source-destination pair of states. Instead have all the incoming
* transitions to a state share the same weights.
*/
public void addStatesForHalfLabelsConnectedAsIn(InstanceList trainingSet) {
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn(trainingSet);
for (int i = 0; i < numLabels; i++) {
int numDestinations = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
numDestinations++;
String[] destinationNames = new String[numDestinations];
int destinationIndex = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
destinationNames[destinationIndex++] = (String) outputAlphabet
.lookupObject(j);
addState((String) outputAlphabet.lookupObject(i), 0.0, 0.0,
destinationNames, destinationNames);
}
}
/**
* Add as many states as there are labels, but don't create separate
* observational-test-weights for each source-destination pair of
* states---instead have all the incoming transitions to a state share the
	 * same observational-feature-test weights. However, do create a separate
	 * default feature for each transition (which acts as an HMM-style
	 * transition probability).
*/
public void addStatesForThreeQuarterLabelsConnectedAsIn(
InstanceList trainingSet) {
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn(trainingSet);
for (int i = 0; i < numLabels; i++) {
int numDestinations = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j])
numDestinations++;
String[] destinationNames = new String[numDestinations];
int destinationIndex = 0;
for (int j = 0; j < numLabels; j++)
if (connections[i][j]) {
String labelName = (String) outputAlphabet.lookupObject(j);
destinationNames[destinationIndex] = labelName;
// The "transition" weights will include only the default
// feature
// gsc: variable is never used
// String wn = (String)outputAlphabet.lookupObject(i) + "->"
// + (String)outputAlphabet.lookupObject(j);
destinationIndex++;
}
addState((String) outputAlphabet.lookupObject(i), 0.0, 0.0,
destinationNames, destinationNames);
}
}
public void addFullyConnectedStatesForThreeQuarterLabels(
InstanceList trainingSet) {
int numLabels = outputAlphabet.size();
for (int i = 0; i < numLabels; i++) {
String[] destinationNames = new String[numLabels];
for (int j = 0; j < numLabels; j++) {
String labelName = (String) outputAlphabet.lookupObject(j);
destinationNames[j] = labelName;
}
addState((String) outputAlphabet.lookupObject(i), 0.0, 0.0,
destinationNames, destinationNames);
}
}
public void addFullyConnectedStatesForBiLabels() {
String[] labels = new String[outputAlphabet.size()];
		// This is assuming that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
labels[i] = outputAlphabet.lookupObject(i).toString();
}
for (int i = 0; i < labels.length; i++) {
for (int j = 0; j < labels.length; j++) {
String[] destinationNames = new String[labels.length];
for (int k = 0; k < labels.length; k++)
destinationNames[k] = labels[j] + LABEL_SEPARATOR
+ labels[k];
addState(labels[i] + LABEL_SEPARATOR + labels[j], 0.0, 0.0,
destinationNames, labels);
}
}
}
/**
* Add states to create a second-order Markov model on labels, adding only
	 * those transitions that occur in the given trainingSet.
*/
public void addStatesForBiLabelsConnectedAsIn(InstanceList trainingSet) {
int numLabels = outputAlphabet.size();
boolean[][] connections = labelConnectionsIn(trainingSet);
for (int i = 0; i < numLabels; i++) {
for (int j = 0; j < numLabels; j++) {
if (!connections[i][j])
continue;
int numDestinations = 0;
for (int k = 0; k < numLabels; k++)
if (connections[j][k])
numDestinations++;
String[] destinationNames = new String[numDestinations];
String[] labels = new String[numDestinations];
int destinationIndex = 0;
for (int k = 0; k < numLabels; k++)
if (connections[j][k]) {
destinationNames[destinationIndex] = (String) outputAlphabet
.lookupObject(j)
+ LABEL_SEPARATOR
+ (String) outputAlphabet.lookupObject(k);
labels[destinationIndex] = (String) outputAlphabet
.lookupObject(k);
destinationIndex++;
}
addState((String) outputAlphabet.lookupObject(i)
+ LABEL_SEPARATOR
+ (String) outputAlphabet.lookupObject(j), 0.0, 0.0,
destinationNames, labels);
}
}
}
public void addFullyConnectedStatesForTriLabels() {
String[] labels = new String[outputAlphabet.size()];
		// This is assuming that the entries in the outputAlphabet are Strings!
for (int i = 0; i < outputAlphabet.size(); i++) {
logger.info("HMM: outputAlphabet.lookup class = "
+ outputAlphabet.lookupObject(i).getClass().getName());
labels[i] = outputAlphabet.lookupObject(i).toString();
}
for (int i = 0; i < labels.length; i++) {
for (int j = 0; j < labels.length; j++) {
for (int k = 0; k < labels.length; k++) {
String[] destinationNames = new String[labels.length];
for (int l = 0; l < labels.length; l++)
destinationNames[l] = labels[j] + LABEL_SEPARATOR
+ labels[k] + LABEL_SEPARATOR + labels[l];
addState(labels[i] + LABEL_SEPARATOR + labels[j]
+ LABEL_SEPARATOR + labels[k], 0.0, 0.0,
destinationNames, labels);
}
}
}
}
public void addSelfTransitioningStateForAllLabels(String name) {
String[] labels = new String[outputAlphabet.size()];
String[] destinationNames = new String[outputAlphabet.size()];
for (int i = 0; i < outputAlphabet.size(); i++) {
labels[i] = outputAlphabet.lookupObject(i).toString();
destinationNames[i] = name;
}
addState(name, 0.0, 0.0, destinationNames, labels);
}
private String concatLabels(String[] labels) {
String sep = "";
StringBuffer buf = new StringBuffer();
for (int i = 0; i < labels.length; i++) {
buf.append(sep).append(labels[i]);
sep = LABEL_SEPARATOR;
}
return buf.toString();
}
private String nextKGram(String[] history, int k, String next) {
String sep = "";
StringBuffer buf = new StringBuffer();
int start = history.length + 1 - k;
for (int i = start; i < history.length; i++) {
buf.append(sep).append(history[i]);
sep = LABEL_SEPARATOR;
}
buf.append(sep).append(next);
return buf.toString();
}
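	// Illustrative example: with LABEL_SEPARATOR ",", concatLabels({"A","B","C"})
	// yields "A,B,C", and nextKGram({"A","B","C"}, 2, "D") keeps only the most
	// recent history label and yields "C,D".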
private boolean allowedTransition(String prev, String curr, Pattern no,
Pattern yes) {
String pair = concatLabels(new String[] { prev, curr });
if (no != null && no.matcher(pair).matches())
return false;
if (yes != null && !yes.matcher(pair).matches())
return false;
return true;
}
private boolean allowedHistory(String[] history, Pattern no, Pattern yes) {
for (int i = 1; i < history.length; i++)
if (!allowedTransition(history[i - 1], history[i], no, yes))
return false;
return true;
}
/**
* Assumes that the HMM's output alphabet contains <code>String</code>s.
* Creates an order-<em>n</em> HMM with input predicates and output labels
* given by <code>trainingSet</code> and order, connectivity, and weights
* given by the remaining arguments.
*
* @param trainingSet
* the training instances
* @param orders
* an array of increasing non-negative numbers giving the orders
* of the features for this HMM. The largest number <em>n</em> is
* the Markov order of the HMM. States are <em>n</em>-tuples of
* output labels. Each of the other numbers <em>k</em> in
* <code>orders</code> represents a weight set shared by all
* destination states whose last (most recent) <em>k</em> labels
* agree. If <code>orders</code> is <code>null</code>, an order-0
* HMM is built.
* @param defaults
* If non-null, it must be the same length as <code>orders</code>
* , with <code>true</code> positions indicating that the weight
* set for the corresponding order contains only the weight for a
* default feature; otherwise, the weight set has weights for all
* features built from input predicates.
* @param start
* The label that represents the context of the start of a
	 *            sequence. It may also be used for sequence labels.
* @param forbidden
* If non-null, specifies what pairs of successive labels are not
	 *            allowed, both for constructing order-<em>n</em> states or for
* transitions. A label pair (<em>u</em>,<em>v</em>) is not
* allowed if <em>u</em> + "," + <em>v</em> matches
* <code>forbidden</code>.
* @param allowed
* If non-null, specifies what pairs of successive labels are
	 *            allowed, both for constructing order-<em>n</em> states or for
* transitions. A label pair (<em>u</em>,<em>v</em>) is allowed
* only if <em>u</em> + "," + <em>v</em> matches
* <code>allowed</code>.
* @param fullyConnected
* Whether to include all allowed transitions, even those not
	 *            occurring in <code>trainingSet</code>.
	 * @return The name of the start state; an illustrative usage sketch follows
	 *         this method.
*
*/
public String addOrderNStates(InstanceList trainingSet, int[] orders,
boolean[] defaults, String start, Pattern forbidden,
Pattern allowed, boolean fullyConnected) {
boolean[][] connections = null;
if (!fullyConnected)
connections = labelConnectionsIn(trainingSet);
int order = -1;
if (defaults != null && defaults.length != orders.length)
throw new IllegalArgumentException(
"Defaults must be null or match orders");
if (orders == null)
order = 0;
else {
for (int i = 0; i < orders.length; i++) {
if (orders[i] <= order)
throw new IllegalArgumentException(
"Orders must be non-negative and in ascending order");
order = orders[i];
}
if (order < 0)
order = 0;
}
if (order > 0) {
int[] historyIndexes = new int[order];
String[] history = new String[order];
String label0 = (String) outputAlphabet.lookupObject(0);
for (int i = 0; i < order; i++)
history[i] = label0;
int numLabels = outputAlphabet.size();
while (historyIndexes[0] < numLabels) {
logger.info("Preparing " + concatLabels(history));
if (allowedHistory(history, forbidden, allowed)) {
String stateName = concatLabels(history);
int nt = 0;
String[] destNames = new String[numLabels];
String[] labelNames = new String[numLabels];
for (int nextIndex = 0; nextIndex < numLabels; nextIndex++) {
String next = (String) outputAlphabet
.lookupObject(nextIndex);
if (allowedTransition(history[order - 1], next,
forbidden, allowed)
&& (fullyConnected || connections[historyIndexes[order - 1]][nextIndex])) {
destNames[nt] = nextKGram(history, order, next);
labelNames[nt] = next;
nt++;
}
}
if (nt < numLabels) {
String[] newDestNames = new String[nt];
String[] newLabelNames = new String[nt];
for (int t = 0; t < nt; t++) {
newDestNames[t] = destNames[t];
newLabelNames[t] = labelNames[t];
}
destNames = newDestNames;
labelNames = newLabelNames;
}
addState(stateName, 0.0, 0.0, destNames, labelNames);
}
for (int o = order - 1; o >= 0; o--)
if (++historyIndexes[o] < numLabels) {
history[o] = (String) outputAlphabet
.lookupObject(historyIndexes[o]);
break;
} else if (o > 0) {
historyIndexes[o] = 0;
history[o] = label0;
}
}
for (int i = 0; i < order; i++)
history[i] = start;
return concatLabels(history);
}
String[] stateNames = new String[outputAlphabet.size()];
for (int s = 0; s < outputAlphabet.size(); s++)
stateNames[s] = (String) outputAlphabet.lookupObject(s);
for (int s = 0; s < outputAlphabet.size(); s++)
addState(stateNames[s], 0.0, 0.0, stateNames, stateNames);
return start;
}
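	/*
	 * Illustrative usage sketch (an assumption, not part of the original source),
	 * mirroring how order-N models are typically wired up; "hmm", "trainingData"
	 * and the background label "O" are assumed to exist.
	 *
	 *   int[] orders = new int[] { 1 };  // first-order model over labels
	 *   String startName = hmm.addOrderNStates(trainingData, orders, null,
	 *       "O", null, null, true);
	 *   for (int i = 0; i < hmm.numStates(); i++)
	 *     ((HMM.State) hmm.getState(i)).setInitialWeight(Transducer.IMPOSSIBLE_WEIGHT);
	 *   hmm.getState(startName).setInitialWeight(0.0);
	 */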
public State getState(String name) {
return (State) name2state.get(name);
}
public int numStates() {
return states.size();
}
public Transducer.State getState(int index) {
return (Transducer.State) states.get(index);
}
public Iterator initialStateIterator() {
return initialStates.iterator();
}
public boolean isTrainable() {
return true;
}
private Alphabet getTransitionAlphabet() {
Alphabet transitionAlphabet = new Alphabet();
for (int i = 0; i < numStates(); i++)
transitionAlphabet.lookupIndex(getState(i).getName(), true);
return transitionAlphabet;
}
@Deprecated
public void reset() {
emissionEstimator = new Multinomial.LaplaceEstimator[numStates()];
transitionEstimator = new Multinomial.LaplaceEstimator[numStates()];
emissionMultinomial = new Multinomial[numStates()];
transitionMultinomial = new Multinomial[numStates()];
Alphabet transitionAlphabet = getTransitionAlphabet();
for (int i = 0; i < numStates(); i++) {
emissionEstimator[i] = new Multinomial.LaplaceEstimator(
inputAlphabet);
transitionEstimator[i] = new Multinomial.LaplaceEstimator(
transitionAlphabet);
emissionMultinomial[i] = new Multinomial(
getUniformArray(inputAlphabet.size()), inputAlphabet);
transitionMultinomial[i] = new Multinomial(
getUniformArray(transitionAlphabet.size()),
transitionAlphabet);
}
initialMultinomial = new Multinomial(getUniformArray(transitionAlphabet
.size()), transitionAlphabet);
initialEstimator = new Multinomial.LaplaceEstimator(transitionAlphabet);
}
/**
	 * Separate initialization of initial/transition distributions and emissions. All
* probabilities are proportional to (1+Uniform[0,1])^noise.
*
* @author kedarb
* @param random
* Random object (if null use uniform distribution)
* @param noise
* Noise exponent to use. If zero, then uniform distribution.
*/
public void initTransitions(Random random, double noise) {
Alphabet transitionAlphabet = getTransitionAlphabet();
initialMultinomial = new Multinomial(getRandomArray(transitionAlphabet
.size(), random, noise), transitionAlphabet);
initialEstimator = new Multinomial.LaplaceEstimator(transitionAlphabet);
transitionMultinomial = new Multinomial[numStates()];
transitionEstimator = new Multinomial.LaplaceEstimator[numStates()];
for (int i = 0; i < numStates(); i++) {
transitionMultinomial[i] = new Multinomial(getRandomArray(
transitionAlphabet.size(), random, noise),
transitionAlphabet);
transitionEstimator[i] = new Multinomial.LaplaceEstimator(
transitionAlphabet);
// set state's initial weight
State s = (State) getState(i);
s.setInitialWeight(initialMultinomial.logProbability(s.getName()));
}
}
public void initEmissions(Random random, double noise) {
emissionMultinomial = new Multinomial[numStates()];
emissionEstimator = new Multinomial.LaplaceEstimator[numStates()];
for (int i = 0; i < numStates(); i++) {
emissionMultinomial[i] = new Multinomial(getRandomArray(
inputAlphabet.size(), random, noise), inputAlphabet);
emissionEstimator[i] = new Multinomial.LaplaceEstimator(
inputAlphabet);
}
}
public void estimate() {
Alphabet transitionAlphabet = getTransitionAlphabet();
initialMultinomial = initialEstimator.estimate();
initialEstimator = new Multinomial.LaplaceEstimator(transitionAlphabet);
for (int i = 0; i < numStates(); i++) {
State s = (State) getState(i);
emissionMultinomial[i] = emissionEstimator[i].estimate();
transitionMultinomial[i] = transitionEstimator[i].estimate();
s.setInitialWeight(initialMultinomial.logProbability(s.getName()));
// reset estimators
emissionEstimator[i] = new Multinomial.LaplaceEstimator(
inputAlphabet);
transitionEstimator[i] = new Multinomial.LaplaceEstimator(
transitionAlphabet);
}
}
/**
	 * Trains an HMM without validation and evaluation.
*/
public boolean train(InstanceList ilist) {
return train(ilist, (InstanceList) null, (InstanceList) null);
}
/**
	 * Trains an HMM with <tt>evaluator</tt> set to null.
*/
public boolean train(InstanceList ilist, InstanceList validation,
InstanceList testing) {
return train(ilist, validation, testing, (TransducerEvaluator) null);
}
public boolean train(InstanceList ilist, InstanceList validation,
InstanceList testing, TransducerEvaluator eval) {
assert (ilist.size() > 0);
if (emissionEstimator == null) {
emissionEstimator = new Multinomial.LaplaceEstimator[numStates()];
transitionEstimator = new Multinomial.LaplaceEstimator[numStates()];
emissionMultinomial = new Multinomial[numStates()];
transitionMultinomial = new Multinomial[numStates()];
Alphabet transitionAlphabet = new Alphabet();
for (int i = 0; i < numStates(); i++)
transitionAlphabet.lookupIndex(((State) states.get(i))
.getName(), true);
for (int i = 0; i < numStates(); i++) {
emissionEstimator[i] = new Multinomial.LaplaceEstimator(
inputAlphabet);
transitionEstimator[i] = new Multinomial.LaplaceEstimator(
transitionAlphabet);
emissionMultinomial[i] = new Multinomial(
getUniformArray(inputAlphabet.size()), inputAlphabet);
transitionMultinomial[i] = new Multinomial(
getUniformArray(transitionAlphabet.size()),
transitionAlphabet);
}
initialEstimator = new Multinomial.LaplaceEstimator(
transitionAlphabet);
}
for (Instance instance : ilist) {
FeatureSequence input = (FeatureSequence) instance.getData();
FeatureSequence output = (FeatureSequence) instance.getTarget();
new SumLatticeDefault(this, input, output, new Incrementor());
}
initialMultinomial = initialEstimator.estimate();
for (int i = 0; i < numStates(); i++) {
emissionMultinomial[i] = emissionEstimator[i].estimate();
transitionMultinomial[i] = transitionEstimator[i].estimate();
getState(i).setInitialWeight(
initialMultinomial.logProbability(getState(i).getName()));
}
return true;
}
public class Incrementor implements Transducer.Incrementor {
public void incrementFinalState(Transducer.State s, double count) {
}
public void incrementInitialState(Transducer.State s, double count) {
initialEstimator.increment(s.getName(), count);
}
public void incrementTransition(Transducer.TransitionIterator ti,
double count) {
int inputFtr = (Integer) ti.getInput();
State src = (HMM.State) ((TransitionIterator) ti).getSourceState();
State dest = (HMM.State) ((TransitionIterator) ti)
.getDestinationState();
int index = ti.getIndex();
emissionEstimator[index].increment(inputFtr, count);
transitionEstimator[src.getIndex()]
.increment(dest.getName(), count);
}
}
public class WeightedIncrementor implements Transducer.Incrementor {
double weight = 1.0;
public WeightedIncrementor(double wt) {
this.weight = wt;
}
public void incrementFinalState(Transducer.State s, double count) {
}
public void incrementInitialState(Transducer.State s, double count) {
initialEstimator.increment(s.getName(), weight * count);
}
public void incrementTransition(Transducer.TransitionIterator ti,
double count) {
int inputFtr = (Integer) ti.getInput();
State src = (HMM.State) ((TransitionIterator) ti).getSourceState();
State dest = (HMM.State) ((TransitionIterator) ti)
.getDestinationState();
int index = ti.getIndex();
emissionEstimator[index].increment(inputFtr, weight * count);
transitionEstimator[src.getIndex()].increment(dest.getName(),
weight * count);
}
}
public void write(File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream(
new FileOutputStream(f));
oos.writeObject(this);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
private double[] getUniformArray(int size) {
double[] ret = new double[size];
for (int i = 0; i < size; i++)
// gsc: removing unnecessary cast from 'size'
ret[i] = 1.0 / size;
return ret;
}
// kedarb: p[i] = (1+random)^noise/sum
private double[] getRandomArray(int size, Random random, double noise) {
double[] ret = new double[size];
double sum = 0;
for (int i = 0; i < size; i++) {
ret[i] = random == null ? 1.0 : Math.pow(1.0 + random.nextDouble(),
noise);
sum += ret[i];
}
for (int i = 0; i < size; i++)
ret[i] /= sum;
return ret;
}
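	// Illustrative example: with size = 2 and random draws 0.2 and 0.8,
	// noise = 0 gives p = {0.5, 0.5} (uniform), while noise = 2 gives
	// unnormalized values {1.44, 3.24}, i.e. p ~= {0.31, 0.69}.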
// Serialization
// For HMM class
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 1;
static final int NULL_INTEGER = -1;
/* Need to check for null pointers. */
/* Bug fix from Cheng-Ju Kuo [email protected] */
private void writeObject(ObjectOutputStream out) throws IOException {
int i, size;
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeObject(inputPipe);
out.writeObject(outputPipe);
out.writeObject(inputAlphabet);
out.writeObject(outputAlphabet);
size = states.size();
out.writeInt(size);
for (i = 0; i < size; i++)
out.writeObject(states.get(i));
size = initialStates.size();
out.writeInt(size);
for (i = 0; i < size; i++)
out.writeObject(initialStates.get(i));
out.writeObject(name2state);
if (emissionEstimator != null) {
size = emissionEstimator.length;
out.writeInt(size);
for (i = 0; i < size; i++)
out.writeObject(emissionEstimator[i]);
} else
out.writeInt(NULL_INTEGER);
if (emissionMultinomial != null) {
size = emissionMultinomial.length;
out.writeInt(size);
for (i = 0; i < size; i++)
out.writeObject(emissionMultinomial[i]);
} else
out.writeInt(NULL_INTEGER);
if (transitionEstimator != null) {
size = transitionEstimator.length;
out.writeInt(size);
for (i = 0; i < size; i++)
out.writeObject(transitionEstimator[i]);
} else
out.writeInt(NULL_INTEGER);
if (transitionMultinomial != null) {
size = transitionMultinomial.length;
out.writeInt(size);
for (i = 0; i < size; i++)
out.writeObject(transitionMultinomial[i]);
} else
out.writeInt(NULL_INTEGER);
}
/* Bug fix from Cheng-Ju Kuo [email protected] */
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
int size, i;
int version = in.readInt();
inputPipe = (Pipe) in.readObject();
outputPipe = (Pipe) in.readObject();
inputAlphabet = (Alphabet) in.readObject();
outputAlphabet = (Alphabet) in.readObject();
size = in.readInt();
states = new ArrayList();
for (i = 0; i < size; i++) {
State s = (HMM.State) in.readObject();
states.add(s);
}
size = in.readInt();
initialStates = new ArrayList();
for (i = 0; i < size; i++) {
State s = (HMM.State) in.readObject();
initialStates.add(s);
}
name2state = (HashMap) in.readObject();
size = in.readInt();
if (size == NULL_INTEGER) {
emissionEstimator = null;
} else {
emissionEstimator = new Multinomial.Estimator[size];
for (i = 0; i < size; i++) {
emissionEstimator[i] = (Multinomial.Estimator) in.readObject();
}
}
size = in.readInt();
if (size == NULL_INTEGER) {
emissionMultinomial = null;
} else {
emissionMultinomial = new Multinomial[size];
for (i = 0; i < size; i++) {
emissionMultinomial[i] = (Multinomial) in.readObject();
}
}
size = in.readInt();
if (size == NULL_INTEGER) {
transitionEstimator = null;
} else {
transitionEstimator = new Multinomial.Estimator[size];
for (i = 0; i < size; i++) {
transitionEstimator[i] = (Multinomial.Estimator) in
.readObject();
}
}
size = in.readInt();
if (size == NULL_INTEGER) {
transitionMultinomial = null;
} else {
transitionMultinomial = new Multinomial[size];
for (i = 0; i < size; i++) {
transitionMultinomial[i] = (Multinomial) in.readObject();
}
}
}
public static class State extends Transducer.State implements Serializable {
// Parameters indexed by destination state, feature index
String name;
int index;
double initialWeight, finalWeight;
String[] destinationNames;
State[] destinations;
String[] labels;
HMM hmm;
// No arg constructor so serialization works
protected State() {
super();
}
protected State(String name, int index, double initialWeight,
double finalWeight, String[] destinationNames,
String[] labelNames, HMM hmm) {
super();
assert (destinationNames.length == labelNames.length);
this.name = name;
this.index = index;
this.initialWeight = initialWeight;
this.finalWeight = finalWeight;
this.destinationNames = new String[destinationNames.length];
this.destinations = new State[labelNames.length];
this.labels = new String[labelNames.length];
this.hmm = hmm;
for (int i = 0; i < labelNames.length; i++) {
// Make sure this label appears in our output Alphabet
hmm.outputAlphabet.lookupIndex(labelNames[i]);
this.destinationNames[i] = destinationNames[i];
this.labels[i] = labelNames[i];
}
}
public Transducer getTransducer() {
return hmm;
}
public double getFinalWeight() {
return finalWeight;
}
public double getInitialWeight() {
return initialWeight;
}
public void setFinalWeight(double c) {
finalWeight = c;
}
public void setInitialWeight(double c) {
initialWeight = c;
}
public void print() {
System.out.println("State #" + index + " \"" + name + "\"");
System.out.println("initialWeight=" + initialWeight
+ ", finalWeight=" + finalWeight);
System.out.println("#destinations=" + destinations.length);
for (int i = 0; i < destinations.length; i++)
System.out.println("-> " + destinationNames[i]);
}
public State getDestinationState(int index) {
State ret;
if ((ret = destinations[index]) == null) {
ret = destinations[index] = (State) hmm.name2state
.get(destinationNames[index]);
assert (ret != null) : index;
}
return ret;
}
public Transducer.TransitionIterator transitionIterator(
Sequence inputSequence, int inputPosition,
Sequence outputSequence, int outputPosition) {
if (inputPosition < 0 || outputPosition < 0)
throw new UnsupportedOperationException(
"Epsilon transitions not implemented.");
if (inputSequence == null)
throw new UnsupportedOperationException(
"HMMs are generative models; but this is not yet implemented.");
if (!(inputSequence instanceof FeatureSequence))
throw new UnsupportedOperationException(
"HMMs currently expect Instances to have FeatureSequence data");
return new TransitionIterator(this,
(FeatureSequence) inputSequence, inputPosition,
(outputSequence == null ? null : (String) outputSequence
.get(outputPosition)), hmm);
}
public String getName() {
return name;
}
public int getIndex() {
return index;
}
public void incrementInitialCount(double count) {
}
public void incrementFinalCount(double count) {
}
// Serialization
// For class State
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject(ObjectOutputStream out) throws IOException {
int i, size;
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeObject(name);
out.writeInt(index);
size = (destinationNames == null) ? NULL_INTEGER
: destinationNames.length;
out.writeInt(size);
if (size != NULL_INTEGER) {
for (i = 0; i < size; i++) {
out.writeObject(destinationNames[i]);
}
}
size = (destinations == null) ? NULL_INTEGER : destinations.length;
out.writeInt(size);
if (size != NULL_INTEGER) {
for (i = 0; i < size; i++) {
out.writeObject(destinations[i]);
}
}
size = (labels == null) ? NULL_INTEGER : labels.length;
out.writeInt(size);
if (size != NULL_INTEGER) {
for (i = 0; i < size; i++)
out.writeObject(labels[i]);
}
out.writeObject(hmm);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
int size, i;
int version = in.readInt();
name = (String) in.readObject();
index = in.readInt();
size = in.readInt();
if (size != NULL_INTEGER) {
destinationNames = new String[size];
for (i = 0; i < size; i++) {
destinationNames[i] = (String) in.readObject();
}
} else {
destinationNames = null;
}
size = in.readInt();
if (size != NULL_INTEGER) {
destinations = new State[size];
for (i = 0; i < size; i++) {
destinations[i] = (State) in.readObject();
}
} else {
destinations = null;
}
size = in.readInt();
if (size != NULL_INTEGER) {
labels = new String[size];
for (i = 0; i < size; i++)
labels[i] = (String) in.readObject();
// inputAlphabet = (Alphabet) in.readObject();
// outputAlphabet = (Alphabet) in.readObject();
} else {
labels = null;
}
hmm = (HMM) in.readObject();
}
}
protected static class TransitionIterator extends
Transducer.TransitionIterator implements Serializable {
State source;
int index, nextIndex, inputPos;
double[] weights; // -logProb
// Eventually change this because we will have a more space-efficient
// FeatureVectorSequence that cannot break out each FeatureVector
FeatureSequence inputSequence;
Integer inputFeature;
HMM hmm;
public TransitionIterator(State source, FeatureSequence inputSeq,
int inputPosition, String output, HMM hmm) {
this.source = source;
this.hmm = hmm;
this.inputSequence = inputSeq;
this.inputFeature = new Integer(inputSequence
.getIndexAtPosition(inputPosition));
this.inputPos = inputPosition;
this.weights = new double[source.destinations.length];
for (int transIndex = 0; transIndex < source.destinations.length; transIndex++) {
if (output == null || output.equals(source.labels[transIndex])) {
weights[transIndex] = 0;
// xxx should this be emission of the _next_ observation?
// double logEmissionProb =
// hmm.emissionMultinomial[source.getIndex()].logProbability
// (inputSeq.get (inputPosition));
int destIndex = source.getDestinationState(transIndex).getIndex();
double logEmissionProb = hmm.emissionMultinomial[destIndex]
.logProbability(inputSeq.get(inputPosition));
double logTransitionProb = hmm.transitionMultinomial[source
.getIndex()]
.logProbability(source.destinationNames[transIndex]);
// weight = logProbability
weights[transIndex] = (logEmissionProb + logTransitionProb);
assert (!Double.isNaN(weights[transIndex]));
} else
weights[transIndex] = IMPOSSIBLE_WEIGHT;
}
nextIndex = 0;
while (nextIndex < source.destinations.length
&& weights[nextIndex] == IMPOSSIBLE_WEIGHT)
nextIndex++;
}
public boolean hasNext() {
return nextIndex < source.destinations.length;
}
public Transducer.State nextState() {
assert (nextIndex < source.destinations.length);
index = nextIndex;
nextIndex++;
while (nextIndex < source.destinations.length
&& weights[nextIndex] == IMPOSSIBLE_WEIGHT)
nextIndex++;
return source.getDestinationState(index);
}
public int getIndex() {
return index;
}
/*
* Returns an Integer object containing the feature index of the symbol
* at this position in the input sequence.
*/
public Object getInput() {
return inputFeature;
}
// public int getInputPosition () { return inputPos; }
public Object getOutput() {
return source.labels[index];
}
public double getWeight() {
return weights[index];
}
public Transducer.State getSourceState() {
return source;
}
public Transducer.State getDestinationState() {
return source.getDestinationState(index);
}
// Serialization
// TransitionIterator
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeObject(source);
out.writeInt(index);
out.writeInt(nextIndex);
out.writeInt(inputPos);
if (weights != null) {
out.writeInt(weights.length);
for (int i = 0; i < weights.length; i++) {
out.writeDouble(weights[i]);
}
} else {
out.writeInt(NULL_INTEGER);
}
out.writeObject(inputSequence);
out.writeObject(inputFeature);
out.writeObject(hmm);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
int version = in.readInt();
source = (State) in.readObject();
index = in.readInt();
nextIndex = in.readInt();
inputPos = in.readInt();
int size = in.readInt();
if (size == NULL_INTEGER) {
weights = null;
} else {
weights = new double[size];
for (int i = 0; i < size; i++) {
weights[i] = in.readDouble();
}
}
inputSequence = (FeatureSequence) in.readObject();
inputFeature = (Integer) in.readObject();
hmm = (HMM) in.readObject();
}
}
}
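/*
 * Illustrative end-to-end sketch (an assumption, not part of the original
 * source). "trainPipe" is assumed to emit FeatureSequence data and
 * FeatureSequence targets; "trainData" and "testInput" are assumed to exist.
 *
 *   HMM hmm = new HMM(trainPipe, null);
 *   hmm.addFullyConnectedStatesForLabels();
 *   hmm.train(trainData);                            // count-based supervised estimation
 *   Sequence predicted = hmm.transduce(testInput);   // decode a label sequence
 */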
| 39,464 | 32.191758 | 89 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/CRFTrainerByEntropyRegularization.java
|
package cc.mallet.fst.semi_supervised;
import java.util.logging.Logger;
import cc.mallet.fst.CRF;
import cc.mallet.fst.CRFOptimizableByGradientValues;
import cc.mallet.fst.CRFOptimizableByLabelLikelihood;
import cc.mallet.fst.Transducer;
import cc.mallet.fst.TransducerTrainer;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.Optimizer;
import cc.mallet.types.InstanceList;
import cc.mallet.util.MalletLogger;
/**
* A CRF trainer that maximizes the log-likelihood plus
* a weighted entropy regularization term on unlabeled
* data. Intuitively, it aims to make the CRF's predictions
* on unlabeled data more confident.
*
* References:
* Feng Jiao, Shaojun Wang, Chi-Hoon Lee, Russell Greiner, Dale Schuurmans
* "Semi-supervised conditional random fields for improved sequence segmentation and labeling"
* ACL 2006
*
* Gideon Mann, Andrew McCallum
* "Efficient Computation of Entropy Gradient for Semi-Supervised Conditional Random Fields"
* HLT/NAACL 2007
*
* @author Gregory Druck
*/
public class CRFTrainerByEntropyRegularization extends TransducerTrainer implements TransducerTrainer.ByOptimization {
private static Logger logger = MalletLogger.getLogger(CRFTrainerByEntropyRegularization.class.getName());
private static final int DEFAULT_NUM_RESETS = 1;
private static final double DEFAULT_ER_SCALING_FACTOR = 1;
private static final double DEFAULT_GAUSSIAN_PRIOR_VARIANCE = 1;
private boolean converged;
private int iteration;
private double entRegScalingFactor;
private double gaussianPriorVariance;
private CRF crf;
private LimitedMemoryBFGS bfgs;
public CRFTrainerByEntropyRegularization(CRF crf) {
this.crf = crf;
this.iteration = 0;
this.entRegScalingFactor = DEFAULT_ER_SCALING_FACTOR;
this.gaussianPriorVariance = DEFAULT_GAUSSIAN_PRIOR_VARIANCE;
}
public void setGaussianPriorVariance(double variance) {
this.gaussianPriorVariance = variance;
}
/**
* Sets the scaling factor for the entropy regularization term.
* In [Jiao et al. 06], this is gamma.
*
* @param gamma
*/
public void setEntropyWeight(double gamma) {
this.entRegScalingFactor = gamma;
}
@Override
public int getIteration() {
return this.iteration;
}
@Override
public Transducer getTransducer() {
return this.crf;
}
@Override
public boolean isFinishedTraining() {
return this.converged;
}
/*
* This is not used because we require both labeled and unlabeled data.
*/
public boolean train(InstanceList trainingSet, int numIterations) {
throw new RuntimeException("Use train(InstanceList labeled, InstanceList unlabeled, int numIterations) instead.");
}
/**
* Performs CRF training with label likelihood and entropy regularization.
* The CRF is first trained with label likelihood only. This parameter
* setting is used as a starting point for the combined optimization.
*
* @param labeled Labeled data, only used for label likelihood term.
* @param unlabeled Unlabeled data, only used for entropy regularization term.
* @param numIterations Number of iterations.
* @return True if training has converged.
*/
public boolean train(InstanceList labeled, InstanceList unlabeled, int numIterations) {
if (iteration == 0) {
// train with log-likelihood only first
CRFOptimizableByLabelLikelihood likelihood =
new CRFOptimizableByLabelLikelihood(crf, labeled);
likelihood.setGaussianPriorVariance(gaussianPriorVariance);
this.bfgs = new LimitedMemoryBFGS(likelihood);
logger.info ("CRF about to train with "+numIterations+" iterations");
for (int i = 0; i < numIterations; i++) {
try {
converged = bfgs.optimize(1);
iteration++;
logger.info ("CRF finished one iteration of maximizer, i="+i);
runEvaluators();
} catch (IllegalArgumentException e) {
e.printStackTrace();
logger.info ("Catching exception; saying converged.");
converged = true;
} catch (Exception e) {
e.printStackTrace();
logger.info("Catching exception; saying converged.");
converged = true;
}
if (converged) {
logger.info ("CRF training has converged, i="+i);
break;
}
}
iteration = 0;
}
// train with log-likelihood + entropy regularization
CRFOptimizableByLabelLikelihood likelihood = new CRFOptimizableByLabelLikelihood(crf, labeled);
likelihood.setGaussianPriorVariance(gaussianPriorVariance);
CRFOptimizableByEntropyRegularization regularization = new CRFOptimizableByEntropyRegularization(crf, unlabeled);
regularization.setScalingFactor(this.entRegScalingFactor);
CRFOptimizableByGradientValues regLikelihood = new CRFOptimizableByGradientValues(crf,
new Optimizable.ByGradientValue[] { likelihood, regularization} );
this.bfgs = new LimitedMemoryBFGS(regLikelihood);
converged = false;
logger.info ("CRF about to train with "+numIterations+" iterations");
// sometimes resetting the optimizer helps to find
// a better parameter setting
for (int reset = 0; reset < DEFAULT_NUM_RESETS + 1; reset++) {
for (int i = 0; i < numIterations; i++) {
try {
converged = bfgs.optimize (1);
iteration++;
logger.info ("CRF finished one iteration of maximizer, i="+i);
runEvaluators();
} catch (IllegalArgumentException e) {
e.printStackTrace();
logger.info ("Catching exception; saying converged.");
converged = true;
} catch (Exception e) {
e.printStackTrace();
logger.info("Catching exception; saying converged.");
converged = true;
}
if (converged) {
logger.info ("CRF training has converged, i="+i);
break;
}
}
this.bfgs.reset();
}
return converged;
}
public Optimizer getOptimizer() {
return bfgs;
}
}
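/*
 * Illustrative usage sketch (an assumption, not part of the original source);
 * "crf", "labeled" and "unlabeled" are assumed to already exist.
 *
 *   CRFTrainerByEntropyRegularization trainer =
 *       new CRFTrainerByEntropyRegularization(crf);
 *   trainer.setGaussianPriorVariance(10.0);
 *   trainer.setEntropyWeight(0.1);            // gamma in [Jiao et al. 06]
 *   trainer.train(labeled, unlabeled, 500);   // supervised warm start, then combined objective
 */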
| 5,791 | 32.097143 | 118 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GEL2Criteria.java
|
package cc.mallet.fst.semi_supervised;
import java.util.Map;
public class GEL2Criteria extends GECriteria {
public GEL2Criteria(int numStates, StateLabelMap stateLabelMap,
Map<Integer, GECriterion> constraints) {
super(numStates, stateLabelMap, constraints);
}
/**
* Computes sum of GE constraint values. <p>
*
* <b>Note:</b> Label expectations are <b>not</b> re-computed here. If
* desired, then make a call to <tt>calculateLabelExp</tt>.
*/
public double getGEValue() {
double value = 0.0;
for (int fi : constraints.keySet()) {
GECriterion constraint = constraints.get(fi);
if ( constraint.getCount() > 0.0) {
double[] target = constraint.getTarget();
double[] expectation = constraint.getExpectation();
// value due to current constraint
double featureValue = 0.0;
for (int labelIndex = 0; labelIndex < stateLabelMap.getNumLabels(); ++labelIndex) {
featureValue -= Math.pow(target[labelIndex] - expectation[labelIndex],2);
}
assert(!Double.isNaN(featureValue) &&
!Double.isInfinite(featureValue));
value += featureValue * constraint.getWeight();
}
}
return value;
}
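  // Worked example (illustrative): for a binary label set with
  // target = {0.9, 0.1} and expectation = {0.7, 0.3}, featureValue =
  // -((0.2)^2 + (-0.2)^2) = -0.08, so the constraint adds -0.08 * weight
  // to the returned value.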
}
| 1,225 | 30.435897 | 91 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/CRFOptimizableByGECriteria.java
|
package cc.mallet.fst.semi_supervised;
import java.io.Serializable;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.logging.Logger;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.fst.CRF;
import cc.mallet.fst.SumLattice;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.fst.Transducer;
import cc.mallet.optimize.Optimizable;
import cc.mallet.util.MalletLogger;
/**
* GE criteria for training a linear chain CRF.
*
* @author Gaurav Chandalia
* @author Gregory Druck
*/
public class CRFOptimizableByGECriteria implements Optimizable.ByGradientValue,
Serializable {
private static double DEFAULT_GPV = Double.POSITIVE_INFINITY;
private static final long serialVersionUID = 1;
private static Logger logger =
MalletLogger.getLogger(CRFOptimizableByGECriteria.class.getName());
// unlabeled data
protected InstanceList data;
private int numThreads;
private int cachedValueWeightsStamp = -1;
private int cachedGradientWeightsStamp = -1;
// the model
protected CRF crf;
// GE criteria
protected GECriteria geCriteria;
// gradient of GE criteria
protected CRF.Factors gradient;
protected Transducer.Incrementor incrementor;
// GE value
protected double cachedValue;
// GE gradient (double[] form)
protected double[] cachedGradient;
protected double priorVariance;
// thread handler used to create lattices in new threads and update the
// gradient
protected transient LatticeCreationExecutor geLatticeExecutor;
protected transient ThreadPoolExecutor sumLatticeExecutor;
/**
* Initializes the structures.
*
* @param geCriteria GE criteria.
* @param crf Model.
* @param ilist Data used for training.
*/
public CRFOptimizableByGECriteria(GECriteria geCriteria,
CRF crf, InstanceList ilist,
int numThreads) {
this.data = ilist;
this.crf = crf;
this.geCriteria = geCriteria;
// initialize
gradient = new CRF.Factors(crf);
incrementor = gradient.new Incrementor();
cachedValue = 0.0;
cachedGradient = new double[crf.getParameters().getNumFactors()];
priorVariance = DEFAULT_GPV;
geCriteria.setConstraintBits(data, 0, data.size());
this.numThreads = numThreads;
geLatticeExecutor = new LatticeCreationExecutor(numThreads);
sumLatticeExecutor = (ThreadPoolExecutor)Executors.newFixedThreadPool(numThreads);
}
public void shutdown() {
geLatticeExecutor.shutdown();
sumLatticeExecutor.shutdown();
}
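  /*
   * Illustrative wiring sketch (an assumption, not part of the original source);
   * "geCriteria", "crf" and "unlabeledData" are assumed to already exist.
   *
   *   CRFOptimizableByGECriteria optimizable =
   *       new CRFOptimizableByGECriteria(geCriteria, crf, unlabeledData, 4);  // 4 worker threads
   *   Optimizer optimizer = new LimitedMemoryBFGS(optimizable);
   *   optimizer.optimize();
   *   optimizable.shutdown();  // release the thread pools created by the constructor
   */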
public void setGaussianPriorVariance(double priorVariance) {
this.priorVariance = priorVariance;
}
public GECriteria getGECriteria() {
return geCriteria;
}
/**
* Initializes the gradient to zero and re-computes expectations
* for a new iteration. <p>
*
* Also creates the executor to compute the gradient (if not done yet).
*/
public void initialize(Map<Integer, SumLattice> lattices) {
assert(gradient.structureMatches(crf.getParameters()));
gradient.zero();
geLatticeExecutor.initialize();
// compute the expected prior distribution over labels for all feature-label
// pairs (constraints)
geCriteria.calculateExpectations(data, crf, lattices);
}
/**
* Fills gradient from a single instance. <p>
*/
public void computeGradient(FeatureVectorSequence input,
double[][] gammas, double[][][] xis) {
new GELattice(input, gammas, xis, crf, incrementor, geCriteria, false);
}
/**
* Resets, computes and fills gradient from all instances. <p>
*
	 * Analogous to <tt>CRFOptimizableByLabelLikelihood.getExpectationValue</tt>.
*/
public void computeGradient(Map<Integer, SumLattice> lattices) {
this.initialize(lattices);
logger.info("Updating gradient...");
long time = System.currentTimeMillis();
geLatticeExecutor.computeGradient(lattices);
time = (System.currentTimeMillis() - time) / 1000;
logger.info(String.valueOf(time) + " secs.");
}
public double getValue() {
if (crf.getWeightsValueChangeStamp() != cachedValueWeightsStamp) {
// The cached value is not up to date; it was calculated for a different set of CRF weights.
cachedValueWeightsStamp = crf.getWeightsValueChangeStamp();
// compute lattices in multiple threads
ArrayList<Callable<Void>> handlers = new ArrayList<Callable<Void>>();
// number of instances per subset
int numInstancesSubset = data.size() / numThreads;
// range of each subset
int start = -1, end = -1;
for (int i = 0; i < numThreads; ++i) {
// get the indices of subset
if (i == 0) {
start = 0;
end = start + numInstancesSubset;
} else if (i == numThreads - 1) {
start = end;
end = data.size();
} else {
start = end;
end = start + numInstancesSubset;
}
SumLatticeHandler handler = new SumLatticeHandler(start, end);
handlers.add(handler);
}
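      // Illustrative example: data.size() = 10 and numThreads = 3 yields the
      // subsets [0, 3), [3, 6) and [6, 10); the last thread absorbs the remainder.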
// run tasks
try {
sumLatticeExecutor.invokeAll(handlers);
} catch (InterruptedException e) {
e.printStackTrace();
}
// combine lattices from multiple threads
HashMap<Integer,SumLattice> lattices = new HashMap<Integer,SumLattice>();
for (Callable<Void> handler : handlers) {
lattices.putAll(((SumLatticeHandler)handler).getLattices());
}
computeGradient(lattices);
cachedValue = geCriteria.getGEValue();
if (priorVariance != Double.POSITIVE_INFINITY) {
cachedValue += crf.getParameters().gaussianPrior(priorVariance);
}
assert(!Double.isNaN(cachedValue) && !Double.isInfinite(cachedValue))
: "Likelihood due to GE criteria is NaN/Infinite";
logger.info("getValue() (GE) = " + cachedValue);
}
return cachedValue;
}
public void getValueGradient(double[] buffer) {
if (cachedGradientWeightsStamp != crf.getWeightsValueChangeStamp()) {
cachedGradientWeightsStamp = crf.getWeightsValueChangeStamp();
getValue();
gradient.assertNotNaNOrInfinite();
// fill up gradient
cachedGradient = new double[cachedGradient.length];
if (priorVariance != Double.POSITIVE_INFINITY) {
gradient.plusEqualsGaussianPriorGradient(crf.getParameters(), -priorVariance);
}
gradient.getParameters(cachedGradient);
MatrixOps.timesEquals(cachedGradient, -1.0);
}
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
public void printGradientAbsNorm() {
logger.info("absNorm(GE Gradient): " + gradient.getParametersAbsNorm());
}
// some get/set methods that have to be implemented
public int getNumParameters() {
return crf.getParameters().getNumFactors();
}
public void getParameters(double[] buffer) {
crf.getParameters().getParameters(buffer);
}
public void setParameters(double[] buffer) {
crf.getParameters().setParameters(buffer);
crf.weightsValueChanged();
}
public double getParameter(int index) {
return crf.getParameters().getParameter(index);
}
public void setParameter(int index, double value) {
crf.getParameters().setParameter(index, value);
crf.weightsValueChanged();
}
/**
* Computes GE gradient. <p>
*
* Uses multi-threading, each thread does computations for a subset of the
* data. To reduce sharing, each thread has its own CRF.Factors structure for
   * gradient. The final gradient is obtained by combining all
   * subset gradients.
*/
private class LatticeCreationExecutor {
// key: instance index, value: Lattice
private Map<Integer, SumLattice> lattices;
// number of data subsets == number of threads
private int numSubsets;
// gradient obtained from subsets of data
private List<FactorsIncrementorPair> mtGradient;
// thread pool to create lattices and update gradient
private ThreadPoolExecutor executor;
// milliseconds
public static final int SLEEP_TIME = 1000;
// key: unique integer identifying a thread running forward-backward, the
// respective thread sets the bit when its computation is over, range: (0,
// numSubsets - 1)
private BitSet threadIds;
/**
* Initializes the executor with specified number of threads.
*/
public LatticeCreationExecutor(int numThreads) {
lattices = null;
numSubsets = numThreads;
mtGradient = new ArrayList<FactorsIncrementorPair>(numSubsets);
// initialize from the main gradients object
for (int i = 0; i < numSubsets; ++i) {
mtGradient.add(new FactorsIncrementorPair(gradient));
}
logger.info("Creating " + numSubsets +
" threads for updating gradient...");
executor =
(ThreadPoolExecutor) Executors.newFixedThreadPool(numSubsets);
threadIds = new BitSet(numSubsets);
}
/**
* Initializes each thread's gradient to zero.
*/
public void initialize() {
for (int i = 0; i < mtGradient.size(); ++i) {
FactorsIncrementorPair exp = mtGradient.get(i);
exp.subsetFactors.zero();
}
}
/**
* Computes GE lattices and fills the gradient for all instances, using the supplied sum lattices. <p>
*/
public void computeGradient(Map<Integer, SumLattice> lattices) {
this.lattices = lattices;
threadIds.clear();
// number of instances per subset
int numInstancesSubset = data.size() / numSubsets;
// range of each subset
int start = -1, end = -1;
for (int i = 0; i < numSubsets; ++i) {
// get the indices of subset
if (i == 0) {
start = 0;
end = start + numInstancesSubset;
} else if (i == numSubsets - 1) {
start = end;
end = data.size();
} else {
start = end;
end = start + numInstancesSubset;
}
executor.execute(new LatticeHandler(i, start, end));
}
// wait till all threads finish
int numSetBits = 0;
while (numSetBits != numSubsets) {
synchronized(this) {
numSetBits = threadIds.cardinality();
}
try {
Thread.sleep(SLEEP_TIME);
} catch (InterruptedException ie) {
ie.printStackTrace();
System.exit(1);
}
}
// update main gradient
this.updateGradient();
// release the cached lattices once the gradient has been updated
this.lattices = null;
}
/**
* Aggregates all subset gradients into the main gradient object.
*/
private void updateGradient() {
for (int i = 0; i < mtGradient.size(); ++i) {
CRF.Factors subsetGradient = mtGradient.get(i).subsetFactors;
gradient.plusEquals(subsetGradient, 1.0);
}
}
public void shutdown() {
executor.shutdown();
}
/**
* Runs forward-backward for a subset of data in a new thread, uses
* subset-specific incrementor.
*/
private class LatticeHandler implements Runnable {
// index to determine which incrementor to use
private int index;
// start, end indices of subset of data
private int start;
private int end;
/**
* Initializes the indices.
*/
public LatticeHandler(int index, int startIndex, int endIndex) {
this.index = index;
this.start = startIndex;
this.end = endIndex;
}
/**
* Creates lattice, updates gradient.
*/
public void run() {
Transducer.Incrementor incrementor =
mtGradient.get(index).subsetIncrementor;
BitSet constraintBits = geCriteria.getConstraintBits();
for (int i = start; i < end; ++i) {
// skip if the instance doesn't have any constraints
if (!constraintBits.get(i)) {
continue;
}
FeatureVectorSequence fvs =
(FeatureVectorSequence) data.get(i).getData();
SumLattice lattice = lattices.get(i);
assert(lattice != null)
: "Lattice is null:: " + i + ", size: " + lattices.size();
new GELattice(
fvs, lattice.getGammas(), lattice.getXis(), crf, incrementor,
geCriteria, false);
}
synchronized(LatticeCreationExecutor.this) {
threadIds.set(index);
}
}
}
}
/**
* Computes the sum lattices (forward-backward) for a subset of the data in a
* separate thread; the lattices are collected afterwards via getLattices().
*/
private class SumLatticeHandler implements Callable<Void> {
// start, end indices of subset of data
private int start;
private int end;
private HashMap<Integer,SumLatticeDefault> lattices;
/**
* Initializes the indices.
*/
public SumLatticeHandler(int startIndex, int endIndex) {
this.start = startIndex;
this.end = endIndex;
this.lattices = new HashMap<Integer,SumLatticeDefault>();
}
public HashMap<Integer,SumLatticeDefault> getLattices() {
return lattices;
}
public Void call() throws Exception {
BitSet constraintBits = geCriteria.getConstraintBits();
for (int ii = start; ii < end; ii++) {
if (!constraintBits.get(ii)) {
continue;
}
FeatureVectorSequence fvs = (FeatureVectorSequence)data.get(ii).getData();
lattices.put(ii, new SumLatticeDefault(crf,fvs,true));
}
return null;
}
}
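/**
 * Pairs a per-thread copy of the model factors with an incrementor that
 * writes into that copy, so worker threads can accumulate gradient
 * contributions without synchronizing on the shared gradient; the copies
 * are merged back in by LatticeCreationExecutor.updateGradient().
 */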
private class FactorsIncrementorPair {
// model's Factors from a subset of data
public CRF.Factors subsetFactors;
public Transducer.Incrementor subsetIncrementor;
/**
* Initialize Factors using the structure of main Factors object.
*/
public FactorsIncrementorPair(CRF.Factors other) {
subsetFactors = new CRF.Factors(other);
subsetIncrementor = subsetFactors.new Incrementor();
}
}
}
| 14,179 | 29.560345 | 96 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GEKLCriteria.java
|
package cc.mallet.fst.semi_supervised;
import java.util.Map;
public class GEKLCriteria extends GECriteria {
public GEKLCriteria(int numStates, StateLabelMap stateLabelMap,
Map<Integer, GECriterion> constraints) {
super(numStates, stateLabelMap, constraints);
}
/**
* Computes sum of GE constraint values. <p>
*
* <b>Note:</b> Label expectations are <b>not</b> re-computed here. If
* desired, call <tt>calculateExpectations</tt> first.
*/
public double getGEValue() {
double value = 0.0;
for (int fi : constraints.keySet()) {
GECriterion constraint = constraints.get(fi);
if ( constraint.getCount() > 0.0) {
double[] target = constraint.getTarget();
double[] expectation = constraint.getExpectation();
// value due to current constraint
double featureValue = 0.0;
for (int labelIndex = 0; labelIndex < stateLabelMap.getNumLabels();
++labelIndex) {
if (expectation[labelIndex] > 0.0 && target[labelIndex] > 0.0) {
// p*log(q) - p*log(p)
featureValue +=
target[labelIndex] * Math.log(expectation[labelIndex]) -
target[labelIndex] * Math.log(target[labelIndex]);
}
}
assert(!Double.isNaN(featureValue) &&
!Double.isInfinite(featureValue));
value += featureValue * constraint.getWeight();
}
}
return value;
}
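/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a stand-alone version of the per-constraint score summed above.  The target
 * and expectation values are made-up numbers; with target = {0.9, 0.1} and
 * expectation = {0.7, 0.3} the result is the negative KL divergence
 * KL(target || expectation), roughly -0.12.
 */
private static double exampleNegativeKLSketch() {
double[] target = new double[] {0.9, 0.1};
double[] expectation = new double[] {0.7, 0.3};
double value = 0.0;
for (int li = 0; li < target.length; ++li) {
if (expectation[li] > 0.0 && target[li] > 0.0) {
// p*log(q) - p*log(p), matching getGEValue() above
value += target[li] * Math.log(expectation[li]) -
target[li] * Math.log(target[li]);
}
}
return value;
}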
}
| 1,439 | 30.304348 | 75 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GELattice.java
|
package cc.mallet.fst.semi_supervised;
import java.util.BitSet;
import java.util.Iterator;
import cc.mallet.fst.Transducer;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.MatrixOps;
/**
* Runs the dynamic programming algorithm of [Mann and McCallum 08] for
* computing the gradient of a Generalized Expectation constraint that
* considers a single label of a linear chain CRF.
*
* See:
* "Generalized Expectation Criteria for Semi-Supervised Learning of Conditional Random Fields"
* Gideon Mann and Andrew McCallum
* ACL 2008
*
* @author Gregory Druck
* @author Gaurav Chandalia
*/
public class GELattice {
// input length + 1
protected int latticeLength;
// the model
protected Transducer transducer;
// number of states in the FST
protected int numStates;
// dynamic programming lattice
protected LatticeNode[][] lattice;
// cache for the gradient contribution of each transition: [input position][source state][destination state]
protected double[][][] transGradientCache;
public GELattice(
FeatureVectorSequence fvs, double[][] gammas, double[][][] xis,
Transducer transducer, Transducer.Incrementor incrementor,
GECriteria geCriteria, boolean check) {
assert(incrementor != null);
latticeLength = fvs.size() + 1;
this.transducer = transducer;
numStates = transducer.numStates();
// lattice
lattice = new LatticeNode[latticeLength][numStates];
for (int ip = 0; ip < latticeLength; ++ip) {
for (int a = 0; a < numStates; ++a) {
lattice[ip][a] = new LatticeNode();
}
}
transGradientCache = new double[latticeLength][numStates][numStates];
StateLabelMap stateLabelMap = geCriteria.getStateLabelMap();
int numLabels = stateLabelMap.getNumLabels();
BitSet constraintBits = geCriteria.getConstraintBitsForInstance(fvs);
Iterator<Integer> iter = geCriteria.getFeatureIndexIterator();
while (iter.hasNext()) {
int fi = iter.next();
GECriterion constraint = geCriteria.getConstraint(fi);
// skip if the instance doesn't have this constraint
if (!constraintBits.get(fi)) {
continue;
}
// weight of this constraint
double constraintGamma = constraint.getWeight();
// model expectations over labels for feature
double[] expectation = constraint.getExpectation();
if (!MatrixOps.isNonZero(expectation)) {
// if label expectations are zero, then this instance won't contribute
// anything to the weights
return;
}
// model expectation over labels for this instance
double[] labelExpInstance =
geCriteria.getExpectationForInstance(fi, fvs, gammas);
// target distribution over labels for feature
double[] target = constraint.getTarget();
for (int li = 0; li < numLabels; ++li) {
// only compute the lattice
// if the target expectation is greater than 0
if (constraint instanceof GEL2Criterion ||
(expectation[li] > 0.0 && target[li] > 0.0)) {
// create one lattice for this feature-label constraint,
// run dynamic programming
this.initLattice();
this.runForward(stateLabelMap, gammas, xis, li, fi, fvs);
this.runBackward(stateLabelMap, gammas, xis, li, fi, fvs);
// used to weight the contribution of this feature-label pair to the gradient
double targetModelExpRatio = constraint.getGradientConstant(li);
this.updateGradientCache(fi, constraintGamma, gammas, xis, fvs,
targetModelExpRatio, labelExpInstance[li],
incrementor);
if (check) {
// check if lattice computations are correct
this.check(gammas, xis, li, fi, fvs);
}
}
}
}
// update gradient using cache
updateGradient(fvs,incrementor);
}
/**
* Initialize the lattice (for a particular feature-label constraint), lattice
* should have already been created.
*/
private final void initLattice() {
// ip: input position, si: state index
for (int ip = 0; ip < latticeLength; ++ip) {
for (int a = 0; a < numStates; ++a) {
LatticeNode node = lattice[ip][a];
for (int b = 0; b < numStates; ++b) {
node.alpha[b] = Transducer.IMPOSSIBLE_WEIGHT;
node.beta[b] = Transducer.IMPOSSIBLE_WEIGHT;
}
}
}
}
private void runForward(StateLabelMap stateLabelMap, double[][] gammas,
double[][][] xis, int li, int fi, FeatureVectorSequence fvs) {
double featureValue;
for (int ip = 1; ip < latticeLength; ++ip) {
featureValue = logValueOfIndicatorFeature(fvs, fi, ip-1);
for (int prevState = 0; prevState < numStates; ++prevState) {
// calculate only once: \sum_y_{i-1} w_a(y_{i-1},y_i)
double nuAlpha = Transducer.IMPOSSIBLE_WEIGHT;
for (int prevPrevState = 0; prevPrevState < numStates; ++prevPrevState) {
nuAlpha = Transducer.sumLogProb(nuAlpha, lattice[ip - 1][prevPrevState].alpha[prevState]);
}
assert (!Double.isNaN(nuAlpha));
LatticeNode node = lattice[ip][prevState];
double[] xi = xis[ip][prevState];
double gamma = gammas[ip][prevState];
for (int currState = 0; currState < numStates; ++currState) {
node.alpha[currState] = Transducer.IMPOSSIBLE_WEIGHT;
if (stateLabelMap.getLabelIndex(prevState) == li) {
node.alpha[currState] = Transducer.sumLogProb(node.alpha[currState], xi[currState] + featureValue);
}
if (gamma == Transducer.IMPOSSIBLE_WEIGHT) {
node.alpha[currState] = Transducer.IMPOSSIBLE_WEIGHT;
} else {
node.alpha[currState] = Transducer.sumLogProb(node.alpha[currState], nuAlpha
+ xi[currState] - gamma);
}
assert (!Double.isNaN(node.alpha[currState])) : "xi: " + xi[currState] + ", gamma: "
+ gamma + ", log(indicatorFeat): " + featureValue
+ ", nuApha: " + nuAlpha;
}
}
}
}
private void runBackward(StateLabelMap stateLabelMap,
double[][] gammas, double[][][] xis,
int li, int fi, FeatureVectorSequence fvs) {
double featureValue;
for (int ip = latticeLength-2; ip >= 0; --ip) {
featureValue = logValueOfIndicatorFeature(fvs, fi, ip);
for (int currState = 0; currState < numStates; ++currState) {
// calculate only once: \sum_y_{i+1} w_b(y_i,y_{i+1})
double nuBeta = Transducer.IMPOSSIBLE_WEIGHT;
for (int nextState = 0; nextState < numStates; ++nextState){
nuBeta = Transducer.sumLogProb(nuBeta,
lattice[ip+1][currState].beta[nextState]);
}
assert(!Double.isNaN(nuBeta));
double gamma = gammas[ip+1][currState];
for (int prevState = 0; prevState < numStates; ++prevState) {
LatticeNode node = lattice[ip][prevState];
double xi = xis[ip][prevState][currState];
node.beta[currState] = Transducer.IMPOSSIBLE_WEIGHT;
if (stateLabelMap.getLabelIndex(currState) == li) {
node.beta[currState] = Transducer.sumLogProb(node.beta[currState], xi + featureValue);
}
if (gamma == Transducer.IMPOSSIBLE_WEIGHT) {
node.beta[currState] = Transducer.IMPOSSIBLE_WEIGHT;
} else {
node.beta[currState] = Transducer.sumLogProb(node.beta[currState], nuBeta + xi - gamma);
}
assert(!Double.isNaN(node.beta[currState]))
: "xi: " + xi + ", gamma: " + gamma + ", xi: " + xi +
", log(indicatorFeat): " + featureValue;
}
}
}
}
/**
* Caches gradient contributions with respect to a single instance and constraint.
*
* @param priorExpRatio labelPrior / labelExpectations.
* @param labelExpInstance Label expectation value due to this instance.
*/
private void updateGradientCache(double fi, double featureGamma, double[][] gammas,
double[][][] xis, FeatureVectorSequence fvs, double priorExpRatio, double labelExpInstance,
Transducer.Incrementor incrementor) {
for (int ip = 0; ip < latticeLength-1; ++ip) {
for (int prevState = 0; prevState < numStates; ++prevState) {
LatticeNode node = lattice[ip][prevState];
double[] xi = xis[ip][prevState];
for (int currState = 0; currState < numStates; ++currState) {
double covFirstTerm = Math.exp(node.alpha[currState]) + Math.exp(node.beta[currState]);
double transProb = Math.exp(xi[currState]);
double contribution = - priorExpRatio * (covFirstTerm - (transProb * labelExpInstance));
transGradientCache[ip][prevState][currState] += featureGamma * contribution;
}
}
}
}
/**
* Updates the gradient due to a single instance and all constraints.
* This saves re-computing the dot product multiple times in
* TransitionIterator.
*
* @param fvs FeatureVectorSequence
* @param incrementor Incrementor used to add the cached contributions to the gradient.
*/
private void updateGradient(FeatureVectorSequence fvs, Transducer.Incrementor incrementor) {
for (int ip = 0; ip < latticeLength-1; ++ip) {
for (int currState = 0; currState < numStates; ++currState) {
Transducer.State state = transducer.getState(currState);
Transducer.TransitionIterator iter = state.transitionIterator(fvs, ip, null, ip);
while (iter.hasNext()) {
int nextState = iter.next().getIndex();
incrementor.incrementTransition(iter, transGradientCache[ip][currState][nextState]);
}
}
}
}
/**
* Returns indicator value of feature at specified position in logspace. <p>
*
* Returns: <tt>0.0</tt> for <tt>log(1)</tt>,
* <tt>Transducer.IMPOSSIBLE_WEIGHT</tt> for <tt>log(0)</tt>.
*/
public final static double logValueOfIndicatorFeature(
FeatureVectorSequence fvs, int fi, int ip) {
if ((ip < 0) || (ip >= fvs.size())) {
return Transducer.IMPOSSIBLE_WEIGHT;
} else if (fvs.getFeatureVector(ip).value(fi) > 0.0) {
// log(1)
return 0.0;
} else {
// log(0)
return Transducer.IMPOSSIBLE_WEIGHT;
}
}
/**
* Verifies the correctness of the lattice computations.
*/
public void check(double[][] gammas, double[][][] xis, int li, int fi, FeatureVectorSequence fvs) {
// sum of marginal probabilities
double marginalProb = 0.0;
for (int ip = 0; ip < latticeLength-1; ++ip) {
double prob = Math.exp(gammas[ip+1][li] + logValueOfIndicatorFeature(fvs, fi, ip));
marginalProb += prob;
}
double altMarginalProb = 0.0;
for (int ip = 0; ip < latticeLength-1; ++ip) {
double joint = 0.0;
for (int s1 = 0; s1 < numStates; ++s1) {
LatticeNode node = lattice[ip][s1];
for (int s2 = 0; s2 < numStates; ++s2) {
joint += Math.exp(node.alpha[s2]) + Math.exp(node.beta[s2]);
}
}
// should be equal to marginal prob.
assert(Math.abs(marginalProb - joint) < 1e-6);
altMarginalProb += joint;
}
altMarginalProb = altMarginalProb / (latticeLength - 1);
// should be equal to marginal prob.
assert(Math.abs(marginalProb - altMarginalProb) < 1e-6);
}
/**
* Contains forward-backward vectors corresponding to an input position and a
* state index.
*/
protected class LatticeNode {
// ip -> input position, a vector of doubles since for each node we need to
// keep track of the alpha, beta values of state@(ip+1)
protected double[] alpha;
protected double[] beta;
public LatticeNode() {
alpha = new double[numStates];
beta = new double[numStates];
for (int si = 0; si < numStates; ++si) {
alpha[si] = Transducer.IMPOSSIBLE_WEIGHT;
beta[si] = Transducer.IMPOSSIBLE_WEIGHT;
}
}
}
}
| 11,971 | 36.065015 | 111 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/EntropyLattice.java
|
package cc.mallet.fst.semi_supervised;
import cc.mallet.fst.Transducer;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.util.Maths;
/**
* Runs subsequence constrained forward-backward to compute the entropy of label
* sequences. <p>
*
* Reference:
* Gideon Mann, Andrew McCallum
* "Efficient Computation of Entropy Gradient for Semi-Supervised Conditional Random Fields"
* HLT/NAACL 2007
*
* @author Gideon Mann
* @author Gaurav Chandalia
* @author Gregory Druck
*/
public class EntropyLattice {
// input_sequence_size + 1
protected int latticeLength;
// input_sequence_size
protected int inputLength;
// the model
protected Transducer transducer;
// number of states in the lattice (or the model's finite state machine)
protected int numStates;
// ip: input position, each node has a forward and backward factor used in the
// forward-backward algorithm, indexed by ip, state@ip (state index / si)
protected LatticeNode[][] nodes;
// subsequence constrained (forward) entropy
protected double entropy;
/**
* Runs constrained forward-backward. <p>
*
* If <tt>incrementor</tt> is null then do not update expectations due to
* these computations. <p>
*
* The contribution of entropy to the expectations is multiplied by the
* scaling factor.
*/
public EntropyLattice(FeatureVectorSequence fvs, double[][] gammas,
double[][][] xis, Transducer transducer,
Transducer.Incrementor incrementor,
double scalingFactor) {
inputLength = fvs.size();
latticeLength = inputLength + 1;
this.transducer = transducer;
numStates = transducer.numStates();
nodes = new LatticeNode[latticeLength][numStates];
// run forward-backward and compute the entropy
entropy = this.forwardLattice(gammas, xis);
double backwardEntropy = this.backwardLattice(gammas, xis);
assert(Maths.almostEquals(entropy, backwardEntropy)) : entropy + " " + backwardEntropy;
if (incrementor != null) {
// add the entropy to expectations
this.updateCounts(fvs, gammas, xis, scalingFactor, incrementor);
}
}
public double getEntropy() {
return entropy;
}
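/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * one way this lattice might be used to read off the value for a single
 * unlabeled instance, assuming the three-argument SumLatticeDefault
 * constructor used elsewhere in this package.  Passing a null incrementor
 * and a scaling factor of 1.0 means no expectations are updated; only the
 * entropy term is computed.
 */
public static double entropyOfInstanceSketch(Transducer model, FeatureVectorSequence fvs) {
cc.mallet.fst.SumLattice lattice = new cc.mallet.fst.SumLatticeDefault(model, fvs, true);
EntropyLattice entropyLattice = new EntropyLattice(
fvs, lattice.getGammas(), lattice.getXis(), model, null, 1.0);
return entropyLattice.getEntropy();
}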
/**
* Computes the forward entropies (H^alpha).
*/
public double forwardLattice(double[][] gammas, double[][][] xis) {
// initialize entropy of start states to 0
for (int a = 0; a < numStates; ++a) {
this.getLatticeNode(0, a).alpha = 0;
}
for (int ip = 1; ip < latticeLength; ++ip) {
for (int a = 0; a < numStates; ++a) {
// position ip-1 in input sequence, state a
LatticeNode node = this.getLatticeNode(ip, a);
double gamma = gammas[ip][a];
if (gamma > Transducer.IMPOSSIBLE_WEIGHT) {
for (int b = 0; b < numStates; ++b) {
// position ip in input sequence, state a, coming from state b
double xi = xis[ip-1][b][a];
if (xi > Transducer.IMPOSSIBLE_WEIGHT) {
// p(y_{ip-1}=b|y_{ip}=a)
double condProb = Math.exp(xi) / Math.exp(gamma);
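// (xi - gamma) is log p(y_{ip-1}=b | y_{ip}=a), the log of condProb, so
// each term below is p(b|a) * (log p(b|a) + alpha of the predecessor node)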
node.alpha += condProb * ((xi - gamma) +
this.getLatticeNode(ip-1, b).alpha);
}
}
}
}
}
double entropy = 0.0;
for (int a = 0; a < numStates; ++a) {
double gamma = gammas[inputLength][a];
double gammaProb = Math.exp(gamma);
if (gamma > Transducer.IMPOSSIBLE_WEIGHT) {
entropy += gammaProb * gamma;
entropy += gammaProb * this.getLatticeNode(inputLength, a).alpha;
}
}
return entropy;
}
/**
* Computes the backward entropies (H^beta).
*/
public double backwardLattice(double[][] gammas, double[][][] xis) {
// initialize entropy of end states to 0
for (int a = 0; a < numStates; ++a) {
this.getLatticeNode(inputLength, a).beta = 0;
}
for (int ip = inputLength; ip >= 0; --ip) {
for (int a = 0; a < numStates; ++a) {
// position ip-1 in input sequence, state a
LatticeNode node = this.getLatticeNode(ip, a);
double gamma = gammas[ip][a];
if (gamma > Transducer.IMPOSSIBLE_WEIGHT) {
for (int b = 0; b < numStates; ++b) {
// position ip in input sequence, state a
double xi = xis[ip][a][b];
if (xi > Transducer.IMPOSSIBLE_WEIGHT) {
// p(y_{ip}=b|y_{ip-1}=a)
double condProb = Math.exp(xi) / Math.exp(gamma);
node.beta += condProb * ((xi - gamma) +
this.getLatticeNode(ip+1, b).beta);
}
}
}
}
}
double entropy = 0.0;
for (int a = 0; a < numStates; ++a) {
double gamma = gammas[0][a];
double gammaProb = Math.exp(gamma);
if (gamma > Transducer.IMPOSSIBLE_WEIGHT) {
entropy += gammaProb * gamma;
entropy += gammaProb * this.getLatticeNode(0, a).beta;
}
}
return entropy;
}
/**
* Updates the expectations due to the entropy. <p>
*/
private void updateCounts(FeatureVectorSequence fvs, double[][] gammas,
double[][][] xis, double scalingFactor, Transducer.Incrementor incrementor) {
for (int ip = 0; ip < inputLength; ++ip) {
for (int a = 0 ; a < numStates; ++a) {
if (nodes[ip][a] == null) {
continue;
}
Transducer.State sourceState = transducer.getState(a);
Transducer.TransitionIterator iter = sourceState.transitionIterator(fvs, ip, null, ip);
while (iter.hasNext()) {
int b = iter.next().getIndex();
double xi = xis[ip][a][b];
if (xi == Transducer.IMPOSSIBLE_WEIGHT) {
continue;
}
double xiProb = Math.exp(xi);
// This is obtained after substituting and re-arranging the equation
// at the end of the third page of the paper into the equation of
// d/d_theta -H(Y|x) at the end of the second page.
// \sum_(y_i,y_{i+1})
// f_k(y_i,y_{i+1},x) p(y_i, y_{i+1}) *
// (log p(y_i,y_{i+1}) + H^a(Y_{1..(i-1)},y_i) +
// H^b(Y_{(i+2)..T}|y_{i+1}))
double constrEntropy = xiProb * (xi + nodes[ip][a].alpha + nodes[ip+1][b].beta);
assert(constrEntropy <= 0) : "Entropy contribution should be non-positive! " + constrEntropy;
// full covariance, (note: it could be positive *or* negative)
double covContribution = constrEntropy - xiProb * entropy;
assert(!Double.isNaN(covContribution))
: "xi: " + xi + ", nodes[" + ip + "][" + a + "].alpha: " +
nodes[ip][a].alpha + ", nodes[" + (ip+1) + "][" + b +
"].beta: " + nodes[ip+1][b].beta;
incrementor.incrementTransition(iter, covContribution * scalingFactor);
}
}
}
}
public LatticeNode getLatticeNode(int ip, int si) {
if (nodes[ip][si] == null) {
nodes[ip][si] = new LatticeNode(ip, transducer.getState(si));
}
return nodes[ip][si];
}
/**
* Contains alpha, beta values at a particular input position and state pair.
*/
public class LatticeNode {
public int ip;
public Transducer.State state;
public double alpha;
public double beta;
LatticeNode(int ip, Transducer.State state) {
this.ip = ip;
this.state = state;
this.alpha = 0.0;
this.beta = 0.0;
}
}
}
| 7,427 | 32.013333 | 95 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/StateLabelMap.java
|
package cc.mallet.fst.semi_supervised;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import cc.mallet.types.Alphabet;
/**
* Maps states in the lattice to labels. <p>
*
* When a custom state machine is constructed while training a CRF, it is
* possible that several states map to the same label. In this case, there will
* be a discrepancy between the number of states used in the lattice and the
* number of output labels (targets). Use this mapping if such an FST is used in
* training a CRF model. <p>
*
* If the number of states in the lattice is expected to be equal to the number
* of output labels, then set <tt>isOneToOneMap</tt> to <tt>true</tt> in the
* constructor. <p>
*
* This map associates the state with the appropriate label (indexing is zero
* onwards). <p>
*
* <b>Note:</b> Add the states to the map in the same order in which they are
* added to the CRF while constructing the FST. This is necessary to keep a
* correct mapping of the state indices in this map to the state indices used
* within the CRF.
*
* @author Gaurav Chandalia
*/
public class StateLabelMap {
// mapping state names to integers
private Alphabet stateAlphabet;
// mapping label names to integers
private Alphabet labelAlphabet;
// true if a standard FST is used (using one of the methods provided in CRF
// class), in this case the state and label alphabets are the same
private boolean isOneToOneMap;
// key: index identifying a state
// value: index identifying a label that the state maps to in the state
// machine
private HashMap<Integer, Integer> stateToLabel;
// key: index identifying a label
// value: indices of states that are associated with the label
private HashMap<Integer, LinkedHashSet<Integer>> labelToState;
/**
* Initializes the state and label maps.
*
* <b>Note:</b> If a standard FST is used (using one of the methods
* provided in CRF class), the state and label alphabets are the same. In this
* case, there will be a one-to-one mapping between the states and labels.
* Also, the <tt>addStates</tt> method can no longer be used. This is done
* when <tt>isOneToOneMap</tt> is <tt>true</tt>.
*
* @param labelAlphabet Target alphabet that maps label names to integers.
* @param isOneToOneMap True if a one to one mapping of states and labels
* is to be created.
*/
public StateLabelMap(Alphabet labelAlphabet, boolean isOneToOneMap) {
this.labelAlphabet = labelAlphabet;
this.isOneToOneMap = isOneToOneMap;
stateToLabel = new HashMap<Integer, Integer>();
labelToState = new HashMap<Integer, LinkedHashSet<Integer>>();
Iterator<?> labelIter = null;
if (isOneToOneMap) {
// use the same alphabet for state and label
stateAlphabet = labelAlphabet;
labelIter = labelAlphabet.iterator();
while (labelIter.hasNext()) {
String label = (String) labelIter.next();
int labelIndex = labelAlphabet.lookupIndex(label, false);
stateToLabel.put(labelIndex, labelIndex);
LinkedHashSet<Integer> stateIndices = new LinkedHashSet<Integer>();
stateIndices.add(labelIndex);
labelToState.put(labelIndex, stateIndices);
}
} else {
stateAlphabet = new Alphabet();
labelIter = labelAlphabet.iterator();
while (labelIter.hasNext()) {
String label = (String) labelIter.next();
labelToState.put(labelAlphabet.lookupIndex(label, false),
new LinkedHashSet<Integer>());
}
}
}
/**
* Returns <tt>true</tt> if there is a one-to-one mapping between the states
* and labels and <tt>false</tt> otherwise.
*/
public boolean isOneToOneMapping() {
return isOneToOneMap;
}
/**
* Returns the number of labels in the map.
*/
public int getNumLabels() {
return labelToState.size();
}
/**
* Returns the number of states in the map.
*/
public int getNumStates() {
return stateToLabel.size();
}
/**
* Returns the label (target) alphabet.
*/
public Alphabet getLabelAlphabet() {
return labelAlphabet;
}
/**
* Returns the state alphabet.
*/
public Alphabet getStateAlphabet() {
return stateAlphabet;
}
/**
* Returns the label index mapped to the state index.
*
* @param stateIndex State index.
* @return Index of the label that is mapped to the state. Returns <tt>-1</tt>
* if there is no label (index) that maps to the specified state.
*/
public int getLabelIndex(int stateIndex) {
// since no null values are allowed in our map, directly use the get method
Integer labelIndex = stateToLabel.get(stateIndex);
if (labelIndex == null) {
return -1;
}
return labelIndex.intValue();
}
/**
* Returns the state indices that map to the label index.
*
* @param labelIndex Label (target) index.
* @return Indices of the states that map to the label. Returns <tt>null</tt>
* if there are no states that map to the label.
*/
public LinkedHashSet<Integer> getStateIndices(int labelIndex) {
return labelToState.get(labelIndex);
}
/**
* Adds a state to the map.
*
* @param stateName Name of the state.
* @param labelName Label (target) name with which the state is associated.
* @return The index associated with the state that was added.
* @throws IllegalArgumentException If an invalid label name or a duplicate
* state name is provided.
* @throws IllegalStateException If this method is called when there is a
* one-to-one mapping between the states and labels.
*/
public int addState(String stateName, String labelName) {
if (isOneToOneMap)
throw new IllegalStateException("Trying to add a state when there is a " +
"one to one mapping between the states " +
"and labels.");
// get the label index
int labelIndex = labelAlphabet.lookupIndex(labelName, false);
if (labelIndex == -1) {
throw new IllegalArgumentException("Invalid label: " + labelName);
}
// add the state and get its index
int stateIndex = stateAlphabet.lookupIndex(stateName, false);
if (stateIndex != -1) {
throw new IllegalArgumentException("Duplicate state: " + stateName);
}
stateIndex = stateAlphabet.lookupIndex(stateName, true);
// add the indices to the label-state and state-label maps
try {
labelToState.get(labelIndex).add(stateIndex);
} catch (NullPointerException npe) {
// It is possible that a label is never seen in the training data. In that
// case the true number of labels will not be equal to the size of the
// label (target) alphabet until the state with the unseen label is added
// to the label alphabet while constructing the FST; add such a label
// here.
LinkedHashSet<Integer> stateIndices = new LinkedHashSet<Integer>();
stateIndices.add(stateIndex);
labelToState.put(labelIndex, stateIndices);
}
stateToLabel.put(stateIndex, labelIndex);
return stateIndex;
}
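/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * builds a map in which two states share the label "B-PER", assuming a label
 * alphabet that already contains "B-PER" and "O".  The state names are made
 * up; the returned indices are positions in the state alphabet.
 */
private static StateLabelMap exampleMapSketch(Alphabet labelAlphabet) {
StateLabelMap map = new StateLabelMap(labelAlphabet, false);
int s0 = map.addState("B-PER:start", "B-PER");
int s1 = map.addState("B-PER:continue", "B-PER");
map.addState("O:only", "O");
// both s0 and s1 now map to the label index of "B-PER"
assert map.getLabelIndex(s0) == map.getLabelIndex(s1);
return map;
}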
}
| 7,190 | 32.919811 | 80 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GEL2Criterion.java
|
package cc.mallet.fst.semi_supervised;
public class GEL2Criterion extends GECriterion {
public GEL2Criterion(String name, double[] target, double weight) {
super(name, target, weight);
}
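/**
 * Returns the constant used in the gradient for the L2 criterion:
 * 2 * (target - expectation) is the derivative of -(target - expectation)^2
 * with respect to the model expectation, and the division by count matches
 * the normalization of the expectations (cf. GEKLCriterion).
 */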
@Override
protected double getGradientConstant(int labelIndex) {
return 2 * (target[labelIndex] - expectation[labelIndex]) / count;
}
}
| 335 | 23 | 68 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/CRFOptimizableByEntropyRegularization.java
|
package cc.mallet.fst.semi_supervised;
import java.io.Serializable;
import java.util.logging.Logger;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.InstanceList;
import cc.mallet.fst.CRF;
import cc.mallet.fst.SumLattice;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.fst.Transducer;
import cc.mallet.optimize.Optimizable;
import cc.mallet.util.MalletLogger;
/**
* A CRF objective function that is the entropy of the CRF's
* predictions on unlabeled data.
*
* References:
* Feng Jiao, Shaojun Wang, Chi-Hoon Lee, Russell Greiner, Dale Schuurmans
* "Semi-supervised conditional random fields for improved sequence segmentation and labeling"
* ACL 2006
*
* Gideon Mann, Andrew McCallum
* "Efficient Computation of Entropy Gradient for Semi-Supervised Conditional Random Fields"
* HLT/NAACL 2007
*
* @author Gaurav Chandalia
* @author Gregory Druck
*/
public class CRFOptimizableByEntropyRegularization implements Optimizable.ByGradientValue,
Serializable {
private static Logger logger = MalletLogger.getLogger(CRFOptimizableByEntropyRegularization.class.getName());
private int cachedValueWeightsStamp = -1;
private int cachedGradientWeightsStamp = -1;
// model's expectations according to entropy reg.
protected CRF.Factors expectations;
// used to update gradient
protected Transducer.Incrementor incrementor;
// contains labeled and unlabeled data
protected InstanceList data;
// the model
protected CRF crf;
// scale entropy values,
// typically, (entropyRegGamma * numLabeled / numUnlabeled)
protected double scalingFactor;
// log probability of all the sequences, this is also the entropy due to all
// the instances (updated in computeExpectations())
protected double cachedValue;
// gradient due to this optimizable (updated in getValueGradient())
protected double[] cachedGradient;
/**
* Initializes the structures.
*/
public CRFOptimizableByEntropyRegularization(CRF crf, InstanceList ilist,
double scalingFactor) {
data = ilist;
this.crf = crf;
this.scalingFactor = scalingFactor;
// initialize the expectations using the model
expectations = new CRF.Factors(crf);
incrementor = expectations.new Incrementor();
cachedValue = 0.0;
cachedGradient = new double[crf.getParameters().getNumFactors()];
}
/**
* Initializes the structures (sets the scaling factor to 1.0).
*/
public CRFOptimizableByEntropyRegularization(CRF crf, InstanceList ilist) {
this(crf, ilist, 1.0);
}
public double getScalingFactor() {
return scalingFactor;
}
public void setScalingFactor(double scalingFactor) {
this.scalingFactor = scalingFactor;
}
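/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the scaling factor described above is typically
 * entropyRegGamma * numLabeled / numUnlabeled.  The gamma value below is a
 * made-up trade-off weight; in practice this optimizable is combined with a
 * label-likelihood objective rather than optimized on its own.
 */
public static CRFOptimizableByEntropyRegularization sketchWithTypicalScaling(
CRF crf, InstanceList labeled, InstanceList unlabeled) {
double entropyRegGamma = 0.5; // made-up trade-off weight
double scale = entropyRegGamma * (double) labeled.size() / unlabeled.size();
return new CRFOptimizableByEntropyRegularization(crf, unlabeled, scale);
}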
/**
* Resets, computes and fills expectations from all instances, also updating
* the entropy value. <p>
*
* Analogous to <tt>CRFOptimizableByLabelLikelihood.getExpectationValue</tt>.
*/
public void computeExpectations() {
expectations.zero();
// now, update the expectations due to each instance for entropy reg.
for (int ii = 0; ii < data.size(); ii++) {
FeatureVectorSequence input = (FeatureVectorSequence) data.get(ii).getData();
SumLattice lattice = new SumLatticeDefault(crf,input, true);
// update the expectations
EntropyLattice entropyLattice = new EntropyLattice(
input, lattice.getGammas(), lattice.getXis(), crf,
incrementor, scalingFactor);
cachedValue += entropyLattice.getEntropy();
}
}
public double getValue() {
if (crf.getWeightsValueChangeStamp() != cachedValueWeightsStamp) {
// The cached value is not up to date; it was calculated for a different set of CRF weights.
cachedValueWeightsStamp = crf.getWeightsValueChangeStamp();
cachedValue = 0;
computeExpectations();
cachedValue = scalingFactor * cachedValue;
assert(!Double.isNaN(cachedValue) && !Double.isInfinite(cachedValue))
: "Likelihood due to Entropy Regularization is NaN/Infinite";
logger.info("getValue() (entropy regularization) = " + cachedValue);
}
return cachedValue;
}
public void getValueGradient(double[] buffer) {
if (cachedGradientWeightsStamp != crf.getWeightsValueChangeStamp()) {
cachedGradientWeightsStamp = crf.getWeightsValueChangeStamp(); // cachedGradient will soon no longer be stale
getValue();
// if this fails then look in computeExpectations
expectations.assertNotNaNOrInfinite();
// fill up gradient
expectations.getParameters(cachedGradient);
}
System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
// some get/set methods that have to be implemented
public int getNumParameters() {
return crf.getParameters().getNumFactors();
}
public void getParameters(double[] buffer) {
crf.getParameters().getParameters(buffer);
}
public void setParameters(double[] buffer) {
crf.getParameters().setParameters(buffer);
crf.weightsValueChanged();
}
public double getParameter(int index) {
return crf.getParameters().getParameter(index);
}
public void setParameter(int index, double value) {
crf.getParameters().setParameter(index, value);
crf.weightsValueChanged();
}
// serialization stuff
private static final long serialVersionUID = 1;
}
| 5,400 | 31.341317 | 112 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GEKLCriterion.java
|
package cc.mallet.fst.semi_supervised;
public class GEKLCriterion extends GECriterion {
public GEKLCriterion(String name, double[] target, double weight) {
super(name, target, weight);
}
/**
* Returns the target/expectation ratio required in lattice computations. <p>
*
* *Note*: The ratio is divided by the feature count if the label expectations
* have been normalized.
*/
@Override
protected double getGradientConstant(int labelIndex) {
return target[labelIndex] / (expectation[labelIndex] * count);
}
}
| 529 | 26.894737 | 79 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/CRFTrainerByGE.java
|
package cc.mallet.fst.semi_supervised;
import java.util.HashMap;
import java.util.logging.Logger;
import cc.mallet.fst.CRF;
import cc.mallet.fst.Transducer;
import cc.mallet.fst.TransducerTrainer;
import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.types.InstanceList;
import cc.mallet.util.MalletLogger;
/**
* Trains a CRF using Generalized Expectation constraints that
* considers a single label of a linear chain CRF.
*
* See:
* "Generalized Expectation Criteria for Semi-Supervised Learning of Conditional Random Fields"
* Gideon Mann and Andrew McCallum
* ACL 2008
*
* @author Gregory Druck
*/
public class CRFTrainerByGE extends TransducerTrainer {
private static Logger logger = MalletLogger.getLogger(CRFTrainerByGE.class.getName());
private static final int DEFAULT_NUM_RESETS = 1;
private static final int DEFAULT_GPV = 10;
private boolean converged;
private int iteration;
private int numThreads;
private double gaussianPriorVariance;
private HashMap<Integer,GECriterion> constraints;
private CRF crf;
private StateLabelMap stateLabelMap;
public CRFTrainerByGE(CRF crf, HashMap<Integer,GECriterion> constraints) {
this(crf,constraints,1);
}
public CRFTrainerByGE(CRF crf, HashMap<Integer,GECriterion> constraints, int numThreads) {
this.converged = false;
this.iteration = 0;
this.constraints = constraints;
this.crf = crf;
this.numThreads = numThreads;
this.gaussianPriorVariance = DEFAULT_GPV;
// default one to one state label map
// other maps can be set with setStateLabelMap
this.stateLabelMap = new StateLabelMap(crf.getOutputAlphabet(),true);
}
@Override
public int getIteration() {
return iteration;
}
@Override
public Transducer getTransducer() {
return crf;
}
@Override
public boolean isFinishedTraining() {
return converged;
}
public void setGaussianPriorVariance(double gpv) {
this.gaussianPriorVariance = gpv;
}
// map between states in CRF FST and labels
public void setStateLabelMap(StateLabelMap map) {
this.stateLabelMap = map;
}
@Override
public boolean train(InstanceList unlabeledSet, int numIterations) {
assert(constraints.size() > 0);
if (constraints.size() == 0) {
throw new RuntimeException("No constraints specified!");
}
// TODO implement initialization
//initMaxEnt(crf);
// Check what type of constraints we have.
// XXX Could instead implement separate trainers...
boolean kl = false;
boolean l2 = false;
for (GECriterion constraint : constraints.values()) {
if (constraint instanceof GEL2Criterion) {
l2 = true;
}
else if (constraint instanceof GEKLCriterion) {
kl = true;
}
else {
throw new RuntimeException("Only KL and L2 constraints are supported " +
"by this trainer. Constraint type is " + constraint.getClass());
}
}
if (kl && l2) {
throw new RuntimeException("Currently constraints must be either all KL " +
"or all L2.");
}
GECriteria criteria;
if (kl) {
logger.info("kl");
criteria = new GEKLCriteria(crf.numStates(), stateLabelMap, constraints);
}
else {
logger.info("l2");
criteria = new GEL2Criteria(crf.numStates(), stateLabelMap, constraints);
}
CRFOptimizableByGECriteria ge =
new CRFOptimizableByGECriteria(criteria, crf, unlabeledSet, numThreads);
ge.setGaussianPriorVariance(gaussianPriorVariance);
LimitedMemoryBFGS bfgs = new LimitedMemoryBFGS(ge);
converged = false;
logger.info ("CRF about to train with "+numIterations+" iterations");
// sometimes resetting the optimizer helps to find
// a better parameter setting
int iter = 0;
for (int reset = 0; reset < DEFAULT_NUM_RESETS + 1; reset++) {
for (; iter < numIterations; iter++) {
try {
converged = bfgs.optimize (1);
iteration++;
logger.info ("CRF finished one iteration of maximizer, i="+iter);
runEvaluators();
} catch (IllegalArgumentException e) {
e.printStackTrace();
logger.info ("Catching exception; saying converged.");
converged = true;
} catch (Exception e) {
e.printStackTrace();
logger.info("Catching exception; saying converged.");
converged = true;
}
if (converged) {
logger.info ("CRF training has converged, i="+iter);
break;
}
}
bfgs.reset();
}
ge.shutdown();
return converged;
}
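/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * one way this trainer might be wired up.  'crf' and 'unlabeled' are assumed
 * to exist already; the feature index (17), the target distribution and the
 * constraint weight are made-up numbers for a two-label task.
 */
private static boolean exampleTrainingSketch(CRF crf, InstanceList unlabeled) {
HashMap<Integer,GECriterion> constraints = new HashMap<Integer,GECriterion>();
constraints.put(17, new GEKLCriterion("word=said", new double[] {0.9, 0.1}, 10.0));
CRFTrainerByGE trainer = new CRFTrainerByGE(crf, constraints, 2);
trainer.setGaussianPriorVariance(10.0);
return trainer.train(unlabeled, 100);
}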
}
| 4,447 | 26.627329 | 95 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GECriteria.java
|
package cc.mallet.fst.semi_supervised;
import java.util.BitSet;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.logging.Logger;
import cc.mallet.fst.SumLattice;
import cc.mallet.fst.Transducer;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.util.MalletLogger;
import cc.mallet.util.Maths;
/**
* Represents GE criteria specified in the form of feature-label associations.
*
* @author Gaurav Chandalia
* @author Gregory Druck
*/
public abstract class GECriteria {
private static Logger logger =
MalletLogger.getLogger(GECriteria.class.getName());
// number of states in the lattice
protected int numStates;
// mapping of states to labels (when using a custom FST in the transducer)
protected StateLabelMap stateLabelMap;
// key: feature index, value: FeatureInfo object that will hold for each
// feature: the prior distribution over labels, gamma value, model's
// expectation over labels, feature count across all instances
protected Map<Integer, GECriterion> constraints;
// number of bits == number of instances, a bit is set if the instance
// contains at least one feature constraint
protected BitSet constraintBits;
// thread handler to calculate label expectations
protected transient FeatureLabelExpExecutor labelExpExecutor;
/**
* Initializes the feature-label association constraints. <p>
*
* @param numStates Number of states in the lattice.
* @param stateLabelMap Mapping of states to labels (used when a custom FST
* is used to train a CRF).
* @param constraints Map, key: feature index, value: FeatureInfo object.
*/
public GECriteria(int numStates, StateLabelMap stateLabelMap,
Map<Integer, GECriterion> constraints) {
this.numStates = numStates;
this.stateLabelMap = stateLabelMap;
this.constraints = constraints;
// will be set later
this.constraintBits = null;
this.labelExpExecutor = new FeatureLabelExpExecutor();
}
/**
* Returns the state-label mapping. <p>
*/
public StateLabelMap getStateLabelMap() {
return stateLabelMap;
}
/**
* Returns the <tt>FeatureInfo</tt> object mapped to the feature index.
*
* <b>Note:</b> No check is performed to make sure feature index is valid.
* Method can return <tt>null</tt>.
*/
public GECriterion getConstraint(int featureIndex) {
return constraints.get(featureIndex);
}
/**
* Returns an iterator to the indices of the feature constraints.
*/
public Iterator<Integer> getFeatureIndexIterator() {
return constraints.keySet().iterator();
}
/**
* Returns bits for all instances, each set if instance has at least one
* feature constraint.
*/
public BitSet getConstraintBits() {
return constraintBits;
}
/**
* Sets a bit for each instance if it has at least one feature constraint
* (anywhere in the sequence).
*
* <tt>start, end</tt> indicate range of indices that will be used for semisup
* computations.
*/
public void setConstraintBits(InstanceList ilist, int start, int end) {
logger.info("Setting bits for instances...");
constraintBits = new BitSet(ilist.size());
// feature indices
Set<Integer> indices = constraints.keySet();
// true if at least one feature constraint is present anywhere in the
// *instance*
boolean featurePresent = false;
for (int i = start; i < end; ++i) {
FeatureVectorSequence fvs =
(FeatureVectorSequence) ilist.get(i).getData();
featurePresent = false;
for (int ip = 0; ip < fvs.size(); ++ip) {
FeatureVector fv = fvs.getFeatureVector(ip);
// set flag and bit if any constraint is present
for (int index : indices) {
if (fv.value(index) > 0.0) {
featurePresent = true;
break;
}
}
if (featurePresent) {
constraintBits.set(i);
break;
}
}
}
logger.info("Number of instances with at least one GE constraint: " +
constraintBits.cardinality());
}
/**
* Returns bits for an instance, each bit corresponds to a feature index and
* is set if the feature is present in the instance.
*
* @return Constraint bits, size == number of feature constraints
*/
public final BitSet getConstraintBitsForInstance(FeatureVectorSequence fvs) {
BitSet constraintBits = new BitSet();
// feature indices
Set<Integer> indices = constraints.keySet();
for (int index : indices) {
for (int ip = 0; ip < fvs.size(); ++ip) {
if (fvs.getFeatureVector(ip).value(index) > 0.0) {
constraintBits.set(index);
break;
}
}
}
return constraintBits;
}
/**
* Returns the number of times the feature occurred in the sequence (an
* instance). <p>
*
* Also updates the expectation of a feature in one instance.
*
* @param featureIndex Feature to look for.
* @param fvs Observation sequence.
* @param gammas Log probability of being in state 'i' at input position 'j'.
* @param expectation Model expectation (filled by this method).
* @return Number of times the feature occurred in the input sequence.
* @throws IndexOutOfBoundsException If an invalid feature index is specified.
*/
protected final int getExpectationForInstance(
int featureIndex, FeatureVectorSequence fvs, double[][] gammas,
double[] expectation) {
int featureCount = 0;
for (int ip = 0; ip < fvs.size(); ++ip) {
if (fvs.getFeatureVector(ip).value(featureIndex) > 0.0) {
++featureCount;
for (int s = 0; s < numStates; ++s) {
int labelIndex = stateLabelMap.getLabelIndex(s);
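// gammas[ip+1][s] is the log marginal of state s at input position ip;
// lattice positions are offset by one from input positions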
expectation[labelIndex] += Math.exp(gammas[ip+1][s]);
}
}
}
return featureCount;
}
/**
* Returns the expectation of a feature in one instance. <p>
*
* *Note*: These expectations are not normalized.
*/
public final double[] getExpectationForInstance(
int featureIndex, FeatureVectorSequence fvs, double[][] gammas) {
double[] expectation = new double[stateLabelMap.getNumLabels()];
this.getExpectationForInstance(featureIndex, fvs, gammas, expectation);
return expectation;
}
/**
* Calculates the model expectation of all feature constraints. <p>
*
* <tt>lattices</tt> contains the SumLattice objects of instances to be used
* for semisup computations.
*/
public void calculateExpectations(InstanceList ilist, Transducer transducer,
Map<Integer, SumLattice> lattices) {
labelExpExecutor.calculateLabelExp(ilist, transducer, lattices);
this.print(stateLabelMap.getLabelAlphabet());
}
/**
* Computes sum of GE constraint values. <p>
*
* <b>Note:</b> Label expectations are <b>not</b> re-computed here. If
* desired, call <tt>calculateExpectations</tt> first.
*/
public abstract double getGEValue();
protected void assertLabelExpNonNull() {
Iterator<Integer> iter = constraints.keySet().iterator();
while (iter.hasNext()) {
int fi = iter.next();
assert(constraints.get(fi).getExpectation() != null)
: "model exp null, fi: " + fi;
}
}
/**
* Prints the constraints.
*/
public void print(Alphabet targetAlphabet) {
StringBuilder sb = new StringBuilder(constraints.size() * 50);
sb.append("Printing feature-label constraints...\n");
Iterator<Map.Entry<Integer, GECriterion>> featureIter =
constraints.entrySet().iterator();
while (featureIter.hasNext()) {
Map.Entry<Integer, GECriterion> entry = featureIter.next();
int fi = entry.getKey();
GECriterion constraint = entry.getValue();
sb.append("index: " + fi + ", name: " + constraint.getName() +
", gamma: " + constraint.getWeight() +
", count: " + constraint.getCount() + "\n");
double[] target = constraint.getTarget();
for (int li = 0; li < target.length; ++li){
sb.append("\t ");
if (targetAlphabet != null) {
sb.append(targetAlphabet.lookupObject(li) + "--");
}
sb.append(String.format("%1.4f", target[li]));
}
sb.append("\n");
double[] expectation = constraint.getExpectation();
if (expectation != null) {
for (int li = 0; li < expectation.length; ++li){
sb.append("\t ");
if (targetAlphabet != null) {
sb.append(targetAlphabet.lookupObject(li) + "--");
}
sb.append(String.format("%1.4f", expectation[li]));
}
sb.append("\n\t" + Maths.klDivergence(target, expectation) + "\n");
}
}
System.out.println(sb.toString());
}
/**
* Executes threads to calculate model expectations of all feature
* constraints.
*/
private class FeatureLabelExpExecutor {
// key: instance index, value: already computed Lattice
private Map<Integer, SumLattice> lattices;
private InstanceList ilist;
// all indices of feature constraints, used for multi-threading, initialized
// in calculateLabelExp
private Set<Integer> featureIndices;
// number of threads == number of feature constraints
private int numThreads;
// thread pool, each thread computes a feature constraint's label
// expectations
private ThreadPoolExecutor executor;
// milliseconds
public static final int SLEEP_TIME = 100;
public FeatureLabelExpExecutor() {
lattices = null;
ilist = null;
featureIndices = null;
numThreads = constraints.size();
logger.info("Creating " + numThreads +
" threads for calculating label expectations...");
executor =
(ThreadPoolExecutor) Executors.newFixedThreadPool(numThreads);
}
public void calculateLabelExp(InstanceList ilist, Transducer transducer,
Map<Integer, SumLattice> lattices) {
this.lattices = lattices;
this.ilist = ilist;
featureIndices = new HashSet<Integer>(constraints.size());
logger.info("Calculating label expectations...");
long time = System.currentTimeMillis();
for (int fi : constraints.keySet()) {
executor.execute(new FeatureExpectationHandler(fi));
}
// wait for all constraints to finish
int numConstraints = -1;
while (numConstraints != constraints.size()) {
synchronized(this) {
numConstraints = featureIndices.size();
}
try {
Thread.sleep(SLEEP_TIME);
} catch (InterruptedException ie) {
ie.printStackTrace();
System.exit(1);
}
}
time = (System.currentTimeMillis() - time) / 1000;
logger.info(String.valueOf(time) + " secs.");
assertLabelExpNonNull();
lattices = null;
ilist = null;
featureIndices = null;
}
/**
* Calculates the model expectation of the feature in a new thread.
*/
private class FeatureExpectationHandler implements Runnable {
// feature constraint whose label expectations are to be computed
private int fi;
/**
* Initializes the handler.
*
* @param fi Index of feature constraint whose expectations are to be
* computed
*/
public FeatureExpectationHandler(int fi) {
this.fi = fi;
}
/**
* Calculates the model expectation of the feature.
*/
public void run() {
int numLabels = stateLabelMap.getNumLabels();
double[] expectation = new double[numLabels];
int featureCount = 0;
SumLattice lattice = null;
for (int i : lattices.keySet()) {
// skip if the instance doesn't have any constraints
if (!constraintBits.get(i)) {
continue;
}
FeatureVectorSequence fvs = (FeatureVectorSequence) ilist.get(i).getData();
lattice = lattices.get(i);
assert(lattice != null)
: "Lattice is null:: " + i + ", size: " + lattices.size();
// update the number of times this feature occurred in the sequence
// and the label expectations due to this sequence
featureCount += getExpectationForInstance(
fi, fvs, lattice.getGammas(), expectation);
}
assert(!MatrixOps.isNaNOrInfinite(expectation));
if (MatrixOps.isNonZero(expectation)) {
// normalizing label expectations
MatrixOps.timesEquals(expectation, 1/MatrixOps.sum(expectation));
GECriterion constraint = constraints.get(fi);
constraint.setExpectation(expectation);
constraint.setCount(featureCount);
}
else {
throw new RuntimeException("Feature " + fi + " does not occur!");
}
synchronized(FeatureLabelExpExecutor.this) {
featureIndices.add(fi);
}
}
}
}
}
| 13,254 | 31.809406 | 85 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/semi_supervised/GECriterion.java
|
package cc.mallet.fst.semi_supervised;
public abstract class GECriterion {
protected String name;
protected double weight;
// target expectation
protected double[] target;
// model expectation
protected double[] expectation;
protected double count;
public GECriterion(String name, double[] target, double weight) {
this.name = name;
this.weight = weight;
this.target = target;
}
/**
* Returns the constraint name.
*/
public String getName() {
return name;
}
/**
* Returns the weight (gamma) for the constraint.
*/
public double getWeight() {
return weight;
}
/**
* Returns the target expectation for the feature.
*/
public double[] getTarget() {
return target;
}
/**
* Returns the target expectation for the feature and label li.
* @param li label index
*/
public double getTarget(int li) {
return target[li];
}
/**
* Returns the model expectation of the feature.
*/
public double[] getExpectation() {
return expectation;
}
/**
* Returns the model expectation for the feature and label li.
* @param li label index
*/
public double getExpectation(int li) {
return expectation[li];
}
protected void setExpectation(double[] expectation) {
this.expectation = expectation;
}
/**
* Returns the count of the feature.
*/
public double getCount() {
return count;
}
protected void setCount(double count) {
this.count = count;
}
/**
* Returns the constant value from the gradient, which
* will be different for different criteria.
*/
protected abstract double getGradientConstant(int labelIndex);
}
| 1,826 | 20.244186 | 69 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/tests/TestCRF.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.tests;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.PrintWriter;
import java.io.Serializable;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.Random;
import java.util.regex.Pattern;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Sequence;
import cc.mallet.types.SparseVector;
import cc.mallet.types.Token;
import cc.mallet.types.TokenSequence;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.LineGroupString2TokenSequence;
import cc.mallet.pipe.Noop;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.PrintInputAndTarget;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.Target2LabelSequence;
import cc.mallet.pipe.TokenSequence2FeatureVectorSequence;
import cc.mallet.pipe.TokenSequenceLowercase;
import cc.mallet.pipe.TokenSequenceMatchDataAndTarget;
import cc.mallet.pipe.TokenSequenceParseFeatureString;
import cc.mallet.pipe.iterator.ArrayIterator;
import cc.mallet.pipe.iterator.LineGroupIterator;
import cc.mallet.pipe.tsf.OffsetConjunctions;
import cc.mallet.pipe.tsf.TokenText;
import cc.mallet.fst.CRF;
import cc.mallet.fst.CRFTrainerByLabelLikelihood;
import cc.mallet.fst.CRFTrainerByStochasticGradient;
import cc.mallet.fst.MaxLattice;
import cc.mallet.fst.MaxLatticeDefault;
import cc.mallet.fst.SumLattice;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.fst.SumLatticeScaling;
import cc.mallet.fst.TokenAccuracyEvaluator;
import cc.mallet.fst.Transducer;
import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.tests.TestOptimizable;
import cc.mallet.util.FileUtils;
// TODO (gsc (08/25/08)): some tests fail because tests are using CRFTrainerByLabelLikelihood
// instead of CRFOptimizableByLabelLikelihood and CRFOptimizableByValueGradients
/** Tests for CRF training. */
public class TestCRF extends TestCase {
public TestCRF(String name) {
super(name);
}
public static final String[] data = new String[] {
"Free software is a matter of the users' freedom to run, copy, distribute, study, change and improve the software. More precisely, it refers to four kinds of freedom, for the users of the software.",
"The freedom to run the program, for any purpose.",
"The freedom to study how the program works, and adapt it to your needs.",
"The freedom to redistribute copies so you can help your neighbor.",
"The freedom to improve the program, and release your improvements to the public, so that the whole community benefits.",
"A program is free software if users have all of these freedoms. Thus, you should be free to redistribute copies, either with or without modifications, either gratis or charging a fee for distribution, to anyone anywhere. Being free to do these things means (among other things) that you do not have to ask or pay for permission.",
"You should also have the freedom to make modifications and use them privately in your own work or play, without even mentioning that they exist. If you do publish your changes, you should not be required to notify anyone in particular, or in any particular way.",
"In order for the freedoms to make changes, and to publish improved versions, to be meaningful, you must have access to the source code of the program. Therefore, accessibility of source code is a necessary condition for free software.",
"Finally, note that criteria such as those stated in this free software definition require careful thought for their interpretation. To decide whether a specific software license qualifies as a free software license, we judge it based on these criteria to determine whether it fits their spirit as well as the precise words. If a license includes unconscionable restrictions, we reject it, even if we did not anticipate the issue in these criteria. Sometimes a license requirement raises an issue that calls for extensive thought, including discussions with a lawyer, before we can decide if the requirement is acceptable. When we reach a conclusion about a new issue, we often update these criteria to make it easier to see why certain licenses do or don't qualify.",
"In order for these freedoms to be real, they must be irrevocable as long as you do nothing wrong; if the developer of the software has the power to revoke the license, without your doing anything to give cause, the software is not free.",
"However, certain kinds of rules about the manner of distributing free software are acceptable, when they don't conflict with the central freedoms. For example, copyleft (very simply stated) is the rule that when redistributing the program, you cannot add restrictions to deny other people the central freedoms. This rule does not conflict with the central freedoms; rather it protects them.",
"Thus, you may have paid money to get copies of free software, or you may have obtained copies at no charge. But regardless of how you got your copies, you always have the freedom to copy and change the software, even to sell copies.",
"Rules about how to package a modified version are acceptable, if they don't effectively block your freedom to release modified versions. Rules that ``if you make the program available in this way, you must make it available in that way also'' can be acceptable too, on the same condition. (Note that such a rule still leaves you the choice of whether to publish the program or not.) It is also acceptable for the license to require that, if you have distributed a modified version and a previous developer asks for a copy of it, you must send one.",
"Sometimes government export control regulations and trade sanctions can constrain your freedom to distribute copies of programs internationally. Software developers do not have the power to eliminate or override these restrictions, but what they can and must do is refuse to impose them as conditions of use of the program. In this way, the restrictions will not affect activities and people outside the jurisdictions of these governments.",
"Finally, note that criteria such as those stated in this free software definition require careful thought for their interpretation. To decide whether a specific software license qualifies as a free software license, we judge it based on these criteria to determine whether it fits their spirit as well as the precise words. If a license includes unconscionable restrictions, we reject it, even if we did not anticipate the issue in these criteria. Sometimes a license requirement raises an issue that calls for extensive thought, including discussions with a lawyer, before we can decide if the requirement is acceptable. When we reach a conclusion about a new issue, we often update these criteria to make it easier to see why certain licenses do or don't qualify.",
"The GNU Project was launched in 1984 to develop a complete Unix-like operating system which is free software: the GNU system." };
public void testGetSetParameters() {
int inputVocabSize = 100;
int numStates = 5;
Alphabet inputAlphabet = new Alphabet();
for (int i = 0; i < inputVocabSize; i++)
inputAlphabet.lookupIndex("feature" + i);
Alphabet outputAlphabet = new Alphabet();
CRF crf = new CRF(inputAlphabet, outputAlphabet);
String[] stateNames = new String[numStates];
for (int i = 0; i < numStates; i++)
stateNames[i] = "state" + i;
crf.addFullyConnectedStates(stateNames);
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
Optimizable.ByGradientValue mcrf = crft
.getOptimizableCRF(new InstanceList(null));
TestOptimizable.testGetSetParameters(mcrf);
}
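	// Sanity check for Transducer.sumLogProb: given a = log(p) and b = log(q),
	// the result should equal log(p + q), i.e. addition carried out in log space.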
public void testSumLogProb() {
double w1 = Math.log(.2);
double w2 = Math.log(.8);
double s1 = Math.log(.2 + .8);
double s2 = Transducer.sumLogProb(w1, w2);
assertEquals(s1, s2, 0.00001);
w1 = Math.log(99999);
w2 = Math.log(.0001);
s1 = Math.log(99999.0001);
s2 = Transducer.sumLogProb(w1, w2);
assertEquals(s1, s2, 0.00001);
}
public void testSumLattice() {
int inputVocabSize = 1;
int numStates = 2;
Alphabet inputAlphabet = new Alphabet();
for (int i = 0; i < inputVocabSize; i++)
inputAlphabet.lookupIndex("feature" + i);
Alphabet outputAlphabet = new Alphabet();
CRF crf = new CRF(inputAlphabet, outputAlphabet);
String[] stateNames = new String[numStates];
for (int i = 0; i < numStates; i++)
stateNames[i] = "state" + i;
crf.addFullyConnectedStates(stateNames);
crf.setWeightsDimensionDensely();
crf.getState(0).setInitialWeight(1.0);
crf.getState(1).setInitialWeight(Transducer.IMPOSSIBLE_WEIGHT);
crf.getState(0).setFinalWeight(0.0);
crf.getState(1).setFinalWeight(0.0);
		crf.setParameter(0, 0, 0, Transducer.IMPOSSIBLE_WEIGHT); // state0 self-transition
crf.setParameter(0, 1, 0, 1.0); // state0->state1
crf.setParameter(1, 1, 0, 1.0); // state1 self-transition
crf.setParameter(1, 0, 0, Transducer.IMPOSSIBLE_WEIGHT); // state1->state0
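		// With these weights the only finite-weight path through the lattice is
		// state0 -> state1 -> state1 -> state1 (initial weight 1.0 plus three
		// transitions of weight 1.0 each), so the total lattice weight should be 4.0.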
FeatureVectorSequence fvs = new FeatureVectorSequence(
new FeatureVector[] {
new FeatureVector((Alphabet) crf.getInputAlphabet(),
new double[] { 1 }),
new FeatureVector((Alphabet) crf.getInputAlphabet(),
new double[] { 1 }),
new FeatureVector((Alphabet) crf.getInputAlphabet(),
new double[] { 1 }), });
SumLattice lattice = new SumLatticeDefault(crf, fvs, true);
// We start in state0
assertTrue(lattice.getGammaProbability(0, crf.getState(0)) == 1.0);
assertTrue(lattice.getGammaProbability(0, crf.getState(1)) == 0.0);
// We go to state1
assertTrue(lattice.getGammaProbability(1, crf.getState(0)) == 0.0);
assertTrue(lattice.getGammaProbability(1, crf.getState(1)) == 1.0);
// And on through a self-transition
assertTrue(lattice
.getXiProbability(1, crf.getState(1), crf.getState(1)) == 1.0);
assertTrue(lattice
.getXiProbability(1, crf.getState(1), crf.getState(0)) == 0.0);
assertTrue("Lattice weight = " + lattice.getTotalWeight(), lattice
.getTotalWeight() == 4.0);
// Gammas at all times sum to 1.0
for (int time = 0; time < lattice.length() - 1; time++) {
double gammasum = lattice
.getGammaProbability(time, crf.getState(0))
+ lattice.getGammaProbability(time, crf.getState(1));
assertEquals("Gammas at time step " + time + " sum to " + gammasum,
1.0, gammasum, 0.0001);
}
// Xis at all times sum to 1.0
for (int time = 0; time < lattice.length() - 1; time++) {
double xissum = lattice.getXiProbability(time, crf.getState(0), crf
.getState(0))
+ lattice.getXiProbability(time, crf.getState(0), crf
.getState(1))
+ lattice.getXiProbability(time, crf.getState(1), crf
.getState(0))
+ lattice.getXiProbability(time, crf.getState(1), crf
.getState(1));
assertEquals("Xis at time step " + time + " sum to " + xissum, 1.0,
xissum, 0.0001);
}
}
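	// Same forced-path weights as testSumLattice, but checked through the max-product
	// (Viterbi) lattice: the decoded best state sequence starts in state0 and then
	// stays in state1.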
public void testMaxLattice() {
int inputVocabSize = 1;
int numStates = 2;
Alphabet inputAlphabet = new Alphabet();
for (int i = 0; i < inputVocabSize; i++)
inputAlphabet.lookupIndex("feature" + i);
Alphabet outputAlphabet = new Alphabet();
CRF crf = new CRF(inputAlphabet, outputAlphabet);
String[] stateNames = new String[numStates];
for (int i = 0; i < numStates; i++)
stateNames[i] = "state" + i;
crf.addFullyConnectedStates(stateNames);
crf.setWeightsDimensionDensely();
crf.getState(0).setInitialWeight(1.0);
crf.getState(1).setInitialWeight(Transducer.IMPOSSIBLE_WEIGHT);
crf.getState(0).setFinalWeight(0.0);
crf.getState(1).setFinalWeight(0.0);
		crf.setParameter(0, 0, 0, Transducer.IMPOSSIBLE_WEIGHT); // state0 self-transition
crf.setParameter(0, 1, 0, 1.0); // state0->state1
crf.setParameter(1, 1, 0, 1.0); // state1 self-transition
crf.setParameter(1, 0, 0, Transducer.IMPOSSIBLE_WEIGHT); // state1->state0
FeatureVectorSequence fvs = new FeatureVectorSequence(
new FeatureVector[] {
new FeatureVector((Alphabet) crf.getInputAlphabet(),
new double[] { 1 }),
new FeatureVector((Alphabet) crf.getInputAlphabet(),
new double[] { 1 }),
new FeatureVector((Alphabet) crf.getInputAlphabet(),
new double[] { 1 }), });
MaxLattice lattice = new MaxLatticeDefault(crf, fvs);
Sequence<Transducer.State> viterbiPath = lattice.bestStateSequence();
// We start in state0
assertTrue(viterbiPath.get(0) == crf.getState(0));
// We go to state1
assertTrue(viterbiPath.get(1) == crf.getState(1));
// And on through a self-transition to state1 again
assertTrue(viterbiPath.get(2) == crf.getState(1));
}
// Should print at end:
// parameters 4 4 3: unconstrainedWeight=2912.0 constrainedWeight=428.0
// maxWeight=35770.0 minGrad=520.0
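	// Sweeps a grid of parameter settings, printing the unconstrained and constrained
	// lattice weights plus the optimizable value and gradient norm at each step, and
	// finally checks the value (about -35770) and gradient one-norm (about 520) at the
	// last setting. When useSave is true the CRF is also written out and read back via
	// Java serialization first.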
public void doTestCost(boolean useSave) {
int inputVocabSize = 4;
int numStates = 5;
// Create a file to store the CRF
File f = new File("TestObject.obj");
File f2 = new File("TestObject2.obj");
Alphabet inputAlphabet = new Alphabet();
for (int i = 0; i < inputVocabSize; i++)
inputAlphabet.lookupIndex("feature" + i);
Alphabet outputAlphabet = new Alphabet();
String[] stateNames = new String[numStates];
for (int i = 0; i < numStates; i++) {
stateNames[i] = "state" + i;
outputAlphabet.lookupIndex(stateNames[i]);
}
CRF crf = new CRF(inputAlphabet, outputAlphabet);
CRF saveCRF = crf;
// inputAlphabet = (Feature.Alphabet) crf.getInputAlphabet();
FeatureVectorSequence fvs = new FeatureVectorSequence(
new FeatureVector[] {
new FeatureVector(crf.getInputAlphabet(), new int[] {
1, 2, 3 }),
new FeatureVector(crf.getInputAlphabet(), new int[] {
1, 2, 3 }),
new FeatureVector(crf.getInputAlphabet(), new int[] {
1, 2, 3 }),
new FeatureVector(crf.getInputAlphabet(), new int[] {
1, 2, 3 }), });
FeatureSequence ss = new FeatureSequence(crf.getOutputAlphabet(),
new int[] { 0, 1, 2, 3 });
InstanceList ilist = new InstanceList(new Noop(inputAlphabet,
outputAlphabet));
ilist.add(fvs, ss, null, null);
crf.addFullyConnectedStates(stateNames);
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
crft.setUseSparseWeights(false);
if (useSave) {
try {
ObjectOutputStream oos = new ObjectOutputStream(
new FileOutputStream(f));
oos.writeObject(crf);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file: " + e);
}
System.err.println("Wrote out CRF");
System.err.println("CRF parameters. hyperbolicPriorSlope: "
+ crft.getUseHyperbolicPriorSlope()
+ ". hyperbolicPriorSharpness: "
+ crft.getUseHyperbolicPriorSharpness()
+ ". gaussianPriorVariance: "
+ crft.getGaussianPriorVariance());
// And read it back in
crf = null;
try {
ObjectInputStream ois = new ObjectInputStream(
new FileInputStream(f));
crf = (CRF) ois.readObject();
ois.close();
} catch (IOException e) {
System.err.println("Exception reading file: " + e);
} catch (ClassNotFoundException cnfe) {
System.err.println("Cound not find class reading in object: "
+ cnfe);
}
System.err.println("Read in CRF.");
System.err.println("CRF parameters. hyperbolicPriorSlope: "
+ crft.getUseHyperbolicPriorSlope()
+ ". hyperbolicPriorSharpness: "
+ crft.getUseHyperbolicPriorSharpness()
+ ". gaussianPriorVariance: "
+ crft.getGaussianPriorVariance());
try {
ObjectOutputStream oos = new ObjectOutputStream(
new FileOutputStream(f2));
oos.writeObject(crf);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file: " + e);
}
System.err.println("Wrote out CRF");
crf = saveCRF;
}
Optimizable.ByGradientValue mcrf = crft.getOptimizableCRF(ilist);
double unconstrainedWeight = new SumLatticeDefault(crf, fvs)
.getTotalWeight();
double constrainedWeight = new SumLatticeDefault(crf, fvs, ss)
.getTotalWeight();
double optimizableValue = 0, gradientNorm = 0;
double[] gradient = new double[mcrf.getNumParameters()];
// System.out.println
// ("unconstrainedCost="+unconstrainedCost+" constrainedCost="+constrainedCost);
for (int i = 0; i < numStates; i++)
for (int j = 0; j < numStates; j++)
for (int k = 0; k < inputVocabSize; k++) {
crf.setParameter(i, j, k, (k + i + j) * (k * i + i * j));
unconstrainedWeight = new SumLatticeDefault(crf, fvs)
.getTotalWeight();
constrainedWeight = new SumLatticeDefault(crf, fvs, ss)
.getTotalWeight();
optimizableValue = mcrf.getValue();
mcrf.getValueGradient(gradient);
gradientNorm = MatrixOps.oneNorm(gradient);
System.out.println("parameters " + i + " " + j + " " + k
+ ": unconstrainedWeight =" + unconstrainedWeight
+ " constrainedWeight =" + constrainedWeight
+ " optimizableValue =" + optimizableValue
+ " gradientNorm =" + gradientNorm);
}
assertTrue("Value should be 35770 but is" + optimizableValue, Math
.abs(optimizableValue + 35770) < 0.001);
assertTrue(Math.abs(gradientNorm - 520) < 0.001);
}
public void testCost() {
doTestCost(false);
}
public void testCostSerialized() {
doTestCost(true);
}
public void testIncrement() {
}
public static class TestCRFTokenSequenceRemoveSpaces extends Pipe implements
Serializable {
public TestCRFTokenSequenceRemoveSpaces() {
super(null, new Alphabet());
}
public Instance pipe(Instance carrier) {
TokenSequence ts = (TokenSequence) carrier.getData();
TokenSequence newTs = new TokenSequence();
FeatureSequence labelSeq = new FeatureSequence(getTargetAlphabet());
boolean lastWasSpace = true;
StringBuffer sb = new StringBuffer();
for (int i = 0; i < ts.size(); i++) {
Token t = ts.get(i);
if (t.getText().equals(" "))
lastWasSpace = true;
else {
sb.append(t.getText());
newTs.add(t);
labelSeq.add(lastWasSpace ? "start" : "notstart");
lastWasSpace = false;
}
}
if (isTargetProcessing())
carrier.setTarget(labelSeq);
carrier.setData(newTs);
carrier.setSource(sb.toString());
return carrier;
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
int version = in.readInt();
}
}
public class TestCRF2String extends Pipe implements Serializable {
public TestCRF2String() {
super();
}
public Instance pipe(Instance carrier) {
StringBuffer sb = new StringBuffer();
String source = (String) carrier.getSource();
Sequence as = (Sequence) carrier.getTarget();
// int startLabelIndex = as.getAlphabet().lookupIndex("start");
for (int i = 0; i < source.length(); i++) {
System.out.println("target[" + i + "]=" + as.get(i).toString());
if (as.get(i).toString().equals("start") && i != 0)
sb.append(' ');
sb.append(source.charAt(i));
}
carrier.setSource(sb.toString());
System.out.println("carrier.getSource() = " + carrier.getSource());
return carrier;
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject(ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException,
ClassNotFoundException {
int version = in.readInt();
}
}
public void testValueGradient() {
doTestSpacePrediction(true);
}
public void testTrain() {
doTestSpacePrediction(false);
}
public void doTestSpacePrediction(boolean testValueAndGradient) {
Pipe p = makeSpacePredictionPipe();
Pipe p2 = new TestCRF2String();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new Random(1), new double[] {
.5, .5 });
CRF crf = new CRF(p, p2);
crf.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
if (testValueAndGradient) {
Optimizable.ByGradientValue optable = crft
.getOptimizableCRF(lists[0]);
// TestOptimizable.testValueAndGradient(minable);
double[] gradient = new double[optable.getNumParameters()];
optable.getValueGradient(gradient);
// TestOptimizable.testValueAndGradientInDirection(optable,
// gradient);
// TestOptimizable.testValueAndGradientCurrentParameters(optable);
			TestOptimizable.testValueAndGradient(optable); // This tests at the current
			// parameters and at parameters perturbed toward the gradient
} else {
System.out.println("Training Accuracy before training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy before training = "
+ crf.averageTokenAccuracy(lists[1]));
System.out.println("Training...");
crft.trainIncremental(lists[0]);
System.out.println("Training Accuracy after training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy after training = "
+ crf.averageTokenAccuracy(lists[1]));
System.out.println("Training results:");
for (int i = 0; i < lists[0].size(); i++) {
Instance inst = lists[0].get(i);
Sequence input = (Sequence) inst.getData();
Sequence output = crf.transduce(input);
System.out.println(output);
}
System.out.println("Testing results:");
for (int i = 0; i < lists[1].size(); i++) {
Instance inst = lists[1].get(i);
Sequence input = (Sequence) inst.getData();
Sequence output = crf.transduce(input);
System.out.println(output);
}
}
}
public void doTestSpacePrediction(boolean testValueAndGradient,
boolean useSaved, boolean useSparseWeights) {
Pipe p = makeSpacePredictionPipe();
CRF savedCRF;
File f = new File("TestObject.obj");
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new double[] { .5, .5 });
CRF crf = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
crft.setUseSparseWeights(useSparseWeights);
if (testValueAndGradient) {
Optimizable.ByGradientValue minable = crft
.getOptimizableCRF(lists[0]);
TestOptimizable.testValueAndGradient(minable);
} else {
System.out.println("Training Accuracy before training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy before training = "
+ crf.averageTokenAccuracy(lists[1]));
savedCRF = crf;
System.out.println("Training serialized crf.");
crft.trainIncremental(lists[0]);
double preTrainAcc = crf.averageTokenAccuracy(lists[0]);
double preTestAcc = crf.averageTokenAccuracy(lists[1]);
System.out.println("Training Accuracy after training = "
+ preTrainAcc);
System.out.println("Testing Accuracy after training = "
+ preTestAcc);
try {
ObjectOutputStream oos = new ObjectOutputStream(
new FileOutputStream(f));
oos.writeObject(crf);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file: " + e);
}
System.err.println("Wrote out CRF");
System.err.println("CRF parameters. hyperbolicPriorSlope: "
+ crft.getUseHyperbolicPriorSlope()
+ ". hyperbolicPriorSharpness: "
+ crft.getUseHyperbolicPriorSharpness()
+ ". gaussianPriorVariance: "
+ crft.getGaussianPriorVariance());
// And read it back in
if (useSaved) {
crf = null;
try {
ObjectInputStream ois = new ObjectInputStream(
new FileInputStream(f));
crf = (CRF) ois.readObject();
ois.close();
} catch (IOException e) {
System.err.println("Exception reading file: " + e);
} catch (ClassNotFoundException cnfe) {
System.err
.println("Cound not find class reading in object: "
+ cnfe);
}
System.err.println("Read in CRF.");
crf = savedCRF;
double postTrainAcc = crf.averageTokenAccuracy(lists[0]);
double postTestAcc = crf.averageTokenAccuracy(lists[1]);
System.out.println("Training Accuracy after saving = "
+ postTrainAcc);
System.out.println("Testing Accuracy after saving = "
+ postTestAcc);
assertEquals(postTrainAcc, preTrainAcc, 0.0001);
assertEquals(postTestAcc, preTestAcc, 0.0001);
}
}
}
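	// Builds the feature pipeline used by most tests below: each document is split into
	// single-character tokens, lowercased, spaces are removed (labeling the remaining
	// characters "start"/"notstart"), token-text and offset-conjunction features are
	// added, and the result is converted to a FeatureVectorSequence.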
private Pipe makeSpacePredictionPipe() {
Pipe p = new SerialPipes(new Pipe[] {
new CharSequence2TokenSequence("."),
new TokenSequenceLowercase(),
new TestCRFTokenSequenceRemoveSpaces(),
new TokenText(),
new OffsetConjunctions(true, new int[][] { { 0 }, { 1 },
{ -1, 0 },
// Original test had this conjunction in it too
// {1},{-1,0},{0,1},
// {0, 1},
// I'd like to comment out this next line to make it run
// faster, but then we'd need to adjust likelihood and
// accuracy test values. -akm 12/2007
// TODO uncomment this line
// {-2, -1, 0}, {0, 1, 2}, {-3, -2, -1}, {1, 2, 3},
// (These were commented before...)
// {-2,-1}, {-1,0}, {0,1}, {1,2},
// {-3,-2,-1}, {-2,-1,0}, {-1,0,1}, {0,1,2}, {1,2,3},
}),
// new PrintInputAndTarget(),
new TokenSequence2FeatureVectorSequence() });
return p;
}
public void testAddOrderNStates() {
Pipe p = makeSpacePredictionPipe();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new java.util.Random(678),
new double[] { .5, .5 });
// Compare 3 CRFs trained with addOrderNStates, and make sure
// that having more features leads to a higher likelihood
CRF crf1 = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf1.addOrderNStates(lists[0], new int[] { 1, },
new boolean[] { false, }, "START", null, null, false);
new CRFTrainerByLabelLikelihood(crf1).trainIncremental(lists[0]);
CRF crf2 = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf2.addOrderNStates(lists[0], new int[] { 1, 2, }, new boolean[] {
false, true }, "START", null, null, false);
new CRFTrainerByLabelLikelihood(crf2).trainIncremental(lists[0]);
CRF crf3 = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf3.addOrderNStates(lists[0], new int[] { 1, 2, }, new boolean[] {
false, false }, "START", null, null, false);
new CRFTrainerByLabelLikelihood(crf3).trainIncremental(lists[0]);
// Prevent cached values
double lik1 = getLikelihood(crf1, lists[0]);
double lik2 = getLikelihood(crf2, lists[0]);
double lik3 = getLikelihood(crf3, lists[0]);
System.out.println("CRF1 likelihood " + lik1);
assertTrue("Final zero-order likelihood <" + lik1
+ "> greater than first-order <" + lik2 + ">", lik1 < lik2);
assertTrue("Final defaults-only likelihood <" + lik2
+ "> greater than full first-order <" + lik3 + ">", lik2 < lik3);
assertEquals(-167.2234457483949, lik1, 0.0001);
assertEquals(-165.81326484466342, lik2, 0.0001);
assertEquals(-90.37680146432787, lik3, 0.0001);
}
double getLikelihood(CRF crf, InstanceList data) {
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
Optimizable.ByGradientValue mcrf = crft.getOptimizableCRF(data);
// Do this elaborate thing so that crf.cachedValueStale is forced true
double[] params = new double[mcrf.getNumParameters()];
mcrf.getParameters(params);
mcrf.setParameters(params);
return mcrf.getValue();
}
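	// Trains one CRF normally and one with every other weight structure frozen at zero,
	// then checks that the frozen weights really stay at zero and that the frozen model
	// has a lower (worse) training log-likelihood.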
public void testFrozenWeights() {
Pipe p = makeSpacePredictionPipe();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
CRF crf1 = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf1.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft1 = new CRFTrainerByLabelLikelihood(
crf1);
crft1.trainIncremental(instances);
CRF crf2 = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf2.addFullyConnectedStatesForLabels();
// Freeze some weights, before training
for (int i = 0; i < crf2.getWeights().length; i += 2)
crf2.freezeWeights(i);
CRFTrainerByLabelLikelihood crft2 = new CRFTrainerByLabelLikelihood(
crf2);
crft2.trainIncremental(instances);
SparseVector[] w = crf2.getWeights();
double[] b = crf2.getDefaultWeights();
for (int i = 0; i < w.length; i += 2) {
assertEquals(0.0, b[i], 1e-10);
for (int loc = 0; loc < w[i].numLocations(); loc++) {
assertEquals(0.0, w[i].valueAtLocation(loc), 1e-10);
}
}
		// Check that the frozen weights give a worse likelihood
Optimizable.ByGradientValue optable1 = crft1
.getOptimizableCRF(instances);
Optimizable.ByGradientValue optable2 = crft2
.getOptimizableCRF(instances);
double val1 = optable1.getValue();
double val2 = optable2.getValue();
assertTrue(
"Error: Freezing weights does not harm log-likelihood! Full "
+ val1 + ", Frozen " + val2, val1 > val2);
}
public void testDenseTrain() {
doTestSpacePrediction(false, false, false);
}
public void testTrainStochasticGradient() {
Pipe p = makeSpacePredictionPipe();
Pipe p2 = new TestCRF2String();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new double[] { .5, .5 });
CRF crf = new CRF(p, p2);
crf.addFullyConnectedStatesForLabels();
crf.setWeightsDimensionAsIn(lists[0], false);
CRFTrainerByStochasticGradient crft = new CRFTrainerByStochasticGradient(
crf, 0.0001);
System.out.println("Training Accuracy before training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy before training = "
+ crf.averageTokenAccuracy(lists[1]));
System.out.println("Training...");
// either fixed learning rate or selected on a sample
crft.setLearningRateByLikelihood(lists[0]);
// crft.setLearningRate(0.01);
crft.train(lists[0], 100);
crf.print();
System.out.println("Training Accuracy after training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy after training = "
+ crf.averageTokenAccuracy(lists[1]));
}
public void testSumLatticeImplementations() {
Pipe p = makeSpacePredictionPipe();
Pipe p2 = new TestCRF2String();
// first do normal training for getting weights
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new double[] { .5, .5 });
CRF crf = new CRF(p, p2);
crf.addFullyConnectedStatesForLabels();
crf.setWeightsDimensionAsIn(lists[0], false);
CRFTrainerByStochasticGradient crft = new CRFTrainerByStochasticGradient(
crf, 0.0001);
System.out.println("Training Accuracy before training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy before training = "
+ crf.averageTokenAccuracy(lists[1]));
System.out.println("Training...");
// either fixed learning rate or selected on a sample
crft.setLearningRateByLikelihood(lists[0]);
// crft.setLearningRate(0.01);
crft.train(lists[0], 100);
crf.print();
System.out.println("Training Accuracy after training = "
+ crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy after training = "
+ crf.averageTokenAccuracy(lists[1]));
// now check the speeds of SumLatticeDefault vs SumLatticeScaling
long totalTimeDefault = 0, totalTimeScaling = 0;
for (int iter = 0; iter < 10000; iter++) {
for (int ii = 0; ii < lists[1].size(); ii++) {
FeatureVectorSequence input = (FeatureVectorSequence) lists[1]
.get(ii).getData();
totalTimeDefault -= System.currentTimeMillis();
SumLattice defaultLattice = new SumLatticeDefault(crf, input,
true);
totalTimeDefault += System.currentTimeMillis();
totalTimeScaling -= System.currentTimeMillis();
SumLattice scalingLattice = new SumLatticeScaling(crf, input,
true);
totalTimeScaling += System.currentTimeMillis();
if (iter == 0) {
// check that total weight is same
assertEquals(defaultLattice.getTotalWeight(),
scalingLattice.getTotalWeight(), 0.0001);
					// check that gammas match
double[][] g1 = defaultLattice.getGammas(), g2 = scalingLattice
.getGammas();
for (int i = 0; i < g1.length; i++) {
for (int j = 0; j < g1[i].length; j++) {
assertEquals(g1[i][j], g2[i][j], 0.0001);
}
}
// check that xis match
double[][][] x1 = defaultLattice.getXis(), x2 = scalingLattice
.getXis();
for (int i = 0; i < x1.length; i++) {
for (int j = 0; j < x1[i].length; j++) {
for (int k = 0; k < x1[i][j].length; k++) {
assertEquals(x1[i][j][k], x2[i][j][k], 0.0001);
}
}
}
}
}
if ((iter + 1) % 100 == 0) {
System.out.print((iter + 1) + ". ");
System.out.flush();
}
if ((iter + 1) % 1000 == 0)
System.out.println();
}
System.out.println();
System.out.println("Time in ms (default) = " + totalTimeDefault);
System.out.println("Time in ms (scaling) = " + totalTimeScaling);
if (totalTimeScaling > totalTimeDefault)
System.out.println("SumLatticeDefault FTW!! (timeDiff="
+ (totalTimeScaling - totalTimeDefault) + " ms)");
else
System.out.println("SumLatticeScaling FTW!! (timeDiff="
+ (totalTimeDefault - totalTimeScaling) + " ms)");
}
public void testSerialization() {
doTestSpacePrediction(false, true, true);
}
public void testDenseSerialization() {
doTestSpacePrediction(false, true, false);
}
public void testTokenAccuracy() {
Pipe p = makeSpacePredictionPipe();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new Random(777), new double[] {
.5, .5 });
CRF crf = new CRF(p.getDataAlphabet(), p.getTargetAlphabet());
crf.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
crft.setUseSparseWeights(true);
crft.trainIncremental(lists[0]);
TokenAccuracyEvaluator eval = new TokenAccuracyEvaluator(lists,
new String[] { "Train", "Test" });
eval.evaluateInstanceList(crft, lists[1], "Test");
assertEquals(0.9409, eval.getAccuracy("Test"), 0.001);
}
public void testPrint() {
Pipe p = new SerialPipes(new Pipe[] {
new CharSequence2TokenSequence("."), new TokenText(),
new TestCRFTokenSequenceRemoveSpaces(),
new TokenSequence2FeatureVectorSequence(),
new PrintInputAndTarget(), });
InstanceList one = new InstanceList(p);
String[] data = new String[] { "ABCDE", };
one.addThruPipe(new ArrayIterator(data));
CRF crf = new CRF(p, null);
crf.addFullyConnectedStatesForThreeQuarterLabels(one);
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
crf.setWeightsDimensionAsIn(one, false);
Optimizable mcrf = crft.getOptimizableCRF(one);
double[] params = new double[mcrf.getNumParameters()];
for (int i = 0; i < params.length; i++) {
params[i] = i;
}
mcrf.setParameters(params);
crf.print();
}
public void testCopyStatesAndWeights() {
Pipe p = new SerialPipes(new Pipe[] {
new CharSequence2TokenSequence("."), new TokenText(),
new TestCRFTokenSequenceRemoveSpaces(),
new TokenSequence2FeatureVectorSequence(),
new PrintInputAndTarget(), });
InstanceList one = new InstanceList(p);
String[] data = new String[] { "ABCDE", };
one.addThruPipe(new ArrayIterator(data));
CRF crf = new CRF(p, null);
crf.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
crf.setWeightsDimensionAsIn(one, false);
Optimizable.ByGradientValue mcrf = crft.getOptimizableCRF(one);
double[] params = new double[mcrf.getNumParameters()];
for (int i = 0; i < params.length; i++) {
params[i] = i;
}
mcrf.setParameters(params);
StringWriter out = new StringWriter();
crf.print(new PrintWriter(out, true));
System.out.println("------------- CRF1 -------------");
crf.print();
// Make a copy of this CRF
CRF crf2 = new CRF(crf);
StringWriter out2 = new StringWriter();
crf2.print(new PrintWriter(out2, true));
System.out.println("------------- CRF2 -------------");
crf2.print();
assertEquals(out.toString(), out2.toString());
double val1 = mcrf.getValue();
CRFTrainerByLabelLikelihood crft2 = new CRFTrainerByLabelLikelihood(
crf2);
double val2 = crft2.getOptimizableCRF(one).getValue();
assertEquals(val1, val2, 1e-5);
}
static String toy = "A a\nB b\nC c\nD d\nB b\nC c\n";
public void testStartState() {
Pipe p = new SerialPipes(new Pipe[] {
new LineGroupString2TokenSequence(),
new TokenSequenceMatchDataAndTarget(Pattern
.compile("^(\\S+) (.*)"), 2, 1),
new TokenSequenceParseFeatureString(false), new TokenText(),
new TokenSequence2FeatureVectorSequence(true, false),
new Target2LabelSequence(), new PrintInputAndTarget(), });
InstanceList data = new InstanceList(p);
data.addThruPipe(new LineGroupIterator(new StringReader(toy), Pattern
.compile("\n"), true));
CRF crf = new CRF(p, null);
crf.print();
crf.addStatesForLabelsConnectedAsIn(data);
crf.addStartState();
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
Optimizable.ByGradientValue maxable = crft.getOptimizableCRF(data);
assertEquals(-1.3862, maxable.getValue(), 1e-4);
crf = new CRF(p, null);
crf
.addOrderNStates(data, new int[] { 1 }, null, "A", null, null,
false);
crf.print();
crft = new CRFTrainerByLabelLikelihood(crf);
maxable = crft.getOptimizableCRF(data);
assertEquals(-3.09104245335831, maxable.getValue(), 1e-4);
}
// Tests that setWeightsDimensionDensely respects featureSelections
public void testDenseFeatureSelection() {
Pipe p = makeSpacePredictionPipe();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
		// Test that dense observation weights aren't added for
// "default-feature" edges.
CRF crf1 = new CRF(p, null);
crf1.addOrderNStates(instances, new int[] { 0 }, null, "start", null,
null, true);
CRFTrainerByLabelLikelihood crft1 = new CRFTrainerByLabelLikelihood(
crf1);
crft1.setUseSparseWeights(false);
crft1.train(instances, 1); // Set weights dimension
int nParams1 = crft1.getOptimizableCRF(instances).getNumParameters();
CRF crf2 = new CRF(p, null);
crf2.addOrderNStates(instances, new int[] { 0, 1 }, new boolean[] {
false, true }, "start", null, null, true);
CRFTrainerByLabelLikelihood crft2 = new CRFTrainerByLabelLikelihood(
crf2);
crft2.setUseSparseWeights(false);
crft2.train(instances, 1); // Set weights dimension
int nParams2 = crft2.getOptimizableCRF(instances).getNumParameters();
assertEquals(nParams2, nParams1 + 4);
}
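	// Consistency check between gammas and xis: for each position and state, the gamma
	// (state posterior) should equal the sum of the xis (transition posteriors) over all
	// outgoing transitions from that state.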
public void testXis() {
Pipe p = makeSpacePredictionPipe();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
CRF crf1 = new CRF(p, null);
crf1.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft1 = new CRFTrainerByLabelLikelihood(
crf1);
crft1.train(instances, 10); // Let's get some parameters
Instance inst = instances.get(0);
Sequence input = (Sequence) inst.getData();
SumLatticeDefault lattice = new SumLatticeDefault(crf1, input,
(Sequence) inst.getTarget(), null, true);
for (int ip = 0; ip < lattice.length() - 1; ip++) {
for (int i = 0; i < crf1.numStates(); i++) {
Transducer.State state = crf1.getState(i);
Transducer.TransitionIterator it = state.transitionIterator(
input, ip);
double gamma = lattice.getGammaProbability(ip, state);
double xiSum = 0;
while (it.hasNext()) {
Transducer.State dest = it.nextState();
double xi = lattice.getXiProbability(ip, state, dest);
xiSum += xi;
}
assertEquals(gamma, xiSum, 1e-5);
}
}
}
public static Test suite() {
return new TestSuite(TestCRF.class);
}
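	// Trains a CRF, confirms the "notstart" state appears in the decoded output, then
	// attaches an all-negative-infinity weight set to that state's transitions via
	// addWeight and confirms "notstart" no longer appears on the best path.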
public void testStateAddWeights() {
		Pipe p = makeSpacePredictionPipe(); // This used to be MEMM.makeSpacePredictionPipe(), but I don't know why -akm 12/2007
		InstanceList training = new InstanceList(p);
		training.addThruPipe(new ArrayIterator(data)); // This used to be MEMM.data, but I don't know why -akm 12/2007
CRF crf = new CRF(p, null);
crf.addFullyConnectedStatesForLabels();
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood(crf);
crft.trainIncremental(training);
// Check that the notstart state is used at test time
Sequence input = (Sequence) training.get(0).getData();
Sequence output = new MaxLatticeDefault(crf, input)
.bestOutputSequence();
boolean notstartFound = false;
for (int i = 0; i < output.size(); i++) {
if (output.get(i).toString().equals("notstart")) {
notstartFound = true;
}
}
System.err.println(output.toString());
assertTrue(notstartFound);
// Now add -infinite weight onto a transition, and make sure that it's
// honored.
CRF.State state = crf.getState("notstart");
int widx = crf.getWeightsIndex("BadBad");
int numFeatures = crf.getInputAlphabet().size();
SparseVector w = new SparseVector(new double[numFeatures]);
w.setAll(Double.NEGATIVE_INFINITY);
crf.setWeights(widx, w);
state.addWeight(0, "BadBad");
state.addWeight(1, "BadBad");
// Verify that this effectively prevents the notstart state from being
// used
output = new MaxLatticeDefault(crf, input).bestOutputSequence();
notstartFound = false;
for (int i = 0; i < output.size() - 1; i++) {
if (output.get(i).toString().equals("notstart")) {
notstartFound = true;
}
}
assertTrue(!notstartFound);
}
private static String oldCrfFile = "test/edu/umass/cs/mallet/base/fst/crf.cnl03.ser.gz";
private static String testString = "John NNP B-NP O\nDoe NNP I-NP O\nsaid VBZ B-VP O\nhi NN B-NP O\n";
public void skiptestOldCrf() {
CRF crf = (CRF) FileUtils.readObject(new File(oldCrfFile));
Instance inst = crf.getInputPipe().instanceFrom(
new Instance(testString, null, null, null));
Sequence output = crf.transduce((Sequence) inst.getData());
String std = output.toString();
assertEquals(" B-PER I-PER O O", std);
}
public static void main(String[] args) {
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite();
for (int i = 0; i < args.length; i++) {
theSuite.addTest(new TestCRF(args[i]));
}
} else {
theSuite = (TestSuite) suite();
}
junit.textui.TestRunner.run(theSuite);
}
}
| 43,750 | 38.063393 | 771 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/tests/TestSumNegLogProb2.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.tests;
import junit.framework.*;
public class TestSumNegLogProb2 extends TestCase
{
public TestSumNegLogProb2 (String name)
{
super (name);
}
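	// Negative-log-space addition: given a = -log(p) and b = -log(q), returns -log(p + q).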
private double sumNegLogProb (double a, double b) {
return - Math.log (Math.exp(-a) + Math.exp(-b));
}
public void testSum (double a, double b)
{
double al = - Math.log (a);
double bl = - Math.log (b);
double abl = sumNegLogProb (al, bl);
double ab = Math.exp (-abl);
System.out.println (" " + a +" + "+ b +" = "+ab);
System.out.println (">" + al +" + "+ bl +" = "+abl);
assertTrue (Math.abs (ab - (a+b)) < 0.001);
}
public void testTwo ()
{
testSum (.5, .5);
testSum (.9, .1);
testSum (.99, .01);
testSum (.99999, .00001);
testSum (.00001, 0.00001);
testSum (.00000001, 0.00001);
testSum (.0000000000001, 0.00001);
}
public static Test suite ()
{
return new TestSuite (TestSumNegLogProb2.class);
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 1,542 | 23.492063 | 91 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/tests/TestMEMM.java
|
package cc.mallet.fst.tests;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import cc.mallet.types.Alphabet;
import cc.mallet.types.FeatureSequence;
import cc.mallet.types.FeatureVector;
import cc.mallet.types.FeatureVectorSequence;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import cc.mallet.types.Label;
import cc.mallet.types.LabelAlphabet;
import cc.mallet.types.LabelSequence;
import cc.mallet.types.MatrixOps;
import cc.mallet.types.Sequence;
import cc.mallet.types.tests.TestSerializable;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.PrintInputAndTarget;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureVectorSequence;
import cc.mallet.pipe.TokenSequenceLowercase;
import cc.mallet.pipe.iterator.ArrayIterator;
import cc.mallet.pipe.tsf.OffsetConjunctions;
import cc.mallet.pipe.tsf.TokenText;
import cc.mallet.fst.MEMM;
import cc.mallet.fst.MEMMTrainer;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.optimize.Optimizable;
import cc.mallet.optimize.tests.TestOptimizable;
import cc.mallet.extract.StringSpan;
import cc.mallet.extract.StringTokenization;
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
* Tests for MEMM training.
*
* @author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
// gsc (08/25/08): made changes to all tests after removing the option for
// useSparseWeights from MEMMTrainer; now the user has to set the weights manually.
// irrespective of above changes, two tests fail (testSpaceMaximizable, testSpaceSerializable)
public class TestMEMM extends TestCase {
public TestMEMM (String name)
{
super(name);
}
public static final String[] data = new String[]{
"Free software is a matter of the users' freedom to run, copy, distribute, study, change and improve the software. More precisely, it refers to four kinds of freedom, for the users of the software.",
"The freedom to run the program, for any purpose.",
"The freedom to study how the program works, and adapt it to your needs.",
"The freedom to redistribute copies so you can help your neighbor.",
"The freedom to improve the program, and release your improvements to the public, so that the whole community benefits.",
"A program is free software if users have all of these freedoms. Thus, you should be free to redistribute copies, either with or without modifications, either gratis or charging a fee for distribution, to anyone anywhere. Being free to do these things means (among other things) that you do not have to ask or pay for permission.",
"You should also have the freedom to make modifications and use them privately in your own work or play, without even mentioning that they exist. If you do publish your changes, you should not be required to notify anyone in particular, or in any particular way.",
"In order for the freedoms to make changes, and to publish improved versions, to be meaningful, you must have access to the source code of the program. Therefore, accessibility of source code is a necessary condition for free software.",
"Finally, note that criteria such as those stated in this free software definition require careful thought for their interpretation. To decide whether a specific software license qualifies as a free software license, we judge it based on these criteria to determine whether it fits their spirit as well as the precise words. If a license includes unconscionable restrictions, we reject it, even if we did not anticipate the issue in these criteria. Sometimes a license requirement raises an issue that calls for extensive thought, including discussions with a lawyer, before we can decide if the requirement is acceptable. When we reach a conclusion about a new issue, we often update these criteria to make it easier to see why certain licenses do or don't qualify.",
"In order for these freedoms to be real, they must be irrevocable as long as you do nothing wrong; if the developer of the software has the power to revoke the license, without your doing anything to give cause, the software is not free.",
"However, certain kinds of rules about the manner of distributing free software are acceptable, when they don't conflict with the central freedoms. For example, copyleft (very simply stated) is the rule that when redistributing the program, you cannot add restrictions to deny other people the central freedoms. This rule does not conflict with the central freedoms; rather it protects them.",
"Thus, you may have paid money to get copies of free software, or you may have obtained copies at no charge. But regardless of how you got your copies, you always have the freedom to copy and change the software, even to sell copies.",
"Rules about how to package a modified version are acceptable, if they don't effectively block your freedom to release modified versions. Rules that ``if you make the program available in this way, you must make it available in that way also'' can be acceptable too, on the same condition. (Note that such a rule still leaves you the choice of whether to publish the program or not.) It is also acceptable for the license to require that, if you have distributed a modified version and a previous developer asks for a copy of it, you must send one.",
"Sometimes government export control regulations and trade sanctions can constrain your freedom to distribute copies of programs internationally. Software developers do not have the power to eliminate or override these restrictions, but what they can and must do is refuse to impose them as conditions of use of the program. In this way, the restrictions will not affect activities and people outside the jurisdictions of these governments.",
"Finally, note that criteria such as those stated in this free software definition require careful thought for their interpretation. To decide whether a specific software license qualifies as a free software license, we judge it based on these criteria to determine whether it fits their spirit as well as the precise words. If a license includes unconscionable restrictions, we reject it, even if we did not anticipate the issue in these criteria. Sometimes a license requirement raises an issue that calls for extensive thought, including discussions with a lawyer, before we can decide if the requirement is acceptable. When we reach a conclusion about a new issue, we often update these criteria to make it easier to see why certain licenses do or don't qualify.",
"The GNU Project was launched in 1984 to develop a complete Unix-like operating system which is free software: the GNU system."
};
public void testGetSetParameters()
{
int inputVocabSize = 100;
int numStates = 5;
Alphabet inputAlphabet = new Alphabet();
for (int i = 0; i < inputVocabSize; i++)
inputAlphabet.lookupIndex("feature" + i);
Alphabet outputAlphabet = new Alphabet();
MEMM memm = new MEMM (inputAlphabet, outputAlphabet);
String[] stateNames = new String[numStates];
for (int i = 0; i < numStates; i++)
stateNames[i] = "state" + i;
memm.addFullyConnectedStates(stateNames);
MEMMTrainer memmt = new MEMMTrainer (memm);
MEMMTrainer.MEMMOptimizableByLabelLikelihood omemm = memmt.getOptimizableMEMM (new InstanceList(null));
TestOptimizable.testGetSetParameters(omemm);
}
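	// Runs TestOptimizable's value-and-gradient consistency check on the MEMM's
	// label-likelihood objective for the space-prediction task (tested at the current
	// parameters and at parameters perturbed toward the gradient).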
public void testSpaceMaximizable ()
{
Pipe p = makeSpacePredictionPipe ();
InstanceList training = new InstanceList (p);
// String[] data = { TestMEMM.data[0], }; // TestMEMM.data[1], TestMEMM.data[2], TestMEMM.data[3], };
// String[] data = { "ab" };
training.addThruPipe (new ArrayIterator (data));
// CRF4 memm = new CRF4 (p, null);
MEMM memm = new MEMM (p, null);
memm.addFullyConnectedStatesForLabels ();
memm.addStartState();
memm.setWeightsDimensionAsIn(training);
MEMMTrainer memmt = new MEMMTrainer (memm);
// memm.gatherTrainingSets (training); // ANNOYING: Need to set up per-instance training sets
memmt.train (training, 1); // Set weights dimension, gathers training sets, etc.
// memm.print();
// memm.printGradient = true;
// memm.printInstanceLists();
// memm.setGaussianPriorVariance (Double.POSITIVE_INFINITY);
Optimizable.ByGradientValue mcrf = memmt.getOptimizableMEMM(training);
TestOptimizable.setNumComponents (150);
TestOptimizable.testValueAndGradient (mcrf);
}
public void testSpaceSerializable () throws IOException, ClassNotFoundException
{
Pipe p = makeSpacePredictionPipe ();
InstanceList training = new InstanceList (p);
training.addThruPipe (new ArrayIterator (data));
MEMM memm = new MEMM (p, null);
memm.addFullyConnectedStatesForLabels ();
memm.addStartState();
memm.setWeightsDimensionAsIn(training);
MEMMTrainer memmt = new MEMMTrainer (memm);
memmt.train (training, 10);
MEMM memm2 = (MEMM) TestSerializable.cloneViaSerialization (memm);
Optimizable.ByGradientValue mcrf1 = memmt.getOptimizableMEMM(training);
double val1 = mcrf1.getValue ();
Optimizable.ByGradientValue mcrf2 = memmt.getOptimizableMEMM(training);
double val2 = mcrf2.getValue ();
assertEquals (val1, val2, 1e-5);
}
// Should print at end:
// parameters 4 4 3: unconstrainedCost=-2912.0 constrainedCost=-428.0 minCost=35770.0 minGrad=520.0
public void disabledtestCost(int useSave)
{
int inputVocabSize = 4;
int numStates = 5;
// Create a file to store the CRF
File f = new File("TestObject.obj");
File f2 = new File("TestObject2.obj");
Alphabet inputAlphabet = new Alphabet();
for (int i = 0; i < inputVocabSize; i++)
inputAlphabet.lookupIndex("feature" + i);
Alphabet outputAlphabet = new Alphabet();
// Store the dictionary
if (outputAlphabet == null) {
System.err.println("Output dictionary null.");
}
MEMM crf = new MEMM(inputAlphabet, outputAlphabet);
MEMMTrainer memmt = new MEMMTrainer (crf);
String[] stateNames = new String[numStates];
for (int i = 0; i < numStates; i++)
stateNames[i] = "state" + i;
MEMM saveCRF = crf;
//inputAlphabet = (Feature.Alphabet) crf.getInputAlphabet();
FeatureVectorSequence fvs = new FeatureVectorSequence(new FeatureVector[]{
new FeatureVector(crf.getInputAlphabet(), new int[]{1, 2, 3}, new double[]{1, 1, 1}),
new FeatureVector(crf.getInputAlphabet(), new int[]{1, 2, 3}, new double[]{1, 1, 1}),
new FeatureVector(crf.getInputAlphabet(), new int[]{1, 2, 3}, new double[]{1, 1, 1}),
new FeatureVector(crf.getInputAlphabet(), new int[]{1, 2, 3}, new double[]{1, 1, 1}),
});
FeatureSequence ss = new FeatureSequence(crf.getOutputAlphabet(), new int[]{0, 1, 2, 3});
InstanceList ilist = new InstanceList(null);
ilist.add(fvs, ss, null, null);
crf.addFullyConnectedStates(stateNames);
try {
ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f));
oos.writeObject(crf);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file: " + e);
}
System.err.println("Wrote out CRF");
// And read it back in
crf = null;
try {
ObjectInputStream ois = new ObjectInputStream(new FileInputStream(f));
crf = (MEMM) ois.readObject();
ois.close();
} catch (IOException e) {
System.err.println("Exception reading file: " + e);
} catch (ClassNotFoundException cnfe) {
System.err.println("Cound not find class reading in object: " + cnfe);
}
System.err.println("Read in CRF.");
try {
ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f2));
oos.writeObject(crf);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file: " + e);
}
System.err.println("Wrote out CRF");
if (useSave == 1) {
crf = saveCRF;
}
// MEMM.OptimizableCRF mcrf = crf.getMaximizableCRF(ilist);
Optimizable.ByGradientValue mcrf = memmt.getOptimizableMEMM(ilist);
double unconstrainedCost = new SumLatticeDefault (crf, fvs).getTotalWeight();
double constrainedCost = new SumLatticeDefault (crf, fvs, ss).getTotalWeight();
double minimizableCost = 0, minimizableGradientNorm = 0;
double[] gradient = new double [mcrf.getNumParameters()];
//System.out.println ("unconstrainedCost="+unconstrainedCost+" constrainedCost="+constrainedCost);
for (int i = 0; i < numStates; i++)
for (int j = 0; j < numStates; j++)
for (int k = 0; k < inputVocabSize; k++) {
crf.setParameter(i, j, k, (k + i + j) * (k * i + i * j));
unconstrainedCost = new SumLatticeDefault (crf, fvs).getTotalWeight();
constrainedCost = new SumLatticeDefault (crf, fvs, ss).getTotalWeight();
minimizableCost = mcrf.getValue ();
mcrf.getValueGradient (gradient);
minimizableGradientNorm = MatrixOps.oneNorm (gradient);
System.out.println("parameters " + i + " " + j + " " + k
+ ": unconstrainedCost=" + unconstrainedCost
+ " constrainedCost=" + constrainedCost
+ " minCost=" + minimizableCost
+ " minGrad=" + minimizableGradientNorm);
}
assertEquals (true, Math.abs (minimizableCost - 35770) < 0.001);
assertEquals (true, Math.abs (minimizableGradientNorm - 520) < 0.001);
}
public void testIncrement()
{
}
public static class TestMEMMTokenSequenceRemoveSpaces extends Pipe implements Serializable {
public TestMEMMTokenSequenceRemoveSpaces()
{
super(null, new LabelAlphabet());
}
public Instance pipe(Instance carrier)
{
StringTokenization ts = (StringTokenization) carrier.getData();
StringTokenization newTs = new StringTokenization((CharSequence) ts.getDocument ());
final LabelAlphabet dict = (LabelAlphabet) getTargetAlphabet();
LabelSequence labelSeq = new LabelSequence(dict);
Label start = dict.lookupLabel ("start");
Label notstart = dict.lookupLabel ("notstart");
boolean lastWasSpace = true;
StringBuffer sb = new StringBuffer();
for (int i = 0; i < ts.size(); i++) {
StringSpan t = (StringSpan) ts.getSpan(i);
if (t.getText().equals(" "))
lastWasSpace = true;
else {
sb.append(t.getText());
newTs.add(t);
labelSeq.add(lastWasSpace ? "start" : "notstart");
lastWasSpace = false;
}
}
if (isTargetProcessing())
carrier.setTarget(labelSeq);
carrier.setData(newTs);
carrier.setSource(sb.toString());
return carrier;
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException
{
int version = in.readInt();
}
}
public class TestMEMM2String extends Pipe implements Serializable {
public TestMEMM2String()
{
super();
}
public Instance pipe(Instance carrier)
{
StringBuffer sb = new StringBuffer();
String source = (String) carrier.getSource();
Sequence as = (Sequence) carrier.getTarget();
//int startLabelIndex = as.getAlphabet().lookupIndex("start");
for (int i = 0; i < source.length(); i++) {
System.out.println("target[" + i + "]=" + as.get(i).toString());
if (as.get(i).toString().equals("start") && i != 0)
sb.append(' ');
sb.append(source.charAt(i));
}
carrier.setSource(sb.toString());
System.out.println("carrier.getSource() = " + carrier.getSource());
return carrier;
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject(ObjectOutputStream out) throws IOException
{
out.writeInt(CURRENT_SERIAL_VERSION);
}
private void readObject(ObjectInputStream in) throws IOException
{
int version = in.readInt();
}
}
public void doTestSpacePrediction(boolean testValueAndGradient)
{
Pipe p = makeSpacePredictionPipe ();
Pipe p2 = new TestMEMM2String();
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new double[]{.5, .5});
MEMM memm = new MEMM(p, p2);
memm.addFullyConnectedStatesForLabels();
memm.setWeightsDimensionAsIn(lists[0]);
MEMMTrainer memmt = new MEMMTrainer (memm);
if (testValueAndGradient) {
Optimizable.ByGradientValue minable = memmt.getOptimizableMEMM(lists[0]);
TestOptimizable.testValueAndGradient(minable);
} else {
System.out.println("Training Accuracy before training = " + memm.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy before training = " + memm.averageTokenAccuracy(lists[1]));
System.out.println("Training...");
memmt.train(lists[0], 1);
System.out.println("Training Accuracy after training = " + memm.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy after training = " + memm.averageTokenAccuracy(lists[1]));
System.out.println("Training results:");
for (int i = 0; i < lists[0].size(); i++) {
Instance inst = lists[0].get(i);
Sequence input = (Sequence) inst.getData ();
Sequence output = memm.transduce (input);
System.out.println (output);
}
System.out.println ("Testing results:");
for (int i = 0; i < lists[1].size(); i++) {
Instance inst = lists[1].get(i);
Sequence input = (Sequence) inst.getData ();
Sequence output = memm.transduce (input);
System.out.println (output);
}
}
}
public void doTestSpacePrediction(boolean testValueAndGradient,
boolean useSaved,
boolean useSparseWeights)
{
Pipe p = makeSpacePredictionPipe ();
MEMM savedCRF;
File f = new File("TestObject.obj");
InstanceList instances = new InstanceList(p);
instances.addThruPipe(new ArrayIterator(data));
InstanceList[] lists = instances.split(new double[]{.5, .5});
MEMM crf = new MEMM(p.getDataAlphabet(), p.getTargetAlphabet());
crf.addFullyConnectedStatesForLabels();
if (useSparseWeights)
crf.setWeightsDimensionAsIn(lists[0]);
else
crf.setWeightsDimensionDensely();
MEMMTrainer memmt = new MEMMTrainer (crf);
// memmt.setUseSparseWeights (useSparseWeights);
if (testValueAndGradient) {
Optimizable.ByGradientValue minable = memmt.getOptimizableMEMM(lists[0]);
TestOptimizable.testValueAndGradient(minable);
} else {
System.out.println("Training Accuracy before training = " + crf.averageTokenAccuracy(lists[0]));
System.out.println("Testing Accuracy before training = " + crf.averageTokenAccuracy(lists[1]));
savedCRF = crf;
System.out.println("Training serialized crf.");
memmt.train(lists[0], 100);
double preTrainAcc = crf.averageTokenAccuracy(lists[0]);
double preTestAcc = crf.averageTokenAccuracy(lists[1]);
System.out.println("Training Accuracy after training = " + preTrainAcc);
System.out.println("Testing Accuracy after training = " + preTestAcc);
try {
ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f));
oos.writeObject(crf);
oos.close();
} catch (IOException e) {
System.err.println("Exception writing file: " + e);
}
System.err.println("Wrote out CRF");
// And read it back in
if (useSaved) {
crf = null;
try {
ObjectInputStream ois = new ObjectInputStream(new FileInputStream(f));
crf = (MEMM) ois.readObject();
ois.close();
} catch (IOException e) {
System.err.println("Exception reading file: " + e);
} catch (ClassNotFoundException cnfe) {
System.err.println("Cound not find class reading in object: " + cnfe);
}
System.err.println("Read in CRF.");
crf = savedCRF;
double postTrainAcc = crf.averageTokenAccuracy(lists[0]);
double postTestAcc = crf.averageTokenAccuracy(lists[1]);
System.out.println("Training Accuracy after saving = " + postTrainAcc);
System.out.println("Testing Accuracy after saving = " + postTestAcc);
assertEquals(postTrainAcc, preTrainAcc, 0.0001);
assertEquals(postTestAcc, preTestAcc, 0.0001);
}
}
}
public static Pipe makeSpacePredictionPipe ()
{
Pipe p = new SerialPipes(new Pipe[]{
new CharSequence2TokenSequence("."),
new TokenSequenceLowercase(),
new TestMEMMTokenSequenceRemoveSpaces(),
new TokenText(),
new OffsetConjunctions(true,
new int[][]{//{0}, /*{1},{-1,0},{0,1}, */
{1}, {-1, 0}, {0, 1},
// {-2, -1, 0}, {0, 1, 2}, {-3, -2, -1}, {1, 2, 3},
//{-2,-1}, {-1,0}, {0,1}, {1,2},
//{-3,-2,-1}, {-2,-1,0}, {-1,0,1}, {0,1,2}, {1,2,3},
}),
// new PrintInputAndTarget(),
new TokenSequence2FeatureVectorSequence()
});
return p;
}
public void disabledtestAddOrderNStates ()
{
Pipe p = makeSpacePredictionPipe ();
InstanceList instances = new InstanceList (p);
instances.addThruPipe (new ArrayIterator(data));
InstanceList[] lists = instances.split (new java.util.Random (678), new double[]{.5, .5});
// Compare 3 CRFs trained with addOrderNStates, and make sure
// that having more features leads to a higher likelihood
MEMM crf1 = new MEMM(p.getDataAlphabet(), p.getTargetAlphabet());
crf1.addOrderNStates (lists [0],
new int[] { 1, },
new boolean[] { false, },
"START",
null,
null,
false);
crf1.setWeightsDimensionAsIn(lists[0]);
MEMMTrainer memmt1 = new MEMMTrainer (crf1);
memmt1.train(lists [0]);
MEMM crf2 = new MEMM(p.getDataAlphabet(), p.getTargetAlphabet());
crf2.addOrderNStates (lists [0],
new int[] { 1, 2, },
new boolean[] { false, true },
"START",
null,
null,
false);
crf2.setWeightsDimensionAsIn(lists[0]);
MEMMTrainer memmt2 = new MEMMTrainer (crf2);
memmt2.train(lists [0]);
MEMM crf3 = new MEMM(p.getDataAlphabet(), p.getTargetAlphabet());
crf3.addOrderNStates (lists [0],
new int[] { 1, 2, },
new boolean[] { false, false },
"START",
null,
null,
false);
crf3.setWeightsDimensionAsIn(lists[0]);
MEMMTrainer memmt3 = new MEMMTrainer (crf3);
memmt3.train(lists [0]);
// Prevent cached values
double lik1 = getLikelihood (memmt1, lists[0]);
double lik2 = getLikelihood (memmt2, lists[0]);
double lik3 = getLikelihood (memmt3, lists[0]);
System.out.println("CRF1 likelihood "+lik1);
assertTrue ("Final zero-order likelihood <"+lik1+"> greater than first-order <"+lik2+">",
lik1 < lik2);
assertTrue ("Final defaults-only likelihood <"+lik2+"> greater than full first-order <"+lik3+">",
lik2 < lik3);
assertEquals (-167.335971702, lik1, 0.0001);
assertEquals (-166.212235389, lik2, 0.0001);
assertEquals ( -90.386005741, lik3, 0.0001);
}
double getLikelihood (MEMMTrainer memmt, InstanceList data) {
Optimizable.ByGradientValue mcrf = memmt.getOptimizableMEMM(data);
// Do this elaborate thing so that crf.cachedValueStale is forced true
double[] params = new double [mcrf.getNumParameters()];
mcrf.getParameters (params);
mcrf.setParameters (params);
return mcrf.getValue ();
}
public void disabledtestValueGradient()
{
doTestSpacePrediction(true);
}
public void disabledtestTrain()
{
doTestSpacePrediction(false);
}
public void disabledtestDenseTrain ()
{
doTestSpacePrediction (false, false, false);
}
public void disabledtestSerialization()
{
doTestSpacePrediction(false, true, true);
}
public void disabledtestDenseSerialization ()
{
doTestSpacePrediction(false, true, false);
}
public void disabledtestPrint ()
{
Pipe p = new SerialPipes (new Pipe[] {
new CharSequence2TokenSequence("."),
new TokenText(),
new TestMEMM.TestMEMMTokenSequenceRemoveSpaces(),
new TokenSequence2FeatureVectorSequence(),
new PrintInputAndTarget(),
});
InstanceList one = new InstanceList (p);
String[] data = new String[] { "ABCDE", };
one.addThruPipe (new ArrayIterator (data));
MEMM crf = new MEMM (p, null);
crf.addFullyConnectedStatesForLabels();
crf.setWeightsDimensionAsIn (one);
MEMMTrainer memmt = new MEMMTrainer (crf);
MEMMTrainer.MEMMOptimizableByLabelLikelihood mcrf = memmt.getOptimizableMEMM(one);
double[] params = new double[mcrf.getNumParameters()];
for (int i = 0; i < params.length; i++) {
params [i] = i;
}
mcrf.setParameters (params);
crf.print ();
}
public static Test suite()
{
return new TestSuite(TestMEMM.class);
}
public static void main(String[] args)
{
TestMEMM tm = new TestMEMM ("");
tm.doTestSpacePrediction (true);
return;
/*
TestSuite theSuite;
if (args.length > 0) {
theSuite = new TestSuite();
for (int i = 0; i < args.length; i++) {
theSuite.addTest (new TestMEMM (args [i]));
}
} else {
theSuite = (TestSuite) suite();
}
junit.textui.TestRunner.run (theSuite);
*/
}
}
| 26,266 | 39.598145 | 771 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/tests/TestFeatureTransducer.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.tests;
import java.util.Iterator;
import junit.framework.Test;
import junit.framework.TestCase;
import junit.framework.TestSuite;
import cc.mallet.types.Alphabet;
import cc.mallet.types.ArrayListSequence;
import cc.mallet.types.Multinomial;
import cc.mallet.fst.FeatureTransducer;
import cc.mallet.fst.MaxLatticeDefault;
import cc.mallet.fst.SumLatticeDefault;
import cc.mallet.fst.Transducer;
public class TestFeatureTransducer extends TestCase
{
public TestFeatureTransducer (String name)
{
super (name);
}
FeatureTransducer transducer;
ArrayListSequence seq;
double seqWeight;
public void setUp ()
{
System.out.println ("Setup");
transducer = new FeatureTransducer ();
FeatureTransducer t = transducer;
t.addState ("0", 0, Transducer.IMPOSSIBLE_WEIGHT,
new String[] {"a", "b"},
new String[] {"x", "y"},
new double[] {44, 66},
new String[] {"0", "1"});
t.addState ("1", Transducer.IMPOSSIBLE_WEIGHT, Transducer.IMPOSSIBLE_WEIGHT,
new String[] {"c", "d", "d"},
new String[] {"x", "y", "z"},
new double[] {44, 11, 66},
new String[] {"1", "1", "2"});
t.addState ("2", Transducer.IMPOSSIBLE_WEIGHT, 8,
new String[] {"e"},
new String[] {"z"},
new double[] {11},
new String[] {"2"});
seq = new ArrayListSequence ();
Alphabet dict = transducer.getInputAlphabet ();
seq.add ("a");
seq.add ("a");
seq.add ("b");
seq.add ("c");
seq.add ("d");
seq.add ("e");
seqWeight = 0 + 44 + 44 + 66 + 44 + 66 + 11 + 8;
}
public void testInitialState ()
{
Iterator iter = transducer.initialStateIterator ();
int count = 0;
FeatureTransducer.State state;
while (iter.hasNext ()) {
count++;
state = (FeatureTransducer.State) iter.next();
assertTrue (state.getName().equals ("0"));
}
assertTrue (count == 1);
}
public void testForwardBackward ()
{
SumLatticeDefault lattice = new SumLatticeDefault (transducer, seq);
System.out.println ("weight= "+lattice.getTotalWeight());
assertTrue (lattice.getTotalWeight() == seqWeight);
}
public void testViterbi ()
{
double weight = new MaxLatticeDefault (transducer, seq).bestWeight();
System.out.println ("weight = "+weight);
assertTrue (weight == seqWeight);
}
public void testEstimate ()
{
transducer.setTrainable (true);
SumLatticeDefault lattice = new SumLatticeDefault (transducer, seq); // used to have third argument: true
double oldWeight = lattice.getTotalWeight ();
transducer.estimate ();
lattice = new SumLatticeDefault (transducer, seq); // used to have third argument: false
double newWeight = lattice.getTotalWeight ();
System.out.println ("oldWeight="+oldWeight+" newWeight="+newWeight);
assertTrue (newWeight < oldWeight);
}
public void testIncrement ()
{
transducer.setTrainable (true);
SumLatticeDefault lattice = new SumLatticeDefault (transducer, seq); // used to have third argument: true
double oldWeight = lattice.getTotalWeight ();
System.out.println ("State 0 transition estimator");
Multinomial.Estimator est
= ((FeatureTransducer.State)transducer.getState(0)).getTransitionEstimator();
est.print();
assertTrue (est.getCount(0) == 2.0);
assertTrue (est.getCount(1) == 1.0);
}
public static Test suite ()
{
return new TestSuite (TestFeatureTransducer.class);
}
public static void main (String[] args)
{
junit.textui.TestRunner.run (suite());
}
}
| 3,985 | 27.676259 | 107 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/ViterbiRatioConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence by the ratio of the
probabilities of the first and second best Viterbi paths.
*/
public class ViterbiRatioConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
private static Logger logger = MalletLogger.getLogger(
ViterbiRatioConfidenceEstimator.class.getName());
public ViterbiRatioConfidenceEstimator (Transducer model) {
super(model);
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
SumLatticeDefault lattice = new SumLatticeDefault (model, (Sequence)instance.getData());
//ViterbiPathNBest bestViterbis = new ViterbiPathNBest (model, (Sequence)instance.getData(), 2);
//double[] costs = bestViterbis.costNBest();
MaxLatticeDefault vlat = new MaxLatticeDefault (model, (Sequence)instance.getData(), null, 2);
List<SequencePairAlignment<Object,Object>> alignments = vlat.bestOutputAlignments(2);
double cost1 = alignments.get(0).getWeight();
double cost2 = alignments.get(1).getWeight();
double latticeCost = lattice.getTotalWeight();
return (Math.exp (-cost1 + latticeCost) / Math.exp(-cost2 + latticeCost));
}
}
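// The class below is not part of the original MALLET source. It is a minimal, hedged
// usage sketch showing how this estimator might score whole sequences; the trained
// Transducer `model` and the InstanceList `test` are assumed to be supplied by the caller.
class ViterbiRatioConfidenceEstimatorExample
{
static void printSequenceConfidences (Transducer model, InstanceList test) {
ViterbiRatioConfidenceEstimator estimator = new ViterbiRatioConfidenceEstimator (model);
for (int i = 0; i < test.size(); i++) {
// startTags/inTags are not used by this estimator, so nulls are passed here
double confidence = estimator.estimateConfidenceFor (test.get(i), null, null);
System.out.println ("instance " + i + " confidence = " + confidence);
}
}
}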
| 2,009 | 35.545455 | 98 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/ViterbiConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence by the probability
of the Viterbi path normalized by the probability of the entire
lattice.
*/
public class ViterbiConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
private static Logger logger = MalletLogger.getLogger(
ViterbiConfidenceEstimator.class.getName());
public ViterbiConfidenceEstimator (Transducer model) {
super(model);
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
SumLatticeDefault lattice = new SumLatticeDefault (model, (Sequence)instance.getData());
SequencePairAlignment viterbi = new MaxLatticeDefault (model, (Sequence)instance.getData()).bestOutputAlignment();
return Math.exp (viterbi.getWeight() - lattice.getTotalWeight());
}
}
| 1,639 | 32.469388 | 118 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/MaxEntConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.*;
import cc.mallet.classify.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
/**
* Estimates the confidence of a {@link Segment} extracted by a {@link
* Transducer} using a {@link MaxEnt} classifier to classify segments
* as "correct" or "incorrect." xxx needs some interface work
*/
public class MaxEntConfidenceEstimator extends TransducerConfidenceEstimator
{
MaxEntTrainer meTrainer;
MaxEnt meClassifier;
Pipe pipe;
String correct, incorrect;
public MaxEntConfidenceEstimator (Transducer model, double gaussianVariance) {
super(model);
meTrainer = new MaxEntTrainer (gaussianVariance);
}
public MaxEntConfidenceEstimator (Transducer model) {
this (model, 10.0);
}
public MaxEnt trainClassifier (InstanceList ilist, String correct, String incorrect) {
this.meClassifier = (MaxEnt) meTrainer.train (ilist);
this.pipe = ilist.getPipe ();
this.correct = correct;
this.incorrect = incorrect;
InfoGain ig = new InfoGain (ilist);
int igl = Math.min (30, ig.numLocations());
for (int i = 0; i < igl; i++)
System.out.println ("InfoGain["+ig.getObjectAtRank(i)+"]="+ig.getValueAtRank(i));
return this.meClassifier;
}
/**
Calculates the confidence in the tagging of a {@link Segment}.
*/
public double estimateConfidenceFor (Segment segment, SumLatticeDefault cachedLattice) {
Classification c = this.meClassifier.classify (pipe.instanceFrom(new Instance (
segment, segment.getTruth(), null, null)));
return c.getLabelVector().value (this.correct);
}
}
| 2,129 | 32.809524 | 91 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/ConfidenceEvaluator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.fst.confidence;
import java.util.Vector;
import java.util.Collections;
import java.util.Comparator;
import cc.mallet.fst.*;
import cc.mallet.types.*;
public class ConfidenceEvaluator
{
static int DEFAULT_NUM_BINS = 20;
Vector confidences;
int nBins;
int numCorrect;
public ConfidenceEvaluator (Vector confidences, int nBins)
{
this.confidences = confidences;
this.nBins = nBins;
this.numCorrect = getNumCorrectEntities();
// sort confidences by score
Collections.sort (confidences, new ConfidenceComparator());
}
public ConfidenceEvaluator (Vector confidences)
{
this (confidences, DEFAULT_NUM_BINS);
}
public ConfidenceEvaluator (Segment[] segments, boolean sorted)
{
this.confidences = new Vector ();
for (int i=0; i < segments.length; i++) {
confidences.add (new EntityConfidence (segments[i].getConfidence(),
segments[i].correct(), segments[i].getInput(),
segments[i].getStart(), segments[i].getEnd()));
}
if (!sorted)
Collections.sort (confidences, new ConfidenceComparator());
this.nBins = DEFAULT_NUM_BINS;
this.numCorrect = getNumCorrectEntities ();
}
public ConfidenceEvaluator (InstanceWithConfidence[] instances, boolean sorted) {
this.confidences = new Vector ();
for (int i=0; i < instances.length; i++) {
Sequence input = (Sequence) instances[i].getInstance().getData();
confidences.add (new EntityConfidence (instances[i].getConfidence(),
instances[i].correct(), input,
0, input.size()-1));
}
if (!sorted)
Collections.sort (confidences, new ConfidenceComparator());
this.nBins = DEFAULT_NUM_BINS;
this.numCorrect = getNumCorrectEntities ();
}
public ConfidenceEvaluator (PipedInstanceWithConfidence[] instances, boolean sorted) {
this.confidences = new Vector ();
for (int i=0; i < instances.length; i++) {
confidences.add (new EntityConfidence (instances[i].getConfidence(),
instances[i].correct(), null,
0, 1));
}
if (!sorted)
Collections.sort (confidences, new ConfidenceComparator());
this.nBins = DEFAULT_NUM_BINS;
this.numCorrect = getNumCorrectEntities ();
}
/** Point-biserial correlation, for the case where one variable (here Y) is binary:
r = (bar(x1) - bar(x0)) * sqrt(p(1-p)) / sx, where
bar(x1) = mean of X when Y is 1,
bar(x0) = mean of X when Y is 0,
sx = standard deviation of X,
p = proportion of values where Y = 1.
*/
public double pointBiserialCorrelation ()
{
// here, Y = {incorrect = 0,correct = 1}, X = confidence
double x0bar = getAverageIncorrectConfidence ();
double x1bar = getAverageCorrectConfidence ();
double p = (double)this.numCorrect / size();
double sx = getConfidenceStandardDeviation ();
return (x1bar - x0bar) * Math.sqrt(p*(1-p)) / sx;
}
/**
IR Average precision measure. Analogous to ranking _correct_
documents by confidence score.
*/
public double getAveragePrecision () {
int nc = 0;
int ni = 0;
double totalPrecision = 0.0;
for (int i=confidences.size()-1; i >= 0; i--) {
EntityConfidence c = (EntityConfidence) confidences.get (i);
if (c.correct()) {
nc++;
totalPrecision += (double)nc / (nc + ni);
}
else ni++;
}
return totalPrecision / nc;
}
/**
For comparison, rank segments as badly as possible (all
"incorrect" before "correct").
*/
public double getWorstAveragePrecision () {
int ni = confidences.size() - this.numCorrect;
double totalPrecision = 0.0;
for (int nc=1; nc <= this.numCorrect; nc++) {
totalPrecision += (double) nc / (nc + ni);
}
return totalPrecision / this.numCorrect;
}
public double getConfidenceSum()
{
double sum = 0.0;
for (int i = 0; i < size(); i++)
sum += ((EntityConfidence)confidences.get(i)).confidence();
return sum;
}
public double getConfidenceMean ()
{
return getConfidenceSum() / size();
}
/** Standard deviation of confidence scores
*/
public double getConfidenceStandardDeviation ()
{
double mean = getConfidenceMean();
double sumSquaredDifference = 0.0;
for (int i = 0; i < size(); i++) {
double conf = ((EntityConfidence)confidences.get(i)).confidence();
sumSquaredDifference += ((conf - mean) * (conf - mean));
}
return Math.sqrt (sumSquaredDifference / (double)size());
}
/** Calculate Pearson's r for the correlation between confidence and
* correct, where 1 = correct and -1 = incorrect
*/
public double correlation ()
{
double xSum = 0;
double xSumOfSquares = 0;
double ySum = 0;
double ySumOfSquares = 0;
double xySum = 0; // product of x and y
for (int i = 0; i < size(); i++) {
double value = ((EntityConfidence)confidences.get(i)).correct() ? 1.0 : -1.0;
xSum += value;
xSumOfSquares += (value * value);
double conf = ((EntityConfidence)confidences.get(i)).confidence();
ySum += conf;
ySumOfSquares += (conf * conf);
xySum += value * conf;
}
double xVariance = xSumOfSquares - (xSum * xSum / size());
double yVariance = ySumOfSquares - (ySum * ySum / size());
double crossVariance = xySum - (xSum * ySum / size());
return crossVariance / Math.sqrt (xVariance * yVariance);
}
/** get accuracy at coverage for each bin of values
*/
public double[] getAccuracyCoverageValues ()
{
double [] values = new double [this.nBins];
int step = 100 / nBins;
for (int i = 0; i < values.length; i++) {
values[i] = accuracyAtCoverage (step * (double)(i+1) / 100.0);
}
return values;
}
public String accuracyCoverageValuesToString () {
String buf = "";
double [] vals = getAccuracyCoverageValues ();
int step = 100 / nBins;
for (int i=0; i < vals.length; i++) {
buf += ((step * (double)(i+1))/100.0) + "\t" + vals[i] + "\n";
}
return buf;
}
/** get accuracy at recall for each bin of values
* @param totalTrue total number of true Segments
* @return 2-d array where values[i][0] is coverage and
* values[i][1] is accuracy at position i.
*/
public double[][] getAccuracyRecallValues (int totalTrue)
{
double [][] values = new double [this.nBins][2];
int step = 100 / nBins;
for (int i = 0; i < this.nBins; i++) {
values[i] = new double[2];
double coverage = step * (double)(i+1) / 100.0;
values[i][1] = accuracyAtCoverage(coverage);
int numCorrect = numCorrectAtCoverage(coverage);
values[i][0] = (double)numCorrect / totalTrue;
}
return values;
}
public String accuracyRecallValuesToString (int totalTrue) {
String buf = "";
double [][] vals = getAccuracyRecallValues (totalTrue);
for (int i=0; i < this.nBins; i++)
buf += vals[i][0] + "\t" + vals[i][1] + "\n";
return buf;
}
public double accuracyAtCoverage (double cov)
{
assert (cov <= 1 && cov > 0);
int numPoints = (int) (Math.round ((double)size()*cov));
return ((double)numCorrectAtCoverage(cov) / numPoints);
}
public int numCorrectAtCoverage (double cov) {
assert (cov <= 1 && cov > 0);
// num accuracies to sum for this value of cov
int numPoints = (int) (Math.round ((double)size()*cov));
int numCorrect = 0;
for (int i = 0; i < numPoints; i++) {
if (((EntityConfidence)confidences.get(size() - i - 1)).correct())
numCorrect++;
}
return numCorrect;
}
public double getAverageAccuracy ()
{
int numCorrect = 0;
double totalArea= 0.0;
for(int i=confidences.size()-1; i>=0; i--){
if ( ((EntityConfidence)confidences.get(i)).correct())
numCorrect++;
totalArea += (double)numCorrect / (confidences.size() - i);
}
return totalArea / confidences.size();
}
public int numCorrect()
{
return this.numCorrect;
}
/**
number of entities correctly extracted
*/
private int getNumCorrectEntities ()
{
int sum = 0;
for (int i = 0; i < confidences.size(); i++) {
EntityConfidence ec = (EntityConfidence) confidences.get(i);
if (ec.correct()) {
sum++;
}
}
return sum;
}
/** Average confidence score for the incorrect entities
*/
public double getAverageIncorrectConfidence ()
{
double sum = 0.0;
for (int i = 0; i < confidences.size(); i++) {
EntityConfidence ec = (EntityConfidence) confidences.get(i);
if (!ec.correct()) {
sum += ec.confidence();
}
}
return sum / ((double)size() - (double) this.numCorrect);
}
/** Average confidence score for the correct entities
*/
public double getAverageCorrectConfidence ()
{
double sum = 0.0;
for (int i = 0; i < confidences.size(); i++) {
EntityConfidence ec = (EntityConfidence) confidences.get(i);
if (ec.correct()) {
sum += ec.confidence();
}
}
return sum / (double) this.numCorrect;
}
public int size()
{
return confidences.size();
}
public String toString()
{
StringBuffer toReturn = new StringBuffer();
for (int i = 0; i < size(); i++) {
toReturn.append (((EntityConfidence)confidences.get(i)).toString() + " ");
}
return toReturn.toString();
}
/** a simple class to store a confidence score and whether or not this
* labeling is correct
*/
public static class EntityConfidence
{
double confidence;
boolean correct;
String entity;
public EntityConfidence (double conf, boolean corr, String text){
this.confidence = conf;
this.correct = corr;
this.entity = text;
}
public EntityConfidence (double conf, boolean corr, Sequence input, int start, int end){
this.confidence = conf;
this.correct = corr;
StringBuffer buff = new StringBuffer();
if (input != null) {
for (int j = start; j <= end; j++){
FeatureVector fv = (FeatureVector) input.get(j);
for (int k = 0; k < fv.numLocations(); k++) {
String featureName = fv.getAlphabet().lookupObject (fv.indexAtLocation (k)).toString();
if (featureName.startsWith ("W=") && featureName.indexOf("@") == -1){
buff.append(featureName.substring (featureName.indexOf ('=')+1) + " ");
}
}
}
}
this.entity = buff.toString();
}
public double confidence () {return confidence;}
public boolean correct () {return correct;}
public String toString ()
{
StringBuffer toReturn = new StringBuffer();
toReturn.append(this.entity + " / " + this.confidence + " / "+ (this.correct ? "correct" : "incorrect") + "\n");
return toReturn.toString();
}
}
private class ConfidenceComparator implements Comparator
{
public final int compare (Object a, Object b)
{
double x = ((EntityConfidence) a).confidence();
double y = ((EntityConfidence) b).confidence();
double difference = x - y;
int toReturn = 0;
if(difference > 0)
toReturn = 1;
else if (difference < 0)
toReturn = -1;
return(toReturn);
}
}
}
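// The class below is not part of the original MALLET source: a small, self-contained
// sketch of how the evaluator might be fed hand-built (confidence, correct) pairs.
// The scores and entity strings are made-up toy values, not output of any real model.
class ConfidenceEvaluatorExample
{
public static void main (String[] args) {
Vector confidences = new Vector ();
confidences.add (new ConfidenceEvaluator.EntityConfidence (0.9, true, "Bill Clinton"));
confidences.add (new ConfidenceEvaluator.EntityConfidence (0.7, false, "New York"));
confidences.add (new ConfidenceEvaluator.EntityConfidence (0.4, true, "IBM"));
ConfidenceEvaluator evaluator = new ConfidenceEvaluator (confidences);
System.out.println ("average precision = " + evaluator.getAveragePrecision ());
System.out.println ("point-biserial correlation = " + evaluator.pointBiserialCorrelation ());
}
}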
| 11,392 | 29.220159 | 118 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/RandomSequenceConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence randomly.
*/
public class RandomSequenceConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
java.util.Random generator;
public RandomSequenceConfidenceEstimator (int seed, Transducer model) {
super(model);
generator = new Random (seed);
}
public RandomSequenceConfidenceEstimator (Transducer model) {
this (1, model);
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
return generator.nextDouble();
}
}
| 1,377 | 27.122449 | 92 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/MaxEntSequenceConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.*;
import cc.mallet.classify.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
/**
* Estimates the confidence of a {@link Sequence} extracted by a {@link
* Transducer} using a {@link MaxEnt} classifier to classify Sequences
* as "correct" or "incorrect." xxx needs some interface work.
*/
public class MaxEntSequenceConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
MaxEntTrainer meTrainer;
MaxEnt meClassifier;
Pipe pipe;
String correct, incorrect;
public MaxEntSequenceConfidenceEstimator (Transducer model, double gaussianVariance) {
super(model);
meTrainer = new MaxEntTrainer (gaussianVariance);
}
public MaxEntSequenceConfidenceEstimator (Transducer model) {
this (model, 10.0);
}
public MaxEnt getClassifier () { return this.meClassifier; }
/**
Train underlying classifier on <code>ilist</code>. Assumes ilist
has targets <code>correct</code> or <code>incorrect</code>.
@param ilist training list to build correct/incorrect classifier
@param correct "correct" label
@param incorrect "incorrect" label
*/
public MaxEnt trainClassifier (InstanceList ilist, String correct, String incorrect) {
this.meClassifier = (MaxEnt) meTrainer.train (ilist);
this.pipe = ilist.getPipe ();
this.correct = correct;
this.incorrect = incorrect;
InfoGain ig = new InfoGain (ilist);
int igl = Math.min (30, ig.numLocations());
for (int i = 0; i < igl; i++)
System.out.println ("InfoGain["+ig.getObjectAtRank(i)+"]="+ig.getValueAtRank(i));
return this.meClassifier;
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags, Object[] inTags) {
Classification c = null;
if (Alphabet.alphabetsMatch(instance, this.pipe))
c = this.meClassifier.classify (new SequenceConfidenceInstance (instance));
else
c = this.meClassifier.classify (instance);
return c.getLabelVector().value (this.correct);
}
public PipedInstanceWithConfidence[] rankPipedInstancesByConfidence (InstanceList ilist,
Object[] startTags,
Object[] continueTags) {
ArrayList confidenceList = new ArrayList ();
for (int i=0; i < ilist.size(); i++) {
Instance instance = ilist.get (i);
boolean correctInstance = ((Labeling)instance.getTarget()).getBestLabel().toString().equals (this.correct);
System.err.println ("Instance is " + (correctInstance ? "correct" : "incorrect"));
confidenceList.add (new PipedInstanceWithConfidence (instance,
estimateConfidenceFor (instance, startTags, continueTags),
correctInstance));
}
Collections.sort (confidenceList);
PipedInstanceWithConfidence[] ret = new PipedInstanceWithConfidence[1];
ret = (PipedInstanceWithConfidence[]) confidenceList.toArray (ret);
return ret;
}
}
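// The class below is not part of the original MALLET source: a hedged sketch of the
// intended train/rank cycle. It assumes the caller already has a trained Transducer
// `model`, an InstanceList `trainingList` whose targets are the labels "correct" /
// "incorrect", and an InstanceList `testList` piped the same way.
class MaxEntSequenceConfidenceEstimatorExample
{
static void trainAndRank (Transducer model, InstanceList trainingList, InstanceList testList) {
MaxEntSequenceConfidenceEstimator estimator = new MaxEntSequenceConfidenceEstimator (model);
estimator.trainClassifier (trainingList, "correct", "incorrect");
// rank test instances from least to most likely to be tagged correctly
PipedInstanceWithConfidence[] ranked =
estimator.rankPipedInstancesByConfidence (testList, null, null);
for (int i = 0; i < ranked.length; i++)
System.out.println (ranked[i].getConfidence () + "\t" + ranked[i].correct ());
}
}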
| 3,536 | 35.84375 | 110 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/ConstrainedViterbiTransducerCorrector.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.ArrayList;
import cc.mallet.fst.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
* Corrects a subset of the {@link Segment}s produced by a {@link
* Transducer}. It's most useful to find the {@link Segment}s that the
* {@link Transducer} is least confident in and correct those using
* the true {@link Labeling}
* (<code>correctLeastConfidenceSegments</code>). The corrected
* segment then propagates to other labelings in the sequence using
* "constrained viterbi" -- a viterbi calculation that requires the
* path to pass through the corrected segment states.
*/
public class ConstrainedViterbiTransducerCorrector implements TransducerCorrector
{
private static Logger logger = MalletLogger.getLogger(ConstrainedViterbiTransducerCorrector.class.getName());
TransducerConfidenceEstimator confidenceEstimator;
Transducer model;
ArrayList leastConfidentSegments;
public ConstrainedViterbiTransducerCorrector (TransducerConfidenceEstimator confidenceEstimator,
Transducer model) {
this.confidenceEstimator = confidenceEstimator;
this.model = model;
}
public ConstrainedViterbiTransducerCorrector (Transducer model) {
this (new ConstrainedForwardBackwardConfidenceEstimator (model), model);
}
public java.util.Vector getSegmentConfidences () {return confidenceEstimator.getSegmentConfidences();}
/**
Returns the least confident segments from each sequence in the
previous call to <code>correctLeastConfidentSegments</code>
*/
public ArrayList getLeastConfidentSegments () {
return this.leastConfidentSegments;
}
/**
Returns the least confident segments in <code>ilist</code>
@param ilist test instances
@param startTags indicate the beginning of segments
@param continueTags indicate "inside" of segments
@return list of {@link Segment}s, one for each instance, that is least confident
*/
public ArrayList getLeastConfidentSegments (InstanceList ilist, Object[] startTags, Object[] continueTags) {
ArrayList ret = new ArrayList ();
for (int i=0; i < ilist.size(); i++) {
Segment[] orderedSegments = confidenceEstimator.rankSegmentsByConfidence (
ilist.get (i), startTags, continueTags);
ret.add (orderedSegments[0]);
}
return ret;
}
public ArrayList correctLeastConfidentSegments (InstanceList ilist,
Object[] startTags,
Object[] continueTags) {
return correctLeastConfidentSegments (ilist, startTags, continueTags, false);
}
/**
Returns an ArrayList of corrected Sequences. Also stores
leastConfidentSegments, an ArrayList of the segments to correct,
where null entries mean no segment was corrected for that
sequence.
@param ilist test instances
@param startTags indicate the beginning of segments
@param continueTags indicate "inside" of segments
@param findIncorrect true if we should cycle through least
confident segments until we find an incorrect one
@return list of {@link Sequence}s corresponding to the corrected
tagging of each instance in <code>ilist</code>
*/
public ArrayList correctLeastConfidentSegments (InstanceList ilist, Object[] startTags,
Object[] continueTags, boolean findIncorrect) {
ArrayList correctedPredictionList = new ArrayList ();
this.leastConfidentSegments = new ArrayList ();
logger.info (this.getClass().getName() + " ranking confidence using " +
confidenceEstimator.getClass().getName());
for (int i=0; i < ilist.size(); i++) {
logger.fine ("correcting instance# " + i + " / " + ilist.size());
Instance instance = ilist.get (i);
Segment[] orderedSegments = new Segment[1];
Sequence input = (Sequence) instance.getData ();
Sequence truth = (Sequence) instance.getTarget ();
Sequence predicted = new MaxLatticeDefault (model, input).bestOutputSequence();
int numIncorrect = 0;
for (int j=0; j < predicted.size(); j++)
numIncorrect += (!predicted.get(j).equals (truth.get(j))) ? 1 : 0;
if (numIncorrect == 0) { // nothing to correct
this.leastConfidentSegments.add (null);
correctedPredictionList.add (predicted);
continue;
}
// rank segments by confidence
orderedSegments = confidenceEstimator.rankSegmentsByConfidence (
instance, startTags, continueTags);
logger.fine ("Ordered Segments:\n");
for (int j=0; j < orderedSegments.length; j++) {
logger.fine (orderedSegments[j].toString());
}
logger.fine ("Correcting Segment: True Sequence:");
for (int j=0; j < truth.size(); j++)
logger.fine ((String)truth.get (j) + "\t");
logger.fine ("");
logger.fine ("Ordered Segments:\n");
for (int j=0; j < orderedSegments.length; j++) {
logger.fine (orderedSegments[j].toString());
}
// if <code>findIncorrect</code>, find the least confident
// segment that is incorrectly labeled
// else, use least confident segment
Segment leastConfidentSegment = orderedSegments[0];
if (findIncorrect) {
for (int j=0; j < orderedSegments.length; j++) {
if (!orderedSegments[j].correct()) {
leastConfidentSegment = orderedSegments[j];
break;
}
}
}
if (findIncorrect && leastConfidentSegment.correct()) {
logger.warning ("cannot find incorrect segment, probably because error is in background state\n");
this.leastConfidentSegments.add (null);
correctedPredictionList.add (predicted);
continue;
}
this.leastConfidentSegments.add (leastConfidentSegment);
if (leastConfidentSegment == null) { // nothing extracted
correctedPredictionList.add (predicted);
continue;
}
// create segmentCorrectedOutput, which has the true labels for
// the leastConfidentSegment and null for other positions
String[] sequence = new String[truth.size()];
int numCorrectedTokens = 0;
for (int j=0; j < sequence.length; j++)
sequence[j] = null;
for (int j=0; j < truth.size(); j++) {
// if in segment
if (leastConfidentSegment.indexInSegment (j)) {
sequence[j] = (String)truth.get (j);
numCorrectedTokens++;
}
}
if (leastConfidentSegment.endsPrematurely ()) {
sequence[leastConfidentSegment.getEnd()+1] =
(String)truth.get (leastConfidentSegment.getEnd()+1);
numCorrectedTokens++;
}
logger.fine ("Constrained Segment Sequence\n");
for (int j=0; j < sequence.length; j++) {
logger.fine (sequence[j]);
}
ArraySequence segmentCorrectedOutput = new ArraySequence (sequence);
// run constrained viterbi on this sequence with the
// constraint that this segment is tagged correctly
Sequence correctedPrediction = new MaxLatticeDefault (model,
orderedSegments[0].getInput (), segmentCorrectedOutput).bestOutputSequence();
int numIncorrectAfterCorrection = 0;
for (int j=0; j < truth.size(); j++)
numIncorrectAfterCorrection += (!correctedPrediction.get(j).equals (truth.get(j))) ? 1 : 0;
logger.fine ("Num incorrect tokens in original prediction: " + numIncorrect);
logger.fine ("Num corrected tokens: " + numCorrectedTokens);
logger.fine ("Num incorrect tokens after correction-propagation: " + numIncorrectAfterCorrection);
// print sequence info
logger.fine ("Correcting Segment: True Sequence:");
for (int j=0; j < truth.size(); j++)
logger.fine ((String)truth.get (j) + "\t");
logger.fine ("\nOriginal prediction: ");
for (int j=0; j < predicted.size(); j++)
logger.fine ((String)predicted.get (j) + "\t");
logger.fine ("\nCorrected prediction: ");
for (int j=0; j < correctedPrediction.size(); j++)
logger.fine ((String)correctedPrediction.get (j) + "\t");
logger.fine ("");
correctedPredictionList.add (correctedPrediction);
}
return correctedPredictionList;
}
}
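// The class below is not part of the original MALLET source: a minimal sketch of one
// correction pass. The Transducer `model` is assumed to be trained, and the start/continue
// tag arrays are illustrative B-/I- style labels that must match the model's output alphabet.
class ConstrainedViterbiTransducerCorrectorExample
{
static ArrayList correctOnce (Transducer model, InstanceList testList) {
Object[] startTags = new Object[] {"B-PER", "B-LOC"};
Object[] continueTags = new Object[] {"I-PER", "I-LOC"};
ConstrainedViterbiTransducerCorrector corrector =
new ConstrainedViterbiTransducerCorrector (model);
// one truth-substituted segment per instance, propagated by constrained Viterbi
ArrayList correctedOutputs = corrector.correctLeastConfidentSegments (testList, startTags, continueTags);
// entries are null where nothing needed correcting
ArrayList correctedSegments = corrector.getLeastConfidentSegments ();
System.out.println (correctedOutputs.size () + " corrected output sequences");
return correctedOutputs;
}
}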
| 8,378 | 38.338028 | 110 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/GammaAverageConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.HashMap;
import cc.mallet.fst.*;
import cc.mallet.types.*;
/** Calculates the confidence in an extracted segment by taking the
* average of P(s_i|o) for each state in the segment. */
public class GammaAverageConfidenceEstimator extends TransducerConfidenceEstimator
{
HashMap string2stateIndex;
public GammaAverageConfidenceEstimator (Transducer model) {
super(model);
string2stateIndex = new HashMap();
// store state indices
for (int i=0; i < model.numStates(); i++) {
string2stateIndex.put (model.getState(i).getName(), new Integer (i));
}
}
/**
Calculates the confidence in the tagging of a {@link Segment}.
@return 0-1 confidence value. higher = more confident.
*/
public double estimateConfidenceFor (Segment segment, SumLatticeDefault cachedLattice) {
Sequence predSequence = segment.getPredicted ();
Sequence input = segment.getInput ();
SumLatticeDefault lattice = (cachedLattice==null) ? new SumLatticeDefault (model, input) :
cachedLattice;
double confidence = 0;
for (int i=segment.getStart(); i <= segment.getEnd(); i++) {
int stateIndex = stateIndexOfString((String)predSequence.get(i));
if (stateIndex == -1) // Unknown label.
return 0.0;
confidence += lattice.getGammaProbability(i+1, model.getState(stateIndex));
}
return confidence/(double)segment.size();
}
private int stateIndexOfString (String s)
{
Integer index = (Integer) string2stateIndex.get (s);
if (index == null)
return -1;
return index.intValue();
}
}
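// The class below is not part of the original MALLET source: a hedged sketch of
// per-segment scoring. `model` is an assumed trained Transducer whose output labels
// use the illustrative B-/I- tags below.
class GammaAverageConfidenceEstimatorExample
{
static void printSegmentScores (Transducer model, Instance instance) {
Object[] startTags = new Object[] {"B-PER"};
Object[] continueTags = new Object[] {"I-PER"};
GammaAverageConfidenceEstimator estimator = new GammaAverageConfidenceEstimator (model);
// segments extracted from the Viterbi output, ordered from least to most confident
Segment[] ranked = estimator.rankSegmentsByConfidence (instance, startTags, continueTags);
for (int i = 0; i < ranked.length; i++)
System.out.println (ranked[i].getConfidence () + "\t" + ranked[i]);
}
}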
| 2,117 | 33.721311 | 92 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/NBestViterbiConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence by the probability
that one of the the Viterbi paths rank 2->N is correct. Note that
this is a strange definition of confidence, and is mainly used for
{@link MultipleChoiceCRFActiveLearner}, where we want to find
Instances that are mislabeled, but are likely to have a correct
labeling in the top N Viterbi paths.
*/
public class NBestViterbiConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
/** total number of Viterbi paths */
int N;
private static Logger logger = MalletLogger.getLogger(
NBestViterbiConfidenceEstimator.class.getName());
public NBestViterbiConfidenceEstimator (Transducer model, int N) {
super(model);
this.N = N;
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
SumLatticeDefault lattice = new SumLatticeDefault (model, (Sequence)instance.getData());
double[] costs = new double[N];
List<SequencePairAlignment<Object,Object>> as = new MaxLatticeDefault (model, (Sequence)instance.getData()).bestOutputAlignments(N);
for (int i = 0; i < N; i++)
costs[i] = as.get(i).getWeight();
double latticeCost = lattice.getTotalWeight();
double prFirstIsCorrect = Math.exp( latticeCost - costs[0] );
double prOtherIsCorrect = 0.0;
for (int i=1; i < N; i++)
prOtherIsCorrect += Math.exp( latticeCost - costs[i] );
return prFirstIsCorrect / prOtherIsCorrect;
}
}
| 2,277 | 35.15873 | 134 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/GammaProductConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.HashMap;
import cc.mallet.fst.*;
import cc.mallet.types.*;
/** Calculates the confidence in an extracted segment by taking the
* product of P(s_i|o) for each state in the segment. */
public class GammaProductConfidenceEstimator extends TransducerConfidenceEstimator
{
HashMap string2stateIndex;
public GammaProductConfidenceEstimator (Transducer model) {
super(model);
string2stateIndex = new HashMap();
// store state indices
for (int i=0; i < model.numStates(); i++) {
string2stateIndex.put (model.getState(i).getName(), new Integer (i));
}
}
/**
Calculates the confidence in the tagging of a {@link Segment}.
@return 0-1 confidence value. higher = more confident.
*/
public double estimateConfidenceFor (Segment segment, SumLatticeDefault cachedLattice) {
Sequence predSequence = segment.getPredicted ();
Sequence input = segment.getInput ();
SumLatticeDefault lattice = (cachedLattice==null) ? new SumLatticeDefault (model, input) :
cachedLattice;
double confidence = 1;
for (int i=segment.getStart(); i <= segment.getEnd(); i++)
confidence *= lattice.getGammaProbability (i+1, model.getState (stateIndexOfString ((String)predSequence.get (i))));
return confidence;
}
private int stateIndexOfString (String s)
{
Integer index = (Integer) string2stateIndex.get (s);
if (index == null)
throw new IllegalArgumentException ("state label " + s + " not a state in transducer");
return index.intValue();
}
}
| 2,080 | 35.508772 | 119 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/SequenceConfidenceInstance.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.types.*;
import cc.mallet.util.PropertyList;
/**
Stores a {@link Sequence} and a PropertyList, used when extracting
features from a Sequence in a pipe for confidence prediction
*/
public class SequenceConfidenceInstance
{
PropertyList features;
Instance instance;
public SequenceConfidenceInstance (Instance inst) {
this.instance = inst;
}
public Instance getInstance () { return this.instance; }
public PropertyList getFeatures () { return features; }
public void setFeatureValue (String key, double value) {
features = PropertyList.add (key, value, features); }
public boolean hasFeature (String key) {
return (features == null ? false : features.hasProperty(key)); }
public double getFeatureValue (String key) {
return (features == null ? 0.0 : features.lookupNumber (key)); }
}
| 1,420 | 32.046512 | 85 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/QBCSequenceConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence by the
"disagreement" among a committee of CRFs.
*/
public class QBCSequenceConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
private static Logger logger = MalletLogger.getLogger(
QBCSequenceConfidenceEstimator.class.getName());
Transducer[] committee;
public QBCSequenceConfidenceEstimator (Transducer model, Transducer[] committee) {
super(model);
this.committee = committee;
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
Sequence[] predictions = new Sequence[committee.length];
for (int i = 0; i < committee.length; i++)
predictions[i] = new MaxLatticeDefault (committee[i], (Sequence)instance.getData()).bestOutputSequence();
// Higher return value means higher confidence this sequence is correct.
double avg = avgVoteEntropy(predictions);
return -1.0 * avg;
}
/** Calculate the "vote entropy" for each token and average. Vote
* entropy is defined as
*
* - \frac{1}{\log(\min(k, |C|))} \sum_c \frac{V(c,e)}{k} \log\left(\frac{V(c,e)}{k}\right)
*
* where k is committee size, e is Instance, c is class, and V(c,e)
* is the number of committee members assigning class c to input e.
*/
private double avgVoteEntropy (Sequence[] predictions) {
double sum = 0.0;
for (int i = 0; i < predictions[0].size(); i++) {
HashMap label2Count = new HashMap();
for (int j = 0; j < predictions.length; j++) {
String label = predictions[j].get(i).toString();
Integer count = (Integer)label2Count.get(label);
if (count == null)
count = new Integer(0);
label2Count.put(label, new Integer(count.intValue() + 1));
}
sum += voteEntropy(label2Count);
}
return (double)sum / predictions[0].size();
}
private double voteEntropy (HashMap label2Count) {
Iterator iter = label2Count.keySet().iterator();
double sum = 0.0;
while (iter.hasNext()) {
String label = (String)iter.next();
int count = ((Integer)label2Count.get(label)).intValue();
double quot = (double)count / committee.length;
sum += quot * Math.log(quot);
}
double ret = (double) -1.0 * sum / Math.log((double)committee.length);
return ret;
}
}
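// The class below is not part of the original MALLET source: a hedged sketch of
// committee-based scoring. `committee` is assumed to hold several Transducers trained
// on different samples of the same data (e.g. by bagging); higher values mean more agreement.
class QBCSequenceConfidenceEstimatorExample
{
static void printDisagreementScores (Transducer[] committee, InstanceList test) {
// the first committee member doubles as the "primary" model here
QBCSequenceConfidenceEstimator estimator =
new QBCSequenceConfidenceEstimator (committee[0], committee);
for (int i = 0; i < test.size(); i++)
System.out.println ("instance " + i + " confidence = "
+ estimator.estimateConfidenceFor (test.get(i), null, null));
}
}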
| 3,062 | 31.935484 | 108 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/PipedInstanceWithConfidence.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import cc.mallet.types.*;
/**
Helper class to store confidence of an Instance.
*/
public class PipedInstanceWithConfidence implements Comparable{
double confidence;
Instance instance;
boolean correct;
public PipedInstanceWithConfidence (Instance inst, double c, boolean correct) {
this.instance = inst;
this.confidence = c;
this.correct = correct;
}
public int compareTo (Object o) {
PipedInstanceWithConfidence inst = (PipedInstanceWithConfidence) o;
if (this.confidence > inst.confidence)
return 1;
else if (this.confidence < inst.confidence)
return -1;
else return 0;
}
public double getConfidence () { return confidence; }
public Instance getInstance () { return instance; }
public boolean correct () { return correct; }
}
| 1,313 | 29.55814 | 85 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/SegmentProductConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence by combining the
output of a segment confidence estimator for each segment.
*/
public class SegmentProductConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
TransducerConfidenceEstimator segmentEstimator;
private static Logger logger = MalletLogger.getLogger(
SegmentProductConfidenceEstimator.class.getName());
public SegmentProductConfidenceEstimator (Transducer model,
TransducerConfidenceEstimator segmentConfidenceEstimator) {
super(model);
this.segmentEstimator = segmentConfidenceEstimator;
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
SegmentIterator iter = new SegmentIterator (model, instance, startTags, inTags);
double instanceConfidence = 1;
while (iter.hasNext()) {
Segment s = (Segment) iter.nextSegment();
instanceConfidence *= segmentEstimator.estimateConfidenceFor (s);
}
return instanceConfidence;
}
}
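// The class below is not part of the original MALLET source: a hedged sketch of composing
// a per-segment estimator into a whole-sequence score. `model`, `instance`, and the tag
// arrays are assumed to be supplied by the caller and to match the model's label alphabet.
class SegmentProductConfidenceEstimatorExample
{
static double scoreSequence (Transducer model, Instance instance,
Object[] startTags, Object[] continueTags) {
// per-segment scores come from a constrained forward-backward calculation
TransducerConfidenceEstimator segmentEstimator =
new ConstrainedForwardBackwardConfidenceEstimator (model);
SegmentProductConfidenceEstimator sequenceEstimator =
new SegmentProductConfidenceEstimator (model, segmentEstimator);
return sequenceEstimator.estimateConfidenceFor (instance, startTags, continueTags);
}
}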
| 1,839 | 32.454545 | 92 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/RandomConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.types.*;
/** Randomly assigns values between 0-1 to the confidence of a {@link
* Segment}. Used as baseline to compare with other methods.
*/
public class RandomConfidenceEstimator extends TransducerConfidenceEstimator
{
java.util.Random generator;
public RandomConfidenceEstimator (int seed, Transducer model) {
super(model);
generator = new Random (seed);
}
public RandomConfidenceEstimator (Transducer model) {
this (1, model);
}
/**
Randomly generate the confidence in the tagging of a {@link Segment}.
*/
public double estimateConfidenceFor (Segment segment, SumLatticeDefault cachedLattice) {
return generator.nextDouble();
}
}
| 1,273 | 29.333333 | 89 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/MinSegmentConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
Estimates the confidence of an entire sequence by the least
confident segment.
*/
public class MinSegmentConfidenceEstimator extends TransducerSequenceConfidenceEstimator
{
TransducerConfidenceEstimator segmentEstimator;
private static Logger logger = MalletLogger.getLogger(
MinSegmentConfidenceEstimator.class.getName());
public MinSegmentConfidenceEstimator (Transducer model,
TransducerConfidenceEstimator segmentConfidenceEstimator) {
super(model);
this.segmentEstimator = segmentConfidenceEstimator;
}
/**
Calculates the confidence in the tagging of an {@link Instance}.
*/
public double estimateConfidenceFor (Instance instance,
Object[] startTags,
Object[] inTags) {
SegmentIterator iter = new SegmentIterator (model, instance, startTags, inTags);
double lowestConfidence = 9999;
while (iter.hasNext()) {
Segment s = (Segment) iter.nextSegment();
double currConf = segmentEstimator.estimateConfidenceFor (s);
if (currConf < lowestConfidence)
lowestConfidence = currConf;
}
return lowestConfidence;
}
}
| 1,851 | 30.931034 | 88 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/ConstrainedForwardBackwardConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import cc.mallet.fst.*;
import cc.mallet.types.*;
/**
* Estimates the confidence of a {@link Segment} extracted by a {@link
* Transducer} by performing a "constrained lattice"
* calculation. Essentially, this sums all possible ways this segment
* could have been extracted and normalizes.
*/
public class ConstrainedForwardBackwardConfidenceEstimator extends TransducerConfidenceEstimator
{
public ConstrainedForwardBackwardConfidenceEstimator (Transducer model) {
super(model);
}
/**
Calculates the confidence in the tagging of a {@link Segment}.
@return 0-1 confidence value. higher = more confident.
*/
public double estimateConfidenceFor (Segment segment, SumLatticeDefault cachedLattice) {
Sequence predSequence = segment.getPredicted ();
Sequence input = segment.getInput ();
SumLatticeDefault lattice = (cachedLattice == null) ? new SumLatticeDefault (model, input) : cachedLattice;
// constrained lattice
SumLatticeDefault constrainedLattice = new SumLatticeConstrained (model, input, null, segment, predSequence);
double latticeWeight = lattice.getTotalWeight ();
double constrainedLatticeWeight = constrainedLattice.getTotalWeight ();
double confidence = Math.exp (latticeWeight - constrainedLatticeWeight);
//System.err.println ("confidence: " + confidence);
return confidence;
}
private static final long serialVersionUID = 1L;
}
| 1,937 | 38.55102 | 111 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/ConfidenceCorrectorEvaluator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.*;
import java.io.*;
import cc.mallet.fst.*;
import cc.mallet.types.*;
/**
Calculates the effectiveness of "constrained viterbi" in
propagating corrections in one segment of a sequence to other
segments.
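<p>A minimal usage sketch (the model, corrected predictions, corrected
segments, and instance list are assumed to be produced elsewhere, e.g. by a
{@link TransducerCorrector}; the tag values are hypothetical):</p>
<pre>
ConfidenceCorrectorEvaluator eval = new ConfidenceCorrectorEvaluator(
    new String[]{"B-PER"}, new String[]{"I-PER"});
eval.evaluate(crf, correctedPredictions, testList, correctedSegments,
    "correction test", System.out, true);
</pre>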
*/
public class ConfidenceCorrectorEvaluator
{
Object[] startTags; // to identify segment start/end boundaries
Object[] inTags;
public ConfidenceCorrectorEvaluator (Object[] startTags, Object[] inTags) {
this.startTags = startTags;
this.inTags = inTags;
}
/**
Returns true if predSequence contains errors outside of correctedSegment.
*/
private boolean containsErrorInUncorrectedSegments (Sequence trueSequence,
Sequence predSequence,
Sequence correctedSequence,
Segment correctedSegment) {
for (int i=0; i < trueSequence.size(); i++) {
if (correctedSegment.indexInSegment(i)) {
if (!correctedSequence.get (i).equals (trueSequence.get (i))) {
System.err.println ("\nTruth: ");
for (int j=0; j < trueSequence.size(); j++)
System.err.print (trueSequence.get (j) + " ");
System.err.println ("\nPredicted: ");
for (int j=0; j < trueSequence.size(); j++)
System.err.print (predSequence.get (j) + " ");
System.err.println ("\nCorrected: ");
for (int j=0; j < trueSequence.size(); j++)
System.err.print (correctedSequence.get (j) + " ");
throw new IllegalStateException ("Corrected sequence does not have correct labels for corrected segment: " + correctedSegment);
}
}
else {
if (!predSequence.get (i).equals (trueSequence.get (i)))
return true;
}
}
return false;
}
/**
Only evaluates sequences which contain errors. Examines the
region not directly corrected by <code>correctedSegments</code>
to measure the effects of error propagation.
@param model used to segment input sequence
@param predictions list of the corrected segmentation
@param ilist list of testing data
@param correctedSegments list of {@link Segment}s in each
sequence that were corrected...currently only allows one segment
per instance.
@param description text prepended to the evaluation report
@param outputStream stream to which the evaluation report is written
@param errorsInUncorrected true if we only evaluate sequences where
errors remain after correction
*/
public void evaluate (Transducer model, ArrayList predictions, InstanceList ilist,
ArrayList correctedSegments, String description,
PrintStream outputStream, boolean errorsInUncorrected) {
if (predictions.size() != ilist.size () || correctedSegments.size() != ilist.size ())
throw new IllegalArgumentException ("number of predicted sequence (" +
predictions.size() + ") and number of corrected segments (" +
correctedSegments.size() + ") must be equal to length of instancelist (" +
ilist.size() + ")");
int numIncorrect2Correct = 0; // overall correction improvement
int numCorrect2Incorrect = 0; // overall correction degradation
int numPropagatedIncorrect2Correct = 0; // count of propagated corrections
int numPredictedCorrect = 0; // num tokens predicted correctly
int numCorrectedCorrect = 0; // num tokens predicted correctly after correction
// accuracy outside of corrected segment before and after propagation
int numUncorrectedCorrectBeforePropagation = 0;
int numUncorrectedCorrectAfterPropagation = 0;
int totalTokens = 0;
int totalTokensInUncorrectedRegion = 0;
int numCorrectedSequences = 0; // count of sequences corrected
for (int i=0; i < ilist.size(); i++) {
Instance instance = ilist.get (i);
Sequence input = (Sequence) instance.getData ();
Sequence trueSequence = (Sequence) instance.getTarget ();
Sequence predSequence = (Sequence) new MaxLatticeDefault (model, input).bestOutputSequence();
Sequence correctedSequence = (Sequence) predictions.get (i);
Segment correctedSegment = (Segment) correctedSegments.get (i);
// if any condition is true, do not evaluate this sequence
if (correctedSegment == null ||
(errorsInUncorrected && !containsErrorInUncorrectedSegments (
trueSequence, predSequence, correctedSequence, correctedSegment)))
continue;
numCorrectedSequences++;
totalTokens += trueSequence.size();
boolean[] predictedMatches = getMatches (trueSequence, predSequence);
boolean[] correctedMatches = getMatches (trueSequence, correctedSequence);
for (int j=0; j < predictedMatches.length; j++) {
numPredictedCorrect += predictedMatches[j] ? 1 : 0;
numCorrectedCorrect += correctedMatches[j] ? 1 : 0;
if (predictedMatches[j] && !correctedMatches[j])
numCorrect2Incorrect++;
else if (!predictedMatches[j] && correctedMatches[j])
numIncorrect2Correct++;
// outside corrected segment
if (j < correctedSegment.getStart() || j > correctedSegment.getEnd()) {
totalTokensInUncorrectedRegion++;
if (!predictedMatches[j] && correctedMatches[j])
numPropagatedIncorrect2Correct++;
numUncorrectedCorrectBeforePropagation += predictedMatches[j] ? 1 : 0;
numUncorrectedCorrectAfterPropagation += correctedMatches[j] ? 1 : 0;
}
}
}
double tokenAccuracyBeforeCorrection = (double)numPredictedCorrect / totalTokens;
double tokenAccuracyAfterCorrection = (double)numCorrectedCorrect / totalTokens;
double uncorrectedRegionAccuracyBeforeCorrection = (double)numUncorrectedCorrectBeforePropagation / totalTokensInUncorrectedRegion;
double uncorrectedRegionAccuracyAfterCorrection = (double)numUncorrectedCorrectAfterPropagation / totalTokensInUncorrectedRegion;
outputStream.println (description + "\nEvaluating effect of error-propagation in sequences containing at least one token error:" +
"\ntotal number correctedsequences: " +
numCorrectedSequences +
"\ntotal number tokens: " +
totalTokens +
"\ntotal number tokens in \"uncorrected region\":" +
totalTokensInUncorrectedRegion +
"\ntotal number correct tokens before correction:" +
numPredictedCorrect +
"\ntotal number correct tokens after correction:" +
numCorrectedCorrect +
"\ntoken accuracy before correction: " +
tokenAccuracyBeforeCorrection +
"\ntoken accuracy after correction: " +
tokenAccuracyAfterCorrection +
"\nnumber tokens corrected by propagation: " +
numPropagatedIncorrect2Correct +
"\nnumber tokens made incorrect by propagation: " +
numCorrect2Incorrect +
"\ntoken accuracy of \"uncorrected region\" before propagation: " +
uncorrectedRegionAccuracyBeforeCorrection +
"\ntoken accuracy of \"uncorrected region\" after propagataion: " +
uncorrectedRegionAccuracyAfterCorrection);
}
/**
Returns a boolean array listing where two sequences have matching
values.
*/
private boolean[] getMatches (Sequence s1, Sequence s2) {
if (s1.size() != s2.size())
throw new IllegalArgumentException ("s1.size: " + s1.size() + " s2.size: " + s2.size());
boolean[] ret = new boolean [s1.size()];
for (int i=0; i < s1.size(); i++)
ret[i] = s1.get (i).equals (s2.get(i));
return ret;
}
}
| 7,853 | 43.625 | 133 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/TransducerConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import java.io.Serializable;
import cc.mallet.extract.LabeledSpan;
import cc.mallet.fst.*;
import cc.mallet.pipe.Noop;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
* Abstract class that estimates the confidence of a {@link Segment}
* extracted by a {@link Transducer}.
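 *
 * <p>Concrete subclasses (e.g. {@link ConstrainedForwardBackwardConfidenceEstimator})
 * are typically used to rank extracted segments; a sketch, with a test
 * {@link InstanceList} and hypothetical BIO tag arrays:</p>
 * <pre>
 * Segment[] ranked = estimator.rankSegmentsByConfidence(testList,
 *     new String[]{"B-PER"}, new String[]{"I-PER"});
 * </pre>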
*/
abstract public class TransducerConfidenceEstimator implements Serializable
{
private static Logger logger = MalletLogger.getLogger(TransducerConfidenceEstimator.class.getName());
protected Transducer model; // the trained Transducer which
// performed the extractions.
java.util.Vector segmentConfidences;
public TransducerConfidenceEstimator (Transducer model) {
this.model = model;
}
/**
Calculates the confidence in the tagging of a {@link Segment}.
*/
public double estimateConfidenceFor (Segment segment) {
return estimateConfidenceFor (segment, null);
}
abstract public double estimateConfidenceFor (Segment segment, SumLatticeDefault lattice);
public java.util.Vector getSegmentConfidences () {return this.segmentConfidences;}
/**
Ranks all {@link Segment}s in this {@link InstanceList} by
confidence estimate.
@param ilist list of segmentation instances
@param startTags represent the labels for the start states (B-)
of all segments
@param continueTags represent the labels for the continue state
(I-) of all segments
@return array of {@link Segment}s ordered by non-decreasing
confidence scores, as calculated by <code>estimateConfidenceFor</code>
*/
public Segment[] rankSegmentsByConfidence (InstanceList ilist, Object[] startTags,
Object[] continueTags) {
ArrayList segmentList = new ArrayList ();
SegmentIterator iter = new SegmentIterator (this.model, ilist, startTags, continueTags);
if (this.segmentConfidences == null)
segmentConfidences = new java.util.Vector ();
while (iter.hasNext ()) {
Segment segment = (Segment) iter.nextSegment ();
double confidence = estimateConfidenceFor (segment);
segment.setConfidence (confidence);
logger.fine ("confidence=" + segment.getConfidence() + " for segment\n"
+ segment.sequenceToString() + "\n");
segmentList.add (segment);
}
Collections.sort (segmentList);
Segment[] ret = new Segment[1];
ret = (Segment[]) segmentList.toArray (ret);
return ret;
}
/**
Ranks the segments in one {@link Instance}.
@param instance the instance to be segmented
@param startTags represent the labels for the start states (e.g. B-)
of all segments
@param continueTags represent the labels for the continue state
(e.g. I-) of all segments
@return array of {@link Segment}s ordered by non-decreasing
confidence scores, as calculated by <code>estimateConfidenceFor</code>
*/
public Segment[] rankSegmentsByConfidence (Instance instance, Object[] startTags,
Object[] continueTags) {
InstanceList ilist = new InstanceList (new Noop(instance.getDataAlphabet(),instance.getTargetAlphabet()));
ilist.add (instance);
return rankSegmentsByConfidence (ilist, startTags, continueTags);
}
public Transducer getTransducer() { return this.model; }
}
| 3,815 | 35.692308 | 108 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/IsolatedSegmentTransducerCorrector.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.ArrayList;
import java.util.logging.*;
import cc.mallet.fst.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
* Corrects a subset of the {@link Segment}s produced by a {@link
* Transducer}. It's most useful to find the {@link Segment}s that the
* {@link Transducer} is least confident in and correct those using
* the true {@link Labeling}
 * (<code>correctLeastConfidentSegments</code>). Unlike in {@link
* ConstrainedViterbi}, the corrected segment does not affect the
* labeling of other segments in the sequence. For comparison.
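 *
 * <p>A minimal usage sketch (the trained CRF, the test {@link InstanceList},
 * and the BIO tag arrays are assumed to come from the caller):</p>
 * <pre>
 * TransducerCorrector corrector = new IsolatedSegmentTransducerCorrector(crf);
 * ArrayList corrected = corrector.correctLeastConfidentSegments(testList,
 *     new String[]{"B-PER"}, new String[]{"I-PER"});
 * </pre>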
*/
public class IsolatedSegmentTransducerCorrector implements TransducerCorrector
{
private static Logger logger = MalletLogger.getLogger(IsolatedSegmentTransducerCorrector.class.getName());
TransducerConfidenceEstimator confidenceEstimator;
Transducer model;
public IsolatedSegmentTransducerCorrector (TransducerConfidenceEstimator confidenceEstimator,
Transducer model) {
this.confidenceEstimator = confidenceEstimator;
this.model = model;
}
public IsolatedSegmentTransducerCorrector (Transducer model) {
this (new ConstrainedForwardBackwardConfidenceEstimator (model), model);
}
/**
@param ilist original Transducer InstanceList
@param startTags start segment tags (B-)
@param continueTags continue segment tags (I-)
@return a list of {@link Sequence}s corresponding to the
corrected tagging of each Instance in <code>ilist</code>. Note
that these corrections will not affect tokens outside of the
corrected segment.
*/
public ArrayList correctLeastConfidentSegments (InstanceList ilist, Object[] startTags,
Object[] continueTags) {
ArrayList correctedPredictionList = new ArrayList ();
for (int i=0; i < ilist.size(); i++) {
logger.fine ("correcting instance# " + i + " / " + ilist.size());
Instance instance = ilist.get (i);
Segment[] orderedSegments = new Segment[1];
orderedSegments = confidenceEstimator.rankSegmentsByConfidence (instance, startTags, continueTags);
Segment leastConfidentSegment = orderedSegments[0];
logger.fine ("Ordered Segments:\nTrue sequence: " + leastConfidentSegment.getTruth());
for (int j=0; j < orderedSegments.length; j++) {
logger.fine (orderedSegments[j].toString());
}
// _do not_ run constrained viterbi on this sequence with the
// constraint that this segment is tagged correctly.
// instead, simply replace the labeling of the corrected
// segment.
MultiSegmentationEvaluator eval = new MultiSegmentationEvaluator (new InstanceList[0], new String[0], startTags, continueTags);
Sequence truth = leastConfidentSegment.getTruth();
Sequence predicted = leastConfidentSegment.getPredicted();
int numIncorrect = eval.numIncorrectSegments (truth, predicted);
String[] sequence = new String[truth.size()];
for (int j=0; j < truth.size(); j++) {
if (j <= leastConfidentSegment.getEnd() && j >= leastConfidentSegment.getStart())
sequence[j] = (String)truth.get (j);
else sequence[j] = (String) predicted.get (j);
}
ArraySequence segmentCorrectedOutput = new ArraySequence (sequence);
logger.fine ("Original prediction: ");
for (int j=0; j < predicted.size(); j++)
logger.fine ((String)predicted.get (j) + "\t");
logger.fine ("\nCorrected prediction: ");
for (int j=0; j < segmentCorrectedOutput.size(); j++)
logger.fine ((String)segmentCorrectedOutput.get (j) + "\t");
logger.fine ("");
if (numIncorrect > -1)
correctedPredictionList.add (segmentCorrectedOutput);
else
correctedPredictionList.add (null);
}
return correctedPredictionList;
}
}
| 4,259 | 40.764706 | 130 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/TransducerSequenceConfidenceEstimator.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.logging.*;
import java.util.*;
import cc.mallet.fst.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.MalletLogger;
/**
* Abstract class that estimates the confidence of a {@link Sequence}
 * extracted by a {@link Transducer}. Note that this is different from
* {@link TransducerConfidenceEstimator}, which estimates the
* confidence for a single {@link Segment}.
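 *
 * <p>Concrete subclasses (e.g. {@link MinSegmentConfidenceEstimator}) can rank
 * whole instances; a sketch, with hypothetical BIO tag arrays:</p>
 * <pre>
 * InstanceWithConfidence[] ranked = estimator.rankInstancesByConfidence(
 *     testList, new String[]{"B-PER"}, new String[]{"I-PER"});
 * </pre>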
*/
abstract public class TransducerSequenceConfidenceEstimator
{
private static Logger logger = MalletLogger.getLogger(TransducerSequenceConfidenceEstimator.class.getName());
protected Transducer model; // the trained Transducer which
// performed the extractions.
public TransducerSequenceConfidenceEstimator (Transducer model) {
this.model = model;
}
/**
Calculates the confidence in the tagging of a {@link Sequence}.
*/
abstract public double estimateConfidenceFor (
Instance instance, Object[] startTags, Object[] inTags);
/**
Ranks all {@link Sequence}s in this {@link InstanceList} by
confidence estimate.
@param ilist list of segmentation instances
@param startTags represent the labels for the start states (B-)
of all segments
@param continueTags represent the labels for the continue state
(I-) of all segments
@return array of {@link InstanceWithConfidence}s ordered by
non-decreasing confidence scores, as calculated by
<code>estimateConfidenceFor</code>
*/
public InstanceWithConfidence[] rankInstancesByConfidence (InstanceList ilist,
Object[] startTags,
Object[] continueTags) {
ArrayList confidenceList = new ArrayList ();
for (int i=0; i < ilist.size(); i++) {
Instance instance = ilist.get (i);
Sequence predicted = new MaxLatticeDefault (model, (Sequence)instance.getData()).bestOutputSequence();
double confidence = estimateConfidenceFor (instance, startTags, continueTags);
confidenceList.add (new InstanceWithConfidence ( instance, confidence, predicted));
logger.info ("instance#"+i+" confidence="+confidence);
}
Collections.sort (confidenceList);
InstanceWithConfidence[] ret = new InstanceWithConfidence[1];
ret = (InstanceWithConfidence[]) confidenceList.toArray (ret);
return ret;
}
}
| 2,845 | 36.946667 | 110 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/InstanceWithConfidence.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import cc.mallet.types.*;
/**
Helper class to store confidence of an Instance.
*/
public class InstanceWithConfidence implements Comparable{
double confidence;
Instance instance;
boolean correct;
public InstanceWithConfidence (Instance inst, double c, Sequence predicted) {
this.instance = inst;
this.confidence = c;
this.correct = true;
Sequence truth = (Sequence) inst.getTarget ();
for (int i=0; i < truth.size(); i++) {
if (!truth.get(i).equals (predicted.get(i))) {
this.correct = false;
break;
}
}
}
public InstanceWithConfidence (Instance inst, double c, boolean correct) {
this.instance = inst;
this.confidence = c;
this.correct = correct;
}
public int compareTo (Object o) {
InstanceWithConfidence inst = (InstanceWithConfidence) o;
if (this.confidence > inst.confidence)
return 1;
else if (this.confidence < inst.confidence)
return -1;
else return 0;
}
public double getConfidence () { return confidence; }
public Instance getInstance () { return instance; }
public boolean correct () { return correct; }
}
| 1,636 | 28.232143 | 85 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/fst/confidence/TransducerCorrector.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Aron Culotta <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.fst.confidence;
import java.util.ArrayList;
import cc.mallet.fst.*;
import cc.mallet.types.*;
/**
*
* Interface for transducerCorrectors, which correct a subset of the
 * {@link Segment}s produced by a {@link Transducer}. Its primary
* purpose is to find the {@link Segment}s that the {@link Transducer}
* is least confident in and correct those using the true {@link
 * Labeling} (<code>correctLeastConfidentSegments</code>).
*/
public interface TransducerCorrector
{
public ArrayList correctLeastConfidentSegments (InstanceList ilist, Object[] startTags,
Object[] continueTags);
}
| 1,155 | 34.030303 | 88 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/upenn/MaxEntShell.java
|
/* Copyright (C) 2003 University of Pennsylvania.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Fernando Pereira <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.share.upenn;
import java.util.regex.*;
import java.io.*;
import java.util.Iterator;
import java.util.List;
import java.util.logging.*;
import cc.mallet.classify.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
/**
* Simple wrapper for training a MALLET maxent classifier.
*
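 * <p>A minimal usage sketch of the array-based API (the feature and label
 * values below are hypothetical):</p>
 * <pre>
 * String[][] features = {{"tall", "green"}, {"short", "red"}};
 * String[] labels = {"tree", "flower"};
 * Classifier c = MaxEntShell.train(features, labels, 1.0, null); // may throw IOException
 * Classification cl = MaxEntShell.classify(c, new String[] {"tall", "green"});
 * </pre>
 *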
* @author <a href="mailto:[email protected]">Fernando Pereira</a>
* @version 1.0
*/
public class MaxEntShell {
private static Logger logger =
MalletLogger.getLogger(MaxEntShell.class.getName());
private MaxEntShell()
{
}
private static final CommandOption.Double gaussianVarianceOption = new CommandOption.Double
(MaxEntShell.class, "gaussian-variance", "decimal", true, 1.0,
"The gaussian prior variance used for training.", null);
private static final CommandOption.File trainOption = new CommandOption.File
(MaxEntShell.class, "train", "FILENAME", true, null,
"Training datafile", null);
private static final CommandOption.File testOption = new CommandOption.File
(MaxEntShell.class, "test", "filename", true, null,
"Test datafile", null);
private static final CommandOption.File classifyOption = new CommandOption.File
(MaxEntShell.class, "classify", "filename", true, null,
"Datafile to classify", null);
private static final CommandOption.File modelOption = new CommandOption.File
(MaxEntShell.class, "model", "filename", true, null,
"Model file", null);
private static final CommandOption.String encodingOption = new CommandOption.String
(MaxEntShell.class, "encoding", "character-encoding-name", true,
null, "Input character encoding", null);
private static final CommandOption.Boolean internalTestOption = new CommandOption.Boolean
(MaxEntShell.class, "internal-test", "true|false", true, false,
"Run internal tests", null);
private static final CommandOption.List commandOptions =
new CommandOption.List (
"Training, testing and running a generic tagger.",
new CommandOption[] {
gaussianVarianceOption,
trainOption,
testOption,
modelOption,
classifyOption,
encodingOption,
internalTestOption
});
/**
* Train a maxent classifier. Each row of <code>features</code>
* represents the features of a training instance. The label for
* that instance is in the corresponding position of
* <code>labels</code>.
*
* @param features Each row gives the on features of an instance
* @param labels Each position gives the label of an instance
* @param var Gaussian prior variance for training
* @param save if non-null, save the trained model to this file
* @return the maxent classifier
* @exception IOException if the trained model cannot be saved
*/
static public Classifier train(String[][]features, String[] labels, double var, File save) throws IOException
{
return train(new
PipeExtendedIterator(
new ArrayDataAndTargetIterator(features, labels),
new CharSequenceArray2TokenSequence()),
var, save);
}
/**
* Train a maxent classifier. The iterator <code>data</code> returns
* training instances with a {@link TokenSequence} as data and a
* target object. The tokens in the instance data will be converted to
* features.
*
* @param data the iterator over training instances
* @param var Gaussian prior variance for training.
* @param save if non-null, save the trained model to this file
* @return the maxent classifier
* @exception IOException if the trained model cannot be saved
*/
static public Classifier train(Iterator<Instance> data, double var,
File save)
throws IOException {
Alphabet features = new Alphabet();
LabelAlphabet labels = new LabelAlphabet();
Pipe instancePipe =
new SerialPipes (new Pipe[] {
new Target2Label(labels),
new TokenSequence2FeatureSequence(features),
new FeatureSequence2FeatureVector()});
InstanceList trainingList = new InstanceList(instancePipe);
trainingList.addThruPipe(data);
logger.info("# features = " + features.size());
logger.info("# labels = " + labels.size());
logger.info("# training instances = " + trainingList.size());
ClassifierTrainer trainer = new MaxEntTrainer(var);
Classifier classifier = trainer.train(trainingList);
logger.info("The training accuracy is "+
classifier.getAccuracy (trainingList));
features.stopGrowth();
if (save != null) {
ObjectOutputStream s =
new ObjectOutputStream(new FileOutputStream(save));
s.writeObject(classifier);
s.close();
}
return classifier;
}
/**
* Test a maxent classifier. The data representation is the same as for
* training.
*
* @param classifier the classifier to test
* @param features an array of instances represented as arrays of features
* @param labels corresponding labels
* @return accuracy on the data
*/
static public double test(Classifier classifier,
String[][]features, String[] labels) {
return test(classifier,
new PipeExtendedIterator(
new ArrayDataAndTargetIterator(features, labels),
new CharSequenceArray2TokenSequence()));
}
/**
* Test a maxent classifier. The data representation is the same as
* for training.
*
* @param classifier the classifier to test
* @param data an iterator over labeled instances
* @return accuracy on the data
*/
static public double test(Classifier classifier, Iterator<Instance> data) {
InstanceList testList = new InstanceList (classifier.getInstancePipe());
testList.addThruPipe(data);
logger.info("# test instances = " + testList.size());
double accuracy = classifier.getAccuracy(testList);
return accuracy;
}
/**
* Compute the maxent classification of an instance.
*
* @param classifier the classifier
* @param features the features that are on for this instance
* @return the classification
*/
static public Classification classify(Classifier classifier,
String[] features) {
return classifier.classify(
new Instance(new TokenSequence(features), null, null, null));
}
/**
* Compute the maxent classifications of an array of instances
*
* @param classifier the classifier
* @param features each row represents the on features for an instance
* @return the array of classifications for the given instances
*/
static public Classification[] classify(Classifier classifier,
String[][] features) {
return classify(classifier,
new PipeExtendedIterator(
new ArrayIterator(features),
new CharSequenceArray2TokenSequence()));
}
/**
* Compute the maxent classifications for unlabeled instances given
* by an iterator.
*
* @param classifier the classifier
* @param data the iterator over unlabeled instances
* @return the array of classifications for the given instances
*/
static public Classification[] classify(Classifier classifier,
Iterator<Instance> data) {
InstanceList unlabeledList =
new InstanceList(classifier.getInstancePipe());
unlabeledList.addThruPipe(data);
logger.info("# unlabeled instances = " + unlabeledList.size());
List classifications = classifier.classify(unlabeledList);
return (Classification[])classifications.toArray(new Classification[]{});
}
/**
* Load a classifier from a file.
*
* @param modelFile the file
* @return the classifier serialized in the file
* @exception IOException if the file cannot be opened or read
* @exception ClassNotFoundException if the file does not deserialize
*/
static public Classifier load(File modelFile)
throws IOException, ClassNotFoundException {
ObjectInputStream s =
new ObjectInputStream(new FileInputStream(modelFile));
Classifier c = (Classifier)s.readObject();
s.close();
return c;
}
static private final String[][] internalData = {{"a", "b"}, {"b", "c"}, {"a", "c"}};
static private final String[] internalTargets = {"yes", "no", "no"};
static private final String[] internalInstance = {"a", "b", "c"};
static private void internalTest() throws IOException {
Classifier classifier = train(internalData, internalTargets, 1.0, null);
System.out.println("Training accuracy = " +
test(classifier, internalData, internalTargets));
Classification cl =
classify(classifier, internalInstance);
Labeling lab = cl.getLabeling();
LabelAlphabet labels = lab.getLabelAlphabet();
for (int c = 0; c < labels.size(); c++)
System.out.print(labels.lookupObject(c) + " " +
lab.value(c) + " ");
System.out.println();
}
private static InputStreamReader getReader(File file, String encoding)
throws IOException {
return encoding != null ?
new InputStreamReader(
new FileInputStream(file), encoding) :
new FileReader(file);
}
/**
* Command-line wrapper to train, test, or run a maxent
* classifier. Instances are represented as follows:
* <dl>
* <dt>Labeled:</dt><dd><em>label</em> <em>feature-1</em> ... <em>feature-n</em></dd>
*<dt>Unlabeled:</dt><dd><em>feature-1</em> ... <em>feature-n</em></dd>
* </dl>
* @param args the command line arguments. Options (shell and Java quoting should be added as needed):
*<dl>
*<dt><code>--help</code> <em>boolean</em></dt>
*<dd>Print this command line option usage information. Give <code>true</code> for longer documentation. Default is <code>false</code>.</dd>
*<dt><code>--prefix-code</code> <em>Java-code</em></dt>
*<dd>Java code you want run before any other interpreted code. Note that the text is interpreted without modification, so unlike some other Java code options, you need to include any necessary 'new's. Default is null.</dd>
*<dt><code>--gaussian-variance</code> <em>positive-number</em></dt>
*<dd>The Gaussian prior variance used for training. Default is 1.0.</dd>
*<dt><code>--train</code> <em>filename</em></dt>
*<dd>Train on labeled instances stored in <em>filename</em>. Default is no training.</dd>
*<dt><code>--test</code> <em>filename</em></dt>
*<dd>Test on the labeled instances stored in <em>filename</em>. Default is no testing.</dd>
*<dt><code>--classify</code> <em>filename</em></dt>
*<dd>Classify the unlabeled instances stored in <em>filename</em>. Default is no classification.</dd>
*<dt><code>--model</code> <em>filename</em></dt>
*<dd>The filename for reading (test/classify) or saving (train) the model. Default is no model file.</dd>
*</dl>
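 *<p>Example invocation (file names are hypothetical):</p>
 *<pre>
 *java cc.mallet.share.upenn.MaxEntShell --train train.txt --model maxent.model
 *java cc.mallet.share.upenn.MaxEntShell --model maxent.model --classify unlabeled.txt
 *</pre>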
* @exception Exception if an error occurs
*/
static public void main (String[] args) throws Exception {
Classifier classifier = null;
Pipe preprocess =
new CharSequence2TokenSequence(
new CharSequenceLexer(CharSequenceLexer.LEX_NONWHITESPACE_TOGETHER));
InputStreamReader trainingData = null, testData = null;
Pattern instanceFormat = Pattern.compile("^\\s*(\\S+)\\s*(.*)\\s*$");
Pattern unlabeledInstanceFormat = Pattern.compile("^\\s*(.*)\\s*$");
commandOptions.process(args);
if (internalTestOption.value)
internalTest();
if (trainOption.value != null) {
trainingData = getReader(trainOption.value, encodingOption.value);
classifier = train(
new PipeExtendedIterator(
new LineIterator (trainingData, instanceFormat, 2, 1, -1),
preprocess),
gaussianVarianceOption.value, modelOption.value);
}
else if (modelOption.value != null)
classifier = load(modelOption.value);
if (classifier != null) {
if (testOption.value != null) {
testData = getReader(testOption.value, encodingOption.value);
System.out.println
("The testing accuracy is "+
test(classifier,
new PipeExtendedIterator(
new LineIterator (testData, instanceFormat, 2, 1, -1),
preprocess)));
}
if (classifyOption.value != null) {
classifier.getInstancePipe().setTargetProcessing(false);
InputStreamReader unlabeledData =
getReader(classifyOption.value, encodingOption.value);
Classification[] cl = classify(classifier, new PipeExtendedIterator(
new LineIterator(unlabeledData,
unlabeledInstanceFormat,
1, -1, -1),
preprocess));
for (int i = 0; i < cl.length; i++) {
Labeling lab = cl[i].getLabeling();
LabelAlphabet labels = lab.getLabelAlphabet();
for (int c = 0; c < labels.size(); c++)
System.out.print(labels.lookupObject(c) + " " +
lab.value(c) + " ");
System.out.println();
}
}
}
}
}
| 12,945 | 36.416185 | 225 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/upenn/ner/ListMember.java
|
package cc.mallet.share.upenn.ner;
import java.io.*;
import java.util.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
import gnu.trove.*;
/**
* Checks membership in a lexicon in a text file. Multi-token items are supported,
* but only if the tokens are uniformly separated or not separated by spaces: that is,
* U.S.A. is acceptable, as is San Francisco, but not St. Petersburg.
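 *
 * <p>A small usage sketch (the lexicon file name is hypothetical; {@link NEPipes}
 * builds its gazetteer pipes this way):</p>
 * <pre>
 * Pipe countries = new ListMember("Country", new File("countries.txt"), false);
 * </pre>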
*/
public class ListMember extends Pipe implements java.io.Serializable {
String name;
Set lexicon;
boolean ignoreCase;
int min, max;
public ListMember (String featureName, File lexFile, boolean ignoreCase) {
this.name = featureName;
this.ignoreCase = ignoreCase;
if (!lexFile.exists())
throw new IllegalArgumentException("File "+lexFile+" not found.");
try {
lexicon = new THashSet();
min = 99999;
max = -1;
BufferedReader br = new BufferedReader(new FileReader(lexFile));
while (br.ready()) {
String s = br.readLine().trim();
if (s.equals("")) continue; // ignore blank lines
int count = countTokens(s);
if (count < min) min = count;
if (count > max) max = count;
if (ignoreCase)
lexicon.add(s.toLowerCase());
else
lexicon.add(s);
}
} catch (IOException e) {
System.err.println("Problem with "+lexFile+": "+e);
System.exit(0);
}
}
public Instance pipe (Instance carrier) {
TokenSequence seq = (TokenSequence)carrier.getData();
boolean[] marked = new boolean[seq.size()];
for (int i=0; i<seq.size(); i++) {
StringBuffer sb = new StringBuffer();
StringBuffer sbs = new StringBuffer(); // separate tokens by spaces
for (int j=i; j<i+max && j<seq.size(); j++) {
// test tokens from i to j
String text = seq.get(j).getText();
sb.append(text);
if (sbs.length() == 0) sbs.append(text);
else sbs.append(" "+text);
String test = ignoreCase ? sb.toString().toLowerCase() : sb.toString();
String tests = ignoreCase ? sbs.toString().toLowerCase() : sbs.toString();
if (j-i+1 >= min && (lexicon.contains(test) || lexicon.contains(tests)))
markFrom(i, j, marked);
}
}
for (int i=0; i<seq.size(); i++) {
if (marked[i])
seq.get(i).setFeatureValue(name, 1.0);
}
return carrier;
}
private void markFrom (int a, int b, boolean[] marked) {
for (int i=a; i<=b; i++) marked[i] = true;
}
// This method MUST count tokens the same way as the main tokenizer does!
private int countTokens (String s) {
// copied from Wei Li's EnronMessage2TokenSequence class
StringTokenizer wordst = new StringTokenizer(s, "~`!@#$%^&*()_-+={[}]|\\:;\"',<.>?/ \t\n\r", true);
return wordst.countTokens();
}
}
| 3,175 | 34.288889 | 107 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/upenn/ner/LengthBins.java
|
package cc.mallet.share.upenn.ner;
import java.util.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
/**
* A feature approximating string length.
*/
public class LengthBins extends Pipe implements java.io.Serializable {
String name;
int[] bins;
String[] binNames;
/**
* <p>bins contains the maximum sizes of elements in each bin.
* <p>For example, passing in {1,3,7} would produce 4 bins, for strings
* of lengths 1, 2-3, 4-7, and 8+.
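 * <p>For instance, <code>new LengthBins("Length", new int[] {1,2,3,5,10})</code>
 * (as used in {@link NEPipes}) yields the bins [1], [2], [3], [4-5], [6-10],
 * and [11+].</p>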
*/
public LengthBins (String featureName, int[] binMaxes) {
this.name = featureName;
this.bins = binMaxes;
Arrays.sort(bins);
int min = 1;
binNames = new String[bins.length+1];
for (int i=0; i<bins.length; i++) {
binNames[i] = (min == bins[i] ? "["+min+"]" :
"["+min+"-"+bins[i]+"]");
min = bins[i]+1;
}
binNames[bins.length] = "["+min+"+]";
}
public Instance pipe (Instance carrier) {
TokenSequence ts = (TokenSequence) carrier.getData();
tokens:
for (int i=0; i < ts.size(); i++) {
Token t = ts.get(i);
int length = t.getText().length();
for (int j=0; j<bins.length; j++)
if (length <= bins[j]) {
t.setFeatureValue(name+"="+binNames[j], 1.0);
continue tokens;
}
t.setFeatureValue(name+"="+binNames[bins.length], 1.0);
}
return carrier;
}
}
| 1,518 | 27.12963 | 75 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/upenn/ner/NEPipes.java
|
package cc.mallet.share.upenn.ner;
import java.io.*;
import java.util.*;
import java.util.regex.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.tsf.*;
import cc.mallet.share.weili.ner.enron.*;
import cc.mallet.types.*;
public class NEPipes extends SerialPipes {
// *** General-purpose regex
// Single-token:
static String ALLCAPS = "([A-Z]*)";
static String ALLLOWER = "([a-z]*)";
static String INITCAPS = "([A-Z].*)";
static String MIXEDCASE = "(.*[a-z].*[A-Z].*)";
static String MIXEDNUM = "(.*[0-9].*)";
static String ENDSENTENCE = "([.!?])";
static String PUNCTUATION = "([:;,.!?-])";
static String BRACKET = "([(){}\\[\\]])";
static String ORDINAL = "([0-9]+(st|rd|th))";
// Multiple-token:
static String QUOTED = "([\"'].*[\"'])";
static String BRACKETED = "([({\\[].*[)}\\]])";
static String INITIAL = "([A-Z][.])";
static String DOTS = "([.][.])";
static String DASHES = "(--)";
static String FRACTION = "(<DIGITS>/<DIGITS>)";
// *** Regex for money
static String DOTDECIMAL = "((<DIGITS>)?[.]<DIGITS>)";
static String DECIMAL = "(<DIGITS>|"+DOTDECIMAL+")";
static String NUMBER_WORD = "(zero|one|two|three|four|five|six|seven|eight|nine|ten|eleven|twelve|thirteen|fourteen|fifteen|sixteen|seventeen|eighteen|nineteen|twenty|thirty|forty|fifty|sixty|seventy|eighty|ninety|hundred|thousand|million|billion|trillion)";
static String CURRENCY = "(dollar(s)?|cent(s)?|pound(s)?|euro(s)?|franc(s)?|yen)"; // a sample
static String MONEYWORDS = "("+NUMBER_WORD+"+"+CURRENCY+")";
// big number, not Eur. notation
static String COMMA_DECIMAL = "((<DIGITS>,)+<DIGITS>([.]<DIGITS>)?)";
static String ILLION = "(m(illion)?|b(illion)?|MM|k)";
static String MIXED_ILLION = "([0-9]+"+ILLION+")";
static String RANGE = "("+DECIMAL+"-"+DECIMAL+")";
// *** Regex for time
static String TIMENUM = "(<DIGITS>:<DIGITS>(:<DIGITS>)?)";
static String AMPM = "(am|a[.]m[.]|pm|p[.]m[.])";
static String MIXED_AMPM = "([0-9]+"+AMPM+")";
static String TIMEZONES = "(PST|PDT|MST|MDT|CST|CDT|EST|EDT|UTC|GMT)";
static String SPECIALTIME = "(noon|midnight)";
static String TIME = "(("+TIMENUM+AMPM+"?|(<DIGITS>)"+AMPM+"|"+
"(<DIGITS>:)?"+MIXED_AMPM+")"+
TIMEZONES+"?|"+SPECIALTIME+")";
static String TIMERANGE = "("+TIME+"(-|to|until)"+TIME+")";
// *** Regex for phone #
static String P10 = "(([(]?<DIGITS>[)]?[-]?)<DIGITS>[-]?<DIGITS>|"+
"<DIGITS>[.]<DIGITS>[.]<DIGITS>)";
static String P5 = "(<DIGITS>[-]<DIGITS>)";
// *** Regex for dates
static String DAY = "(<DIGITS>|[1-3]?[0-9](st|rd|th))";
static String YEAR = "(<YEAR>)";
static String DECADE = "(<YEARDECADE>)";
static String MONTHNAME = "(January|February|March|April|May|June|July|August|"+
"September|October|November|December)";
static String MONTHABBR = "((Jan|Feb|Mar|Apr|Jun|Jul|Aug|Sep|Sept|"+
"Oct|Nov|Dec)[.]?)";
static String MONTH = "("+MONTHNAME+"|"+MONTHABBR+")";
static String WEEKDAYNAME =
"(Sunday|Monday|Tuesday|Wednesday|Thursday|Friday|Saturday)";
static String WEEKDAYABBR = "((Sun|Mon|Tue|Tues|Wed|Thu|Thur|Thurs|Fri|Sat)[.]?)";
static String WEEKDAY = "("+WEEKDAYNAME+"|"+WEEKDAYABBR+")";
static String MONTHDAY = "("+MONTH+DAY+")";
static String DAYMONTHDAY = "("+WEEKDAY+"[,]?"+MONTHDAY+")";
static String MONTHYEAR = "("+MONTH+"[,]?"+YEAR+")";
static String MONTHDAYYEAR = "("+MONTHDAY+"[,]?"+YEAR+")";
static String DAYMONTHDAYYEAR = "("+DAYMONTHDAY+"[,]?"+YEAR+")";
static String SEP = "([-/])";
static String SEPDATE = "(<DIGITS>"+SEP+"<DIGITS>("+SEP+"(<DIGITS>|"+YEAR+"))?)";
static String FULLSEPDATE = "(<DIGITS>"+SEP+"<DIGITS>"+SEP+"(<DIGITS>|"+YEAR+"))";
public NEPipes (File placeDir) {
super(new Pipe[] {
new TokenText("text="),
new RegexMatches("SingleLetter", Pattern.compile("[A-Za-z]")),
new RegexMatches("AllCaps", Pattern.compile(ALLCAPS)),
new RegexMatches("AllLower", Pattern.compile(ALLLOWER)),
new RegexMatches("InitCaps", Pattern.compile(INITCAPS)),
new RegexMatches("MixedCase", Pattern.compile(MIXEDCASE)),
new RegexMatches("MixedNum", Pattern.compile(MIXEDNUM)),
new RegexMatches("EndSentPunc", Pattern.compile(ENDSENTENCE)),
new RegexMatches("Punc", Pattern.compile(PUNCTUATION)),
new RegexMatches("Bracket", Pattern.compile(BRACKET)),
new RegexMatches("Ordinal", Pattern.compile
(ORDINAL, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("Quoted", Pattern.compile(QUOTED), 3, 4),
new LongRegexMatches("Bracketed", Pattern.compile(BRACKETED), 3, 4),
new LongRegexMatches("Initial", Pattern.compile(INITIAL), 2, 2),
new LongRegexMatches("Ellipse", Pattern.compile(DOTS), 2, 2),
new LongRegexMatches("Dashes", Pattern.compile(DASHES), 2, 2),
new LongRegexMatches("Fraction", Pattern.compile(FRACTION), 3, 3),
new LongRegexMatches("DotDecimal", Pattern.compile(DOTDECIMAL), 2, 3),
new LongRegexMatches("Percent", Pattern.compile
("("+RANGE+"|"+DECIMAL+")%"), 2, 4),
new RegexMatches("10^3n", Pattern.compile
(ILLION, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("Numeric", Pattern.compile(DECIMAL), 1, 3),
new LongRegexMatches("BigNumber", Pattern.compile(COMMA_DECIMAL), 3, 7),
new LongRegexMatches("kmbNumber", Pattern.compile
(DECIMAL+ILLION, Pattern.CASE_INSENSITIVE), 1, 4),
new RegexMatches("kmbMixed", Pattern.compile
(MIXED_ILLION, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("Dollars", Pattern.compile
("[$]("+RANGE+"|"+DECIMAL+"|"+COMMA_DECIMAL+"|"+
DECIMAL+ILLION+"|"+MIXED_ILLION+")",
Pattern.CASE_INSENSITIVE), 2, 8),
new RegexMatches("NumberWord", Pattern.compile
(NUMBER_WORD, Pattern.CASE_INSENSITIVE)),
new RegexMatches("Currency", Pattern.compile
(CURRENCY, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("MoneyWords", Pattern.compile
(MONEYWORDS, Pattern.CASE_INSENSITIVE), 2, 4),
new LongRegexMatches("AmPm", Pattern.compile
(AMPM, Pattern.CASE_INSENSITIVE), 1, 4),
new RegexMatches("MixedAmPm", Pattern.compile
(MIXED_AMPM, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("TimeNum", Pattern.compile(TIMENUM), 3, 5),
new RegexMatches("TimeZone", Pattern.compile
(TIMEZONES, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("Time", Pattern.compile
(TIME, Pattern.CASE_INSENSITIVE), 1, 9),
new LongRegexMatches("TimeRange", Pattern.compile
(TIMERANGE, Pattern.CASE_INSENSITIVE), 3, 19),
new LongRegexMatches("P10", Pattern.compile(P10), 3, 7),
new LongRegexMatches("P5", Pattern.compile(P10), 3, 3),
new LongRegexMatches("Phone", Pattern.compile(P10+"|"+P5), 3, 7),
new RegexMatches("UncasedMonthName", Pattern.compile
(MONTHNAME, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("UncasedMonthAbbr", Pattern.compile
(MONTHABBR, Pattern.CASE_INSENSITIVE), 1, 2),
new LongRegexMatches("CasedMonth", Pattern.compile(MONTH), 1, 2),
new LongRegexMatches("UncasedMonth", Pattern.compile
(MONTH, Pattern.CASE_INSENSITIVE), 1, 2),
new RegexMatches("UncasedWeekdayName", Pattern.compile
(WEEKDAYNAME, Pattern.CASE_INSENSITIVE)),
new LongRegexMatches("UncasedWeekdayAbbr", Pattern.compile
(WEEKDAYABBR, Pattern.CASE_INSENSITIVE), 1, 2),
new LongRegexMatches("CasedWeekday", Pattern.compile(WEEKDAY), 1, 2),
new LongRegexMatches("UncasedWeekday", Pattern.compile
(WEEKDAY, Pattern.CASE_INSENSITIVE), 1, 2),
new LongRegexMatches("MonthDay", Pattern.compile
(MONTHDAY, Pattern.CASE_INSENSITIVE), 2, 3),
new LongRegexMatches("DayMonthDay", Pattern.compile
(DAYMONTHDAY, Pattern.CASE_INSENSITIVE), 3, 6),
new LongRegexMatches("MonthYear", Pattern.compile
(MONTHYEAR, Pattern.CASE_INSENSITIVE), 2, 4),
new LongRegexMatches("MonthDayYear", Pattern.compile
(MONTHDAYYEAR, Pattern.CASE_INSENSITIVE), 3, 5),
new LongRegexMatches("DayMonthDayYear", Pattern.compile
(DAYMONTHDAYYEAR, Pattern.CASE_INSENSITIVE), 4, 8),
new LongRegexMatches("SeparatorDate", Pattern.compile(SEPDATE), 3, 5),
new LongRegexMatches("FullSeparatorDate", Pattern.compile(FULLSEPDATE), 5, 5),
new ListMember("Country", new File(placeDir, "countries.txt"), false),
new ListMember("Africa", new File(placeDir, "africa.txt"), true),
new ListMember("Asia", new File(placeDir, "asia.txt"), true),
new ListMember("Europe", new File(placeDir, "europe.txt"), true),
new ListMember("NorAm", new File(placeDir, "north_america.txt"), true),
new ListMember("SouAm", new File(placeDir, "south_america.txt"), true),
new ListMember("Island", new File(placeDir, "islands.txt"), true),
new ListMember("Region", new File(placeDir, "regions.txt"), true),
new ListMember("USState", new File(placeDir, "states.txt"), true),
new ListMember("CanadaProv", new File(placeDir, "provinces.txt"), true),
new ListMember("City", new File(placeDir, "cities.txt"), true),
new ListMember("USCity", new File(placeDir, "us_cities.txt"), true),
new ListMember("Terrain", new File(placeDir, "terrain.txt"), true),
new ListMember("Geographical", new File(placeDir, "geo.txt"), true),
new LengthBins("Length", new int[] {1,2,3,5,10}),
new FeatureWindow(1, 1), // should be the last feature pipe
});
}
}
| 10,781 | 50.836538 | 262 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/upenn/ner/FeatureWindow.java
|
package cc.mallet.share.upenn.ner;
import java.util.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
import gnu.trove.*;
/**
* Adds all features of tokens in the window to the center token.
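 * <p>For example, <code>new FeatureWindow(1, 1)</code> (the last pipe in
 * {@link NEPipes}) copies each feature of the previous token onto the current
 * one with the suffix "/-1", and each feature of the next token with "/+1".</p>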
*/
public class FeatureWindow extends Pipe implements java.io.Serializable {
int left, right;
public FeatureWindow (int left, int right) {
assert (left >= 0 && right >= 0);
this.left = left;
this.right = right;
}
public Instance pipe (Instance carrier) {
TokenSequence seq = (TokenSequence)carrier.getData();
TObjectDoubleHashMap[] original = new TObjectDoubleHashMap[seq.size()];
for (int i=0; i<seq.size(); i++) {
Token t = seq.get(i);
original[i] = new TObjectDoubleHashMap();
PropertyList.Iterator pl = t.getFeatures().iterator();
while (pl.hasNext()) {
pl.nextProperty();
original[i].put(pl.getKey(), pl.getNumericValue());
}
}
for (int i=0; i<original.length; i++) { // add to features of token i...
for (int j = -1 * left; j <= right; j++) {
int index = i + j; //...the features of token index
String append = (j < 0) ? "/"+j : "/+"+j;
if (index<0 || index==i || index>=original.length) continue;
Token t = seq.get(i);
Object[] features = original[index].keys();
for (int k=0; k<features.length; k++)
t.setFeatureValue((String)features[k]+append,
original[index].get(features[k]));
}
}
return carrier;
}
}
| 1,739 | 32.461538 | 80 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/upenn/ner/LongRegexMatches.java
|
package cc.mallet.share.upenn.ner;
import java.util.regex.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
/**
* Matches a regular expression which spans several tokens.
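 * <p>For example, <code>new LongRegexMatches("Dashes", Pattern.compile("(--)"), 2, 2)</code>
 * (as in {@link NEPipes}) sets the "Dashes" feature on two adjacent "-" tokens
 * whose concatenation matches the pattern.</p>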
*/
public class LongRegexMatches extends Pipe implements java.io.Serializable {
String name;
Pattern regex;
int min; // how many tokens to merge for a match
int max;
public LongRegexMatches (String featureName, Pattern regex, int min, int max) {
this.name = featureName;
this.regex = regex;
this.min = min;
this.max = max;
}
public Instance pipe (Instance carrier) {
TokenSequence ts = (TokenSequence) carrier.getData();
boolean[] marked = new boolean[ts.size()]; // avoid setting features twice
for (int i=0; i < ts.size(); i++) {
// On reaching a new token, test all strings with at least
// min tokens which end in the new token.
StringBuffer sb = new StringBuffer();
// start by testing rightmost suffix, and grow leftward
for (int length = 1; length <= max; length++) {
int loc = i - length + 1;
if (loc < 0) break; // take another token
sb.insert(0, ts.get(loc).getText()); // else prepend token
// On a match, mark all participating tokens.
if (length >= min && regex.matcher(sb.toString()).matches()) {
for (int j=0; j<length; j++)
marked[loc+j] = true;
}
}
}
// Set feature on all tokens participating in any match
for (int i=0; i < ts.size(); i++)
if (marked[i])
ts.get(i).setFeatureValue(name, 1.0);
return carrier;
}
}
| 1,766 | 30.553571 | 83 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/mccallum/ner/TokenSequenceDocHeader.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
If the new article contains a "header", like "SOCCER-", or "RUGBY LEAGUE-",
add an indicative feature to all Tokens.
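For example, when the header token is "SOCCER" followed by "-", each token
beginning with a capital letter receives the feature "HEADER=SOCCER".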
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.share.mccallum.ner;
import java.io.*;
import cc.mallet.pipe.*;
import cc.mallet.types.Instance;
import cc.mallet.types.Token;
import cc.mallet.types.TokenSequence;
public class TokenSequenceDocHeader extends Pipe implements Serializable
{
public Instance pipe (Instance carrier)
{
TokenSequence ts = (TokenSequence) carrier.getData();
if (ts.size() > 3
&& (ts.get(2).getText().equals("-") || ts.get(3).getText().equals("-"))
&& ts.get(1).getText().matches("[A-Z]+")) {
String header = ts.get(1).getText();
if (header.equals("PRESS")) // Don't bother with "PRESS DIGEST" headers
return carrier;
String featureName = "HEADER="+header;
for (int i = 0; i < ts.size(); i++) {
Token t = ts.get(i);
// Only apply this feature to capitalized words, because if we apply it to everything
// we easily get an immense number of possible feature conjunctions (e.g. every word
// with each of these HEADER= features).
if (t.getText().matches("^[A-Z].*"))
t.setFeatureValue (featureName, 1.0);
}
}
return carrier;
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
}
}
| 2,111 | 31 | 92 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/mccallum/ner/ConllNer2003Sentence2TokenSequence.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
/*
An error? CoNLLTrue MalletTrue MalletPred
O O O
I-MISC B-MISC B-MISC
B-MISC B-MISC I-MISC
I-MISC B-MISC I-MISC
O O O
O O O
O O O
*/
package cc.mallet.share.mccallum.ner;
import java.util.regex.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
public class ConllNer2003Sentence2TokenSequence extends Pipe
{
static final String[] endings = new String[]
{"ing", "ed", "ogy", "s", "ly", "ion", "tion", "ity", "ies"};
static Pattern[] endingPatterns = new Pattern[endings.length];
// Indexed by {forward,backward} {0,1,2 offset} {ending char ngram index}
static final String[][][] endingNames = new String[2][3][endings.length];
{
for (int i = 0; i < endings.length; i++) {
endingPatterns[i] = Pattern.compile (".*"+endings[i]+"$");
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 2; k++)
endingNames[k][j][i] = "W"+(k==1?"-":"")+j+"=<END"+endings[i]+">";
}
}
}
boolean saveSource = false;
boolean doConjunctions = false;
boolean doTags = true;
boolean doPhrases = true;
boolean doSpelling = false;
boolean doDigitCollapses = true;
boolean doDowncasing = false;
public ConllNer2003Sentence2TokenSequence ()
{
super (null, new LabelAlphabet());
}
public ConllNer2003Sentence2TokenSequence (boolean extraFeatures)
{
super (null, new LabelAlphabet());
if (!extraFeatures) {
doDigitCollapses = doConjunctions = doSpelling = doPhrases = doTags = false;
doDowncasing = true;
}
}
/* Lines look like this:
-DOCSTART- -X- -X- O
EU NNP I-NP I-ORG
rejects VBZ I-VP O
German JJ I-NP I-MISC
call NN I-NP O
to TO I-VP O
boycott VB I-VP O
British JJ I-NP I-MISC
lamb NN I-NP O
. . O O
Peter NNP I-NP I-PER
Blackburn NNP I-NP I-PER
BRUSSELS NNP I-NP I-LOC
1996-08-22 CD I-NP O
The DT I-NP O
European NNP I-NP I-ORG
Commission NNP I-NP I-ORG
said VBD I-VP O
on IN I-PP O
...
*/
public Instance pipe (Instance carrier)
{
String sentenceLines = (String) carrier.getData();
String[] tokens = sentenceLines.split ("\n");
TokenSequence data = new TokenSequence (tokens.length);
LabelSequence target = new LabelSequence ((LabelAlphabet)getTargetAlphabet(), tokens.length);
boolean [][] ending = new boolean[3][endings.length];
boolean [][] endingp1 = new boolean[3][endings.length];
boolean [][] endingp2 = new boolean[3][endings.length];
StringBuffer source = saveSource ? new StringBuffer() : null;
String prevLabel = "NOLABEL";
Pattern ipattern = Pattern.compile ("I-.*");
String word, tag, phrase, label;
for (int i = 0; i < tokens.length; i++) {
if (tokens[i].length() != 0) {
String[] features = tokens[i].split (" ");
if (features.length != 4)
throw new IllegalStateException ("Line \""+tokens[i]+"\" doesn't have four elements");
word = features[0]; // .toLowerCase();
tag = features[1];
phrase = features[2];
label = features[3];
} else {
word = "-<S>-";
tag = "-<S>-";
phrase = "-<S>-";
label = "O";
}
// Transformations
if (doDigitCollapses) {
if (word.matches ("19\\d\\d"))
word = "<YEAR>";
else if (word.matches ("19\\d\\ds"))
word = "<YEARDECADE>";
else if (word.matches ("19\\d\\d-\\d+"))
word = "<YEARSPAN>";
else if (word.matches ("\\d+\\\\/\\d"))
word = "<FRACTION>";
else if (word.matches ("\\d[\\d,\\.]*"))
word = "<DIGITS>";
else if (word.matches ("19\\d\\d-\\d\\d-\\d--d"))
word = "<DATELINEDATE>";
else if (word.matches ("19\\d\\d-\\d\\d-\\d\\d"))
word = "<DATELINEDATE>";
else if (word.matches (".*-led"))
word = "<LED>";
else if (word.matches (".*-sponsored"))
word = "<LED>";
}
if (doDowncasing)
word = word.toLowerCase();
Token token = new Token (word);
// Word and tag unigram at current time
if (doSpelling) {
for (int j = 0; j < endings.length; j++) {
ending[2][j] = ending[1][j];
ending[1][j] = ending[0][j];
ending[0][j] = endingPatterns[j].matcher(word).matches();
if (ending[0][j]) token.setFeatureValue (endingNames[0][0][j], 1);
}
}
if (doTags) {
token.setFeatureValue ("T="+tag, 1);
}
if (doPhrases) {
token.setFeatureValue ("P="+phrase, 1);
}
if (true) {
// Change so each segment always begins with a "B-",
// even if previous token did not have this label.
String oldLabel = label;
if (ipattern.matcher(label).matches ()
&& (prevLabel.length() < 3 // prevLabel is "O"
|| !prevLabel.substring(2).equals (label.substring(2)))) {
label = "B" + oldLabel.substring(1);
}
prevLabel = oldLabel;
}
// Append
data.add (token);
//target.add (bigramLabel);
target.add (label);
//System.out.print (label + ' ');
if (saveSource) {
source.append (word); source.append (" ");
//source.append (bigramLabel); source.append ("\n");
source.append (label); source.append ("\n");
}
}
//System.out.println ("");
carrier.setData(data);
carrier.setTarget(target);
if (saveSource)
carrier.setSource(source);
return carrier;
}
}
| 5,636 | 26.632353 | 95 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/mccallum/ner/TUI.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.share.mccallum.ner;
import junit.framework.*;
import java.util.Iterator;
import java.util.Random;
import java.util.regex.*;
import java.io.*;
import cc.mallet.fst.*;
import cc.mallet.optimize.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.pipe.tsf.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
public class TUI
{
static CommandOption.Double gaussianVarianceOption = new CommandOption.Double
(TUI.class, "gaussian-variance", "DECIMAL", true, 10.0,
"The gaussian prior variance used for training.", null);
static CommandOption.Double hyperbolicSlopeOption = new CommandOption.Double
(TUI.class, "hyperbolic-slope", "DECIMAL", true, 0.2,
"The hyperbolic prior slope used for training.", null);
static CommandOption.Double hyperbolicSharpnessOption = new CommandOption.Double
(TUI.class, "hyperbolic-sharpness", "DECIMAL", true, 10.0,
"The hyperbolic prior sharpness used for training.", null);
static CommandOption.File crfInputFileOption = new CommandOption.File
(TUI.class, "crf-input-file", "FILENAME", true, null,
"The name of the file to write the CRF after training.", null);
static CommandOption.Integer randomSeedOption = new CommandOption.Integer
(TUI.class, "random-seed", "INTEGER", true, 0,
"The random seed for randomly selecting a proportion of the instance list for training", null);
static CommandOption.Integer labelGramOption = new CommandOption.Integer
(TUI.class, "label-gram", "INTEGER", true, 1,
"Markov order of labels: 1, 2, 3", null);
static CommandOption.Integer wordWindowFeatureOption = new CommandOption.Integer
(TUI.class, "word-window-size", "INTEGER", true, 0,
"Size of window of words as features: 0=none, 10, 20...", null);
static CommandOption.Boolean useTestbOption = new CommandOption.Boolean
(TUI.class, "use-testb", "true|false", true, false,
"Use testb, final test set", null);
static CommandOption.Boolean useHyperbolicPriorOption = new CommandOption.Boolean
(TUI.class, "use-hyperbolic-prior", "true|false", true, false,
"Use hyperbolic prior", null);
static CommandOption.Boolean useFeatureInductionOption = new CommandOption.Boolean
(TUI.class, "use-feature-induction", "true|false", true, false,
"Not use or use feature induction", null);
static CommandOption.Boolean clusterFeatureInductionOption = new CommandOption.Boolean
(TUI.class, "cluster-feature-induction", "true|false", true, false,
"Cluster in feature induction", null);
static CommandOption.Boolean useFirstMentionFeatureOption = new CommandOption.Boolean
(TUI.class, "use-firstmention-feature", "true|false", true, false,
"Don't use first-mention feature", null);
static CommandOption.Boolean useDocHeaderFeatureOption = new CommandOption.Boolean
(TUI.class, "use-docheader-feature", "true|false", true, false,
"", null);
static CommandOption.Boolean includeConllLexiconsOption = new CommandOption.Boolean
(TUI.class, "include-conll-lexicons", "true|false", true, false,
"", null);
static CommandOption.Boolean charNGramsOption = new CommandOption.Boolean
(TUI.class, "char-ngrams", "true|false", true, false,
"", null);
static CommandOption.String offsetsOption = new CommandOption.String
(TUI.class, "offsets", "e.g. [[0,0],[1]]", true, "[[-2],[-1],[1],[2]]",
"Offset conjunctions", null);
static CommandOption.String capOffsetsOption = new CommandOption.String
(TUI.class, "cap-offsets", "e.g. [[0,0],[0,1]]", true, "",
"Offset conjunctions applied to features that are [A-Z]*", null);
static CommandOption.String viterbiFilePrefixOption = new CommandOption.String
(TUI.class, "viterbi-file", "FILE", true, "TUI",
"Filename in which to store most recent Viterbi output", null);
static final CommandOption.List commandOptions =
new CommandOption.List (
"Training, testing and running a Chinese word segmenter.",
new CommandOption[] {
gaussianVarianceOption,
hyperbolicSlopeOption,
hyperbolicSharpnessOption,
randomSeedOption,
labelGramOption,
wordWindowFeatureOption,
useHyperbolicPriorOption,
useFeatureInductionOption,
clusterFeatureInductionOption,
useFirstMentionFeatureOption,
useDocHeaderFeatureOption,
includeConllLexiconsOption,
offsetsOption,
capOffsetsOption,
viterbiFilePrefixOption,
useTestbOption,
});
int numEvaluations = 0;
static int iterationsBetweenEvals = 16;
static boolean doingFeatureInduction = true;
static boolean doingClusteredFeatureInduction = false;
private static String CAPS = "[\\p{Lu}]";
private static String LOW = "[\\p{Ll}]";
private static String CAPSNUM = "[\\p{Lu}\\p{Nd}]";
private static String ALPHA = "[\\p{Lu}\\p{Ll}]";
private static String ALPHANUM = "[\\p{Lu}\\p{Ll}\\p{Nd}]";
private static String PUNT = "[,\\.;:?!()]";
private static String QUOTE = "[\"`']";
public static void main (String[] args) throws FileNotFoundException, Exception
{
commandOptions.process (args);
String homedir = System.getProperty ("HOME");
String lexdir = homedir+"/research/data/resources/";
String offsetsString = offsetsOption.value.replace('[','{').replace(']','}');
int[][] offsets = (int[][]) CommandOption.getInterpreter().eval ("new int[][] "+offsetsString);
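		// Each inner offset array lists token positions, relative to the current token, whose features are conjoined
		// into new features; the default "[[-2],[-1],[1],[2]]" simply copies features from the two preceding and two following tokens.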
String capOffsetsString = capOffsetsOption.value.replace('[','{').replace(']','}');
int[][] capOffsets = null;
if (capOffsetsString.length() > 0)
capOffsets = (int[][]) CommandOption.getInterpreter().eval ("new int[][] "+capOffsetsString);
Pipe conllLexiconsPipe = null;
if (includeConllLexiconsOption.value)
conllLexiconsPipe = new SerialPipes (new Pipe[] {
new TrieLexiconMembership (new File(lexdir + "conll/CONLLTWOPER")),
new TrieLexiconMembership (new File(lexdir + "conll/CONLLTWOLOC")),
new TrieLexiconMembership (new File(lexdir + "conll/CONLLTWOORG")),
new TrieLexiconMembership (new File(lexdir + "conll/CONLLTWOMISC")),
});
Pipe p = new SerialPipes (new Pipe[] {
new ConllNer2003Sentence2TokenSequence (),
new RegexMatches ("INITCAP", Pattern.compile (CAPS+".*")),
new RegexMatches ("CAPITALIZED", Pattern.compile (CAPS+LOW+"*")),
new RegexMatches ("ALLCAPS", Pattern.compile (CAPS+"+")),
new RegexMatches ("MIXEDCAPS", Pattern.compile ("[A-Z][a-z]+[A-Z][A-Za-z]*")),
new RegexMatches ("CONTAINSDIGITS", Pattern.compile (".*[0-9].*")),
new RegexMatches ("ALLDIGITS", Pattern.compile ("[0-9]+")),
new RegexMatches ("NUMERICAL", Pattern.compile ("[-0-9]+[\\.,]+[0-9\\.,]+")),
//new RegexMatches ("ALPHNUMERIC", Pattern.compile ("[A-Za-z0-9]+")),
//new RegexMatches ("ROMAN", Pattern.compile ("[ivxdlcm]+|[IVXDLCM]+")),
new RegexMatches ("MULTIDOTS", Pattern.compile ("\\.\\.+")),
new RegexMatches ("ENDSINDOT", Pattern.compile ("[^\\.]+.*\\.")),
new RegexMatches ("CONTAINSDASH", Pattern.compile (ALPHANUM+"+-"+ALPHANUM+"*")),
new RegexMatches ("ACRO", Pattern.compile ("[A-Z][A-Z\\.]*\\.[A-Z\\.]*")),
new RegexMatches ("LONELYINITIAL", Pattern.compile (CAPS+"\\.")),
new RegexMatches ("SINGLECHAR", Pattern.compile (ALPHA)),
new RegexMatches ("CAPLETTER", Pattern.compile ("[A-Z]")),
new RegexMatches ("PUNC", Pattern.compile (PUNT)),
new RegexMatches ("QUOTE", Pattern.compile (QUOTE)),
//new RegexMatches ("LOWER", Pattern.compile (LOW+"+")),
//new RegexMatches ("MIXEDCAPS", Pattern.compile ("[A-Z]+[a-z]+[A-Z]+[a-z]*")),
(includeConllLexiconsOption.value ? conllLexiconsPipe : new Noop ()),
			// Note that the word has not been lowercased, so INITCAP, etc., is redundant
//new TokenSequenceLowercase (),
new TokenText ("W="),
//new TokenSequenceFirstSentenceAllCaps (),
new OffsetConjunctions (offsets),
(capOffsets != null ? (Pipe) new OffsetConjunctions (capOffsets) : (Pipe) new Noop ()),
//// Don't lowercase the W= if you want to use this.
(!useFirstMentionFeatureOption.value
? (Pipe) new Noop ()
: (Pipe) new FeaturesOfFirstMention ("FIRSTMENTION=", Pattern.compile (CAPS+".*"),
// Exclude singleton W=foo features b/c redundant
Pattern.compile ("W=[^@&]+"), false)),
(!useDocHeaderFeatureOption.value ? (Pipe) new Noop () : (Pipe) new TokenSequenceDocHeader ()),
(wordWindowFeatureOption.value > 0
? (Pipe) new FeaturesInWindow ("WINDOW=", -wordWindowFeatureOption.value,
wordWindowFeatureOption.value, Pattern.compile ("WORD=.*"), true)
: (Pipe) new Noop()),
(charNGramsOption.value
? (Pipe) new TokenTextCharNGrams ("CHARNGRAM=", new int[] {2,3,4})
: (Pipe) new Noop()),
new PrintTokenSequenceFeatures(),
new TokenSequence2FeatureVectorSequence (true, true)
});
// Set up training and testing data
//args = new String[] {homedir+"/research/data/ie/ner2003/eng.testa"};
if (useTestbOption.value)
args = new String[] {homedir+"/research/data/ie/ner2003/eng.train",
homedir+"/research/data/ie/ner2003/eng.testb"};
else
args = new String[] {homedir+"/research/data/ie/ner2003/eng.train",
homedir+"/research/data/ie/ner2003/eng.testa"};
InstanceList trainingData = new InstanceList (p);
trainingData.addThruPipe (new LineGroupIterator (new FileReader (new File (args[0])),
Pattern.compile("^.DOCSTART. .X. .X. .$"), true));
System.out.println ("Read "+trainingData.size()+" training instances");
InstanceList testingData = null;
if (args.length > 1) {
testingData = new InstanceList (p);
testingData.addThruPipe (new LineGroupIterator (new FileReader (new File (args[1])),
Pattern.compile("^.DOCSTART. .X. .X. .$"), true));
}
if (testingData == null) {
// For now, just train on a small fraction of the data
Random r = new Random (1);
// Proportions below is: {training, testing, ignore}
InstanceList[] trainingLists = trainingData.split (r, new double[] {.2, .1, .7});
trainingData = trainingLists[0];
// and test on just 50% of the data
if (testingData != null) {
InstanceList[] testingLists = testingData.split (r, new double[] {.5, .5});
testingData = testingLists[0];
testingLists = null;
} else {
testingData = trainingLists[1];
}
trainingLists = null;
assert (testingData != null);
}
// Print out all the target names
Alphabet targets = p.getTargetAlphabet();
System.out.print ("State labels:");
for (int i = 0; i < targets.size(); i++)
System.out.print (" " + targets.lookupObject(i));
System.out.println ("");
// Print out some feature information
System.out.println ("Number of features = "+p.getDataAlphabet().size());
CRF crf = new CRF (p, null);
if (labelGramOption.value == 1)
crf.addStatesForLabelsConnectedAsIn (trainingData);
else if (labelGramOption.value == 2)
crf.addStatesForBiLabelsConnectedAsIn (trainingData);
//else if (labelGramOption.value == 3)
//crf.addStatesForTriLabelsConnectedAsIn (trainingData);
else
throw new IllegalStateException ("label-gram must be 1, 2, or 3, not "+ labelGramOption.value);
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf);
if (useHyperbolicPriorOption.value) {
crft.setUseHyperbolicPrior (true);
crft.setHyperbolicPriorSlope (hyperbolicSlopeOption.value);
crft.setHyperbolicPriorSharpness (hyperbolicSharpnessOption.value);
} else {
crft.setGaussianPriorVariance (gaussianVarianceOption.value);
}
for (int i = 0; i < crf.numStates(); i++) {
Transducer.State s = crf.getState (i);
if (s.getName().charAt(0) == 'I')
s.setInitialWeight (Double.POSITIVE_INFINITY);
}
System.out.println("Training on "+trainingData.size()+" training instances, "+
testingData.size()+" testing instances...");
MultiSegmentationEvaluator eval =
new MultiSegmentationEvaluator (new InstanceList[] {trainingData, testingData},
new String[] {"Training", "Testing"},
new String[] {"B-PER", "B-LOC", "B-ORG", "B-MISC"},
new String[] {"I-PER", "I-LOC", "I-ORG", "I-MISC"});
ViterbiWriter vw = new ViterbiWriter (viterbiFilePrefixOption.value,
new InstanceList[] {trainingData, testingData}, new String[] {"Training", "Testing"});
if (useFeatureInductionOption.value) {
if (clusterFeatureInductionOption.value)
crft.trainWithFeatureInduction (trainingData, null, testingData,
eval, 99999,
10, 99, 200, 0.5, true,
new double[] {.1, .2, .5, .7});
else
crft.trainWithFeatureInduction (trainingData, null, testingData,
eval, 99999,
10, 99, 1000, 0.5, false,
new double[] {.1, .2, .5, .7});
}
else {
double[] trainingProportions = new double[] {.1, .2, .5, .7};
for (int i = 0; i < trainingProportions.length; i++) {
crft.train(trainingData, 3, new double[] {trainingProportions[i]});
eval.evaluate(crft);
vw.evaluate(crft);
}
while (crft.train(trainingData, 3)) {
eval.evaluate(crft);
vw.evaluate(crft);
}
eval.evaluate(crft);
vw.evaluate(crft);
}
}
}
| 13,609 | 39.993976 | 98 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/weili/ner/WordTransformation.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Wei Li <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.share.weili.ner;
import java.util.regex.*;
import java.io.*;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
public class WordTransformation
{
static final String[] endings = new String[]
{"ing", "ed", "ogy", "s", "ly", "ion", "tion", "ity", "ies"};
static Pattern[] endingPatterns = new Pattern[endings.length];
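	// Indexed by {forward,backward} {0,1,2 offset} {ending index}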
static final String[][][] endingNames = new String[2][3][endings.length];
{
for (int i = 0; i < endings.length; i++) {
endingPatterns[i] = Pattern.compile (".*"+endings[i]+"$");
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 2; k++)
endingNames[k][j][i] = "W"+(k==1?"-":"")+j+"=<END"+endings[i]+">";
}
}
}
boolean doSpelling;
boolean doDigitCollapses;
boolean doDowncasing;
public WordTransformation ()
{
this (false, true, false);
}
public WordTransformation (boolean doSpelling, boolean doDigitCollapses, boolean doDowncasing)
{
this.doSpelling = doSpelling;
this.doDigitCollapses = doDigitCollapses;
this.doDowncasing = doDowncasing;
}
public Token transformedToken (String original)
{
boolean [][] ending = new boolean[3][endings.length];
boolean [][] endingp1 = new boolean[3][endings.length];
boolean [][] endingp2 = new boolean[3][endings.length];
String word = original;
if (doDigitCollapses) {
if (word.matches ("19\\d\\d"))
word = "<YEAR>";
else if (word.matches ("19\\d\\ds"))
word = "<YEARDECADE>";
else if (word.matches ("19\\d\\d-\\d+"))
word = "<YEARSPAN>";
else if (word.matches ("\\d+\\\\/\\d"))
word = "<FRACTION>";
else if (word.matches ("\\d[\\d,\\.]*"))
word = "<DIGITS>";
else if (word.matches ("19\\d\\d-\\d\\d-\\d--d"))
word = "<DATELINEDATE>";
else if (word.matches ("19\\d\\d-\\d\\d-\\d\\d"))
word = "<DATELINEDATE>";
else if (word.matches (".*-led"))
word = "<LED>";
else if (word.matches (".*-sponsored"))
word = "<LED>";
}
if (doDowncasing) word = word.toLowerCase();
Token token = new Token (word);
if (doSpelling) {
for (int j = 0; j < endings.length; j++) {
ending[2][j] = ending[1][j];
ending[1][j] = ending[0][j];
ending[0][j] = endingPatterns[j].matcher(word).matches();
if (ending[0][j]) token.setFeatureValue (endingNames[0][0][j], 1);
}
}
return token;
}
}
| 2,812 | 28 | 95 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/weili/ner/enron/EnronMessage2TokenSequence.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Wei Li <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.share.weili.ner.enron;
import java.util.regex.*;
import java.util.*;
import java.io.*;
import cc.mallet.pipe.*;
import cc.mallet.share.weili.ner.*;
import cc.mallet.types.*;
public class EnronMessage2TokenSequence extends Pipe implements Serializable
{
boolean saveSource = false;
public static String[] skip = new String[] {"=_part_", "sent by:"};
public static String[] skipToBlankLine = new String[] {"subject:", "original message",
"content-type:", "content-transfer-encoding:", "forwarded by",
"from:", "sent:", "to:", "bcc:", "cc:"};
public static String[] labels = new String[] {"DATE", "TIME", "LOCATION", "PERSON",
"ORGANIZATION", "ACRONYM", "PHONE", "MONEY", "PERCENT"};
HashSet headerPersonNames;
public EnronMessage2TokenSequence ()
{
super (null, new LabelAlphabet());
headerPersonNames = new HashSet();
}
public Instance pipe (Instance carrier)
{
TokenSequence data = new TokenSequence ();
LabelSequence target = new LabelSequence ((LabelAlphabet)getTargetAlphabet());
StringBuffer source = saveSource ? new StringBuffer() : null;
WordTransformation wt = new WordTransformation();
File f = (File) carrier.getData();
StringBuffer message = new StringBuffer();
try {
BufferedReader br = new BufferedReader(new FileReader(f));
//skip the header before the first blank line
String line = br.readLine();
while (line != null) {
if (line.equals("")) break;
int i;
line = line.toLowerCase();
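				// Indices 5 through 9 of skipToBlankLine are the address headers: "from:", "sent:", "to:", "bcc:", "cc:"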
for (i = 5; i <= 9; i++) {
if (line.startsWith(skipToBlankLine[i])) break;
}
if (i <= 9) {
String header = line.substring(skipToBlankLine[i].length());
while ((line = br.readLine()) != null) {
if (line.equals("")) break;
if (line.startsWith(" ") || line.startsWith("\t"))
header += line;
else break;
}
StringTokenizer st = new StringTokenizer(header, " \t,");
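					// Harvest likely person names (the pieces on either side of the dot) from addresses of the form [email protected]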
while (st.hasMoreTokens()) {
String token = st.nextToken();
if (!token.endsWith("@enron.com")) {
continue;
}
token = token.substring(0, token.length()-10);
int dot = token.indexOf(".");
if (dot == -1) {
continue;
}
if (dot != token.lastIndexOf(".")) {
if (dot == token.lastIndexOf(".")-1) {
dot++;
if (dot+1 < token.length()-1)
headerPersonNames.add(token.substring(dot+1));
}
continue;
}
if (dot > 1)
headerPersonNames.add(token.substring(0, dot));
if (dot+1 < token.length()-1)
headerPersonNames.add(token.substring(dot+1));
}
}
else line = br.readLine();
}
while ((line = br.readLine()) != null) {
boolean header = false;
for (int i = 0; i < skip.length; i++) {
int index = line.toLowerCase().indexOf(skip[i]);
if (index != -1) {
String prefix = line.substring(0, index).trim();
header = true;
for (int j = 0; j < prefix.length(); j++) {
if (prefix.charAt(j) != '-' && prefix.charAt(j) != '>' && prefix.charAt(j) != ' ') {
header = false;
break;
}
}
if (header) break;
}
}
if (header) continue;
for (int i = 0; i < skipToBlankLine.length; i++) {
int index = line.toLowerCase().indexOf(skipToBlankLine[i]);
if (index != -1) {
String prefix = line.substring(0, index).trim();
header = true;
for (int j = 0; j < prefix.length(); j++) {
if (prefix.charAt(j) != '-' && prefix.charAt(j) != '>' && prefix.charAt(j) != ' ') {
header = false;
break;
}
}
if (header) break;
}
}
if (header) {
while ((line = br.readLine()) != null) {
if (line.equals("")) break;
}
continue;
}
message.append(line);
message.append("\n");
}
}
catch (IOException e) {System.err.println(e);}
String currentLabel = "O";
StringTokenizer st = new StringTokenizer(message.toString(), "<>", true);
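		// Walk the message body, treating <ENAMEX ...>, <TIMEX ...> and <NUMEX ...> markup (MUC-style entity annotation)
		// as label boundaries; all other text is tokenized into words.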
boolean readText = true;
String text = null;
while (st.hasMoreTokens()) {
if (readText) text = st.nextToken();
readText = true;
if (text.equals("<")) {
String tag = st.nextToken();
if (tag.equals("/ENAMEX") || tag.equals("/TIMEX") || tag.equals("/NUMEX")) {
String nextToken = st.nextToken();
assert (nextToken.equals(">"));
currentLabel = "O";
continue;
}
else if (tag.startsWith("ENAMEX") || tag.startsWith("TIMEX") || tag.startsWith("NUMEX")) {
String type = tag.substring(tag.indexOf(" ")+1);
assert (type.startsWith("TYPE="));
type = type.substring(type.indexOf("\"")+1, type.lastIndexOf("\""));
// nested entities (should do something)
//if (!currentLabel.equals("O")) {
//}
for (int i = 0; i < labels.length; i++) {
if (labels[i].equals(type)) {
currentLabel = "B-" + type;
break;
}
}
String nextToken = st.nextToken();
assert (nextToken.equals(">"));
continue;
}
else {//false alarm
data.add(new Token("<"));
target.add(currentLabel);
if (saveSource) {
source.append ("<");
source.append ("\n");
}
text = tag;
readText = false;
}
}
// there is no tag in "text"
StringTokenizer wordst = new StringTokenizer(text, "~`!@#$%^&*()_-+={[}]|\\:;\"',<.>?/ \t\n\r", true);
while (wordst.hasMoreTokens()) {
String word = wordst.nextToken();
if (word.equals(" ") || word.equals("\t") || word.equals("\n") || word.equals("\r")) continue;
String originalWord = word;
Token token = wt.transformedToken (word);
// Check if the token is in headerPersonNames
if (headerPersonNames.contains(word.toLowerCase())) {
token.setFeatureValue("HEADER-PERSON", 1.0);
}
// Append
data.add (token);
target.add (currentLabel);
if (saveSource) {
source.append (originalWord);
source.append ("\n");
}
if (currentLabel.startsWith("B-")) currentLabel = "I-" + currentLabel.substring(2);
}
}
carrier.setData(data);
carrier.setTarget(target);
if (saveSource)
carrier.setSource(source);
return carrier;
}
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream(new FileOutputStream(f));
oos.writeObject(headerPersonNames);
oos.close();
}
catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt(CURRENT_SERIAL_VERSION);
out.writeBoolean(saveSource);
out.writeObject(headerPersonNames);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt();
saveSource = in.readBoolean();
headerPersonNames = (HashSet)in.readObject();
}
}
| 7,489 | 29.571429 | 105 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/weili/ner/enron/TUI.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Wei Li <a href="mailto:[email protected]">[email protected]</a>
*/
package cc.mallet.share.weili.ner.enron;
import junit.framework.*;
import java.util.Iterator;
import java.util.Random;
import java.util.regex.*;
import java.io.*;
import cc.mallet.fst.*;
import cc.mallet.optimize.*;
import cc.mallet.pipe.*;
import cc.mallet.pipe.iterator.*;
import cc.mallet.pipe.tsf.*;
import cc.mallet.share.upenn.ner.*;
import cc.mallet.types.*;
import cc.mallet.util.*;
public class TUI
{
private static String CAPS = "[\\p{Lu}]";
private static String LOW = "[\\p{Ll}]";
private static String CAPSNUM = "[\\p{Lu}\\p{Nd}]";
private static String ALPHA = "[\\p{Lu}\\p{Ll}]";
private static String ALPHANUM = "[\\p{Lu}\\p{Ll}\\p{Nd}]";
private static String PUNT = "[,\\.;:?!()]";
private static String QUOTE = "[\"`']";
public static void main(String[] args) throws IOException {
String datadir = "/usr/can/tmp3/weili/NER/Enron/data";
String conlllexdir = "/usr/col/tmp1/weili/Resource/conllDict/";
String idflexdir = "/usr/col/tmp1/weili/Resource/idfDict/";
String placelexdir = "/usr/col/tmp1/weili/Resource/places";
Pipe conllLexiconsPipe = new SerialPipes (new Pipe[] {
new TrieLexiconMembership (new File(conlllexdir + "conll/CONLLTWOPER")),
new TrieLexiconMembership (new File(conlllexdir + "conll/CONLLTWOLOC")),
new TrieLexiconMembership (new File(conlllexdir + "conll/CONLLTWOORG")),
new TrieLexiconMembership (new File(conlllexdir + "conll/CONLLTWOMISC")),
});
Pipe googleLexiconsPipe = new SerialPipes (new Pipe[] {
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGSOCCER")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGGOVT")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGNGO")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGMILITARY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGCOMPANY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGBANK")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGTRADE")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGNEWS")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGOPERATINGSYSTEM")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGPOLITICALPARTY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGTRAVEL")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGBASEBALLTEAMAUGF")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGCARMODEL")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGCARCOMPANY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGENGLISHCOUNTYAUG")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGUNIVERSITY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCNATIONALITYAUGF")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCDISEASEAUG")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCTIME")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCAWARDS")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCMOVIESAUGF")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCPOLITICALPARTY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCRELIGION")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCGOVT")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCWAR")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCCURRENCY")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/LOC")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/PERFL")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/MISCF")),
new TrieLexiconMembership (new File(conlllexdir + "googlesets/ORGFRAWEDITEDSORTED")),
});
Pipe fixedLexiconsPipe = new SerialPipes (new Pipe[] {
new LexiconMembership ("FIRSTHIGHEST", new File(conlllexdir + "personname/ssdi.prfirsthighest"), true),
new LexiconMembership ("FIRSTHIGH", new File(conlllexdir + "personname/ssdi.prfirsthigh"), true),
new LexiconMembership ("FIRSTMED", new File(conlllexdir + "personname/ssdi.prfirstmed"), true),
new LexiconMembership ("FIRSTLOW", new File(conlllexdir + "personname/ssdi.prfirstlow"), true),
new LexiconMembership ("LASTHIGHEST", new File(conlllexdir + "personname/ssdi.prlasthighest"), true),
new LexiconMembership ("LASTHIGH", new File(conlllexdir + "personname/ssdi.prlasthigh"), true),
new LexiconMembership ("LASTMED", new File(conlllexdir + "personname/ssdi.prlastmed"), true),
new LexiconMembership ("LASTLOW", new File(conlllexdir + "personname/ssdi.prlastlow"), true),
new LexiconMembership ("HONORIFIC", new File(conlllexdir + "personname/honorifics"), true),
new LexiconMembership ("NAMESUFFIX", new File(conlllexdir + "personname/namesuffixes"), true),
new LexiconMembership ("NAMEPARTICLE", new File(conlllexdir + "personname/name-particles"), true),
new LexiconMembership ("DAY", new File(conlllexdir + "days"), true),
new LexiconMembership ("MONTH", new File(conlllexdir + "months"), true),
new LexiconMembership ("PLACESUFFIX", new File(conlllexdir + "place-suffixes"), true),
new TrieLexiconMembership ("COUNTRY", new File(conlllexdir + "countries"), true),
new TrieLexiconMembership ("COUNTRYCAPITAL", new File(conlllexdir + "country-capitals"), true),
new TrieLexiconMembership ("USSTATE", new File(conlllexdir + "US-states"), true),
new TrieLexiconMembership ("COMPANYNAME", new File(conlllexdir + "company-names"), true),
new TrieLexiconMembership ("COMPANYSUFFIX", new File(conlllexdir + "company-suffixes"), true),
new TrieLexiconMembership ("CONTINENT", new File(conlllexdir + "continents"), true),
new LexiconMembership ("STOPWORD", new File(conlllexdir + "stopwords"), true),
new TrieLexiconMembership (new File(conlllexdir + "biz.yahoo/COMPANYNAME.ABBREV")),
new TrieLexiconMembership (new File(conlllexdir + "utexas/UNIVERSITIES")),
});
Pipe idfLexiconsPipe = new SerialPipes (new Pipe[] {
new TrieLexiconMembership ("IDF_DES", new File(idflexdir + "designator.data"), true),
new TrieLexiconMembership ("IDF_FIR", new File(idflexdir + "firstnames.data"), true),
new TrieLexiconMembership ("IDF_LOC", new File(idflexdir + "locations.data"), true),
new TrieLexiconMembership ("IDF_NAT", new File(idflexdir + "nations.data"), true),
new TrieLexiconMembership ("IDF_ABB", new File(idflexdir + "non-final-abbrevs.data"), true),
new TrieLexiconMembership ("IDF_ORG", new File(idflexdir + "organization.data"), true),
new TrieLexiconMembership ("IDF_PER", new File(idflexdir + "person.data"), true),
});
Pipe spellingFeaturesPipe = new SerialPipes (new Pipe[] {
new RegexMatches ("INITCAP", Pattern.compile (CAPS+".*")),
new RegexMatches ("CAPITALIZED", Pattern.compile (CAPS+LOW+"*")),
new RegexMatches ("ALLCAPS", Pattern.compile (CAPS+"+")),
new RegexMatches ("MIXEDCAPS", Pattern.compile ("[A-Z][a-z]+[A-Z][A-Za-z]*")),
new RegexMatches ("CONTAINSDIGITS", Pattern.compile (".*[0-9].*")),
new RegexMatches ("ALLDIGITS", Pattern.compile ("[0-9]+")),
new RegexMatches ("NUMERICAL", Pattern.compile ("[-0-9]+[\\.,]+[0-9\\.,]+")),
new RegexMatches ("MULTIDOTS", Pattern.compile ("\\.\\.+")),
new RegexMatches ("ENDSINDOT", Pattern.compile ("[^\\.]+.*\\.")),
new RegexMatches ("CONTAINSDASH", Pattern.compile (ALPHANUM+"+-"+ALPHANUM+"*")),
new RegexMatches ("ACRO", Pattern.compile ("[A-Z][A-Z\\.]*\\.[A-Z\\.]*")),
new RegexMatches ("LONELYINITIAL", Pattern.compile (CAPS+"\\.")),
new RegexMatches ("SINGLECHAR", Pattern.compile (ALPHA)),
new RegexMatches ("CAPLETTER", Pattern.compile ("[A-Z]")),
new RegexMatches ("PUNC", Pattern.compile (PUNT)),
new RegexMatches ("QUOTE", Pattern.compile (QUOTE)),
});
SerialPipes p = new SerialPipes (new Pipe[] {
new EnronMessage2TokenSequence (),
//original
//new TokenText("W="),
//spellingFeaturesPipe,
new NEPipes(new File(placelexdir)),
conllLexiconsPipe,
googleLexiconsPipe,
fixedLexiconsPipe,
idfLexiconsPipe,
new OffsetConjunctions (new int[][]{{-1},{1}}),
new PrintTokenSequenceFeatures(),
new TokenSequence2FeatureVectorSequence (true, true)
});
InstanceList ilist = new InstanceList (p);
ilist.addThruPipe (new FileIterator (datadir, FileIterator.STARTING_DIRECTORIES));
Random r = new Random (1);
InstanceList[] ilists = ilist.split (r, new double[] {0.8, 0.2});
Alphabet targets = p.getTargetAlphabet();
System.out.print ("State labels:");
for (int i = 0; i < targets.size(); i++)
System.out.print (" " + targets.lookupObject(i));
System.out.println ("");
System.out.println ("Number of features = "+p.getDataAlphabet().size());
CRF crf = new CRF (p, null);
crf.addStatesForThreeQuarterLabelsConnectedAsIn (ilists[0]);
CRFTrainerByLabelLikelihood crft = new CRFTrainerByLabelLikelihood (crf);
crft.setGaussianPriorVariance (100.0);
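		// Force every label sequence to start in the "O" (outside) state: all other states get an impossible initial weight.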
for (int i = 0; i < crf.numStates(); i++)
crf.getState(i).setInitialWeight (Transducer.IMPOSSIBLE_WEIGHT);
crf.getState("O").setInitialWeight(0.0);
System.out.println("Training on "+ilists[0].size()+" training instances.");
MultiSegmentationEvaluator eval =
new MultiSegmentationEvaluator (new InstanceList[] {ilists[0], ilists[1]},
new String[] {"train", "test"},
new String[] {"B-DATE", "B-TIME", "B-LOCATION", "B-PERSON",
"B-ORGANIZATION", "B-ACRONYM", "B-PHONE", "B-MONEY", "B-PERCENT"},
new String[] {"I-DATE", "I-TIME", "I-LOCATION", "I-PERSON",
"I-ORGANIZATION", "I-ACRONYM", "I-PHONE", "I-MONEY", "I-PERCENT"});
if (args[0].equals("FeatureInduction"))
throw new IllegalStateException ("Feature induction not yet supported.");
/* crf.trainWithFeatureInduction (ilists[0], null, ilists[1],
eval, 99999,
10, 60, 500, 0.5, false,
new double[] {.1, .2, .5, .7}); */
else if (args[0].equals("NoFeatureInduction")) {
crft.train (ilists[0], 5, new double[] {.1, .2, .5, .7});
while (!crft.trainIncremental(ilists[0])) {
eval.evaluate(crft);
if (crft.getIteration() % 5 == 0)
new ViterbiWriter (args[2], ilists[0], "train", ilists[1], "test");
}
} else {
System.err.println("Feature induction or not? Give me a choice.");
System.exit(1);
}
crf.write(new File(args[1]));
}
}
| 11,072 | 51.478673 | 106 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/share/casutton/ner/ConllNer2003Sentence2TokenSequence.java
|
/* Copyright (C) 2002 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
/**
@author Andrew McCallum <a href="mailto:[email protected]">[email protected]</a>
*/
/*
An error? CoNLLTrue MalletTrue MalletPred
O O O
I-MISC B-MISC B-MISC
B-MISC B-MISC I-MISC
I-MISC B-MISC I-MISC
O O O
O O O
O O O
*/
package cc.mallet.share.casutton.ner; // Generated package name
import java.util.regex.*;
import cc.mallet.extract.StringSpan;
import cc.mallet.extract.StringTokenization;
import cc.mallet.pipe.*;
import cc.mallet.types.*;
/**
* Reads a data file in CoNLL 2003 format, and makes some simple
* transformations.
*
* Unlike the version in <tt>mccallum.ner</tt>, does not expect fields in
 * the data file for tags and phrases if those features are off. Does
* not look for target field if isTargetProcessing() is false.
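 *
 * <p>A minimal usage sketch (the training-file name below is hypothetical):
 * <pre>
 *   Pipe p = new SerialPipes (new Pipe[] {
 *       new ConllNer2003Sentence2TokenSequence (true, true),
 *       new TokenSequence2FeatureVectorSequence (true, true)});
 *   InstanceList ilist = new InstanceList (p);
 *   ilist.addThruPipe (new LineGroupIterator (new FileReader (new File ("eng.train")),
 *       Pattern.compile ("^.DOCSTART. .X. .X. .$"), true));
 * </pre>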
*/
public class ConllNer2003Sentence2TokenSequence extends Pipe
{
static final String[] endings = new String[]
{"ing", "ed", "ogy", "s", "ly", "ion", "tion", "ity", "ies"};
static Pattern[] endingPatterns = new Pattern[endings.length];
// Indexed by {forward,backward} {0,1,2 offset} {ending char ngram index}
static final String[][][] endingNames = new String[2][3][endings.length];
{
for (int i = 0; i < endings.length; i++) {
endingPatterns[i] = Pattern.compile (".*"+endings[i]+"$");
for (int j = 0; j < 3; j++) {
for (int k = 0; k < 2; k++)
endingNames[k][j][i] = "W"+(k==1?"-":"")+j+"=<END"+endings[i]+">";
}
}
}
boolean saveSource = true;
boolean doConjunctions = false;
boolean doTags = true;
boolean doPhrases = true;
boolean doSpelling = false;
boolean doDigitCollapses = true;
boolean doDowncasing = false;
public ConllNer2003Sentence2TokenSequence ()
{
super (null, new LabelAlphabet());
}
public ConllNer2003Sentence2TokenSequence (boolean useTags, boolean usePhrases)
{
super (null, new LabelAlphabet());
this.doTags = useTags;
this.doPhrases = usePhrases;
}
/* Lines look like this:
-DOCSTART- -X- -X- O
EU NNP I-NP I-ORG
rejects VBZ I-VP O
German JJ I-NP I-MISC
call NN I-NP O
to TO I-VP O
boycott VB I-VP O
British JJ I-NP I-MISC
lamb NN I-NP O
. . O O
Peter NNP I-NP I-PER
Blackburn NNP I-NP I-PER
BRUSSELS NNP I-NP I-LOC
1996-08-22 CD I-NP O
The DT I-NP O
European NNP I-NP I-ORG
Commission NNP I-NP I-ORG
said VBD I-VP O
on IN I-PP O
...
*/
public Instance pipe (Instance carrier)
{
String sentenceLines = (String) carrier.getData();
String[] tokens = sentenceLines.split ("\n");
LabelSequence target = new LabelSequence ((LabelAlphabet)getTargetAlphabet(), tokens.length);
boolean [][] ending = new boolean[3][endings.length];
boolean [][] endingp1 = new boolean[3][endings.length];
boolean [][] endingp2 = new boolean[3][endings.length];
StringBuffer source = saveSource ? new StringBuffer() : null;
TokenSequence data = new StringTokenization (source);
String prevLabel = "NOLABEL";
Pattern ipattern = Pattern.compile ("I-.*");
String word, tag = null, phrase = null, label = null;
for (int i = 0; i < tokens.length; i++) {
if (tokens[i].length() != 0) {
try {
String[] features = tokens[i].split (" ");
int fieldIdx = 0;
word = features[fieldIdx++]; // .toLowerCase();
if (doTags) tag = features[fieldIdx++];
if (doPhrases) phrase = features[fieldIdx++];
if (isTargetProcessing ()) label = features[fieldIdx++];
} catch (ArrayIndexOutOfBoundsException e) {
throw new IllegalArgumentException ("Invalid line "+tokens[i]+" : expected word "
+ (doTags ? ", tag" : "")
+ (doPhrases ? ", phrase" : "")
+ (isTargetProcessing () ? ", target" : "")
+ ".");
}
} else {
word = "-<S>-";
tag = "-<S>-";
phrase = "-<S>-";
label = "O";
}
// Transformations
if (doDigitCollapses) {
if (word.matches ("19\\d\\d"))
word = "<YEAR>";
else if (word.matches ("19\\d\\ds"))
word = "<YEARDECADE>";
else if (word.matches ("19\\d\\d-\\d+"))
word = "<YEARSPAN>";
else if (word.matches ("\\d+\\\\/\\d"))
word = "<FRACTION>";
else if (word.matches ("\\d[\\d,\\.]*"))
word = "<DIGITS>";
else if (word.matches ("19\\d\\d-\\d\\d-\\d--d"))
word = "<DATELINEDATE>";
else if (word.matches ("19\\d\\d-\\d\\d-\\d\\d"))
word = "<DATELINEDATE>";
else if (word.matches (".*-led"))
word = "<LED>";
else if (word.matches (".*-sponsored"))
word = "<LED>";
}
if (doDowncasing)
word = word.toLowerCase();
int start = source.length ();
if (saveSource) {
if (word.equals ("-<S>-")) source.append ("\n\n");
source.append (word); source.append (" ");
}
Token token = new StringSpan (source, start, source.length () - 1);
// Word and tag unigram at current time
if (doSpelling) {
for (int j = 0; j < endings.length; j++) {
ending[2][j] = ending[1][j];
ending[1][j] = ending[0][j];
ending[0][j] = endingPatterns[j].matcher(word).matches();
if (ending[0][j]) token.setFeatureValue (endingNames[0][0][j], 1);
}
}
if (doTags) {
token.setFeatureValue ("T="+tag, 1);
}
if (doPhrases) {
token.setFeatureValue ("P="+phrase, 1);
}
data.add (token);
if (isTargetProcessing ()) {
// Change so each segment always begins with a "B-",
// even if previous token did not have this label.
String oldLabel = label;
if (ipattern.matcher(label).matches ()
&& (prevLabel.length() < 3 // prevLabel is "O"
|| !prevLabel.substring(2).equals (label.substring(2)))) {
label = "B" + oldLabel.substring(1);
}
prevLabel = oldLabel;
target.add (label);
}
}
carrier.setData(data);
if (isTargetProcessing ()) carrier.setTarget(target);
if (saveSource) carrier.setSource(source);
return carrier;
}
// serialization garbage
private static final long serialVersionUID = -7326674871670572522L;
}
| 6,448 | 27.662222 | 95 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/LDA.java
|
/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.Arrays;
import java.io.*;
import cc.mallet.types.*;
import cc.mallet.util.ArrayUtils;
import cc.mallet.util.Randoms;
/**
* Latent Dirichlet Allocation.
* @author Andrew McCallum
* @deprecated Use ParallelTopicModel instead.
*/
// Think about support for incrementally adding more documents...
// (I think this means we might want to use FeatureSequence directly).
// We will also need to support a growing vocabulary!
public class LDA implements Serializable {
int numTopics; // Number of topics to be fit
double alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
double beta; // Prior on per-topic multinomial distribution over words
double tAlpha;
double vBeta;
InstanceList ilist; // the data field of the instances is expected to hold a FeatureSequence
int[][] topics; // indexed by <document index, sequence index>
int numTypes;
int numTokens;
int[][] docTopicCounts; // indexed by <document index, topic index>
int[][] typeTopicCounts; // indexed by <feature index, topic index>
int[] tokensPerTopic; // indexed by <topic index>
public LDA (int numberOfTopics)
{
this (numberOfTopics, 50.0, 0.01);
}
public LDA (int numberOfTopics, double alphaSum, double beta)
{
this.numTopics = numberOfTopics;
this.alpha = alphaSum / numTopics;
this.beta = beta;
}
public void estimate (InstanceList documents, int numIterations, int showTopicsInterval,
int outputModelInterval, String outputModelFilename,
Randoms r)
{
ilist = documents.shallowClone();
numTypes = ilist.getDataAlphabet().size ();
int numDocs = ilist.size();
topics = new int[numDocs][];
docTopicCounts = new int[numDocs][numTopics];
typeTopicCounts = new int[numTypes][numTopics];
tokensPerTopic = new int[numTopics];
tAlpha = alpha * numTopics;
vBeta = beta * numTypes;
long startTime = System.currentTimeMillis();
// Initialize with random assignments of tokens to topics
// and finish allocating this.topics and this.tokens
int topic, seqLen;
FeatureSequence fs;
for (int di = 0; di < numDocs; di++) {
try {
fs = (FeatureSequence) ilist.get(di).getData();
} catch (ClassCastException e) {
System.err.println ("LDA and other topic models expect FeatureSequence data, not FeatureVector data. "
+"With text2vectors, you can obtain such data with --keep-sequence or --keep-bisequence.");
throw e;
}
seqLen = fs.getLength();
numTokens += seqLen;
topics[di] = new int[seqLen];
// Randomly assign tokens to topics
for (int si = 0; si < seqLen; si++) {
topic = r.nextInt(numTopics);
topics[di][si] = topic;
docTopicCounts[di][topic]++;
typeTopicCounts[fs.getIndexAtPosition(si)][topic]++;
tokensPerTopic[topic]++;
}
}
this.estimate(0, numDocs, numIterations, showTopicsInterval, outputModelInterval, outputModelFilename, r);
// 124.5 seconds
// 144.8 seconds after using FeatureSequence instead of tokens[][] array
// 121.6 seconds after putting "final" on FeatureSequence.getIndexAtPosition()
// 106.3 seconds after avoiding array lookup in inner loop with a temporary variable
}
public void addDocuments(InstanceList additionalDocuments,
int numIterations, int showTopicsInterval,
int outputModelInterval, String outputModelFilename,
Randoms r)
{
if (ilist == null) throw new IllegalStateException ("Must already have some documents first.");
for (Instance inst : additionalDocuments)
ilist.add(inst);
assert (ilist.getDataAlphabet() == additionalDocuments.getDataAlphabet());
assert (additionalDocuments.getDataAlphabet().size() >= numTypes);
numTypes = additionalDocuments.getDataAlphabet().size();
int numNewDocs = additionalDocuments.size();
int numOldDocs = topics.length;
int numDocs = numOldDocs+ numNewDocs;
// Expand various arrays to make space for the new data.
int[][] newTopics = new int[numDocs][];
for (int i = 0; i < topics.length; i++)
newTopics[i] = topics[i];
topics = newTopics; // The rest of this array will be initialized below.
int[][] newDocTopicCounts = new int[numDocs][numTopics];
for (int i = 0; i < docTopicCounts.length; i++)
newDocTopicCounts[i] = docTopicCounts[i];
docTopicCounts = newDocTopicCounts; // The rest of this array will be initialized below.
int [][] newTypeTopicCounts = new int[numTypes][numTopics];
for (int i = 0; i < typeTopicCounts.length; i++)
for (int j = 0; j < numTopics; j++)
newTypeTopicCounts[i][j] = typeTopicCounts[i][j]; // This array further populated below
FeatureSequence fs;
for (int di = numOldDocs; di < numDocs; di++) {
try {
fs = (FeatureSequence) additionalDocuments.get(di-numOldDocs).getData();
} catch (ClassCastException e) {
System.err.println ("LDA and other topic models expect FeatureSequence data, not FeatureVector data. "
+"With text2vectors, you can obtain such data with --keep-sequence or --keep-bisequence.");
throw e;
}
int seqLen = fs.getLength();
numTokens += seqLen;
topics[di] = new int[seqLen];
// Randomly assign tokens to topics
for (int si = 0; si < seqLen; si++) {
int topic = r.nextInt(numTopics);
topics[di][si] = topic;
docTopicCounts[di][topic]++;
typeTopicCounts[fs.getIndexAtPosition(si)][topic]++;
tokensPerTopic[topic]++;
}
}
}
/* Perform several rounds of Gibbs sampling on the documents in the given range. */
public void estimate (int docIndexStart, int docIndexLength,
int numIterations, int showTopicsInterval,
int outputModelInterval, String outputModelFilename,
Randoms r)
{
long startTime = System.currentTimeMillis();
for (int iterations = 0; iterations < numIterations; iterations++) {
if (iterations % 10 == 0) System.out.print (iterations); else System.out.print (".");
System.out.flush();
if (showTopicsInterval != 0 && iterations % showTopicsInterval == 0 && iterations > 0) {
System.out.println ();
printTopWords (5, false);
}
if (outputModelInterval != 0 && iterations % outputModelInterval == 0 && iterations > 0) {
this.write (new File(outputModelFilename+'.'+iterations));
}
sampleTopicsForDocs(docIndexStart, docIndexLength, r);
}
long seconds = Math.round((System.currentTimeMillis() - startTime)/1000.0);
long minutes = seconds / 60; seconds %= 60;
long hours = minutes / 60; minutes %= 60;
long days = hours / 24; hours %= 24;
System.out.print ("\nTotal time: ");
if (days != 0) { System.out.print(days); System.out.print(" days "); }
if (hours != 0) { System.out.print(hours); System.out.print(" hours "); }
if (minutes != 0) { System.out.print(minutes); System.out.print(" minutes "); }
System.out.print(seconds); System.out.println(" seconds");
}
/* One iteration of Gibbs sampling, across all documents. */
public void sampleTopicsForAllDocs (Randoms r)
{
double[] topicWeights = new double[numTopics];
// Loop over every word in the corpus
for (int di = 0; di < topics.length; di++) {
sampleTopicsForOneDoc ((FeatureSequence)ilist.get(di).getData(),
topics[di], docTopicCounts[di], topicWeights, r);
}
}
/* One iteration of Gibbs sampling, across all documents. */
public void sampleTopicsForDocs (int start, int length, Randoms r)
{
assert (start+length <= docTopicCounts.length);
double[] topicWeights = new double[numTopics];
// Loop over every word in the corpus
for (int di = start; di < start+length; di++) {
sampleTopicsForOneDoc ((FeatureSequence)ilist.get(di).getData(),
topics[di], docTopicCounts[di], topicWeights, r);
}
}
/*
public double[] assignTopics (int[] testTokens, Random r)
{
int[] testTopics = new int[testTokens.length];
int[] testTopicCounts = new int[numTopics];
int numTokens = MatrixOps.sum(testTokens);
double[] topicWeights = new double[numTopics];
// Randomly assign topics to the words and
// incorporate this document in the global counts
int topic;
for (int si = 0; si < testTokens.length; si++) {
topic = r.nextInt (numTopics);
testTopics[si] = topic; // analogous to this.topics
testTopicCounts[topic]++; // analogous to this.docTopicCounts
typeTopicCounts[testTokens[si]][topic]++;
tokensPerTopic[topic]++;
}
// Repeatedly sample topic assignments for the words in this document
for (int iterations = 0; iterations < numTokens*2; iterations++)
sampleTopicsForOneDoc (testTokens, testTopics, testTopicCounts, topicWeights, r);
// Remove this document from the global counts
// and also fill topicWeights with an unnormalized distribution over topics for whole doc
Arrays.fill (topicWeights, 0.0);
for (int si = 0; si < testTokens.length; si++) {
topic = testTopics[si];
typeTopicCounts[testTokens[si]][topic]--;
tokensPerTopic[topic]--;
topicWeights[topic]++;
}
// Normalize the distribution over topics for whole doc
for (int ti = 0; ti < numTopics; ti++)
topicWeights[ti] /= testTokens.length;
return topicWeights;
}
*/
private void sampleTopicsForOneDoc (FeatureSequence oneDocTokens, int[] oneDocTopics, // indexed by seq position
int[] oneDocTopicCounts, // indexed by topic index
double[] topicWeights, Randoms r)
{
int[] currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int docLen = oneDocTokens.getLength();
double tw;
// Iterate over the positions (words) in the document
for (int si = 0; si < docLen; si++) {
type = oneDocTokens.getIndexAtPosition(si);
oldTopic = oneDocTopics[si];
// Remove this token from all counts
oneDocTopicCounts[oldTopic]--;
typeTopicCounts[type][oldTopic]--;
tokensPerTopic[oldTopic]--;
// Build a distribution over topics for this token
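			// Collapsed Gibbs update: p(z_i = t | rest) is proportional to
			//   (N_{w,t} + beta) / (N_t + V*beta) * (N_{d,t} + alpha),
			// where N_{w,t} counts word w in topic t, N_t counts all tokens in topic t, and
			// N_{d,t} counts topic t in this document, all with the current token removed.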
Arrays.fill (topicWeights, 0.0);
topicWeightsSum = 0;
currentTypeTopicCounts = typeTopicCounts[type];
for (int ti = 0; ti < numTopics; ti++) {
tw = ((currentTypeTopicCounts[ti] + beta) / (tokensPerTopic[ti] + vBeta))
* ((oneDocTopicCounts[ti] + alpha)); // (/docLen-1+tAlpha); is constant across all topics
topicWeightsSum += tw;
topicWeights[ti] = tw;
}
// Sample a topic assignment from this distribution
newTopic = r.nextDiscrete (topicWeights, topicWeightsSum);
// Put that new topic into the counts
oneDocTopics[si] = newTopic;
oneDocTopicCounts[newTopic]++;
typeTopicCounts[type][newTopic]++;
tokensPerTopic[newTopic]++;
}
}
public int[][] getDocTopicCounts(){
return docTopicCounts;
}
public int[][] getTypeTopicCounts(){
return typeTopicCounts;
}
public int[] getTokensPerTopic(){
return tokensPerTopic;
}
public void printTopWords (int numWords, boolean useNewLines)
{
class WordProb implements Comparable {
int wi;
double p;
public WordProb (int wi, double p) { this.wi = wi; this.p = p; }
public final int compareTo (Object o2) {
if (p > ((WordProb)o2).p)
return -1;
else if (p == ((WordProb)o2).p)
return 0;
else return 1;
}
}
WordProb[] wp = new WordProb[numTypes];
for (int ti = 0; ti < numTopics; ti++) {
for (int wi = 0; wi < numTypes; wi++)
wp[wi] = new WordProb (wi, ((double)typeTopicCounts[wi][ti]) / tokensPerTopic[ti]);
Arrays.sort (wp);
if (useNewLines) {
System.out.println ("\nTopic "+ti);
for (int i = 0; i < numWords; i++)
System.out.println (ilist.getDataAlphabet().lookupObject(wp[i].wi).toString() + " " + wp[i].p);
} else {
System.out.print ("Topic "+ti+": ");
for (int i = 0; i < numWords; i++)
System.out.print (ilist.getDataAlphabet().lookupObject(wp[i].wi).toString() + " ");
System.out.println();
}
}
}
public void printDocumentTopics (File f) throws IOException
{
printDocumentTopics (new PrintWriter (new FileWriter (f)));
}
public void printDocumentTopics (PrintWriter pw) {
printDocumentTopics (pw, 0.0, -1);
}
public void printDocumentTopics (PrintWriter pw, double threshold, int max)
{
pw.println ("#doc source topic proportion ...");
int docLen;
double topicDist[] = new double[topics.length];
for (int di = 0; di < topics.length; di++) {
pw.print (di); pw.print (' ');
if (ilist.get(di).getSource() != null){
pw.print (ilist.get(di).getSource().toString());
}
else {
pw.print("null-source");
}
pw.print (' ');
docLen = topics[di].length;
for (int ti = 0; ti < numTopics; ti++)
topicDist[ti] = (((float)docTopicCounts[di][ti])/docLen);
if (max < 0) max = numTopics;
for (int tp = 0; tp < max; tp++) {
double maxvalue = 0;
int maxindex = -1;
for (int ti = 0; ti < numTopics; ti++)
if (topicDist[ti] > maxvalue) {
maxvalue = topicDist[ti];
maxindex = ti;
}
if (maxindex == -1 || topicDist[maxindex] < threshold)
break;
pw.print (maxindex+" "+topicDist[maxindex]+" ");
topicDist[maxindex] = 0;
}
pw.println (' ');
}
}
public void printState (File f) throws IOException
{
PrintWriter writer = new PrintWriter (new FileWriter(f));
printState (writer);
writer.close();
}
public void printState (PrintWriter pw)
{
Alphabet a = ilist.getDataAlphabet();
pw.println ("#doc pos typeindex type topic");
for (int di = 0; di < topics.length; di++) {
FeatureSequence fs = (FeatureSequence) ilist.get(di).getData();
for (int si = 0; si < topics[di].length; si++) {
int type = fs.getIndexAtPosition(si);
pw.print(di); pw.print(' ');
pw.print(si); pw.print(' ');
pw.print(type); pw.print(' ');
pw.print(a.lookupObject(type)); pw.print(' ');
pw.print(topics[di][si]); pw.println();
}
}
}
public void write (File f) {
try {
ObjectOutputStream oos = new ObjectOutputStream (new FileOutputStream(f));
oos.writeObject(this);
oos.close();
}
catch (IOException e) {
System.err.println("Exception writing file " + f + ": " + e);
}
}
// Serialization
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeObject (ilist);
out.writeInt (numTopics);
out.writeDouble (alpha);
out.writeDouble (beta);
out.writeDouble (tAlpha);
out.writeDouble (vBeta);
for (int di = 0; di < topics.length; di ++)
for (int si = 0; si < topics[di].length; si++)
out.writeInt (topics[di][si]);
for (int di = 0; di < topics.length; di ++)
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (docTopicCounts[di][ti]);
for (int fi = 0; fi < numTypes; fi++)
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (typeTopicCounts[fi][ti]);
for (int ti = 0; ti < numTopics; ti++)
out.writeInt (tokensPerTopic[ti]);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int featuresLength;
int version = in.readInt ();
ilist = (InstanceList) in.readObject ();
numTopics = in.readInt();
alpha = in.readDouble();
beta = in.readDouble();
tAlpha = in.readDouble();
vBeta = in.readDouble();
int numDocs = ilist.size();
topics = new int[numDocs][];
for (int di = 0; di < ilist.size(); di++) {
int docLen = ((FeatureSequence)ilist.get(di).getData()).getLength();
topics[di] = new int[docLen];
for (int si = 0; si < docLen; si++)
topics[di][si] = in.readInt();
}
docTopicCounts = new int[numDocs][numTopics];
for (int di = 0; di < ilist.size(); di++)
for (int ti = 0; ti < numTopics; ti++)
docTopicCounts[di][ti] = in.readInt();
int numTypes = ilist.getDataAlphabet().size();
typeTopicCounts = new int[numTypes][numTopics];
for (int fi = 0; fi < numTypes; fi++)
for (int ti = 0; ti < numTopics; ti++)
typeTopicCounts[fi][ti] = in.readInt();
tokensPerTopic = new int[numTopics];
for (int ti = 0; ti < numTopics; ti++)
tokensPerTopic[ti] = in.readInt();
}
public InstanceList getInstanceList ()
{
return ilist;
}
// Recommended to use mallet/bin/vectors2topics instead.
public static void main (String[] args) throws IOException
{
InstanceList ilist = InstanceList.load (new File(args[0]));
int numIterations = args.length > 1 ? Integer.parseInt(args[1]) : 1000;
int numTopWords = args.length > 2 ? Integer.parseInt(args[2]) : 20;
System.out.println ("Data loaded.");
LDA lda = new LDA (10);
lda.estimate (ilist, numIterations, 50, 0, null, new Randoms()); // should be 1100
lda.printTopWords (numTopWords, true);
lda.printDocumentTopics (new File(args[0]+".lda"));
}
}
| 17,528 | 34.920082 | 119 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/TopicAssignment.java
|
package cc.mallet.topics;
import java.io.Serializable;
import cc.mallet.types.*;
/** This class combines a sequence of observed features
* with a sequence of hidden "labels".
*/
public class TopicAssignment implements Serializable {
public Instance instance;
public LabelSequence topicSequence;
public Labeling topicDistribution;
public TopicAssignment (Instance instance, LabelSequence topicSequence) {
this.instance = instance;
this.topicSequence = topicSequence;
}
}
| 503 | 24.2 | 74 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/HierarchicalPAM.java
|
package cc.mallet.topics;
import cc.mallet.types.*;
import cc.mallet.util.*;
import java.util.Arrays;
import java.io.*;
import java.text.NumberFormat;
import java.util.logging.*;
/**
* Hierarchical PAM, where each node in the DAG has a distribution over all topics on the
* next level and one additional "node-specific" topic.
* @author David Mimno
*/
public class HierarchicalPAM {
protected static Logger logger = MalletLogger.getLogger(HierarchicalPAM.class.getName());
static CommandOption.String inputFile = new CommandOption.String
(HierarchicalPAM.class, "input", "FILENAME", true, null,
"The filename from which to read the list of training instances. Use - for stdin. " +
"The instances must be FeatureSequence or FeatureSequenceWithBigrams, not FeatureVector", null);
static CommandOption.String stateFile = new CommandOption.String
(HierarchicalPAM.class, "output-state", "FILENAME", true, null,
"The filename in which to write the Gibbs sampling state after at the end of the iterations. " +
"By default this is null, indicating that no file will be written.", null);
static CommandOption.Double superTopicBalanceOption = new CommandOption.Double
(HierarchicalPAM.class, "super-topic-balance", "DECIMAL", true, 1.0,
"Weight (in \"words\") of the shared distribution over super-topics, relative to the document-specific distribution", null);
static CommandOption.Double subTopicBalanceOption = new CommandOption.Double
(HierarchicalPAM.class, "sub-topic-balance", "DECIMAL", true, 1.0,
"Weight (in \"words\") of the shared distribution over sub-topics for each super-topic, relative to the document-specific distribution", null);
static CommandOption.Integer numSuperTopicsOption = new CommandOption.Integer
(HierarchicalPAM.class, "num-super-topics", "INTEGER", true, 10,
"The number of super-topics", null);
static CommandOption.Integer numSubTopicsOption = new CommandOption.Integer
(HierarchicalPAM.class, "num-sub-topics", "INTEGER", true, 20,
"The number of sub-topics", null);
public static final int NUM_LEVELS = 3;
// Constants to determine the level of the output multinomial
public static final int ROOT_TOPIC = 0;
public static final int SUPER_TOPIC = 1;
public static final int SUB_TOPIC = 2;
// Parameters
int numSuperTopics; // Number of topics to be fit
int numSubTopics;
// This parameter controls the balance between
// the local document counts and the global distribution
// over super-topics.
double superTopicBalance = 1.0;
// This parameter is the smoothing on that global distribution
double superTopicSmoothing = 1.0;
// ... and the same for sub-topics.
double subTopicBalance = 1.0;
double subTopicSmoothing = 1.0;
// Prior on per-topic multinomial distribution over words
double beta;
double betaSum;
// Data
InstanceList instances; // the data field of the instances is expected to hold a FeatureSequence
int numTypes;
int numTokens;
// Gibbs sampling state
// (these could be shorts, or we could encode both in one int)
int[][] superTopics; // indexed by <document index, sequence index>
int[][] subTopics; // indexed by <document index, sequence index>
// Per-document state variables
int[][] superSubCounts; // # of words per <super, sub>
int[] superCounts; // # of words per <super>
double[] superWeights; // the component of the Gibbs update that depends on super-topics
double[] subWeights; // the component of the Gibbs update that depends on sub-topics
double[][] superSubWeights; // unnormalized sampling distribution
double[] cumulativeSuperWeights; // a cache of the cumulative weight for each super-topic
// Document frequencies used for "minimal path" hierarchical Dirichlets
int[] superTopicDocumentFrequencies;
int[][] superSubTopicDocumentFrequencies;
// ... and their sums
int sumDocumentFrequencies;
int[] sumSuperTopicDocumentFrequencies;
// [Note that this last is not the same as superTopicDocumentFrequencies]
// Cached priors
double[] superTopicPriorWeights;
double[][] superSubTopicPriorWeights;
// Per-word type state variables
int[][] typeTopicCounts; // indexed by <feature index, topic index>
int[] tokensPerTopic; // indexed by <topic index>
int[] tokensPerSuperTopic; // indexed by <topic index>
int[][] tokensPerSuperSubTopic;
Runtime runtime;
NumberFormat formatter;
public HierarchicalPAM (int superTopics, int subTopics, double superTopicBalance, double subTopicBalance) {
formatter = NumberFormat.getInstance();
formatter.setMaximumFractionDigits(5);
this.superTopicBalance = superTopicBalance;
this.subTopicBalance = subTopicBalance;
this.numSuperTopics = superTopics;
this.numSubTopics = subTopics;
superTopicDocumentFrequencies = new int[numSuperTopics + 1];
superSubTopicDocumentFrequencies = new int[numSuperTopics + 1][numSubTopics + 1];
sumSuperTopicDocumentFrequencies = new int[numSuperTopics];
this.beta = 0.01; // We can't calculate betaSum until we know how many word types...
runtime = Runtime.getRuntime();
}
public void estimate (InstanceList documents, InstanceList testing,
int numIterations, int showTopicsInterval,
int outputModelInterval, int optimizeInterval, String outputModelFilename,
Randoms r) {
instances = documents;
numTypes = instances.getDataAlphabet().size ();
int numDocs = instances.size();
superTopics = new int[numDocs][];
subTopics = new int[numDocs][];
// Allocate several arrays for use within each document
// to cut down memory allocation and garbage collection time
superSubCounts = new int[numSuperTopics + 1][numSubTopics + 1];
superCounts = new int[numSuperTopics + 1];
superWeights = new double[numSuperTopics + 1];
subWeights = new double[numSubTopics];
superSubWeights = new double[numSuperTopics + 1][numSubTopics + 1];
cumulativeSuperWeights = new double[numSuperTopics];
typeTopicCounts = new int[numTypes][1 + numSuperTopics + numSubTopics];
tokensPerTopic = new int[1 + numSuperTopics + numSubTopics];
tokensPerSuperTopic = new int[numSuperTopics + 1];
tokensPerSuperSubTopic = new int[numSuperTopics + 1][numSubTopics + 1];
betaSum = beta * numTypes;
long startTime = System.currentTimeMillis();
int maxTokens = 0;
// Initialize with random assignments of tokens to topics
// and finish allocating this.topics and this.tokens
int superTopic, subTopic, seqLen;
for (int doc = 0; doc < numDocs; doc++) {
int[] localTokensPerSuperTopic = new int[numSuperTopics + 1];
int[][] localTokensPerSuperSubTopic = new int[numSuperTopics + 1][numSubTopics + 1];
FeatureSequence fs = (FeatureSequence) instances.get(doc).getData();
seqLen = fs.getLength();
if (seqLen > maxTokens) {
maxTokens = seqLen;
}
numTokens += seqLen;
superTopics[doc] = new int[seqLen];
subTopics[doc] = new int[seqLen];
// Randomly assign tokens to topics
for (int position = 0; position < seqLen; position++) {
// Random super-topic
superTopic = r.nextInt(numSuperTopics);
// Random sub-topic
subTopic = r.nextInt(numSubTopics);
int level = r.nextInt(NUM_LEVELS);
if (level == ROOT_TOPIC) {
superTopics[doc][position] = numSuperTopics;
subTopics[doc][position] = numSubTopics;
typeTopicCounts[ fs.getIndexAtPosition(position) ][0]++;
tokensPerTopic[0]++;
tokensPerSuperTopic[numSuperTopics]++;
tokensPerSuperSubTopic[numSuperTopics][numSubTopics]++;
if (localTokensPerSuperTopic[numSuperTopics] == 0) {
superTopicDocumentFrequencies[numSuperTopics]++;
sumDocumentFrequencies++;
}
localTokensPerSuperTopic[numSuperTopics]++;
}
else if (level == SUPER_TOPIC) {
superTopics[doc][position] = superTopic;
subTopics[doc][position] = numSubTopics;
typeTopicCounts[ fs.getIndexAtPosition(position) ][1 + superTopic]++;
tokensPerTopic[1 + superTopic]++;
tokensPerSuperTopic[superTopic]++;
tokensPerSuperSubTopic[superTopic][numSubTopics]++;
if (localTokensPerSuperTopic[superTopic] == 0) {
superTopicDocumentFrequencies[superTopic]++;
sumDocumentFrequencies++;
}
localTokensPerSuperTopic[superTopic]++;
if (localTokensPerSuperSubTopic[superTopic][numSubTopics] == 0) {
superSubTopicDocumentFrequencies[superTopic][numSubTopics]++;
sumSuperTopicDocumentFrequencies[superTopic]++;
}
localTokensPerSuperSubTopic[superTopic][numSubTopics]++;
}
else {
superTopics[doc][position] = superTopic;
subTopics[doc][position] = subTopic;
typeTopicCounts[ fs.getIndexAtPosition(position) ][ 1 + numSuperTopics + subTopic]++;
tokensPerTopic[1 + numSuperTopics + subTopic]++;
tokensPerSuperTopic[superTopic]++;
tokensPerSuperSubTopic[superTopic][subTopic]++;
if (localTokensPerSuperTopic[superTopic] == 0) {
superTopicDocumentFrequencies[superTopic]++;
sumDocumentFrequencies++;
}
localTokensPerSuperTopic[superTopic]++;
if (localTokensPerSuperSubTopic[superTopic][subTopic] == 0) {
superSubTopicDocumentFrequencies[superTopic][subTopic]++;
sumSuperTopicDocumentFrequencies[superTopic]++;
}
localTokensPerSuperSubTopic[superTopic][subTopic]++;
}
}
}
// Initialize cached priors
superTopicPriorWeights = new double[ numSuperTopics + 1 ];
superSubTopicPriorWeights = new double[ numSuperTopics ][ numSubTopics + 1 ];
cacheSuperTopicPrior();
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
cacheSuperSubTopicPrior(superTopic);
}
// Finally, start the sampler!
for (int iterations = 1; iterations < numIterations; iterations++) {
long iterationStart = System.currentTimeMillis();
// Loop over every word in the corpus
for (int doc = 0; doc < superTopics.length; doc++) {
sampleTopicsForOneDoc ((FeatureSequence)instances.get(doc).getData(),
superTopics[doc], subTopics[doc], r);
}
if (showTopicsInterval != 0 && iterations % showTopicsInterval == 0) {
logger.info( printTopWords(8, false) );
}
logger.fine((System.currentTimeMillis() - iterationStart) + " ");
if (iterations % 10 == 0) {
logger.info ("<" + iterations + "> LL: " + formatter.format(modelLogLikelihood() / numTokens));
}
}
}
private void cacheSuperTopicPrior() {
for (int superTopic = 0; superTopic < numSuperTopics; superTopic++) {
superTopicPriorWeights[superTopic] =
(superTopicDocumentFrequencies[superTopic] + superTopicSmoothing) /
(sumDocumentFrequencies + (numSuperTopics + 1) * superTopicSmoothing);
}
superTopicPriorWeights[numSuperTopics] =
(superTopicDocumentFrequencies[numSuperTopics] + superTopicSmoothing) /
(sumDocumentFrequencies + (numSuperTopics + 1) * superTopicSmoothing);
}
private void cacheSuperSubTopicPrior(int superTopic) {
int[] documentFrequencies = superSubTopicDocumentFrequencies[superTopic];
for (int subTopic = 0; subTopic < numSubTopics; subTopic++) {
superSubTopicPriorWeights[superTopic][subTopic] =
(documentFrequencies[subTopic] + subTopicSmoothing ) /
(sumSuperTopicDocumentFrequencies[superTopic] + (numSubTopics + 1) * subTopicSmoothing);
}
superSubTopicPriorWeights[superTopic][numSubTopics] =
(documentFrequencies[numSubTopics] + subTopicSmoothing ) /
(sumSuperTopicDocumentFrequencies[superTopic] + (numSubTopics + 1) * subTopicSmoothing);
}
private void sampleTopicsForOneDoc (FeatureSequence oneDocTokens,
int[] superTopics, // indexed by seq position
int[] subTopics,
Randoms r) {
//long startTime = System.currentTimeMillis();
int[] currentTypeTopicCounts;
int[] currentSuperSubCounts;
double[] currentSuperSubWeights;
double[] wordWeights = new double[ 1 + numSuperTopics + numSubTopics ];
int type, subTopic, superTopic;
double rootWeight, currentSuperWeight, cumulativeWeight, sample;
int docLen = oneDocTokens.getLength();
Arrays.fill(superCounts, 0);
for (int t = 0; t < numSuperTopics; t++) {
Arrays.fill(superSubCounts[t], 0);
}
// populate topic counts
for (int position = 0; position < docLen; position++) {
superSubCounts[ superTopics[position] ][ subTopics[position] ]++;
superCounts[ superTopics[position] ]++;
}
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
superWeights[superTopic] =
((double) superCounts[superTopic] +
(superTopicBalance * superTopicPriorWeights[superTopic])) /
((double) superCounts[superTopic] + subTopicBalance);
assert(superWeights[superTopic] != 0.0);
}
// Iterate over the positions (words) in the document
for (int position = 0; position < docLen; position++) {
type = oneDocTokens.getIndexAtPosition(position);
currentTypeTopicCounts = typeTopicCounts[type];
superTopic = superTopics[position];
subTopic = subTopics[position];
if (superTopic == numSuperTopics) {
currentTypeTopicCounts[ 0 ]--;
tokensPerTopic[ 0 ]--;
}
else if (subTopic == numSubTopics) {
currentTypeTopicCounts[ 1 + superTopic ]--;
tokensPerTopic[ 1 + superTopic ]--;
}
else {
currentTypeTopicCounts[ 1 + numSuperTopics + subTopic ]--;
tokensPerTopic[ 1 + numSuperTopics + subTopic ]--;
}
// Remove this token from all counts
superCounts[superTopic]--;
superSubCounts[superTopic][subTopic]--;
if (superCounts[superTopic] == 0) {
// The document frequencies have changed.
// Decrement and recalculate the prior weights
superTopicDocumentFrequencies[superTopic]--;
sumDocumentFrequencies--;
cacheSuperTopicPrior();
}
if (superTopic != numSuperTopics &&
superSubCounts[superTopic][subTopic] == 0) {
superSubTopicDocumentFrequencies[superTopic][subTopic]--;
sumSuperTopicDocumentFrequencies[superTopic]--;
cacheSuperSubTopicPrior(superTopic);
}
tokensPerSuperTopic[superTopic]--;
tokensPerSuperSubTopic[superTopic][subTopic]--;
// Update the super-topic weight for the old topic.
superWeights[superTopic] =
((double) superCounts[superTopic] +
(superTopicBalance * superTopicPriorWeights[superTopic])) /
((double) superCounts[superTopic] + subTopicBalance);
// Build a distribution over super-sub topic pairs
// for this token
for (int i=0; i<wordWeights.length; i++) {
wordWeights[i] =
(beta + currentTypeTopicCounts[i]) /
(betaSum + tokensPerTopic[i]);
assert(wordWeights[i] != 0);
}
Arrays.fill(cumulativeSuperWeights, 0.0);
// The conditional probability of each super-sub pair is proportional
// to an expression with three parts, one that depends only on the
// super-topic, one that depends only on the sub-topic and the word type,
// and one that depends on the super-sub pair.
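// Concretely, the weight computed below for the pair (super, sub) is
// superWeights[super]
// * wordWeights[ output-topic index for (super, sub) ]
// * ( superSubCounts[super][sub]
// + subTopicBalance * superSubTopicPriorWeights[super][sub] )
// where the output topic is the sub-topic, or the super-topic itself when
// sub == numSubTopics (the "node-specific" topic).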
cumulativeWeight = 0.0;
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
currentSuperSubWeights = superSubWeights[superTopic];
currentSuperSubCounts = superSubCounts[superTopic];
currentSuperWeight = superWeights[superTopic];
int[] documentFrequencies = superSubTopicDocumentFrequencies[superTopic];
double[] priorCache = superSubTopicPriorWeights[superTopic];
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
currentSuperSubWeights[subTopic] =
currentSuperWeight *
wordWeights[ 1 + numSuperTopics + subTopic ] *
((double) currentSuperSubCounts[subTopic] +
( subTopicBalance * priorCache[subTopic] ));
cumulativeWeight += currentSuperSubWeights[subTopic];
}
currentSuperSubWeights[numSubTopics] =
currentSuperWeight *
wordWeights[1 + superTopic] *
((double) currentSuperSubCounts[numSubTopics] +
( subTopicBalance * priorCache[numSubTopics] ));
cumulativeWeight += currentSuperSubWeights[numSubTopics];
cumulativeSuperWeights[superTopic] = cumulativeWeight;
assert(cumulativeSuperWeights[superTopic] != 0.0);
}
rootWeight = wordWeights[0] *
(superCounts[numSuperTopics] +
(superTopicBalance * superTopicPriorWeights[numSuperTopics]));
// Sample a topic assignment from this distribution
sample = r.nextUniform() * (cumulativeWeight + rootWeight);
if (sample > cumulativeWeight) {
// We picked the root topic
currentTypeTopicCounts[ 0 ]++;
tokensPerTopic[ 0 ] ++;
superTopic = numSuperTopics;
subTopic = numSubTopics;
}
else {
// Go over the row sums to find the super-topic...
superTopic = 0;
while (sample > cumulativeSuperWeights[superTopic]) {
superTopic++;
}
// Now read across to find the sub-topic
currentSuperSubWeights = superSubWeights[superTopic];
cumulativeWeight = cumulativeSuperWeights[superTopic];
// Go over each sub-topic until the weight is LESS than
// the sample. Note that we're subtracting weights
// in the same order we added them...
subTopic = 0;
cumulativeWeight -= currentSuperSubWeights[0];
while (sample < cumulativeWeight) {
subTopic++;
cumulativeWeight -= currentSuperSubWeights[subTopic];
}
if (subTopic == numSubTopics) {
currentTypeTopicCounts[ 1 + superTopic ]++;
tokensPerTopic[ 1 + superTopic ]++;
}
else {
currentTypeTopicCounts[ 1 + numSuperTopics + subTopic ]++;
tokensPerTopic[ 1 + numSuperTopics + subTopic ]++;
}
}
// Save the choice into the Gibbs state
superTopics[position] = superTopic;
subTopics[position] = subTopic;
// Put the new super/sub topics into the counts
superSubCounts[superTopic][subTopic]++;
superCounts[superTopic]++;
if (superCounts[superTopic] == 1) {
superTopicDocumentFrequencies[superTopic]++;
sumDocumentFrequencies++;
cacheSuperTopicPrior();
}
if (superTopic != numSuperTopics &&
superSubCounts[superTopic][subTopic] == 1) {
superSubTopicDocumentFrequencies[superTopic][subTopic]++;
sumSuperTopicDocumentFrequencies[superTopic]++;
cacheSuperSubTopicPrior(superTopic);
}
tokensPerSuperTopic[superTopic]++;
tokensPerSuperSubTopic[superTopic][subTopic]++;
// Update the weight for the new super topic
superWeights[superTopic] =
((double) superCounts[superTopic] +
(superTopicBalance * superTopicPriorWeights[superTopic])) /
((double) superCounts[superTopic] + subTopicBalance);
}
}
public String printTopWords (int numWords, boolean useNewLines) {
StringBuilder output = new StringBuilder();
IDSorter[] sortedTypes = new IDSorter[numTypes];
IDSorter[] sortedSubTopics = new IDSorter[numSubTopics];
String[] topicTerms = new String[1 + numSuperTopics + numSubTopics];
int subTopic, superTopic;
for (int topic = 0; topic < topicTerms.length; topic++) {
for (int type = 0; type < numTypes; type++)
sortedTypes[type] = new IDSorter (type,
(((double) typeTopicCounts[type][topic]) /
tokensPerTopic[topic]));
Arrays.sort (sortedTypes);
StringBuilder terms = new StringBuilder();
for (int i = 0; i < numWords; i++) {
terms.append(instances.getDataAlphabet().lookupObject(sortedTypes[i].getID()));
terms.append(" ");
}
topicTerms[topic] = terms.toString();
}
int maxSubTopics = 10;
if (numSubTopics < 10) { maxSubTopics = numSubTopics; }
output.append("Root: " + "[" + tokensPerSuperTopic[numSuperTopics] + "/" +
superTopicDocumentFrequencies[numSuperTopics] + "]" +
topicTerms[0] + "\n");
for (superTopic = 0; superTopic < numSuperTopics; superTopic++) {
for (subTopic = 0; subTopic < numSubTopics; subTopic++) {
sortedSubTopics[subTopic] =
new IDSorter(subTopic, tokensPerSuperSubTopic[superTopic][subTopic]);
}
Arrays.sort(sortedSubTopics);
output.append("\nSuper-topic " + superTopic +
" [" + tokensPerSuperTopic[superTopic] + "/" +
superTopicDocumentFrequencies[superTopic] + " " +
tokensPerSuperSubTopic[superTopic][numSubTopics] + "/" +
superSubTopicDocumentFrequencies[superTopic][numSubTopics] + "]\t" +
topicTerms[1 + superTopic] + "\n");
for (int i = 0; i < maxSubTopics; i++) {
subTopic = sortedSubTopics[i].getID();
output.append(subTopic + ":\t" +
tokensPerSuperSubTopic[superTopic][subTopic] + "/" +
formatter.format(superSubTopicDocumentFrequencies[superTopic][subTopic]) + "\t" +
topicTerms[1 + numSuperTopics + subTopic] + "\n");
}
}
return output.toString();
}
public void printState (File f) throws IOException {
PrintWriter out = new PrintWriter (new BufferedWriter (new FileWriter(f)));
printState (out);
out.close();
}
public void printState (PrintWriter out) {
Alphabet alphabet = instances.getDataAlphabet();
out.println ("#doc pos typeindex type super-topic sub-topic");
for (int doc = 0; doc < superTopics.length; doc++) {
StringBuilder output = new StringBuilder();
FeatureSequence fs = (FeatureSequence) instances.get(doc).getData();
for (int position = 0; position < superTopics[doc].length; position++) {
int type = fs.getIndexAtPosition(position);
output.append(doc); output.append(' ');
output.append(position); output.append(' ');
output.append(type); output.append(' ');
output.append(alphabet.lookupObject(type)); output.append(' ');
output.append(superTopics[doc][position]); output.append(' ');
output.append(subTopics[doc][position]); output.append("\n");
}
out.print(output);
}
}
public double modelLogLikelihood() {
double logLikelihood = 0.0;
int nonZeroTopics;
// The likelihood of the model is a combination of a
// Dirichlet-multinomial for the words in each topic
// and a Dirichlet-multinomial for the topics in each
// document.
// The likelihood function of a dirichlet multinomial is
// [ Gamma( sum_i alpha_i ) * prod_i Gamma( alpha_i + N_i ) ] /
// [ prod_i Gamma( alpha_i ) * Gamma( sum_i (alpha_i + N_i) ) ]
// So the log likelihood is
// logGamma ( sum_i alpha_i ) - logGamma ( sum_i (alpha_i + N_i) ) +
// sum_i [ logGamma( alpha_i + N_i) - logGamma( alpha_i ) ]
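// In this model the document-level Dirichlet parameters are
// alpha_super[s] = superTopicBalance * superTopicPriorWeights[s] (super-topics plus root)
// alpha_sub[s][t] = subTopicBalance * superSubTopicPriorWeights[s][t] (sub-topics plus the
// node-specific topic)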
// Do the documents first
int superTopic, subTopic;
double[] superTopicLogGammas = new double[numSuperTopics + 1];
double[][] superSubTopicLogGammas = new double[numSuperTopics][numSubTopics + 1];
for (superTopic=0; superTopic < numSuperTopics; superTopic++) {
// Include the balance (concentration) factor, matching the count term used below
superTopicLogGammas[ superTopic ] = Dirichlet.logGamma(superTopicBalance * superTopicPriorWeights[superTopic]);
for (subTopic=0; subTopic < numSubTopics; subTopic++) {
superSubTopicLogGammas[ superTopic ][ subTopic ] =
Dirichlet.logGamma(subTopicBalance * superSubTopicPriorWeights[superTopic][subTopic]);
}
superSubTopicLogGammas[ superTopic ][ numSubTopics ] =
Dirichlet.logGamma(subTopicBalance * superSubTopicPriorWeights[superTopic][numSubTopics]);
}
superTopicLogGammas[ numSuperTopics ] = Dirichlet.logGamma(superTopicBalance * superTopicPriorWeights[numSuperTopics]);
int[] superTopicCounts = new int[ numSuperTopics + 1];
int[][] superSubTopicCounts = new int[ numSuperTopics ][ numSubTopics + 1];
int[] docSuperTopics;
int[] docSubTopics;
for (int doc=0; doc < superTopics.length; doc++) {
docSuperTopics = superTopics[doc];
docSubTopics = subTopics[doc];
for (int token=0; token < docSuperTopics.length; token++) {
superTopic = docSuperTopics[ token ];
subTopic = docSubTopics[ token ];
superTopicCounts[ superTopic ]++;
if (superTopic != numSuperTopics) {
superSubTopicCounts[ superTopic ][ subTopic ]++;
}
}
for (superTopic=0; superTopic < numSuperTopics; superTopic++) {
if (superTopicCounts[superTopic] > 0) {
logLikelihood += (Dirichlet.logGamma(superTopicBalance * superTopicPriorWeights[superTopic] +
superTopicCounts[superTopic]) -
superTopicLogGammas[ superTopic ]);
for (subTopic=0; subTopic < numSubTopics; subTopic++) {
if (superSubTopicCounts[superTopic][subTopic] > 0) {
logLikelihood += (Dirichlet.logGamma(subTopicBalance * superSubTopicPriorWeights[superTopic][subTopic] +
superSubTopicCounts[superTopic][subTopic]) -
superSubTopicLogGammas[ superTopic ][ subTopic ]);
}
}
// Account for words assigned to super-topic
logLikelihood += (Dirichlet.logGamma(subTopicBalance * superSubTopicPriorWeights[superTopic][numSubTopics] +
superSubTopicCounts[superTopic][numSubTopics]) -
superSubTopicLogGammas[ superTopic ][ numSubTopics ]);
// The term for the sums
logLikelihood +=
Dirichlet.logGamma(subTopicBalance) -
Dirichlet.logGamma(subTopicBalance + superTopicCounts[superTopic]);
Arrays.fill(superSubTopicCounts[superTopic], 0);
}
}
// Account for words assigned to the root topic
logLikelihood += (Dirichlet.logGamma(superTopicBalance * superTopicPriorWeights[numSuperTopics] +
superTopicCounts[numSuperTopics]) -
superTopicLogGammas[ numSuperTopics ]);
// subtract the (count + parameter) sum term
logLikelihood -= Dirichlet.logGamma(superTopicBalance + docSuperTopics.length);
Arrays.fill(superTopicCounts, 0);
}
// add the parameter sum term for every document all at once.
logLikelihood += superTopics.length * Dirichlet.logGamma(superTopicBalance);
// And the topics
// Count the number of type-topic pairs
int nonZeroTypeTopics = 0;
for (int type=0; type < numTypes; type++) {
// reuse this array as a pointer
int[] topicCounts = typeTopicCounts[type];
for (int topic=0; topic < numSuperTopics + numSubTopics + 1; topic++) {
if (topicCounts[topic] > 0) {
nonZeroTypeTopics++;
logLikelihood += Dirichlet.logGamma(beta + topicCounts[topic]);
}
}
}
for (int topic=0; topic < numSuperTopics + numSubTopics + 1; topic++) {
logLikelihood -=
Dirichlet.logGamma( (beta * (numSuperTopics + numSubTopics + 1)) +
tokensPerTopic[ topic ] );
}
logLikelihood +=
(Dirichlet.logGamma(beta * (numSuperTopics + numSubTopics + 1))) -
(Dirichlet.logGamma(beta) * nonZeroTypeTopics);
return logLikelihood;
}
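// A typical command-line invocation, sketched with hypothetical file names
// (the options are declared at the top of the class and parsed below):
//
// java cc.mallet.topics.HierarchicalPAM --input training.mallet \
// --num-super-topics 10 --num-sub-topics 20 --output-state state.txt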
public static void main (String[] args) throws IOException {
CommandOption.setSummary(HierarchicalPAM.class, "Train a three level hierarchy of topics");
CommandOption.process(HierarchicalPAM.class, args);
InstanceList instances = InstanceList.load (new File(inputFile.value));
InstanceList testing = null;
HierarchicalPAM pam = new HierarchicalPAM (numSuperTopicsOption.value, numSubTopicsOption.value,
superTopicBalanceOption.value, subTopicBalanceOption.value);
pam.estimate (instances, testing, 1000, 100, 0, 250, null, new Randoms());
if (stateFile.wasInvoked()) {
pam.printState(new File(stateFile.value));
}
}
}
| 27,997 | 35.408322 | 146 |
java
|
twitter_nlp
|
twitter_nlp-master/mallet-2.0.6/src/cc/mallet/topics/MarginalProbEstimator.java
|
/* Copyright (C) 2005 Univ. of Massachusetts Amherst, Computer Science Dept.
This file is part of "MALLET" (MAchine Learning for LanguagE Toolkit).
http://www.cs.umass.edu/~mccallum/mallet
This software is provided under the terms of the Common Public License,
version 1.0, as published by http://www.opensource.org. For further
information, see the file `LICENSE' included with this distribution. */
package cc.mallet.topics;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.zip.*;
import java.io.*;
import java.text.NumberFormat;
import cc.mallet.types.*;
import cc.mallet.util.Randoms;
/**
* An implementation of topic model marginal probability estimators
* presented in Wallach et al., "Evaluation Methods for Topic Models", ICML (2009)
*
* @author David Mimno
*/
public class MarginalProbEstimator implements Serializable {
protected int numTopics; // Number of topics to be fit
// These values are used to encode type/topic counts as
// count/topic pairs in a single int.
protected int topicMask;
protected int topicBits;
protected double[] alpha; // Dirichlet(alpha,alpha,...) is the distribution over topics
protected double alphaSum;
protected double beta; // Prior on per-topic multinomial distribution over words
protected double betaSum;
protected double smoothingOnlyMass = 0.0;
protected double[] cachedCoefficients;
protected int[][] typeTopicCounts; // indexed by <feature index, topic index>
protected int[] tokensPerTopic; // indexed by <topic index>
protected Randoms random;
public MarginalProbEstimator (int numTopics,
double[] alpha, double alphaSum,
double beta,
int[][] typeTopicCounts,
int[] tokensPerTopic) {
this.numTopics = numTopics;
if (Integer.bitCount(numTopics) == 1) {
// exact power of 2
topicMask = numTopics - 1;
topicBits = Integer.bitCount(topicMask);
}
else {
// otherwise add an extra bit
topicMask = Integer.highestOneBit(numTopics) * 2 - 1;
topicBits = Integer.bitCount(topicMask);
}
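// Example: with numTopics = 50, topicMask = 63 (six low-order bits) and
// topicBits = 6, an entry stores (count << topicBits) | topic; the sampling
// code below recovers the topic with (entry & topicMask) and the count with
// (entry >> topicBits).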
this.typeTopicCounts = typeTopicCounts;
this.tokensPerTopic = tokensPerTopic;
this.alphaSum = alphaSum;
this.alpha = alpha;
this.beta = beta;
this.betaSum = beta * typeTopicCounts.length;
this.random = new Randoms();
cachedCoefficients = new double[ numTopics ];
// Initialize the smoothing-only sampling bucket
smoothingOnlyMass = 0;
// Initialize the cached coefficients, using only smoothing.
// These values will be selectively replaced in documents with
// non-zero counts in particular topics.
for (int topic=0; topic < numTopics; topic++) {
smoothingOnlyMass += alpha[topic] * beta / (tokensPerTopic[topic] + betaSum);
cachedCoefficients[topic] = alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
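// This is the usual sparse decomposition of the (unnormalized) sampling weight
// for topic t and word w, with document count n_t|d and word count n_w|t:
// (alpha_t + n_t|d) * (beta + n_w|t) / (betaSum + n_t)
// = alpha_t * beta / (betaSum + n_t) -> smoothingOnlyMass (constant here)
// + n_t|d * beta / (betaSum + n_t) -> topicBetaMass (per document)
// + (alpha_t + n_t|d) * n_w|t / (betaSum + n_t) -> topicTermMass (per token)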
System.err.println("Topic Evaluator: " + numTopics + " topics, " + topicBits + " topic bits, " +
Integer.toBinaryString(topicMask) + " topic mask");
}
public int[] getTokensPerTopic() { return tokensPerTopic; }
public int[][] getTypeTopicCounts() { return typeTopicCounts; }
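// A minimal usage sketch (hypothetical file names; the estimator is typically
// obtained from a trained topic model or deserialized with read()):
//
// MarginalProbEstimator estimator = MarginalProbEstimator.read(new File("evaluator.ser"));
// InstanceList testing = InstanceList.load(new File("test.mallet"));
// double totalLogLikelihood = estimator.evaluateLeftToRight(testing, 10, false, null);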
public double evaluateLeftToRight (InstanceList testing, int numParticles, boolean usingResampling,
PrintStream docProbabilityStream) {
random = new Randoms();
double logNumParticles = Math.log(numParticles);
double totalLogLikelihood = 0;
for (Instance instance : testing) {
FeatureSequence tokenSequence = (FeatureSequence) instance.getData();
double docLogLikelihood = 0;
double[][] particleProbabilities = new double[ numParticles ][];
for (int particle = 0; particle < numParticles; particle++) {
particleProbabilities[particle] =
leftToRight(tokenSequence, usingResampling);
}
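// Average the per-token probabilities over particles, then sum the logs:
// log p(doc) ~= sum_n log( (1/numParticles) * sum_r p_r(w_n | w_1 .. w_{n-1}) )
// which is implemented below as Math.log(sum) - logNumParticles per position.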
for (int position = 0; position < particleProbabilities[0].length; position++) {
double sum = 0;
for (int particle = 0; particle < numParticles; particle++) {
sum += particleProbabilities[particle][position];
}
if (sum > 0.0) {
docLogLikelihood += Math.log(sum) - logNumParticles;
}
}
if (docProbabilityStream != null) {
docProbabilityStream.println(docLogLikelihood);
}
totalLogLikelihood += docLogLikelihood;
}
return totalLogLikelihood;
}
protected double[] leftToRight (FeatureSequence tokenSequence, boolean usingResampling) {
int[] oneDocTopics = new int[tokenSequence.getLength()];
double[] wordProbabilities = new double[tokenSequence.getLength()];
int[] currentTypeTopicCounts;
int type, oldTopic, newTopic;
double topicWeightsSum;
int docLength = tokenSequence.getLength();
// Keep track of the number of tokens we've examined, not
// including out-of-vocabulary words
int tokensSoFar = 0;
int[] localTopicCounts = new int[numTopics];
int[] localTopicIndex = new int[numTopics];
// Build an array that densely lists the topics that
// have non-zero counts.
int denseIndex = 0;
// Record the total number of non-zero topics
int nonZeroTopics = denseIndex;
// Initialize the topic count/beta sampling bucket
double topicBetaMass = 0.0;
double topicTermMass = 0.0;
double[] topicTermScores = new double[numTopics];
int[] topicTermIndices;
int[] topicTermValues;
int i;
double score;
double logLikelihood = 0;
// All counts are now zero, we are starting completely fresh.
// Iterate over the positions (words) in the document
for (int limit = 0; limit < docLength; limit++) {
// Record the marginal probability of the token
// at the current limit, summed over all topics.
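// More precisely, for each prefix length "limit": optionally resample the
// topics of all earlier positions, accumulate the unnormalized probability
// mass for the token at the limit, normalize by (alphaSum + tokensSoFar),
// and finally sample a topic assignment for that token before moving on.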
if (usingResampling) {
// Iterate up to the current limit
for (int position = 0; position < limit; position++) {
type = tokenSequence.getIndexAtPosition(position);
oldTopic = oneDocTopics[position];
// Check for out-of-vocabulary words
if (type >= typeTopicCounts.length ||
typeTopicCounts[type] == null) {
continue;
}
currentTypeTopicCounts = typeTopicCounts[type];
// Remove this token from all counts.
// Remove this topic's contribution to the
// normalizing constants.
// Note that we are using clamped estimates of P(w|t),
// so we are NOT changing smoothingOnlyMass.
topicBetaMass -= beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Decrement the local doc/topic counts
localTopicCounts[oldTopic]--;
// Maintain the dense index, if we are deleting
// the old topic
if (localTopicCounts[oldTopic] == 0) {
// First get to the dense location associated with
// the old topic.
denseIndex = 0;
// We know it's in there somewhere, so we don't
// need bounds checking.
while (localTopicIndex[denseIndex] != oldTopic) {
denseIndex++;
}
// shift all remaining dense indices to the left.
while (denseIndex < nonZeroTopics) {
if (denseIndex < localTopicIndex.length - 1) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex + 1];
}
denseIndex++;
}
nonZeroTopics --;
}
// Add the old topic's contribution back into the
// normalizing constants.
topicBetaMass += beta * localTopicCounts[oldTopic] /
(tokensPerTopic[oldTopic] + betaSum);
// Reset the cached coefficient for this topic
cachedCoefficients[oldTopic] =
(alpha[oldTopic] + localTopicCounts[oldTopic]) /
(tokensPerTopic[oldTopic] + betaSum);
// Now go over the type/topic counts, calculating the score
// for each topic.
int index = 0;
int currentTopic, currentValue;
boolean alreadyDecremented = false;
topicTermMass = 0.0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
score =
cachedCoefficients[currentTopic] * currentValue;
topicTermMass += score;
topicTermScores[index] = score;
index++;
}
double sample = random.nextUniform() * (smoothingOnlyMass + topicBetaMass + topicTermMass);
double origSample = sample;
// Make sure it actually gets set
newTopic = -1;
if (sample < topicTermMass) {
i = -1;
while (sample > 0) {
i++;
sample -= topicTermScores[i];
}
newTopic = currentTypeTopicCounts[i] & topicMask;
}
else {
sample -= topicTermMass;
if (sample < topicBetaMass) {
//betaTopicCount++;
sample /= beta;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
sample -= localTopicCounts[topic] /
(tokensPerTopic[topic] + betaSum);
if (sample <= 0.0) {
newTopic = topic;
break;
}
}
}
else {
//smoothingOnlyCount++;
sample -= topicBetaMass;
sample /= beta;
newTopic = 0;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
while (sample > 0.0) {
newTopic++;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
}
}
if (newTopic == -1) {
System.err.println("sampling error: "+ origSample + " " + sample + " " + smoothingOnlyMass + " " +
topicBetaMass + " " + topicTermMass);
newTopic = numTopics-1; // TODO is this appropriate
//throw new IllegalStateException ("WorkerRunnable: New topic not sampled.");
}
//assert(newTopic != -1);
// Put that new topic into the counts
oneDocTopics[position] = newTopic;
topicBetaMass -= beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
localTopicCounts[newTopic]++;
// If this is a new topic for this document,
// add the topic to the dense index.
if (localTopicCounts[newTopic] == 1) {
// First find the point where we
// should insert the new topic by going to
// the end (which is the only reason we're keeping
// track of the number of non-zero
// topics) and working backwards
denseIndex = nonZeroTopics;
while (denseIndex > 0 &&
localTopicIndex[denseIndex - 1] > newTopic) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex - 1];
denseIndex--;
}
localTopicIndex[denseIndex] = newTopic;
nonZeroTopics++;
}
// update the coefficients for the non-zero topics
cachedCoefficients[newTopic] =
(alpha[newTopic] + localTopicCounts[newTopic]) /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
}
// We've just resampled all tokens UP TO the current limit,
// now sample the token AT the current limit.
type = tokenSequence.getIndexAtPosition(limit);
// Check for out-of-vocabulary words
if (type >= typeTopicCounts.length ||
typeTopicCounts[type] == null) {
continue;
}
currentTypeTopicCounts = typeTopicCounts[type];
int index = 0;
int currentTopic, currentValue;
topicTermMass = 0.0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
score =
cachedCoefficients[currentTopic] * currentValue;
topicTermMass += score;
topicTermScores[index] = score;
//System.out.println(" " + currentTopic + " = " + currentValue);
index++;
}
/* // Debugging, to make sure we're getting the right probabilities
for (int topic = 0; topic < numTopics; topic++) {
index = 0;
int displayCount = 0;
while (index < currentTypeTopicCounts.length &&
currentTypeTopicCounts[index] > 0) {
currentTopic = currentTypeTopicCounts[index] & topicMask;
currentValue = currentTypeTopicCounts[index] >> topicBits;
if (currentTopic == topic) {
displayCount = currentValue;
break;
}
index++;
}
System.out.print(topic + "\t");
System.out.print("(" + localTopicCounts[topic] + " + " + alpha[topic] + ") / " +
"(" + alphaSum + " + " + tokensSoFar + ") * ");
System.out.println("(" + displayCount + " + " + beta + ") / " +
"(" + tokensPerTopic[topic] + " + " + betaSum + ") =" +
((displayCount + beta) / (tokensPerTopic[topic] + betaSum)));
}
*/
double sample = random.nextUniform() * (smoothingOnlyMass + topicBetaMass + topicTermMass);
double origSample = sample;
// Note that we've been absorbing (alphaSum + tokensSoFar) into
// the normalizing constant. The true marginal probability needs
// this term, so we stick it back in.
wordProbabilities[limit] +=
(smoothingOnlyMass + topicBetaMass + topicTermMass) /
(alphaSum + tokensSoFar);
//System.out.println("normalizer: " + alphaSum + " + " + tokensSoFar);
tokensSoFar++;
// Make sure it actually gets set
newTopic = -1;
if (sample < topicTermMass) {
i = -1;
while (sample > 0) {
i++;
sample -= topicTermScores[i];
}
newTopic = currentTypeTopicCounts[i] & topicMask;
}
else {
sample -= topicTermMass;
if (sample < topicBetaMass) {
//betaTopicCount++;
sample /= beta;
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
sample -= localTopicCounts[topic] /
(tokensPerTopic[topic] + betaSum);
if (sample <= 0.0) {
newTopic = topic;
break;
}
}
}
else {
//smoothingOnlyCount++;
sample -= topicBetaMass;
sample /= beta;
newTopic = 0;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
while (sample > 0.0) {
newTopic++;
sample -= alpha[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
}
}
}
if (newTopic == -1) {
System.err.println("sampling error: "+ origSample + " " +
sample + " " + smoothingOnlyMass + " " +
topicBetaMass + " " + topicTermMass);
newTopic = numTopics-1; // TODO is this appropriate
}
// Put that new topic into the counts
oneDocTopics[limit] = newTopic;
topicBetaMass -= beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
localTopicCounts[newTopic]++;
// If this is a new topic for this document,
// add the topic to the dense index.
if (localTopicCounts[newTopic] == 1) {
// First find the point where we
// should insert the new topic by going to
// the end (which is the only reason we're keeping
// track of the number of non-zero
// topics) and working backwards
denseIndex = nonZeroTopics;
while (denseIndex > 0 &&
localTopicIndex[denseIndex - 1] > newTopic) {
localTopicIndex[denseIndex] =
localTopicIndex[denseIndex - 1];
denseIndex--;
}
localTopicIndex[denseIndex] = newTopic;
nonZeroTopics++;
}
// update the coefficients for the non-zero topics
cachedCoefficients[newTopic] =
(alpha[newTopic] + localTopicCounts[newTopic]) /
(tokensPerTopic[newTopic] + betaSum);
topicBetaMass += beta * localTopicCounts[newTopic] /
(tokensPerTopic[newTopic] + betaSum);
//System.out.println(type + "\t" + newTopic + "\t" + logLikelihood);
}
// Clean up our mess: reset the coefficients to values with only
// smoothing. The next doc will update its own non-zero topics...
for (denseIndex = 0; denseIndex < nonZeroTopics; denseIndex++) {
int topic = localTopicIndex[denseIndex];
cachedCoefficients[topic] =
alpha[topic] / (tokensPerTopic[topic] + betaSum);
}
return wordProbabilities;
}
private static final long serialVersionUID = 1;
private static final int CURRENT_SERIAL_VERSION = 0;
private static final int NULL_INTEGER = -1;
private void writeObject (ObjectOutputStream out) throws IOException {
out.writeInt (CURRENT_SERIAL_VERSION);
out.writeInt(numTopics);
out.writeInt(topicMask);
out.writeInt(topicBits);
out.writeObject(alpha);
out.writeDouble(alphaSum);
out.writeDouble(beta);
out.writeDouble(betaSum);
out.writeObject(typeTopicCounts);
out.writeObject(tokensPerTopic);
out.writeObject(random);
out.writeDouble(smoothingOnlyMass);
out.writeObject(cachedCoefficients);
}
private void readObject (ObjectInputStream in) throws IOException, ClassNotFoundException {
int version = in.readInt ();
numTopics = in.readInt();
topicMask = in.readInt();
topicBits = in.readInt();
alpha = (double[]) in.readObject();
alphaSum = in.readDouble();
beta = in.readDouble();
betaSum = in.readDouble();
typeTopicCounts = (int[][]) in.readObject();
tokensPerTopic = (int[]) in.readObject();
random = (Randoms) in.readObject();
smoothingOnlyMass = in.readDouble();
cachedCoefficients = (double[]) in.readObject();
}
public static MarginalProbEstimator read (File f) throws Exception {
MarginalProbEstimator estimator = null;
ObjectInputStream ois = new ObjectInputStream (new FileInputStream(f));
estimator = (MarginalProbEstimator) ois.readObject();
ois.close();
return estimator;
}
}
| 18,107 | 27.338028 | 105 |
java
|