Usage of the edu.stanford.nlp.pipeline.Annotation.containsKey() method, with code examples


This article collects code examples of the Java method edu.stanford.nlp.pipeline.Annotation.containsKey() and shows how it is used in practice. The examples come mainly from platforms such as GitHub, Stack Overflow and Maven and were extracted from selected projects, so they should serve as a useful reference. Details of Annotation.containsKey() are as follows:

Package path: edu.stanford.nlp.pipeline.Annotation
Class: Annotation
Method: containsKey

About Annotation.containsKey

containsKey(Class) reports whether this Annotation (a CoreMap) already holds a value for the given annotation key class. Annotators typically call it before get(...) to check that a required upstream annotation, such as CoreAnnotations.SentencesAnnotation or CoreAnnotations.TokensAnnotation, is present.
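
As a minimal sketch of how this is typically used (the class name ContainsKeyDemo and the sample text are illustrative and not taken from the excerpts below), the following program checks containsKey before and after running a tokenize/ssplit pipeline, the same guard the annotators in the examples use before reading SentencesAnnotation:

import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;

import java.util.Properties;

public class ContainsKeyDemo {
 public static void main(String[] args) {
  Properties props = new Properties();
  props.setProperty("annotators", "tokenize, ssplit");
  StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

  // A freshly constructed Annotation only carries the raw text.
  Annotation annotation = new Annotation("Stanford CoreNLP splits text into sentences. It also tokenizes them.");
  System.out.println(annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)); // false

  pipeline.annotate(annotation);

  // After the pipeline has run, the sentence annotation is present and safe to read.
  if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
   for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
    System.out.println(sentence);
   }
  }
 }
}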

Code examples

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
   doOneSentence(sentence);
  }
 } else {
  throw new RuntimeException("unable to find sentences in: " + annotation);
 }
}
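
The same guard pattern recurs throughout the excerpts below: check containsKey for SentencesAnnotation (or fall back to TokensAnnotation), iterate over the sentences, and otherwise throw a RuntimeException.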

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (verbose) {
  log.info("Adding true-case annotation...");
 }
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  // classify tokens for each sentence
  for (CoreMap sentence: annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
   List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
   List<CoreLabel> output = this.trueCaser.classifySentence(tokens);
   for (int i = 0, size = tokens.size(); i < size; i++) {
    // add the truecaser tag to each token
    String neTag = output.get(i).get(CoreAnnotations.AnswerAnnotation.class);
    tokens.get(i).set(CoreAnnotations.TrueCaseAnnotation.class, neTag);
    setTrueCaseText(tokens.get(i));
   }
  }
 } else {
  throw new RuntimeException("unable to find sentences in: " + annotation);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (VERBOSE) {
  log.info("Adding number annotation ... ");
 }
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  // classify tokens for each sentence
  for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
   List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
   doOneSentenceNew(tokens, annotation, sentence);
  }
  if (VERBOSE) {
   log.info("done. Output: " + annotation.get(CoreAnnotations.SentencesAnnotation.class));
  }
 } else if (annotation.containsKey(CoreAnnotations.TokensAnnotation.class)) {
  List<CoreLabel> tokens = annotation.get(CoreAnnotations.TokensAnnotation.class);
  doOneSentenceNew(tokens, annotation, null);
 } else {
  throw new RuntimeException("unable to find sentences in: " + annotation);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (annotation.containsKey(CoreAnnotations.TokensAnnotation.class)) {
  List<CoreLabel> tokens = annotation.get(CoreAnnotations.TokensAnnotation.class);
  if (DEBUG) { log.info("CleanXML: starting tokens: " + tokens); }
  List<CoreLabel> newTokens = process(annotation, tokens);
  // We assume that anyone using this annotator no longer wants the
  // old tokens, so we get rid of them.
  // Redo the token indexes if xml tokens have been removed.
  setTokenBeginTokenEnd(newTokens);
  annotation.set(CoreAnnotations.TokensAnnotation.class, newTokens);
  if (DEBUG) { log.info("CleanXML: ending tokens: " + annotation.get(CoreAnnotations.TokensAnnotation.class)); }
 }
}

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  // parse a tree for each sentence
  for (CoreMap sentence: annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
   List<CoreLabel> words = sentence.get(CoreAnnotations.TokensAnnotation.class);
   if (VERBOSE) {
    log.info("Parsing: " + words);
   }
   int maxSentenceLength = parser.getMaxSentenceLength();
   // generate the constituent tree
   Tree tree; // initialized below
   if (maxSentenceLength <= 0 || words.size() < maxSentenceLength) {
    tree = parser.getBestParse(words);
   }
   else {
    tree = ParserUtils.xTree(words);
   }
   List<Tree> trees = Generics.newArrayList(1);
   trees.add(tree);
   ParserAnnotatorUtils.fillInParseAnnotations(VERBOSE, BUILD_GRAPHS, gsf, sentence, trees, GrammaticalStructure.Extras.NONE);
  }
 } else {
  throw new RuntimeException("unable to find sentences in: " + annotation);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

public void annotate(Annotation annotation) {
 if (VERBOSE) {
  timer.start();
  log.info("Normalizing quantifiable entities...");
 }
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
  for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
   List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
   annotateTokens(tokens);
  }
  if (VERBOSE) {
   timer.stop("done.");
   log.info("output: " + sentences + '\n');
  }
 } else if (annotation.containsKey(CoreAnnotations.TokensAnnotation.class)) {
  List<CoreLabel> tokens = annotation.get(CoreAnnotations.TokensAnnotation.class);
  annotateTokens(tokens);
 } else {
  throw new RuntimeException("unable to find sentences in: " + annotation);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

if ( ! doc.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
 StanfordCoreNLP pipeline = mkStanfordCoreNLP(props);
 pipeline.annotate(doc);

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation){
 try {
  if (!annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
   log.error("this coreference resolution system requires SentencesAnnotation!");
   return;
  }
  if (hasSpeakerAnnotations(annotation)) {
   annotation.set(CoreAnnotations.UseMarkedDiscourseAnnotation.class, true);
  }
  Document corefDoc = corefSystem.docMaker.makeDocument(annotation);
  Map<Integer, CorefChain> result = corefSystem.coref(corefDoc);
  annotation.set(CorefCoreAnnotations.CorefChainAnnotation.class, result);
  // for backward compatibility
  if(OLD_FORMAT) annotateOldFormat(result, corefDoc);
 } catch (RuntimeException e) {
  throw e;
 } catch (Exception e) {
  throw new RuntimeException(e);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

if (! annotation.containsKey(CoreAnnotations.SentencesAnnotation.class))
 throw new RuntimeException("Unable to find sentences in " + annotation);

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (VERBOSE) {
  log.info("Finding lemmas ...");
 }
 Morphology morphology = new Morphology();
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
   List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
   //log.info("Lemmatizing sentence: " + tokens);
   for (CoreLabel token : tokens) {
    String text = token.get(CoreAnnotations.TextAnnotation.class);
    String posTag = token.get(CoreAnnotations.PartOfSpeechAnnotation.class);
    addLemma(morphology, CoreAnnotations.LemmaAnnotation.class, token, text, posTag);
   }
  }
 } else {
  throw new RuntimeException("Unable to find words/tokens in: " +
                annotation);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

if (!doc.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
 StanfordCoreNLP pipeline = mkStanfordCoreNLP(props);
 pipeline.annotate(doc);

Code example source: origin: stanfordnlp/CoreNLP

if (!annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
 log.error("this coreference resolution system requires SentencesAnnotation!");
 return;

Code example source: origin: stanfordnlp/CoreNLP

if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
 allMatched = new ArrayList<>();
 List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);

Code example source: origin: stanfordnlp/CoreNLP

boolean perDocumentCharacterMap = false;
if (buildCharacterMapPerAnnotation) {
 if (annotation.containsKey(CoreAnnotations.MentionsAnnotation.class)) {
  entityMentionsToCharacterMap(annotation);

Code example source: origin: stanfordnlp/CoreNLP

if (ann.containsKey(CoreAnnotations.DocIDAnnotation.class)) {
 impl.setDocID(ann.get(CoreAnnotations.DocIDAnnotation.class));

Code example source: origin: stanfordnlp/CoreNLP

if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 // turn the annotation into a sentence
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  if (nThreads == 1) {
   for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
    doOneSentence(sentence);
   }
  } else {
   MulticoreWrapper<CoreMap, CoreMap> wrapper = new MulticoreWrapper<>(nThreads, new POSTaggerProcessor());
   for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
    wrapper.put(sentence);
    while (wrapper.peek()) {
     wrapper.poll();
    }
   }
   wrapper.join();
   while (wrapper.peek()) {
    wrapper.poll();
   }
  }
 } else {
  throw new RuntimeException("unable to find words/tokens in: " + annotation);
 }
}

Code example source: origin: stanfordnlp/CoreNLP

if (annotation.containsKey(CoreAnnotations.TextAnnotation.class)) {
 String text = annotation.get(CoreAnnotations.TextAnnotation.class);
 Reader r = new StringReader(text);

Code example source: origin: stanfordnlp/CoreNLP

@Override
public void annotate(Annotation annotation) {
 if (annotation.containsKey(CoreAnnotations.SentencesAnnotation.class)) {
  if (nThreads() != 1 || maxTime() > 0) {
   InterruptibleMulticoreWrapper<CoreMap, CoreMap> wrapper = buildWrapper(annotation);
