@@ -11,7 +11,7 @@ First make sure you have the Stanford CoreNLP server running. See [the instruct
 
 Then the setup just requires you to pass in the url of the server:
 ```
- >>> from corenlp import StanfordCoreNLP
+ >>> from pycorenlp import StanfordCoreNLP
 >>> nlp = StanfordCoreNLP('http://localhost:9000')
 ```
@@ -21,7 +21,7 @@ Supports annotation:
 'Pusheen and Smitha walked along the beach. '
 'Pusheen wanted to surf, but fell off the surfboard.')
 >>> output = nlp.annotate(text, properties={
-  'annotators': 'tokenize,ssplit,pos,depparse,parse',
+  'annotators': 'tokenize,ssplit,pos,depparse,parse',
   'outputFormat': 'json'
   })
 >>> print(output['sentences'][0]['parse'])
@@ -41,15 +41,15 @@ And tokensregex + semgrex
 >>> nlp.tokensregex(text, pattern='/Pusheen|Smitha/', filter=False)
 {u'sentences': [
   {
-    u'1': {u'text': u'Smitha', u'begin': 2, u'end': 3},
+    u'1': {u'text': u'Smitha', u'begin': 2, u'end': 3},
     u'0': {u'text': u'Pusheen', u'begin': 0, u'end': 1}, u'length': 2
-  },
+  },
   {u'0': {u'text': u'Pusheen', u'begin': 0, u'end': 1}, u'length': 1}]}
 >>> nlp.semgrex(text, pattern='{tag: VBD}', filter=False)
 {u'sentences': [
-  {u'0': {u'text': u'walked', u'begin': 3, u'end': 4}, u'length': 1},
+  {u'0': {u'text': u'walked', u'begin': 3, u'end': 4}, u'length': 1},
   {
-    u'1': {u'text': u'fell', u'begin': 6, u'end': 7},
+    u'1': {u'text': u'fell', u'begin': 6, u'end': 7},
     u'0': {u'text': u'wanted', u'begin': 1, u'end': 2}, u'length': 2
   }
 ]}
0 commit comments