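"""Merge the re-annotated NER spans from DaNE+ (daneplus) back into the
original DaNE train/dev/test splits and save the updated .spacy files.
"""
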
import spacy
from spacy.tokens import Doc, DocBin

train_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/train.spacy"
dev_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/dev.spacy"
test_dane = "/Users/au561649/Github/DaCy/training/main/corpus/dane/test.spacy"


nlp = spacy.blank("da")
# train, dev, test = dane()
train_docs = list(DocBin().from_disk(train_dane).get_docs(nlp.vocab))
dev_docs = list(DocBin().from_disk(dev_dane).get_docs(nlp.vocab))
test_docs = list(DocBin().from_disk(test_dane).get_docs(nlp.vocab))

# Track which split each Doc came from (used below when inspecting duplicates)
Doc.set_extension("split", default=None)

for split, name in zip([train_docs, dev_docs, test_docs], ["train", "dev", "test"]):
    for doc in split:
        doc._.split = name

# text2doc = {}
# n_duplicates = 0  # all look like non-actual duplicates (e.g. "stk. 2")
# for i, doc in enumerate(test_docs + train_docs + dev_docs):
#     if doc.text in text2doc:
#         print(f"Duplicate found: {doc.text}")
#         print("split:", doc._.split)
#         n_duplicates += 1
#     text2doc[doc.text] = doc


# load daneplus
path_to_data = "/Users/au561649/Github/DaCy/training/dane_plus/train.spacy"
train_data = DocBin().from_disk(path_to_data)
daneplus_docs = list(train_data.get_docs(nlp.vocab))

text2doc = {}
# No duplicates: Prodigy removed them. That is problematic when reconstructing
# the documents, which is why we re-annotate the DaNE documents instead.
n_duplicates = 0
for i, doc in enumerate(daneplus_docs):
    if doc.text in text2doc:
        print(f"Duplicate found: {doc.text}")
        n_duplicates += 1
    text2doc[doc.text] = doc
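
print(f"n_duplicates: {n_duplicates}")  # expected to be 0, per the comment above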

# Transfer the DaNE+ annotations onto the matching DaNE documents. Spans are
# aligned via character offsets; spans that do not land on DaNE token
# boundaries (char_span returns None) are collected for manual fixing below.
docs_to_fix = []
for doc in train_docs + dev_docs + test_docs:
    if doc.text in text2doc:
        _ents_to_add = text2doc[doc.text].ents
        ents_to_add = []
        for ent in _ents_to_add:
            char_span = doc.char_span(ent.start_char, ent.end_char, label=ent.label_)
            if char_span is None:
                print(f"Entity could not be added: {ent.text}")
                docs_to_fix.append((doc, ent))
                continue
            ents_to_add.append(char_span)
        doc.ents = ents_to_add  # type: ignore
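
print(f"{len(docs_to_fix)} entity spans need manual fixing")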

# Manual fixes (due to differences in tokenization): these entity spans could
# not be aligned automatically, so the token slice covering each entity was
# determined by inspecting the six affected documents by hand.
slices = [(-2, -1), (-3, -1), (-3, -1), (-3, -1), (-3, -1), (-3, -1)]
assert len(docs_to_fix) == len(slices)  # the slices above assume exactly these six docs
for (doc, ent), (start, end) in zip(docs_to_fix, slices):
    ents = list(doc.ents)
    _ent = doc[start:end]
    new_ent = doc.char_span(_ent.start_char, _ent.end_char, label=ent.label_)
    ents.append(new_ent)
    doc.ents = ents
    print("added", new_ent, "to", doc.text)


# Save the new documents
new_train = DocBin(docs=train_docs)
new_dev = DocBin(docs=dev_docs)
new_test = DocBin(docs=test_docs)

new_train.to_disk("train.spacy")
new_dev.to_disk("dev.spacy")
new_test.to_disk("test.spacy")
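
# Round-trip check: reload the freshly written files and confirm the document
# counts match what was saved.
for path, docs in [
    ("train.spacy", train_docs),
    ("dev.spacy", dev_docs),
    ("test.spacy", test_docs),
]:
    reloaded = list(DocBin().from_disk(path).get_docs(nlp.vocab))
    assert len(reloaded) == len(docs), f"{path}: {len(reloaded)} != {len(docs)}"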