GitHub Repository: GRAAL-Research/deepparse
Path: blob/main/examples/retrain_attention_model.py
# pylint: skip-file
###################
"""
IMPORTANT:
THE EXAMPLE IN THIS FILE IS CURRENTLY NOT FUNCTIONAL
BECAUSE THE `download_from_public_repository` FUNCTION
NO LONGER EXISTS. WE HAD TO MAKE A QUICK RELEASE TO
REMEDIATE AN ISSUE IN OUR PREVIOUS STORAGE SOLUTION.
THIS WILL BE FIXED IN A FUTURE RELEASE.

IN THE MEANTIME, IF YOU NEED ANY CLARIFICATION
REGARDING THE PACKAGE, PLEASE FEEL FREE TO OPEN AN ISSUE.
"""
import os

import poutyne

from deepparse import download_from_public_repository
from deepparse.dataset_container import PickleDatasetContainer
from deepparse.parser import AddressParser

# First, let's download the train and test data from the public repository.
saving_dir = "./data"
file_extension = "p"
training_dataset_name = "sample_incomplete_data"
test_dataset_name = "test_sample_data"
download_from_public_repository(training_dataset_name, saving_dir, file_extension=file_extension)
download_from_public_repository(test_dataset_name, saving_dir, file_extension=file_extension)
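
# The two calls above rely on `download_from_public_repository`, which the notice at the top
# of this file says no longer exists. Below is a minimal sketch of a manual replacement using
# only the standard library. The `base_url` is a hypothetical placeholder, not the package's
# actual storage location; point it at wherever the pickled samples are currently hosted.
def download_dataset(dataset_name, saving_dir, file_extension="p", base_url="https://example.com/deepparse-data"):
    # Fetch `<dataset_name>.<file_extension>` from `base_url` into `saving_dir`.
    from urllib.request import urlretrieve

    os.makedirs(saving_dir, exist_ok=True)
    file_name = dataset_name + "." + file_extension
    urlretrieve(base_url + "/" + file_name, os.path.join(saving_dir, file_name))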

# Now, let's create a training and test container.
training_container = PickleDatasetContainer(os.path.join(saving_dir, training_dataset_name + "." + file_extension))
test_container = PickleDatasetContainer(os.path.join(saving_dir, test_dataset_name + "." + file_extension))
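
# If the sample files cannot be downloaded, an equivalent pickle can be built by hand. As a
# hedged sketch (confirm the exact container format against the deepparse documentation),
# PickleDatasetContainer is assumed to read a pickled list of (address, tags) tuples with one
# tag per whitespace-separated token. The single address and its tags below are illustrative only.
import pickle

example_data = [
    (
        "777 rue des Peupliers Québec Québec G1P 3X7",
        ["StreetNumber", "StreetName", "StreetName", "StreetName", "Municipality", "Province", "PostalCode", "PostalCode"],
    ),
]
os.makedirs(saving_dir, exist_ok=True)
with open(os.path.join(saving_dir, "handmade_sample.p"), "wb") as file:
    pickle.dump(example_data, file)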

# We will retrain the BPEmb attention version of our pretrained model.
model = "bpemb"
address_parser = AddressParser(model_type=model, device=0, attention_mechanism=True)

# Now, let's retrain for 5 epochs using a batch size of 8, since the dataset for this example
# is quite small. We start with the default learning rate of 0.01 and use a learning rate
# scheduler to lower it as training progresses.
lr_scheduler = poutyne.StepLR(step_size=1, gamma=0.1)  # reduce the LR by a factor of 10 each epoch

# The path where our checkpoints will be saved
logging_path = "./checkpoints"
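
# Other Poutyne callbacks can be passed to `retrain` alongside the scheduler. A minimal sketch,
# assuming poutyne.EarlyStopping is available in your Poutyne version: stop retraining early if
# the validation loss stops improving. To use it, append `early_stopping` to the `callbacks`
# list in the call below.
early_stopping = poutyne.EarlyStopping(monitor="val_loss", patience=2)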

address_parser.retrain(
    training_container,
    train_ratio=0.8,
    epochs=5,
    batch_size=8,
    num_workers=2,
    callbacks=[lr_scheduler],
    logging_path=logging_path,
    layers_to_freeze="seq2seq",  # freeze the seq2seq (encoder and decoder) weights so the attention-related layers are the ones updated
)

# Now, let's test our fine-tuned model using the best checkpoint (default parameter).
address_parser.test(test_container, batch_size=256)
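
# After retraining, the parser can be used directly for inference. A quick sketch; the address
# below is only a sample, and the tagging it returns depends on the retrained model.
parsed_address = address_parser("350 rue des Lilas Ouest Québec Québec G1L 1B6")
print(parsed_address)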