This repository was archived by the owner on Jul 7, 2023. It is now read-only.
File tree: tensor2tensor/data_generators — 1 file changed, +4 −4 lines.
Diff hunk: @@ -109,12 +109,12 @@ class TokenTextEncoder(TextEncoder):
109
109
110
110
def __init__(self, vocab_filename, reverse=False, num_reserved_ids=2):
  """Initialize from a file, one token per line.

  Args:
    vocab_filename: path to a vocabulary file with one token per line, or
      None to skip loading (the caller populates the vocabulary later).
    reverse: stored on the instance as `self._reverse`; presumably controls
      reversed token order in encode/decode -- confirm against those methods.
    num_reserved_ids: number of ids reserved for special tokens, forwarded
      to the TextEncoder base class.
  """
  # Run the base-class constructor first so reserved-id state is in place
  # before any vocabulary entries are loaded from the file.
  super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids)

  self._reverse = reverse
  # A None filename is valid and simply means "no vocab file to read".
  if vocab_filename is not None:
    self._load_vocab_from_file(vocab_filename)
118
118
def encode (self , sentence ):
119
119
"""Converts a space-separated string of tokens to a list of ids."""
120
120
ret = [self ._token_to_id [tok ] for tok in sentence .strip ().split ()]
@@ -285,7 +285,7 @@ def build_to_target_size(cls,
285
285
subtokenizer .build_from_token_counts (token_counts , store_filename ,
286
286
present_count , num_iterations )
287
287
288
- if min_val = = max_val or subtokenizer .vocab_size == target_size :
288
+ if min_val > = max_val or subtokenizer .vocab_size == target_size :
289
289
return subtokenizer
290
290
elif subtokenizer .vocab_size > target_size :
291
291
other_subtokenizer = cls .build_to_target_size (
You can’t perform that action at this time.
0 commit comments