Changeset - 252d3b1bca60
[Not reviewed]
Branch: default
Laman - 2022-10-17 19:12:22

models file included in the package

3 files changed with 5 insertions and 2 deletions:
0 comments (0 inline, 0 general)
setup.cfg
[metadata]
name = languedoc
version = 1.0
license = GPLv3
description = A simple language identification library.

classifiers =
    Programming Language :: Python :: 3
    License :: OSI Approved :: GNU General Public License v3 (GPLv3)
    Operating System :: OS Independent

long_description = file: README.md
long_description_content_type = text/markdown

[options]
packages =
    languedoc
package_dir =
    =src
python_requires = >=3.6

+
+[options.package_data]
+languedoc = models.json.gz
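
For context on this change: the new [options.package_data] entry ships models.json.gz inside the installed languedoc package, which is what lets both scripts below resolve the file next to their own module instead of two directories up the source tree. A minimal sketch of the resolution, with an importlib.resources alternative that is not part of this changeset (it needs Python 3.9+, while the package still targets >=3.6):

# sketch only, not part of this changeset
import os

# the approach taken here: the model sits next to the module file
model_path = os.path.join(os.path.dirname(__file__), "models.json.gz")

# an alternative on Python 3.9+ (hypothetical for this project):
# from importlib.resources import files
# data = files("languedoc").joinpath("models.json.gz").read_bytes()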
src/languedoc/predict.py
import os
import re
import itertools
import json
import gzip

TOP_NGRAM_COUNT = 3000
-MODEL_PATH = os.path.join(os.path.dirname(__file__), "../../models.json.gz")
+MODEL_PATH = os.path.join(os.path.dirname(__file__), "models.json.gz")


def preprocess(text):
	text = re.sub(r"[\W\d_]+", " ", " "+text+" ")
	return text.lower()


def extract_kgram_freqs(text, k):
	n = len(text)
	d = dict()

	for i in range(0, n-k+1):
		key = text[i:i+k]
		if key.isspace():
			continue

		d[key] = d.get(key, 0) + 1

	count = sum(d.values())

	return {key: val/count for (key, val) in d.items()}


def extract_ngram_freqs(text):
	frequencies = {}

	for k in range(1, 4):
		frequencies.update(extract_kgram_freqs(text, k))

	return frequencies


def rank_ngram_freqs(frequencies):
	ordered_ngrams = sorted(frequencies.items(), key=lambda kv: -kv[1])[:TOP_NGRAM_COUNT]
	return dict(zip([key for (key, freq) in ordered_ngrams], itertools.count(0)))


def extract_ranked_ngrams(text):
	frequencies = extract_ngram_freqs(text)
	return rank_ngram_freqs(frequencies)


class Sample:
	def __init__(self, language, ranked_ngrams):
		self.language = language
		self.ranked_ngrams = ranked_ngrams

	@classmethod
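
To make the pipeline above concrete: preprocess collapses punctuation, digits and underscores into spaces, extract_ngram_freqs gathers relative frequencies of all 1- to 3-grams, and rank_ngram_freqs keeps the TOP_NGRAM_COUNT most frequent ones, mapped to their rank. A quick sketch (exact ranks depend on the input text):

# illustrative sketch; relies on the functions defined above
text = preprocess("Hello, world! Hello again.")  # -> " hello world hello again "
ranked = extract_ranked_ngrams(text)
# ranked maps each surviving ngram to its rank by descending frequency;
# rank 0 is the most frequent ngram, all-whitespace ngrams are skipped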
src/languedoc/train.py
@@ -29,70 +29,70 @@ class SampleSet:
		self.language = language
		self.texts = []
		self.frequencies = []

	def add(self, text):
		self.texts.append(text)
		self.frequencies.append(extract_ngram_freqs(text))

	def create_model(self):
		merged_frequencies = merge_ngram_freqs(self.frequencies)
		res = Sample(self.language, rank_ngram_freqs(merged_frequencies))
		return res

	def generate_tests(self, n):
		for (i, (text, freqs)) in enumerate(itertools.cycle(zip(self.texts, self.frequencies))):
			if i >= n:
				break

			ranked_ngrams = rank_ngram_freqs(merge_ngram_freqs([f for f in self.frequencies if f is not freqs]))
			yield (text, Sample(self.language, ranked_ngrams))


def cross_validate(sample_sets):
	models = [s.create_model() for s in sample_sets]
	score = 0
	max_score = 0

	for s in sample_sets:
		for (test_text, partial_model) in s.generate_tests(CROSSVALIDATION_SOURCE_COUNT):
			real_lang = partial_model.language
			test_models = [partial_model] + [m for m in models if m.language != real_lang]

			for k in TEST_LENS:
				for i in range(10):
					j = random.randrange(0, len(test_text)-k)
					t = test_text[j:j+k]
					predicted_lang = identify(t, test_models)
					if predicted_lang == real_lang:
						score += 1
					else:
						print(real_lang, predicted_lang, t)
					max_score += 1

	return score / max_score, (score, max_score)


DATA_DIR = os.path.join(os.path.dirname(__file__), "../../data")
LANG_DIRS = sorted([x.path for x in os.scandir(DATA_DIR)])
-MODEL_PATH = os.path.join(os.path.dirname(__file__), "../../models.json.gz")
+MODEL_PATH = os.path.join(os.path.dirname(__file__), "models.json.gz")

if __name__ == "__main__":
	samples = []

	for d in LANG_DIRS:
		lang = os.path.basename(d)
		lang_samples = SampleSet(lang)
		samples.append(lang_samples)

		for file in sorted(os.scandir(d), key=lambda f: f.name):
			with open(file) as f:
				text = f.read()
				text = preprocess(text)
				print(f"{lang}: {file.name} ({len(text)})")

				lang_samples.add(text)

	with gzip.open(MODEL_PATH, mode="wt", encoding="utf-8") as f:
		json.dump([sample_set.create_model().export() for sample_set in samples], f, ensure_ascii=False)

	print(cross_validate(samples))
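
The gzip + JSON write above pairs with a read on the predict side. A sketch of the matching load, consistent with the updated MODEL_PATH; the per-language Sample reconstruction is assumed, since the relevant part of predict.py is truncated in this diff:

# sketch of the matching read; how each dict becomes a Sample is assumed,
# as the corresponding predict.py code is not shown above
import gzip
import json

with gzip.open(MODEL_PATH, mode="rt", encoding="utf-8") as f:
	exported_models = json.load(f)  # one exported dict per language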