# frozen_string_literal: true
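
# Detects the most likely language of a status' text using CLD3 and maps the
# result to an ISO 639-1 code.
#
# A minimal usage sketch (the sample text and +account+ below are
# placeholders, not part of this class):
#
#   LanguageDetector.instance.detect('Hello world, how are you today?', account)
#   # => :en (or the account's default locale when detection is unreliable,
#   #    or nil when nothing detectable remains after cleanup)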
class LanguageDetector
  include Singleton
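
  # Minimum number of whitespace-separated words before plain text is
  # considered long enough for a reliable detection.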
  WORDS_THRESHOLD = 4
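
  # Characters from scripts that identify a language (or a narrow language
  # family) even in very short text, such as Hebrew, Arabic, CJK, Hangul and
  # Thai.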
  RELIABLE_CHARACTERS_RE = /[\p{Hebrew}\p{Arabic}\p{Syriac}\p{Thaana}\p{Nko}\p{Han}\p{Katakana}\p{Hiragana}\p{Hangul}\p{Thai}]+/m
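
  # Sets up the shared CLD3 identifier, configured to consider between 1 and
  # 2048 bytes of input.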
  def initialize
    # CLD3 (Compact Language Detector v3) succeeded the CLD2-based detector:
    # it uses a neural-network model rather than CLD2's naïve Bayesian
    # classifier and supports newer compilers such as GCC 6, at the cost of
    # requiring C++11 and protocol buffers (see #2949).
    @identifier = CLD3::NNetLanguageIdentifier.new(1, 2048)
  end
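
  # Returns the language of +text+ as an ISO 639-1 symbol. Falls back to the
  # account's own locale for local accounts when detection is not reliable,
  # and returns nil when nothing detectable remains after cleanup.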
  def detect(text, account)
    input_text = prepare_text(text)

    return if input_text.blank?

    detect_language_code(input_text) || default_locale(account)
  end
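
  # Memoized list of ISO 639-1 codes for every language CLD3 can report.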
  def language_names
    @language_names ||= CLD3::TaskContextParams::LANGUAGE_NAMES.map { |name| iso6391(name.to_s).to_sym }.uniq
  end
  private
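
  # Reduces a status to bare, whitespace-trimmed text suitable for detection.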
  def prepare_text(text)
    simplify_text(text).strip
  end
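
  # Detection is only trusted when the text is either long enough or written
  # in a script that is unambiguous even in short snippets.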
  def unreliable_input?(text)
    !reliable_input?(text)
  end

  def reliable_input?(text)
    sufficient_text_length?(text) || language_specific_character_set?(text)
  end

  def sufficient_text_length?(text)
    text.split(/\s+/).size >= WORDS_THRESHOLD
  end
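
  # True when more than 30% of the characters belong to one of the scripts
  # matched by RELIABLE_CHARACTERS_RE.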
  def language_specific_character_set?(text)
    words = text.scan(RELIABLE_CHARACTERS_RE)

    if words.present?
      words.reduce(0) { |acc, elem| acc + elem.size }.to_f / text.size > 0.3
    else
      false
    end
  end
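
  # Runs CLD3 on the text and returns an ISO 639-1 symbol, but only when both
  # the input and the CLD3 result are considered reliable.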
  def detect_language_code(text)
    return if unreliable_input?(text)

    result = @identifier.find_language(text)

    iso6391(result.language.to_s).to_sym if result&.reliable?
  end
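
  # Maps a BCP 47 identifier as returned by CLD3 to its two-letter ISO 639-1
  # code.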
  def iso6391(bcp47)
    iso639 = bcp47.split('-').first

    # CLD3 returns the grandfathered language code ('iw') for Hebrew
    return 'he' if iso639 == 'iw'

    ISO_639.find(iso639).alpha2
  end
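
  # Strips HTML, URLs, mentions, custom emoji shortcodes and hashtag markup
  # (splitting hashtags into words) so that only natural-language words reach
  # the detector.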
  def simplify_text(text)
    new_text = remove_html(text)
    new_text.gsub!(FetchLinkCardService::URL_PATTERN, '\1')
    new_text.gsub!(Account::MENTION_RE, '')
    new_text.gsub!(Tag::HASHTAG_RE) { |string| string.gsub(/[#_]/, '#' => '', '_' => ' ').gsub(/[a-z][A-Z]|[a-zA-Z][\d]/) { |s| s.insert(1, ' ') }.downcase }
    new_text.gsub!(/:#{CustomEmoji::SHORTCODE_RE_FRAGMENT}:/, '')
    new_text.gsub!(/\s+/, ' ')
    new_text
  end
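
  # Loofah scrubber that keeps only <br> and <p>, so line and paragraph
  # breaks survive HTML removal.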
  def new_scrubber
    scrubber = Rails::Html::PermitScrubber.new
    scrubber.tags = %w(br p)
    scrubber
  end

  def scrubber
    @scrubber ||= new_scrubber
  end
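
  # Converts the HTML of a status into plain text while preserving line and
  # paragraph breaks.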
  def remove_html(text)
    text = Loofah.fragment(text).scrub!(scrubber).to_s
    text.gsub!('<br>', "\n")
    text.gsub!('</p><p>', "\n\n")
    text.gsub!(/(^<p>|<\/p>$)/, '')
    text
  end
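
  # Local accounts fall back to the author's interface locale (or the
  # instance default); remote accounts get no fallback.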
  def default_locale(account)
    account.user_locale&.to_sym || I18n.default_locale if account.local?
  end
end