Path: blob/master/thirdparty/chardet/universaldetector.py
2992 views
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################
"""
Module containing the UniversalDetector detector class, which is the primary
class a user of ``chardet`` should use.

:author: Mark Pilgrim (initial port to Python)
:author: Shy Shalom (original C code)
:author: Dan Blanchard (major refactoring for 3.0)
:author: Ian Cordasco
"""


import codecs
import logging
import re

from .charsetgroupprober import CharSetGroupProber
from .enums import InputState, LanguageFilter, ProbingState
from .escprober import EscCharSetProber
from .latin1prober import Latin1Prober
from .mbcsgroupprober import MBCSGroupProber
from .sbcsgroupprober import SBCSGroupProber


class UniversalDetector(object):
    """
    The ``UniversalDetector`` class underlies the ``chardet.detect`` function
    and coordinates all of the different charset probers.

    To get a ``dict`` containing an encoding and its confidence, you can simply
    run:

    .. code::

            u = UniversalDetector()
            u.feed(some_bytes)
            u.close()
            detected = u.result

    """

    # Probers whose best confidence falls below this are not reported.
    MINIMUM_THRESHOLD = 0.20
    # Any byte >= 0x80 means the input is not pure ASCII.
    HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
    # ESC (ISO-2022-*) or '~{' (HZ-GB-2312) escape-sequence introducers.
    ESC_DETECTOR = re.compile(b'(\033|~{)')
    # Bytes in 0x80-0x9F are used by Windows codepages but are C1 controls
    # in ISO-8859-*; their presence suggests a Windows encoding.
    WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
    # Map ISO-8859 results to the Windows superset codepage when
    # Windows-specific bytes were observed.
    ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
                   'iso-8859-2': 'Windows-1250',
                   'iso-8859-5': 'Windows-1251',
                   'iso-8859-6': 'Windows-1256',
                   'iso-8859-7': 'Windows-1253',
                   'iso-8859-8': 'Windows-1255',
                   'iso-8859-9': 'Windows-1254',
                   'iso-8859-13': 'Windows-1257'}

    def __init__(self, lang_filter=LanguageFilter.ALL):
        self._esc_charset_prober = None
        self._charset_probers = []
        self.result = None
        self.done = None
        self._got_data = None
        self._input_state = None
        self._last_char = None
        self.lang_filter = lang_filter
        self.logger = logging.getLogger(__name__)
        self._has_win_bytes = None
        self.reset()

    def reset(self):
        """
        Reset the UniversalDetector and all of its probers back to their
        initial states.  This is called by ``__init__``, so you only need to
        call this directly in between analyses of different documents.
        """
        self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
        self.done = False
        self._got_data = False
        self._has_win_bytes = False
        self._input_state = InputState.PURE_ASCII
        self._last_char = b''
        if self._esc_charset_prober:
            self._esc_charset_prober.reset()
        for prober in self._charset_probers:
            prober.reset()

    def feed(self, byte_str):
        """
        Takes a chunk of a document and feeds it through all of the relevant
        charset probers.

        After calling ``feed``, you can check the value of the ``done``
        attribute to see if you need to continue feeding the
        ``UniversalDetector`` more data, or if it has made a prediction
        (in the ``result`` attribute).

        .. note::
           You should always call ``close`` when you're done feeding in your
           document if ``done`` is not already ``True``.
        """
        if self.done:
            return

        if not len(byte_str):
            return

        if not isinstance(byte_str, bytearray):
            byte_str = bytearray(byte_str)

        # First check for known BOMs, since these are guaranteed to be correct
        if not self._got_data:
            # If the data starts with BOM, we know it is UTF
            if byte_str.startswith(codecs.BOM_UTF8):
                # EF BB BF  UTF-8 with BOM
                self.result = {'encoding': "UTF-8-SIG",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith((codecs.BOM_UTF32_LE,
                                      codecs.BOM_UTF32_BE)):
                # FF FE 00 00  UTF-32, little-endian BOM
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
                               'confidence': 1.0,
                               'language': ''}
            elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
                # FF FE  UTF-16, little endian BOM
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16",
                               'confidence': 1.0,
                               'language': ''}

            self._got_data = True
            if self.result['encoding'] is not None:
                self.done = True
                return

        # If none of those matched and we've only seen ASCII so far, check
        # for high bytes and escape sequences
        if self._input_state == InputState.PURE_ASCII:
            if self.HIGH_BYTE_DETECTOR.search(byte_str):
                self._input_state = InputState.HIGH_BYTE
            elif self._input_state == InputState.PURE_ASCII and \
                    self.ESC_DETECTOR.search(self._last_char + byte_str):
                self._input_state = InputState.ESC_ASCII

        self._last_char = byte_str[-1:]

        # If we've seen escape sequences, use the EscCharSetProber, which
        # uses a simple state machine to check for known escape sequences in
        # HZ and ISO-2022 encodings, since those are the only encodings that
        # use such sequences.
        if self._input_state == InputState.ESC_ASCII:
            if not self._esc_charset_prober:
                self._esc_charset_prober = EscCharSetProber(self.lang_filter)
            if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
                self.result = {'encoding':
                               self._esc_charset_prober.charset_name,
                               'confidence':
                               self._esc_charset_prober.get_confidence(),
                               'language':
                               self._esc_charset_prober.language}
                self.done = True
        # If we've seen high bytes (i.e., those with values greater than 127),
        # we need to do more complicated checks using all our multi-byte and
        # single-byte probers that are left.  The single-byte probers
        # use character bigram distributions to determine the encoding, whereas
        # the multi-byte probers use a combination of character unigram and
        # bigram distributions.
        elif self._input_state == InputState.HIGH_BYTE:
            if not self._charset_probers:
                self._charset_probers = [MBCSGroupProber(self.lang_filter)]
                # If we're checking non-CJK encodings, use single-byte prober
                if self.lang_filter & LanguageFilter.NON_CJK:
                    self._charset_probers.append(SBCSGroupProber())
                self._charset_probers.append(Latin1Prober())
            for prober in self._charset_probers:
                if prober.feed(byte_str) == ProbingState.FOUND_IT:
                    self.result = {'encoding': prober.charset_name,
                                   'confidence': prober.get_confidence(),
                                   'language': prober.language}
                    self.done = True
                    break
            if self.WIN_BYTE_DETECTOR.search(byte_str):
                self._has_win_bytes = True

    def close(self):
        """
        Stop analyzing the current document and come up with a final
        prediction.

        :returns:  The ``result`` attribute, a ``dict`` with the keys
                   `encoding`, `confidence`, and `language`.
        """
        # Don't bother with checks if we're already done
        if self.done:
            return self.result
        self.done = True

        if not self._got_data:
            self.logger.debug('no data received!')

        # Default to ASCII if it is all we've seen so far
        elif self._input_state == InputState.PURE_ASCII:
            self.result = {'encoding': 'ascii',
                           'confidence': 1.0,
                           'language': ''}

        # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
        elif self._input_state == InputState.HIGH_BYTE:
            prober_confidence = None
            max_prober_confidence = 0.0
            max_prober = None
            for prober in self._charset_probers:
                if not prober:
                    continue
                prober_confidence = prober.get_confidence()
                if prober_confidence > max_prober_confidence:
                    max_prober_confidence = prober_confidence
                    max_prober = prober
            if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
                charset_name = max_prober.charset_name
                lower_charset_name = max_prober.charset_name.lower()
                confidence = max_prober.get_confidence()
                # Use Windows encoding name instead of ISO-8859 if we saw any
                # extra Windows-specific bytes
                if lower_charset_name.startswith('iso-8859'):
                    if self._has_win_bytes:
                        charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
                                                            charset_name)
                self.result = {'encoding': charset_name,
                               'confidence': confidence,
                               'language': max_prober.language}

        # Log all prober confidences if none met MINIMUM_THRESHOLD
        if self.logger.getEffectiveLevel() == logging.DEBUG:
            if self.result['encoding'] is None:
                self.logger.debug('no probers hit minimum threshold')
                for group_prober in self._charset_probers:
                    if not group_prober:
                        continue
                    if isinstance(group_prober, CharSetGroupProber):
                        for prober in group_prober.probers:
                            self.logger.debug('%s %s confidence = %s',
                                              prober.charset_name,
                                              prober.language,
                                              prober.get_confidence())
                    else:
                        # BUGFIX: this branch previously logged ``prober``,
                        # the *inner* loop variable from the group branch,
                        # which is unbound (NameError) when the first prober
                        # is not a group prober and otherwise names the wrong
                        # object.  Log the current ``group_prober`` instead.
                        self.logger.debug('%s %s confidence = %s',
                                          group_prober.charset_name,
                                          group_prober.language,
                                          group_prober.get_confidence())
        return self.result