| author | Andreas Gohr <andi@splitbrain.org> | 2005-09-25 19:54:51 +0200 |
| committer | Andreas Gohr <andi@splitbrain.org> | 2005-09-25 19:54:51 +0200 |
| commit | 93a60ad223af9f649d62d2acbdffd22ce5ad1b97 (patch) |
| tree | 16e6bfbea21c90fa3fc625b0d3cc5ee4d14692bd /inc/indexer.php |
| parent | 134f4ab222dddaf47588c908d0c2b81a3da2b76b (diff) |
| download | rpg-93a60ad223af9f649d62d2acbdffd22ce5ad1b97.tar.gz, rpg-93a60ad223af9f649d62d2acbdffd22ce5ad1b97.tar.bz2 |
Asian language support for the indexer #563
Asian languages do not use spaces to separate words, but the indexer does a
word-based lookup. Splitting, for example, a Japanese text into real words is
only possible with complicated natural language processing, which is
completely out of scope for DokuWiki.
This patch solves the problem by treating each Asian character as a single
word. When an Asian word consisting of multiple characters is searched for,
it is treated as a phrase search: each character is looked up by itself
first, and the documents found are then checked for the complete phrase.
darcs-hash:20050925175451-7ad00-933b33b51b5f2fa05e736c18b8db58a5fdbf41ce.gz
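To illustrate the splitting step, here is a minimal, self-contained sketch
using the same Unicode ranges the patch defines; the sample string and
variable names are made up for this example:

```php
<?php
// The same character class the patch introduces as IDX_ASIAN.
define('IDX_ASIAN', '['.
       '\x{0E00}-\x{0E7F}'.  // Thai
       '\x{2E80}-\x{D7AF}'.  // CJK -> Hangul
       '\x{F900}-\x{FAFF}'.  // CJK Compatibility Ideographs
       '\x{FE30}-\x{FE4F}'.  // CJK Compatibility Forms
       ']');

// Append a space to every Asian character so a later explode(' ', ...)
// sees each character as a word of its own; Latin text is left untouched.
$text  = 'DokuWiki は 素晴らしい';  // sample input, made up
$split = preg_replace('/('.IDX_ASIAN.')/u', '\1 ', $text);

print_r(explode(' ', $split));
// 'DokuWiki' stays whole; は, 素, 晴, ら, し, い each become a separate word.
```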
Diffstat (limited to 'inc/indexer.php')
-rw-r--r-- | inc/indexer.php | 19 |
1 file changed, 18 insertions(+), 1 deletion(-)
```diff
diff --git a/inc/indexer.php b/inc/indexer.php
index 6ece84d7b..a8511b1ee 100644
--- a/inc/indexer.php
+++ b/inc/indexer.php
@@ -12,6 +12,19 @@
 require_once(DOKU_INC.'inc/utf8.php');
 require_once(DOKU_INC.'inc/parserutils.php');
 
+// Asian characters are handled as words. The following regexp defines the
+// Unicode-Ranges for Asian characters
+// Ranges taken from http://en.wikipedia.org/wiki/Unicode_block
+// I'm no language expert. If you think some ranges are wrongly chosen or
+// a range is missing, please contact me
+define(IDX_ASIAN,'['.
+       '\x{0E00}-\x{0E7F}'.  // Thai
+       '\x{2E80}-\x{D7AF}'.  // CJK -> Hangul
+       '\x{F900}-\x{FAFF}'.  // CJK Compatibility Ideographs
+       '\x{FE30}-\x{FE4F}'.  // CJK Compatibility Forms
+       ']');
+
+
 /**
  * Split a page into words
  *
@@ -37,9 +50,10 @@ function idx_getPageWords($page){
 
     $words = array();
     foreach ($tokens as $word => $count) {
-        // simple filter to restrict use of utf8_stripspecials
         if (preg_match('/[^0-9A-Za-z]/u', $word)) {
+            // handle asian chars as single words
+            $word = preg_replace('/('.IDX_ASIAN.')/u','\1 ',$word);
             $arr = explode(' ', utf8_stripspecials($word,' ','._\-:'));
             $arr = array_count_values($arr);
 
@@ -312,6 +326,9 @@ function idx_tokenizer($string,&$stopwords){
   $words = array();
 
   if(preg_match('/[^0-9A-Za-z]/u', $string)){
+    #handle asian chars as single words
+    $string = preg_replace('/('.IDX_ASIAN.')/u','\1 ',$string);
+
     $arr = explode(' ', utf8_stripspecials($string,' ','._\-:'));
     foreach ($arr as $w) {
       if (!is_numeric($w) && strlen($w) < 3) continue;
```
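The search side described in the commit message is not part of this diff. The
following is a hypothetical sketch of how the phrase check could work,
assuming a simple word-to-pages index ($index) and raw page texts
($documents); the function name and both parameters are invented for this
example:

```php
<?php
// Hypothetical sketch: an Asian query word is looked up one character at a
// time, and only documents containing every character are then checked for
// the full contiguous phrase.
function search_asian_phrase($phrase, $index, $documents) {
    // split the phrase into single characters (UTF-8 aware)
    $chars = preg_split('//u', $phrase, -1, PREG_SPLIT_NO_EMPTY);

    // intersect the page lists of all characters
    $candidates = null;
    foreach ($chars as $c) {
        $pages = isset($index[$c]) ? $index[$c] : array();
        $candidates = ($candidates === null)
                    ? $pages
                    : array_intersect($candidates, $pages);
    }
    if (!$candidates) return array();

    // confirm the characters actually occur as a phrase in each candidate
    $hits = array();
    foreach ($candidates as $page) {
        if (strpos($documents[$page], $phrase) !== false) {
            $hits[] = $page;
        }
    }
    return $hits;
}
```

The two-stage design keeps the index purely word-based: the per-character
lookup narrows the candidate set cheaply, and the substring check on the
surviving pages restores the precision a real word index would have given.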