summaryrefslogtreecommitdiffstats
path: root/src/plugins/fts-lucene/Snowball.cc
blob: 43b54e36e9fadae1a140e43f6bb28778d0c3ed28 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
/*------------------------------------------------------------------------------
* Copyright (C) 2003-2006 Ben van Klinken and the CLucene Team
*
* Distributable under the terms of either the Apache License (Version 2.0) or
* the GNU Lesser General Public License, as specified in the COPYING file.
------------------------------------------------------------------------------*/
#include <CLucene.h>
#include "SnowballAnalyzer.h"
#include "SnowballFilter.h"
#include <CLucene/util/CLStreams.h>
#include <CLucene/analysis/Analyzers.h>
#include <CLucene/analysis/standard/StandardTokenizer.h>
#include <CLucene/analysis/standard/StandardFilter.h>

extern "C" {
#include "lib.h"
#include "buffer.h"
#include "unichar.h"
#include "lucene-wrapper.h"
};

CL_NS_USE(analysis)
CL_NS_USE(util)
CL_NS_USE2(analysis,standard)

CL_NS_DEF2(analysis,snowball)

  /** Builds an analyzer for the named stemming language with no stop
      words.  The normalizer function (may be NULL) is applied to each
      term after stemming; the language string is copied. */
  SnowballAnalyzer::SnowballAnalyzer(normalizer_func_t *norm_fn, const char* lang)
      : language(i_strdup(lang)),
	normalizer(norm_fn),
	stopSet(NULL),
	prevstream(NULL)
  {
  }

  /** Frees the stop word set, the cached reusable token stream and the
      copied language name. */
  SnowballAnalyzer::~SnowballAnalyzer()
  {
      if (stopSet != NULL)
	  _CLDELETE(stopSet);
      if (prevstream != NULL)
	  _CLDELETE(prevstream);
      i_free(language);
  }

  /** Builds an analyzer for the named stemming language that filters
      out the given stop words.  No normalizer is applied; the language
      string is copied and the stop word table owns its entries. */
  SnowballAnalyzer::SnowballAnalyzer(const char* lang, const TCHAR** stopWords)
      : language(i_strdup(lang)),
	normalizer(NULL),
	stopSet(_CLNEW CLTCSetList(true)),
	prevstream(NULL)
  {
      StopFilter::fillStopTable(stopSet, stopWords);
  }

  /** Convenience overload: builds the filter chain without taking
      ownership of the reader. */
  TokenStream* SnowballAnalyzer::tokenStream(const TCHAR* fieldName, CL_NS(util)::Reader* reader) {
      return this->tokenStream(fieldName, reader, false);
  }

  /** Constructs a {@link StandardTokenizer} filtered by a {@link
      StandardFilter}, a {@link LowerCaseFilter}, an optional {@link
      StopFilter} and finally a {@link SnowballFilter}. */
  TokenStream* SnowballAnalyzer::tokenStream(const TCHAR* fieldName, CL_NS(util)::Reader* reader, bool deleteReader) {
      BufferedReader* buffered = reader->__asBufferedReader();
      TokenStream* stream;

      /* The tokenizer wants a buffered reader; wrap the plain reader
	 when it isn't one already. */
      if (buffered != NULL)
	  stream = _CLNEW StandardTokenizer(buffered, deleteReader);
      else
	  stream = _CLNEW StandardTokenizer(
	      _CLNEW FilteredBufferedReader(reader, deleteReader), true);

      /* Each filter takes ownership of (and later deletes) its input
	 stream, so the whole chain is freed through the last filter. */
      stream = _CLNEW StandardFilter(stream, true);
      stream = _CLNEW CL_NS(analysis)::LowerCaseFilter(stream, true);
      if (stopSet != NULL)
	  stream = _CLNEW CL_NS(analysis)::StopFilter(stream, true, stopSet);
      stream = _CLNEW SnowballFilter(stream, normalizer, language, true);
      return stream;
  }
  
  /** Returns a fresh token stream for the reader, deleting the stream
      handed out by the previous call.  Note: the stream is rebuilt
      each time; only its storage slot is reused. */
  TokenStream* SnowballAnalyzer::reusableTokenStream(const TCHAR* fieldName, CL_NS(util)::Reader* reader) {
      if (prevstream != NULL)
	  _CLDELETE(prevstream);
      prevstream = this->tokenStream(fieldName, reader);
      return prevstream;
  }
  
  
  
  
  
  
    /** Construct the named stemming filter.
   *
   * @param in the input tokens to stem
   * @param name the name of a stemmer
   */
	SnowballFilter::SnowballFilter(TokenStream* in, normalizer_func_t *normalizer, const char* language, bool deleteTS):
		TokenFilter(in,deleteTS)
	{
		this->normalizer = normalizer;
		/* NULL charenc selects the stemmer's UTF-8 tables. */
		stemmer = sb_stemmer_new(language, NULL);
		if (stemmer == NULL) {
			/* No stemming algorithm registered under this name. */
			_CLTHROWA(CL_ERR_IllegalArgument, "language not available for stemming\n"); //todo: richer error
		}
	}

	/* Frees the Snowball stemmer.  NOTE(review): the wrapped input
	   stream is presumably released by the TokenFilter base class
	   when deleteTS was set — confirm against CLucene's TokenFilter. */
	SnowballFilter::~SnowballFilter(){
		sb_stemmer_delete(stemmer);
	}

  /** Returns the next input Token, after being stemmed.
      Pulls a token from the wrapped stream, converts its UCS-4 term
      text to UTF-8, stems it with the Snowball stemmer and (optionally)
      runs the configured normalizer over the stemmed bytes before
      writing the result back into the token.  Returns NULL at end of
      stream.  Terms longer than LUCENE_MAX_WORD_LEN are truncated. */
  Token* SnowballFilter::next(Token* token){
    if (input->next(token) == NULL)
      return NULL;

	/* Fixed buffer sized for up to 5 UTF-8 bytes per input character
	   plus a trailing NUL. */
	unsigned char utf8text[LUCENE_MAX_WORD_LEN*5+1];
	unsigned int len = I_MIN(LUCENE_MAX_WORD_LEN, token->termLength());

	buffer_t buf = { { 0, 0 } };
	/* The term buffer is a wchar_t array but is fed to dovecot's
	   unichar API, so the two types must be the same width. */
	i_assert(sizeof(wchar_t) == sizeof(unichar_t));
	buffer_create_from_data(&buf, utf8text, sizeof(utf8text));
	uni_ucs4_to_utf8((const unichar_t *)token->termBuffer(), len, &buf);

    const sb_symbol* stemmed = sb_stemmer_stem(stemmer, utf8text, buf.used);
	if ( stemmed == NULL )
		_CLTHROWA(CL_ERR_Runtime,"Out of memory");

	/* Byte length of the stemmed UTF-8 output; the sb_symbol buffer
	   stays valid until the next sb_stemmer_stem() call. */
	int stemmedLen=sb_stemmer_length(stemmer);

	if (normalizer == NULL) {
	  /* tchartext is a VLA (compiler extension, not standard C++)
	     sized to the stemmed output's character count plus NUL. */
	  unsigned int tchartext_size =
			  uni_utf8_strlen_n(stemmed, stemmedLen) + 1;
	  TCHAR tchartext[tchartext_size];
	  lucene_utf8_n_to_tchar(stemmed, stemmedLen, tchartext, tchartext_size);
	  token->set(tchartext,token->startOffset(), token->endOffset(), token->type());
	} else T_BEGIN {
	  /* T_BEGIN/T_END scope a dovecot data-stack frame so the
	     temporary normalization buffer is freed on exit. */
	  buffer_t *norm_buf = t_buffer_create(stemmedLen);
	  normalizer(stemmed, stemmedLen, norm_buf);

	  unsigned int tchartext_size =
			  uni_utf8_strlen_n(norm_buf->data, norm_buf->used) + 1;
	  TCHAR tchartext[tchartext_size];
	  lucene_utf8_n_to_tchar((const unsigned char *)norm_buf->data,
							 norm_buf->used, tchartext, tchartext_size);
	  token->set(tchartext,token->startOffset(), token->endOffset(), token->type());
	} T_END;
	return token;
  }


CL_NS_END2