1. Add the following to solrconfig.xml:
<queryParser name="imdismax"
             class="com.szhtp.search.parse.IMDisMaxQParserPlugin" />

<requestHandler name="imdismax" class="solr.SearchHandler">
  <lst name="defaults">
    <str name="defType">imdismax</str>
    <!-- Query fields and their boosts -->
    <str name="qf">keywords^3 question^2 answer^0.4</str>
    <str name="pf">keywords^3 question^2 answer^0.4</str>
    <!-- Minimum-should-match rules: 1-2 clauses all required, 3-5 allow one
         optional, 6 allows two, above 6 at least 90% must match.
         Note that "<" has to be escaped as &lt; inside the XML. -->
    <str name="mm">2&lt;-1 5&lt;-2 6&lt;90%</str>
    <!-- Fields to return in the response -->
    <str name="fl">id,answer,score</str>
    <!-- Phrase slop applied to the pf fields -->
    <int name="ps">100</int>
    <str name="hl.fl">answer</str>
    <!-- Fallback query used when the q parameter is missing or empty -->
    <str name="q.alt">*:*</str>
    <!-- Maximum number of characters per snippet. The default is 100. With 0 the
         field is not fragmented and the whole field value is returned, which is
         not advisable for large fields. "answer" here is the hl.fl field above. -->
    <str name="f.answer.hl.fragsize">50</str>
    <!-- If no snippet is generated (no terms matched), return the value of this
         field instead -->
    <str name="f.answer.hl.alternateField">answer</str>
    <!-- Extension point for Solr's fragmenting algorithm. "gap" is the default;
         "regex" is an alternative whose highlight boundaries are determined by a
         regular expression. This is an atypical, advanced option. See the
         highlighting section of solrconfig.xml for how the default fragmenters
         (and formatters) are configured. -->
    <str name="f.answer.hl.fragmenter">regex</str>
    <str name="tie">0.1</str>
  </lst>
</requestHandler>
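Once the handler and parser are deployed, requests can be sent through the normal select URL with qt=imdismax (e.g. /solr/select?qt=imdismax&q=...&hl=true). Below is a minimal SolrJ sketch of such a request; the core URL, the demo class name and the sample query text are assumptions, and CommonsHttpSolrServer is the client class of older SolrJ releases (substitute the client class of your SolrJ version):

import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.impl.CommonsHttpSolrServer;
import org.apache.solr.client.solrj.response.QueryResponse;

public class ImDisMaxQueryDemo {
    public static void main(String[] args) throws Exception {
        // The core URL is an assumption; adjust it to your deployment.
        CommonsHttpSolrServer server =
                new CommonsHttpSolrServer("http://localhost:8983/solr");

        SolrQuery query = new SolrQuery("怎么退货");  // sample question text (assumed)
        query.set("qt", "imdismax");                 // route to the handler defined above
        query.set("hl", "true");                     // enable highlighting so the hl.* defaults apply

        QueryResponse rsp = server.query(query);
        System.out.println(rsp.getResults());        // id, answer, score (per the fl default)
        System.out.println(rsp.getHighlighting());   // highlighted answer snippets
    }
}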
2. Create the class IMDisMaxQParserPlugin.java:
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.QParser;
import org.apache.solr.search.QParserPlugin;

public class IMDisMaxQParserPlugin extends QParserPlugin {
    /** Parser name; matches the defType configured above. */
    public static final String NAME = "imdismax";

    public void init(NamedList args) {
        // no init arguments are needed
    }

    public QParser createParser(String qstr, SolrParams localParams,
            SolrParams params, SolrQueryRequest req) {
        return new IMDisMaxQParser(qstr, localParams, params, req);
    }
}
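Compile both classes into a jar and place it where Solr can load it, for example the core's lib directory or a directory referenced by a <lib> directive in solrconfig.xml; otherwise the queryParser declaration from step 1 will not be able to resolve com.szhtp.search.parse.IMDisMaxQParserPlugin.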
3. Create the class IMDisMaxQParser.java, where Chinese word segmentation can be added:
import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;
import org.apache.solr.common.params.SolrParams;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.DisMaxQParser;

public class IMDisMaxQParser extends DisMaxQParser {

    public IMDisMaxQParser(String qstr, SolrParams localParams,
            SolrParams params, SolrQueryRequest req) {
        super(qstr, localParams, params, req);
        if (null == this.qstr) {
            return;
        }
        Analyzer analyzer = req.getSchema().getQueryAnalyzer();
        if (null == analyzer) {
            return;
        }
        // Run the query string through the query analyzer of the "question" field
        // (e.g. a Chinese tokenizer) and rebuild it as space-separated terms
        // before DisMax parses it.
        StringBuilder norm = new StringBuilder();
        try {
            // Legacy Token/next() TokenStream API.
            TokenStream tokens = analyzer.reusableTokenStream("question",
                    new StringReader(this.qstr));
            tokens.reset();
            Token token = tokens.next();
            while (token != null) {
                norm.append(new String(token.termBuffer(), 0, token.termLength()))
                        .append(" ");
                token = tokens.next();
            }
        } catch (Exception ex) {
            // Fall back to the original query string if analysis fails.
        }
        if (norm.length() > 0) {
            this.qstr = norm.toString();
        }
    }
}
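The constructor above relies on the legacy Token/next() TokenStream API. On Lucene versions where that API has been removed, the body of the try block can be rewritten with the attribute-based API instead; a minimal sketch under that assumption (it additionally needs an import of org.apache.lucene.analysis.tokenattributes.CharTermAttribute):

// Drop-in replacement for the loop inside the try block, using the
// attribute-based TokenStream API (assumes CharTermAttribute is available).
TokenStream tokens = analyzer.tokenStream("question", new StringReader(this.qstr));
CharTermAttribute term = tokens.addAttribute(CharTermAttribute.class);
tokens.reset();
while (tokens.incrementToken()) {
    norm.append(term.toString()).append(" ");
}
tokens.end();
tokens.close();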