group即分組,類似sql裡的group by功能,lucene中分組是通過内置的幾種collector結果集收集器實作的,有關group的結果集收集器都在org.apache.lucene.search.grouping包及其子包下,

包含group關鍵字的collector都是有關group分組的結果收集器,如果你隻需要統計如下這些分組資訊:
/** 所有組的數量 */
int totalGroupCount = 0;
/** 所有滿足條件的記錄數 */
int totalHitCount = 0;
/** 所有組内的滿足條件的記錄數(通常該值與totalHitCount是一緻的) */
int totalGroupedHitCount = -1;
則直接使用FirstPassGroupingCollector收集器即可,如果你需要統計每個分組内部的命中總數以及命中索引文檔的評分等資訊,則需要使用SecondPassGroupingCollector,為了提高第二次查詢的效率,你可以使用CachingCollector來緩存第一次查詢結果,這樣第二次就直接從緩存中擷取第一次查詢結果,為了統計總的分組數量,你可能還需要使用AllGroupsCollector結果收集器。常用的結果收集器就這幾個。
下面是一個group分組使用示例,具體詳細說明請看代碼裡面的注釋:
package com.yida.framework.lucene5.group;

import java.io.IOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Random;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.SortedDocValuesField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.Term;
import org.apache.lucene.queries.function.ValueSource;
import org.apache.lucene.queries.function.valuesource.BytesRefFieldSource;
import org.apache.lucene.search.CachingCollector;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.MultiCollector;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.SimpleCollector;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.grouping.AbstractAllGroupsCollector;
import org.apache.lucene.search.grouping.AbstractFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.AbstractSecondPassGroupingCollector;
import org.apache.lucene.search.grouping.GroupDocs;
import org.apache.lucene.search.grouping.SearchGroup;
import org.apache.lucene.search.grouping.TopGroups;
import org.apache.lucene.search.grouping.function.FunctionAllGroupsCollector;
import org.apache.lucene.search.grouping.function.FunctionFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.function.FunctionSecondPassGroupingCollector;
import org.apache.lucene.search.grouping.term.TermAllGroupsCollector;
import org.apache.lucene.search.grouping.term.TermFirstPassGroupingCollector;
import org.apache.lucene.search.grouping.term.TermSecondPassGroupingCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.mutable.MutableValue;
import org.apache.lucene.util.mutable.MutableValueStr;

import com.yida.framework.lucene5.util.Tools;
/**
* lucene分組測試
* @author lanxiaowei
*
*/
public class grouptest {
/** 索引目錄 */
private static final string indexdir = "c:/group-index";
/** 分詞器 */
private static analyzer analyzer = new standardanalyzer();
/** 分組域 */
private static string groupfield = "author";
public static void main(string[] args) throws exception {
// 建立測試索引
// createindex();
directory directory = fsdirectory.open(paths.get(indexdir));
indexreader reader = directoryreader.open(directory);
indexsearcher searcher = new indexsearcher(reader);
query query = new termquery(new term("content", "random"));
/**每個分組内部的排序規則*/
sort groupsort = sort.relevance;
groupby(searcher, query, groupsort);
//groupsearch(searcher);
}
public static void groupby(indexsearcher searcher, query query, sort groupsort)
throws ioexception {
/** 前n條中分組 */
int topngroups = 10;
/** 分組起始偏移量 */
int groupoffset = 0;
/** 是否填充searchgroup的sortvalues */
boolean fillfields = true;
/** groupsort用于對組進行排序,docsort用于對組内記錄進行排序,多數情況下兩者是相同的,但也可不同 */
sort docsort = groupsort;
/** 用于組内分頁,起始偏移量 */
int docoffset = 0;
/** 每組傳回多少條結果 */
int docspergroup = 2;
/** 是否需要計算總的分組數量 */
boolean requiredtotalgroupcount = true;
/** 是否需要緩存評分 */
boolean cachescores = true;
termfirstpassgroupingcollector c1 = new termfirstpassgroupingcollector(
"author", groupsort, groupoffset + topngroups);
//第一次查詢緩存容量的大小:設定為16m
double maxcacherammb = 16.0;
/** 将termfirstpassgroupingcollector包裝成cachingcollector,為第一次查詢加緩存,避免重複評分
* cachingcollector就是用來為結果收集器添加緩存功能的
*/
cachingcollector cachedcollector = cachingcollector.create(c1,
cachescores, maxcacherammb);
// 開始第一次分組統計
searcher.search(query, cachedcollector);
/**第一次查詢傳回的結果集topgroups中隻有分組域值以及每組總的評分,至于每個分組裡有幾條,分别哪些索引文檔,則需要進行第二次查詢擷取*/
collection<searchgroup<bytesref>> topgroups = c1.gettopgroups(
groupoffset, fillfields);
if (topgroups == null) {
system.out.println("no groups matched ");
return;
}
collector secondpasscollector = null;
// 是否擷取每個分組内部每個索引的評分
boolean getscores = true;
// 是否計算最大評分
boolean getmaxscores = true;
// 如果需要對lucene的score進行修正,則需要重載termsecondpassgroupingcollector
termsecondpassgroupingcollector c2 = new termsecondpassgroupingcollector(
"author", topgroups, groupsort, docsort, docoffset
+ docspergroup, getscores, getmaxscores, fillfields);
// 如果需要計算總的分組數量,則需要把termsecondpassgroupingcollector包裝成termallgroupscollector
// termallgroupscollector就是用來收集總分組數量的
termallgroupscollector allgroupscollector = null;
//若需要統計總的分組數量
if (requiredtotalgroupcount) {
allgroupscollector = new termallgroupscollector("author");
secondpasscollector = multicollector.wrap(c2, allgroupscollector);
} else {
secondpasscollector = c2;
/**如果第一次查詢已經加了緩存,則直接從緩存中取*/
if (cachedcollector.iscached()) {
// 第二次查詢直接從緩存中取
cachedcollector.replay(secondpasscollector);
// 開始第二次分組查詢
searcher.search(query, secondpasscollector);
/** 所有組的數量 */
int totalgroupcount = 0;
/** 所有滿足條件的記錄數 */
int totalhitcount = 0;
/** 所有組内的滿足條件的記錄數(通常該值與totalhitcount是一緻的) */
int totalgroupedhitcount = -1;
totalgroupcount = allgroupscollector.getgroupcount();
//列印總的分組數量
system.out.println("groupcount: " + totalgroupcount);
topgroups<bytesref> groupsresult = c2.gettopgroups(docoffset);
//這裡列印的3項資訊就是第一次查詢的統計結果
totalhitcount = groupsresult.totalhitcount;
totalgroupedhitcount = groupsresult.totalgroupedhitcount;
system.out.println("groupsresult.totalhitcount:" + totalhitcount);
system.out.println("groupsresult.totalgroupedhitcount:"
+ totalgroupedhitcount);
system.out.println("///////////////////////////////////////////////");
int groupidx = 0;
//下面列印的是第二次查詢的統計結果,如果你僅僅值需要第一次查詢的統計結果資訊,不需要每個分組内部的詳細資訊,則不需要進行第二次查詢,請知曉
// 疊代組
for (groupdocs<bytesref> groupdocs : groupsresult.groups) {
groupidx++;
string groupvl = groupdocs.groupvalue == null ? "分組域的域值為空" : new string(groupdocs.groupvalue.bytes);
// 分組域的域值,groupidx表示組的索引即第幾組
system.out.println("group[" + groupidx + "].groupfieldvalue:" + groupvl);
// 目前分組内命中的總記錄數
system.out
.println("group[" + groupidx + "].totalhits:" + groupdocs.totalhits);
int docidx = 0;
// 疊代組内的記錄
for (scoredoc scoredoc : groupdocs.scoredocs) {
docidx++;
// 列印分組内部每條記錄的索引文檔id及其評分
system.out.println("group[" + groupidx + "][" + docidx + "]{docid:score}:"
+ scoredoc.doc + "/" + scoredoc.score);
//根據docid可以擷取到整個document對象,通過doc.get(fieldname)可以擷取某個存儲域的域值
//注意searcher.doc根據docid傳回的document對象中不包含docvaluesfield域的域值,隻包含非docvaluesfield域的域值,請知曉
document doc = searcher.doc(scoredoc.doc);
system.out.println("group[" + groupidx + "][" + docidx + "]{docid:author}:"
+ doc.get("id") + ":" + doc.get("content"));
}
system.out.println("******************華麗且拉轟的分割線***********************");
public static void groupsearch(indexsearcher indexsearcher)
/** 第一次查詢隻有top n條記錄進行分組統計 */
final abstractfirstpassgroupingcollector<?> c1 = createrandomfirstpasscollector(
groupfield, groupsort, 10);
indexsearcher.search(new termquery(new term("content", "random")), c1);
/*
* final abstractsecondpassgroupingcollector<?> c2 =
* createsecondpasscollector( c1, groupfield, groupsort, null, 0, 5,
* true, true, true); indexsearcher.search(new termquery(new
* term("content", "random")), c2);
/** 第一個參數表示截取偏移量offset,截取[offset, offset+topn]範圍内的組 */
collection<?> groups = c1.gettopgroups(0, true);
system.out.println("group.size:" + groups.size());
for (object object : groups) {
searchgroup searchgroup = (searchgroup) object;
if (searchgroup.groupvalue != null) {
if (searchgroup.groupvalue.getclass().isassignablefrom(
bytesref.class)) {
string groupvl = new string(
(((bytesref) searchgroup.groupvalue)).bytes);
if (groupvl.equals("")) {
system.out.println("該分組不包含分組域");
} else {
system.out.println(groupvl);
}
} else if (searchgroup.groupvalue.getclass().isassignablefrom(
mutablevaluestr.class)) {
if (searchgroup.groupvalue.tostring().endswith("(null)")) {
system.out
.println(new string(
(((mutablevaluestr) searchgroup.groupvalue)).value
.bytes()));
}
} else {
system.out.println("該分組不包含分組域");
for (int i = 0; i < searchgroup.sortvalues.length; i++) {
system.out.println("searchgroup.sortvalues:"
+ searchgroup.sortvalues[i]);
* system.out.println("groups.maxscore:" + groups.maxscore);
* system.out.println("groups.totalhitcount:" + groups.totalhitcount);
* system.out.println("groups.totalgroupedhitcount:" +
* groups.totalgroupedhitcount); system.out.println("groups.length:" +
* groups.groups.length); system.out.println("");
*
* groupdocs<?> group = groups.groups[0]; comparegroupvalue("author3",
* group); system.out.println(group.scoredocs.length);
/**
* 建立測試用的索引文檔
*
* @throws ioexception
*/
public static void createindex() throws ioexception {
directory dir = fsdirectory.open(paths.get(indexdir));
indexwriterconfig indexwriterconfig = new indexwriterconfig(analyzer);
indexwriterconfig.setopenmode(openmode.create_or_append);
indexwriter writer = new indexwriter(dir, indexwriterconfig);
adddocuments(groupfield, writer);
* 添加索引文檔
* @param groupfield
* @param writer
public static void adddocuments(string groupfield, indexwriter writer)
// 0
document doc = new document();
addgroupfield(doc, groupfield, "author1");
doc.add(new textfield("content", "random text", field.store.yes));
doc.add(new field("id", "1", store.yes, index.not_analyzed));
writer.adddocument(doc);
// 1
doc = new document();
doc.add(new textfield("content", "some more random text",
field.store.yes));
doc.add(new field("id", "2", store.yes, index.not_analyzed));
// 2
doc.add(new textfield("content", "some more random textual data",
doc.add(new field("id", "3", store.yes, index.not_analyzed));
// 3
addgroupfield(doc, groupfield, "author2");
doc.add(new textfield("content", "some random text", field.store.yes));
doc.add(new field("id", "4", store.yes, index.not_analyzed));
// 4
addgroupfield(doc, groupfield, "author3");
doc.add(new field("id", "5", store.yes, index.not_analyzed));
// 5
doc.add(new textfield("content", "random", field.store.yes));
doc.add(new field("id", "6", store.yes, index.not_analyzed));
// 6 -- no author field
doc.add(new textfield("content",
"random word stuck in alot of other text", field.store.yes));
writer.commit();
writer.close();
* 判斷域值是否與分組域值相等
* @param expected
* @param group
private static void comparegroupvalue(string expected, groupdocs<?> group) {
if (expected == null) {
if (group.groupvalue == null) {
return;
} else if (group.groupvalue.getclass().isassignablefrom(
mutablevaluestr.class)) {
} else if (((bytesref) group.groupvalue).length == 0) {
if (group.groupvalue.getclass().isassignablefrom(bytesref.class)) {
system.out.println("expected == groupvalue?"
+ new bytesref(expected) == group.groupvalue);
} else if (group.groupvalue.getclass().isassignablefrom(
mutablevaluestr.class)) {
mutablevaluestr v = new mutablevaluestr();
v.value.copychars(expected);
.println("expected == groupvalue?" + v == group.groupvalue);
* 建立firstpasscollector首次檢索
* @param groupsort
* @param topdocs
* @param firstpassgroupingcollector
* @return
private abstractfirstpassgroupingcollector<?> createfirstpasscollector(
string groupfield, sort groupsort, int topdocs,
abstractfirstpassgroupingcollector<?> firstpassgroupingcollector)
if (termfirstpassgroupingcollector.class
.isassignablefrom(firstpassgroupingcollector.getclass())) {
valuesource vs = new bytesreffieldsource(groupfield);
return new functionfirstpassgroupingcollector(vs, new hashmap(),
groupsort, topdocs);
return new termfirstpassgroupingcollector(groupfield, groupsort,
topdocs);
private static abstractfirstpassgroupingcollector<?> createrandomfirstpasscollector(
string groupfield, sort groupsort, int topdocs) throws ioexception {
abstractfirstpassgroupingcollector<?> selected;
// boolean flag = new random().nextboolean();
if (false) {
// functionfirstpassgroupingcollector差別是對于分組域的值采用mutablevaluestr進行存儲,
// mutablevaluestr内部維護的是一個bytesrefbuilder,bytesrefbuilder内部有一個grow函數,會自動
// 擴充内部byte[]容量,而bytesref是定長的buffer
selected = new functionfirstpassgroupingcollector(vs,
new hashmap(), groupsort, topdocs);
// termfirstpassgroupingcollector适用于你的分組域是一個非docvaluesfield
selected = new termfirstpassgroupingcollector(groupfield,
return selected;
private static <t> abstractsecondpassgroupingcollector<t> createsecondpasscollector(
abstractfirstpassgroupingcollector firstpassgroupingcollector,
string groupfield, sort groupsort, sort sortwithingroup,
int groupoffset, int maxdocspergroup, boolean getscores,
boolean getmaxscores, boolean fillsortfields) throws ioexception {
collection<searchgroup<bytesref>> searchgroups = firstpassgroupingcollector
.gettopgroups(groupoffset, fillsortfields);
return (abstractsecondpassgroupingcollector) new termsecondpassgroupingcollector(
groupfield, searchgroups, groupsort, sortwithingroup,
maxdocspergroup, getscores, getmaxscores, fillsortfields);
collection<searchgroup<mutablevalue>> searchgroups = firstpassgroupingcollector
return (abstractsecondpassgroupingcollector) new functionsecondpassgroupingcollector(
searchgroups, groupsort, sortwithingroup, maxdocspergroup,
getscores, getmaxscores, fillsortfields, vs, new hashmap());
// basically converts searchgroups from mutablevalue to bytesref if grouping
// by valuesource
@suppresswarnings("unchecked")
private abstractsecondpassgroupingcollector<?> createsecondpasscollector(
abstractfirstpassgroupingcollector<?> firstpassgroupingcollector,
string groupfield, collection<searchgroup<bytesref>> searchgroups,
sort groupsort, sort sortwithingroup, int maxdocspergroup,
boolean getscores, boolean getmaxscores, boolean fillsortfields)
if (firstpassgroupingcollector.getclass().isassignablefrom(
termfirstpassgroupingcollector.class)) {
return new termsecondpassgroupingcollector(groupfield,
getscores, getmaxscores, fillsortfields);
list<searchgroup<mutablevalue>> mvalsearchgroups = new arraylist<searchgroup<mutablevalue>>(
searchgroups.size());
for (searchgroup<bytesref> mergedtopgroup : searchgroups) {
searchgroup<mutablevalue> sg = new searchgroup();
mutablevaluestr groupvalue = new mutablevaluestr();
if (mergedtopgroup.groupvalue != null) {
groupvalue.value.copybytes(mergedtopgroup.groupvalue);
} else {
groupvalue.exists = false;
sg.groupvalue = groupvalue;
sg.sortvalues = mergedtopgroup.sortvalues;
mvalsearchgroups.add(sg);
return new functionsecondpassgroupingcollector(mvalsearchgroups,
groupsort, sortwithingroup, maxdocspergroup, getscores,
getmaxscores, fillsortfields, vs, new hashmap());
private abstractallgroupscollector<?> createallgroupscollector(
string groupfield) {
return new termallgroupscollector(groupfield);
return new functionallgroupscollector(vs, new hashmap());
* 添加分組域
* @param doc
* 索引文檔
* 需要分組的域名稱
* @param value
* 域值
private static void addgroupfield(document doc, string groupfield,
string value) {
doc.add(new sorteddocvaluesfield(groupfield, new bytesref(value)));
}
最近本人身體出了點小狀況,人不太舒服,就不多說了,大家看看示例代碼自己了解了解,裡面注釋我寫的很詳細了,如果你們有哪裡看不懂,qq上聯系我。demo源碼在底下的附件裡,請知曉!
若你還有什麼疑問,請加我QQ:736031305,歡迎你一起交流學習。
轉載:http://iamyida.iteye.com/blog/2202651