// in solrj/src/java/org/apache/solr/common/cloud/DefaultConnectionStrategy.java
@Override
public void connect(String serverAddress, int timeout, Watcher watcher, ZkUpdate updater) throws IOException, InterruptedException, TimeoutException {
updater.update(new SolrZooKeeper(serverAddress, timeout, watcher));
}
// in solrj/src/java/org/apache/solr/common/cloud/DefaultConnectionStrategy.java
@Override
public void reconnect(final String serverAddress, final int zkClientTimeout,
final Watcher watcher, final ZkUpdate updater) throws IOException {
log.info("Connection expired - starting a new one...");
try {
updater
.update(new SolrZooKeeper(serverAddress, zkClientTimeout, watcher));
log.info("Reconnected to ZooKeeper");
} catch (Exception e) {
SolrException.log(log, "Reconnect to ZooKeeper failed", e);
log.info("Reconnect to ZooKeeper failed");
}
}
// in solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
public void makePath(String path, File file, boolean failOnExists, boolean retryOnConnLoss)
throws IOException, KeeperException, InterruptedException {
makePath(path, FileUtils.readFileToString(file).getBytes("UTF-8"),
CreateMode.PERSISTENT, null, failOnExists, retryOnConnLoss);
}
// in solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
public void makePath(String path, File file, boolean retryOnConnLoss) throws IOException,
KeeperException, InterruptedException {
makePath(path, FileUtils.readFileToString(file).getBytes("UTF-8"), retryOnConnLoss);
}
// in solrj/src/java/org/apache/solr/common/cloud/SolrZkClient.java
public void setData(String path, File file, boolean retryOnConnLoss) throws IOException,
KeeperException, InterruptedException {
if (log.isInfoEnabled()) {
log.info("Write to ZooKeepeer " + file.getAbsolutePath() + " to " + path);
}
String data = FileUtils.readFileToString(file);
setData(path, data.getBytes("UTF-8"), retryOnConnLoss);
}
// in solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
public synchronized void process(WatchedEvent event) {
if (log.isInfoEnabled()) {
log.info("Watcher " + this + " name:" + name + " got event " + event
+ " path:" + event.getPath() + " type:" + event.getType());
}
state = event.getState();
if (state == KeeperState.SyncConnected) {
connected = true;
clientConnected.countDown();
} else if (state == KeeperState.Expired) {
connected = false;
log.info("Attempting to reconnect to recover relationship with ZooKeeper...");
try {
connectionStrategy.reconnect(zkServerAddress, zkClientTimeout, this,
new ZkClientConnectionStrategy.ZkUpdate() {
@Override
public void update(SolrZooKeeper keeper)
throws InterruptedException, TimeoutException, IOException {
synchronized (connectionStrategy) {
waitForConnected(SolrZkClient.DEFAULT_CLIENT_CONNECT_TIMEOUT);
client.updateKeeper(keeper);
if (onReconnect != null) {
onReconnect.command();
}
synchronized (ConnectionManager.this) {
ConnectionManager.this.connected = true;
}
}
}
});
} catch (Exception e) {
SolrException.log(log, "", e);
}
log.info("Connected:" + connected);
}
}
// in solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
@Override
public void update(SolrZooKeeper keeper)
throws InterruptedException, TimeoutException, IOException {
synchronized (connectionStrategy) {
waitForConnected(SolrZkClient.DEFAULT_CLIENT_CONNECT_TIMEOUT);
client.updateKeeper(keeper);
if (onReconnect != null) {
onReconnect.command();
}
synchronized (ConnectionManager.this) {
ConnectionManager.this.connected = true;
}
}
}
// in solrj/src/java/org/apache/solr/common/cloud/ConnectionManager.java
public synchronized void waitForConnected(long waitForConnection)
throws InterruptedException, TimeoutException, IOException {
long expire = System.currentTimeMillis() + waitForConnection;
long left = waitForConnection;
while (!connected && left > 0) {
wait(left);
left = expire - System.currentTimeMillis();
}
if (!connected) {
throw new TimeoutException("Could not connect to ZooKeeper " + zkServerAddress + " within " + waitForConnection + " ms");
}
}
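// A minimal sketch (not from the source) of the same deadline-based wait idiom
// used by waitForConnected() above: recompute the time remaining after every
// wakeup so that spurious wakeups and unrelated notify() calls cannot stretch
// the overall timeout. All names here are illustrative.
public class DeadlineWaitExample {
  private boolean ready = false;

  public synchronized void waitUntilReady(long timeoutMs)
      throws InterruptedException, java.util.concurrent.TimeoutException {
    long expire = System.currentTimeMillis() + timeoutMs;
    long left = timeoutMs;
    while (!ready && left > 0) {
      wait(left); // may return early: spurious wakeup or notifyAll()
      left = expire - System.currentTimeMillis();
    }
    if (!ready) {
      throw new java.util.concurrent.TimeoutException(
          "not ready within " + timeoutMs + " ms");
    }
  }

  public synchronized void markReady() {
    ready = true;
    notifyAll(); // wake any thread blocked in waitUntilReady()
  }
}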
// in solrj/src/java/org/apache/solr/common/util/StrUtils.java
public static void partialURLEncodeVal(Appendable dest, String val) throws IOException {
for (int i=0; i<val.length(); i++) {
char ch = val.charAt(i);
if (ch < 32) {
dest.append('%');
if (ch < 0x10) dest.append('0');
dest.append(Integer.toHexString(ch));
} else {
switch (ch) {
case ' ': dest.append('+'); break;
case '&': dest.append("%26"); break;
case '%': dest.append("%25"); break;
case '=': dest.append("%3D"); break;
case '+': dest.append("%2B"); break;
default : dest.append(ch); break;
}
}
}
}
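// Hypothetical usage of partialURLEncodeVal() above: only characters special to
// application/x-www-form-urlencoded data (space, '&', '%', '=', '+') and control
// characters are escaped; everything else passes through. StringBuilder
// implements Appendable, so it can serve as the destination.
import org.apache.solr.common.util.StrUtils;

public class PartialEncodeExample {
  public static void main(String[] args) throws java.io.IOException {
    StringBuilder sb = new StringBuilder();
    StrUtils.partialURLEncodeVal(sb, "a=1 & b=2+3");
    System.out.println(sb); // prints: a%3D1+%26+b%3D2%2B3
  }
}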
// in solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
public InputStream getStream() throws IOException {
URLConnection conn = this.url.openConnection();
contentType = conn.getContentType();
name = url.toExternalForm();
size = new Long( conn.getContentLength() );
return conn.getInputStream();
}
// in solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
public InputStream getStream() throws IOException {
return new FileInputStream( file );
}
// in solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
@Override
public Reader getReader() throws IOException {
String charset = getCharsetFromContentType( contentType );
return charset == null
? new FileReader( file )
: new InputStreamReader( getStream(), charset );
}
// in solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
public InputStream getStream() throws IOException {
return new ByteArrayInputStream( str.getBytes(DEFAULT_CHARSET) );
}
// in solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
@Override
public Reader getReader() throws IOException {
String charset = getCharsetFromContentType( contentType );
return charset == null
? new StringReader( str )
: new InputStreamReader( getStream(), charset );
}
// in solrj/src/java/org/apache/solr/common/util/ContentStreamBase.java
public Reader getReader() throws IOException {
String charset = getCharsetFromContentType( getContentType() );
return charset == null
? new InputStreamReader( getStream(), DEFAULT_CHARSET )
: new InputStreamReader( getStream(), charset );
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public static void escapeCharData(String str, Writer out) throws IOException {
escape(str, out, chardata_escapes);
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public static void escapeAttributeValue(String str, Writer out) throws IOException {
escape(str, out, attribute_escapes);
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public static void escapeAttributeValue(char [] chars, int start, int length, Writer out) throws IOException {
escape(chars, start, length, out, attribute_escapes);
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public final static void writeXML(Writer out, String tag, String val) throws IOException {
out.write('<');
out.write(tag);
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
escapeCharData(val,out);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public final static void writeUnescapedXML(Writer out, String tag, String val, Object... attrs) throws IOException {
out.write('<');
out.write(tag);
for (int i=0; i<attrs.length; i++) {
out.write(' ');
out.write(attrs[i++].toString());
out.write('=');
out.write('"');
out.write(attrs[i].toString());
out.write('"');
}
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
out.write(val);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public final static void writeXML(Writer out, String tag, String val, Object... attrs) throws IOException {
out.write('<');
out.write(tag);
for (int i=0; i<attrs.length; i++) {
out.write(' ');
out.write(attrs[i++].toString());
out.write('=');
out.write('"');
escapeAttributeValue(attrs[i].toString(), out);
out.write('"');
}
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
escapeCharData(val,out);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
public static void writeXML(Writer out, String tag, String val, Map<String, String> attrs) throws IOException {
out.write('<');
out.write(tag);
for (Map.Entry<String, String> entry : attrs.entrySet()) {
out.write(' ');
out.write(entry.getKey());
out.write('=');
out.write('"');
escapeAttributeValue(entry.getValue(), out);
out.write('"');
}
if (val == null) {
out.write('/');
out.write('>');
} else {
out.write('>');
escapeCharData(val,out);
out.write('<');
out.write('/');
out.write(tag);
out.write('>');
}
}
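// Hypothetical usage of the writeXML() helpers above: attribute values go
// through escapeAttributeValue() and element text through escapeCharData(), so
// reserved XML characters in names and values are emitted safely.
import java.io.StringWriter;
import org.apache.solr.common.util.XML;

public class WriteXmlExample {
  public static void main(String[] args) throws java.io.IOException {
    StringWriter w = new StringWriter();
    XML.writeXML(w, "field", "Tom & Jerry", "name", "title");
    System.out.println(w); // prints: <field name="title">Tom &amp; Jerry</field>
  }
}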
// in solrj/src/java/org/apache/solr/common/util/XML.java
private static void escape(char [] chars, int offset, int length, Writer out, String [] escapes) throws IOException{
for (int i=offset; i<length; i++) {
char ch = chars[i];
if (ch<escapes.length) {
String replacement = escapes[ch];
if (replacement != null) {
out.write(replacement);
continue;
}
}
out.write(ch);
}
}
// in solrj/src/java/org/apache/solr/common/util/XML.java
private static void escape(String str, Writer out, String[] escapes) throws IOException {
for (int i=0; i<str.length(); i++) {
char ch = str.charAt(i);
if (ch<escapes.length) {
String replacement = escapes[ch];
if (replacement != null) {
out.write(replacement);
continue;
}
}
out.write(ch);
}
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
@Override
public int read() throws IOException {
if (pos >= end) {
refill();
if (pos >= end) return -1;
}
return buf[pos++] & 0xff;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public int peek() throws IOException {
if (pos >= end) {
refill();
if (pos >= end) return -1;
}
return buf[pos] & 0xff;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public int readUnsignedByte() throws IOException {
if (pos >= end) {
refill();
if (pos >= end) {
throw new EOFException();
}
}
return buf[pos++] & 0xff;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public int readWrappedStream(byte[] target, int offset, int len) throws IOException {
return in.read(target, offset, len);
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public void refill() throws IOException {
// this will set end to -1 at EOF
end = readWrappedStream(buf, 0, buf.length);
if (end > 0) readFromStream += end;
pos = 0;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
@Override
public int available() throws IOException {
return end - pos;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
@Override
public int read(byte b[], int off, int len) throws IOException {
int r=0; // number of bytes we have read
// first read from our buffer;
if (end-pos > 0) {
r = Math.min(end-pos, len);
System.arraycopy(buf, pos, b, off, r);
pos += r;
}
if (r == len) return r;
// amount left to read is >= buffer size
if (len-r >= buf.length) {
int ret = readWrappedStream(b, off+r, len-r);
if (ret >= 0) {
readFromStream += ret;
r += ret;
return r;
} else {
// negative return code
return r > 0 ? r : -1;
}
}
refill();
// read rest from our buffer
if (end-pos > 0) {
int toRead = Math.min(end-pos, len-r);
System.arraycopy(buf, pos, b, off+r, toRead);
pos += toRead;
r += toRead;
return r;
}
return r > 0 ? r : -1;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
@Override
public void close() throws IOException {
in.close();
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public void readFully(byte b[]) throws IOException {
readFully(b, 0, b.length);
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public void readFully(byte b[], int off, int len) throws IOException {
while (len>0) {
int ret = read(b, off, len);
if (ret==-1) {
throw new EOFException();
}
off += ret;
len -= ret;
}
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public int skipBytes(int n) throws IOException {
if (end-pos >= n) {
pos += n;
return n;
}
if (end-pos<0) return -1;
int r = end-pos;
pos = end;
while (r < n) {
refill();
if (end-pos <= 0) return r;
int toRead = Math.min(end-pos, n-r);
r += toRead;
pos += toRead;
}
return r;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public boolean readBoolean() throws IOException {
return readByte()==1;
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public byte readByte() throws IOException {
if (pos >= end) {
refill();
if (pos >= end) throw new EOFException();
}
return buf[pos++];
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public short readShort() throws IOException {
return (short)((readUnsignedByte() << 8) | readUnsignedByte());
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public int readUnsignedShort() throws IOException {
return (readUnsignedByte() << 8) | readUnsignedByte();
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public char readChar() throws IOException {
return (char)((readUnsignedByte() << 8) | readUnsignedByte());
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public int readInt() throws IOException {
return ((readUnsignedByte() << 24)
|(readUnsignedByte() << 16)
|(readUnsignedByte() << 8)
| readUnsignedByte());
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public long readLong() throws IOException {
return (((long)readUnsignedByte()) << 56)
| (((long)readUnsignedByte()) << 48)
| (((long)readUnsignedByte()) << 40)
| (((long)readUnsignedByte()) << 32)
| (((long)readUnsignedByte()) << 24)
| (readUnsignedByte() << 16)
| (readUnsignedByte() << 8)
| (readUnsignedByte());
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public float readFloat() throws IOException {
return Float.intBitsToFloat(readInt());
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public double readDouble() throws IOException {
return Double.longBitsToDouble(readLong());
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public String readLine() throws IOException {
return new DataInputStream(this).readLine();
}
// in solrj/src/java/org/apache/solr/common/util/FastInputStream.java
public String readUTF() throws IOException {
return new DataInputStream(this).readUTF();
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
@Override
public void write(int b) throws IOException {
write((byte)b);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
@Override
public void write(byte b[]) throws IOException {
write(b,0,b.length);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void write(byte b) throws IOException {
if (pos >= buf.length) {
out.write(buf);
written += pos;
pos=0;
}
buf[pos++] = b;
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
@Override
public void write(byte arr[], int off, int len) throws IOException {
int space = buf.length - pos;
if (len < space) {
System.arraycopy(arr, off, buf, pos, len);
pos += len;
} else if (len<buf.length) {
// if the data to write is small enough, buffer it.
System.arraycopy(arr, off, buf, pos, space);
out.write(buf);
written += buf.length;
pos = len-space;
System.arraycopy(arr, off+space, buf, 0, pos);
} else {
if (pos>0) {
out.write(buf,0,pos); // flush
written += pos;
pos=0;
}
// don't buffer, just write to sink
out.write(arr, off, len);
written += len;
}
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void reserve(int len) throws IOException {
if (len > (buf.length - pos))
flushBuffer();
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeBoolean(boolean v) throws IOException {
write(v ? 1:0);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeByte(int v) throws IOException {
write((byte)v);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeShort(int v) throws IOException {
write((byte)(v >>> 8));
write((byte)v);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeChar(int v) throws IOException {
writeShort(v);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeInt(int v) throws IOException {
reserve(4);
buf[pos] = (byte)(v>>>24);
buf[pos+1] = (byte)(v>>>16);
buf[pos+2] = (byte)(v>>>8);
buf[pos+3] = (byte)(v);
pos+=4;
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeLong(long v) throws IOException {
reserve(8);
buf[pos] = (byte)(v>>>56);
buf[pos+1] = (byte)(v>>>48);
buf[pos+2] = (byte)(v>>>40);
buf[pos+3] = (byte)(v>>>32);
buf[pos+4] = (byte)(v>>>24);
buf[pos+5] = (byte)(v>>>16);
buf[pos+6] = (byte)(v>>>8);
buf[pos+7] = (byte)(v);
pos+=8;
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeFloat(float v) throws IOException {
writeInt(Float.floatToRawIntBits(v));
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeDouble(double v) throws IOException {
writeLong(Double.doubleToRawLongBits(v));
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeBytes(String s) throws IOException {
// non-optimized version, but this shouldn't be used anyway
for (int i=0; i<s.length(); i++)
write((byte)s.charAt(i));
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeChars(String s) throws IOException {
// non-optimized version
for (int i=0; i<s.length(); i++)
writeChar(s.charAt(i));
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void writeUTF(String s) throws IOException {
// non-optimized version, but this shouldn't be used anyway
DataOutputStream daos = new DataOutputStream(this);
daos.writeUTF(s);
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
@Override
public void flush() throws IOException {
flushBuffer();
out.flush();
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
@Override
public void close() throws IOException {
flushBuffer();
out.close();
}
// in solrj/src/java/org/apache/solr/common/util/FastOutputStream.java
public void flushBuffer() throws IOException {
if (pos > 0) {
out.write(buf, 0, pos);
written += pos;
pos=0;
}
}
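// A small round-trip sketch (assumed usage, not from the source) tying the two
// buffered stream classes above together: primitives written big-endian by
// FastOutputStream are read back by the matching FastInputStream methods. Note
// that flush() must be called before the buffered bytes reach the sink.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.solr.common.util.FastInputStream;
import org.apache.solr.common.util.FastOutputStream;

public class FastStreamRoundTrip {
  public static void main(String[] args) throws java.io.IOException {
    ByteArrayOutputStream sink = new ByteArrayOutputStream();
    FastOutputStream fos = FastOutputStream.wrap(sink);
    fos.writeInt(42);
    fos.writeLong(1L << 40);
    fos.flush(); // flushBuffer() plus flush of the underlying sink

    FastInputStream fis =
        FastInputStream.wrap(new ByteArrayInputStream(sink.toByteArray()));
    System.out.println(fis.readInt());  // 42
    System.out.println(fis.readLong()); // 1099511627776
  }
}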
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void marshal(Object nl, OutputStream os) throws IOException {
init(FastOutputStream.wrap(os));
try {
daos.writeByte(VERSION);
writeVal(nl);
} finally {
daos.flushBuffer();
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public Object unmarshal(InputStream is) throws IOException {
FastInputStream dis = FastInputStream.wrap(is);
version = dis.readByte();
if (version != VERSION) {
throw new RuntimeException("Invalid version (expected " + VERSION +
", but " + version + ") or the data in not in 'javabin' format");
}
return readVal(dis);
}
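// Hedged usage sketch for marshal()/unmarshal() above: any of the "known"
// types (NamedList, Map, Collection, String, boxed primitives, SolrDocument,
// ...) can be written to a stream and read back; the single version byte at
// the head of the stream is verified on the way in.
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.solr.common.util.JavaBinCodec;
import org.apache.solr.common.util.NamedList;

public class JavaBinRoundTrip {
  public static void main(String[] args) throws java.io.IOException {
    NamedList<Object> nl = new NamedList<Object>();
    nl.add("q", "*:*");
    nl.add("rows", 10);

    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    new JavaBinCodec().marshal(nl, baos);

    Object back = new JavaBinCodec()
        .unmarshal(new ByteArrayInputStream(baos.toByteArray()));
    // back is a NamedList with the same entries as nl
    System.out.println(back);
  }
}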
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public SimpleOrderedMap<Object> readOrderedMap(FastInputStream dis) throws IOException {
int sz = readSize(dis);
SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
for (int i = 0; i < sz; i++) {
String name = (String) readVal(dis);
Object val = readVal(dis);
nl.add(name, val);
}
return nl;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public NamedList<Object> readNamedList(FastInputStream dis) throws IOException {
int sz = readSize(dis);
NamedList<Object> nl = new NamedList<Object>();
for (int i = 0; i < sz; i++) {
String name = (String) readVal(dis);
Object val = readVal(dis);
nl.add(name, val);
}
return nl;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeNamedList(NamedList<?> nl) throws IOException {
writeTag(nl instanceof SimpleOrderedMap ? ORDERED_MAP : NAMED_LST, nl.size());
for (int i = 0; i < nl.size(); i++) {
String name = nl.getName(i);
writeExternString(name);
Object val = nl.getVal(i);
writeVal(val);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeVal(Object val) throws IOException {
if (writeKnownType(val)) {
return;
} else {
Object tmpVal = val;
if (resolver != null) {
tmpVal = resolver.resolve(val, this);
if (tmpVal == null) return; // null means the resolver took care of it fully
if (writeKnownType(tmpVal)) return;
}
}
writeVal(val.getClass().getName() + ':' + val.toString());
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public Object readVal(FastInputStream dis) throws IOException {
tagByte = dis.readByte();
// if the top 3 bits of the tag byte are clear, this is a normal tag
// (handled by the switch on the full byte below); otherwise the type
// and size are packed together in this single byte
switch (tagByte >>> 5) {
case STR >>> 5:
return readStr(dis);
case SINT >>> 5:
return readSmallInt(dis);
case SLONG >>> 5:
return readSmallLong(dis);
case ARR >>> 5:
return readArray(dis);
case ORDERED_MAP >>> 5:
return readOrderedMap(dis);
case NAMED_LST >>> 5:
return readNamedList(dis);
case EXTERN_STRING >>> 5:
return readExternString(dis);
}
switch (tagByte) {
case NULL:
return null;
case DATE:
return new Date(dis.readLong());
case INT:
return dis.readInt();
case BOOL_TRUE:
return Boolean.TRUE;
case BOOL_FALSE:
return Boolean.FALSE;
case FLOAT:
return dis.readFloat();
case DOUBLE:
return dis.readDouble();
case LONG:
return dis.readLong();
case BYTE:
return dis.readByte();
case SHORT:
return dis.readShort();
case MAP:
return readMap(dis);
case SOLRDOC:
return readSolrDocument(dis);
case SOLRDOCLST:
return readSolrDocumentList(dis);
case BYTEARR:
return readByteArray(dis);
case ITERATOR:
return readIterator(dis);
case END:
return END_OBJ;
case SOLRINPUTDOC:
return readSolrInputDocument(dis);
}
throw new RuntimeException("Unknown type " + tagByte);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public boolean writeKnownType(Object val) throws IOException {
if (writePrimitive(val)) return true;
if (val instanceof NamedList) {
writeNamedList((NamedList<?>) val);
return true;
}
if (val instanceof SolrDocumentList) { // SolrDocumentList is a List, so must come before List check
writeSolrDocumentList((SolrDocumentList) val);
return true;
}
if (val instanceof Collection) {
writeArray((Collection) val);
return true;
}
if (val instanceof Object[]) {
writeArray((Object[]) val);
return true;
}
if (val instanceof SolrDocument) {
//this needs special treatment to know which fields are to be written
if (resolver == null) {
writeSolrDocument((SolrDocument) val);
} else {
Object retVal = resolver.resolve(val, this);
if (retVal != null) {
if (retVal instanceof SolrDocument) {
writeSolrDocument((SolrDocument) retVal);
} else {
writeVal(retVal);
}
}
}
return true;
}
if (val instanceof SolrInputDocument) {
writeSolrInputDocument((SolrInputDocument)val);
return true;
}
if (val instanceof Map) {
writeMap((Map) val);
return true;
}
if (val instanceof Iterator) {
writeIterator((Iterator) val);
return true;
}
if (val instanceof Iterable) {
writeIterator(((Iterable) val).iterator());
return true;
}
return false;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeTag(byte tag) throws IOException {
daos.writeByte(tag);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeTag(byte tag, int size) throws IOException {
if ((tag & 0xe0) != 0) {
if (size < 0x1f) {
daos.writeByte(tag | size);
} else {
daos.writeByte(tag | 0x1f);
writeVInt(size - 0x1f, daos);
}
} else {
daos.writeByte(tag);
writeVInt(size, daos);
}
}
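// Worked example (illustrative, not from the source) of the packing done by
// writeTag(tag, size) above. For tags whose top three bits are non-zero (STR,
// SINT, SLONG, ARR, ORDERED_MAP, NAMED_LST, EXTERN_STRING), the low five bits
// of the tag byte carry the size:
//
//   writeTag(STR, 5)  -> single byte (STR | 5)
//   writeTag(STR, 40) -> byte (STR | 0x1f) as an overflow marker,
//                        then writeVInt(40 - 0x1f), i.e. writeVInt(9)
//
// readSize() reverses this: sz = tagByte & 0x1f, and when sz == 0x1f the VInt
// remainder is read and added back.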
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeByteArray(byte[] arr, int offset, int len) throws IOException {
writeTag(BYTEARR, len);
daos.write(arr, offset, len);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public byte[] readByteArray(FastInputStream dis) throws IOException {
byte[] arr = new byte[readVInt(dis)];
dis.readFully(arr);
return arr;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeSolrDocument(SolrDocument doc) throws IOException {
writeTag(SOLRDOC);
writeTag(ORDERED_MAP, doc.size());
for (Map.Entry<String, Object> entry : doc) {
String name = entry.getKey();
writeExternString(name);
Object val = entry.getValue();
writeVal(val);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public SolrDocument readSolrDocument(FastInputStream dis) throws IOException {
NamedList nl = (NamedList) readVal(dis);
SolrDocument doc = new SolrDocument();
for (int i = 0; i < nl.size(); i++) {
String name = nl.getName(i);
Object val = nl.getVal(i);
doc.setField(name, val);
}
return doc;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public SolrDocumentList readSolrDocumentList(FastInputStream dis) throws IOException {
SolrDocumentList solrDocs = new SolrDocumentList();
List list = (List) readVal(dis);
solrDocs.setNumFound((Long) list.get(0));
solrDocs.setStart((Long) list.get(1));
solrDocs.setMaxScore((Float) list.get(2));
@SuppressWarnings("unchecked")
List<SolrDocument> l = (List<SolrDocument>) readVal(dis);
solrDocs.addAll(l);
return solrDocs;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeSolrDocumentList(SolrDocumentList docs)
throws IOException {
writeTag(SOLRDOCLST);
List<Number> l = new ArrayList<Number>(3);
l.add(docs.getNumFound());
l.add(docs.getStart());
l.add(docs.getMaxScore());
writeArray(l);
writeArray(docs);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public SolrInputDocument readSolrInputDocument(FastInputStream dis) throws IOException {
int sz = readVInt(dis);
float docBoost = (Float)readVal(dis);
SolrInputDocument sdoc = new SolrInputDocument();
sdoc.setDocumentBoost(docBoost);
for (int i = 0; i < sz; i++) {
float boost = 1.0f;
String fieldName;
Object boostOrFieldName = readVal(dis);
if (boostOrFieldName instanceof Float) {
boost = (Float)boostOrFieldName;
fieldName = (String)readVal(dis);
} else {
fieldName = (String)boostOrFieldName;
}
Object fieldVal = readVal(dis);
sdoc.setField(fieldName, fieldVal, boost);
}
return sdoc;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeSolrInputDocument(SolrInputDocument sdoc) throws IOException {
writeTag(SOLRINPUTDOC, sdoc.size());
writeFloat(sdoc.getDocumentBoost());
for (SolrInputField inputField : sdoc.values()) {
if (inputField.getBoost() != 1.0f) {
writeFloat(inputField.getBoost());
}
writeExternString(inputField.getName());
writeVal(inputField.getValue());
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public Map<Object,Object> readMap(FastInputStream dis)
throws IOException {
int sz = readVInt(dis);
Map<Object,Object> m = new LinkedHashMap<Object,Object>();
for (int i = 0; i < sz; i++) {
Object key = readVal(dis);
Object val = readVal(dis);
m.put(key, val);
}
return m;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeIterator(Iterator iter) throws IOException {
writeTag(ITERATOR);
while (iter.hasNext()) {
writeVal(iter.next());
}
writeVal(END_OBJ);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public List<Object> readIterator(FastInputStream fis) throws IOException {
ArrayList<Object> l = new ArrayList<Object>();
while (true) {
Object o = readVal(fis);
if (o == END_OBJ) break;
l.add(o);
}
return l;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeArray(List l) throws IOException {
writeTag(ARR, l.size());
for (int i = 0; i < l.size(); i++) {
writeVal(l.get(i));
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeArray(Collection coll) throws IOException {
writeTag(ARR, coll.size());
for (Object o : coll) {
writeVal(o);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeArray(Object[] arr) throws IOException {
writeTag(ARR, arr.length);
for (int i = 0; i < arr.length; i++) {
Object o = arr[i];
writeVal(o);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public List<Object> readArray(FastInputStream dis) throws IOException {
int sz = readSize(dis);
ArrayList<Object> l = new ArrayList<Object>(sz);
for (int i = 0; i < sz; i++) {
l.add(readVal(dis));
}
return l;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeStr(String s) throws IOException {
if (s == null) {
writeTag(NULL);
return;
}
int end = s.length();
int maxSize = end * 4;
if (bytes == null || bytes.length < maxSize) bytes = new byte[maxSize];
int sz = ByteUtils.UTF16toUTF8(s, 0, end, bytes, 0);
writeTag(STR, sz);
daos.write(bytes, 0, sz);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public String readStr(FastInputStream dis) throws IOException {
int sz = readSize(dis);
if (bytes == null || bytes.length < sz) bytes = new byte[sz];
dis.readFully(bytes, 0, sz);
arr.reset();
ByteUtils.UTF8toUTF16(bytes, 0, sz, arr);
return arr.toString();
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeInt(int val) throws IOException {
if (val > 0) {
int b = SINT | (val & 0x0f);
if (val >= 0x0f) {
b |= 0x10;
daos.writeByte(b);
writeVInt(val >>> 4, daos);
} else {
daos.writeByte(b);
}
} else {
daos.writeByte(INT);
daos.writeInt(val);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public int readSmallInt(FastInputStream dis) throws IOException {
int v = tagByte & 0x0F;
if ((tagByte & 0x10) != 0)
v = (readVInt(dis) << 4) | v;
return v;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeLong(long val) throws IOException {
if ((val & 0xff00000000000000L) == 0) {
int b = SLONG | ((int) val & 0x0f);
if (val >= 0x0f) {
b |= 0x10;
daos.writeByte(b);
writeVLong(val >>> 4, daos);
} else {
daos.writeByte(b);
}
} else {
daos.writeByte(LONG);
daos.writeLong(val);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public long readSmallLong(FastInputStream dis) throws IOException {
long v = tagByte & 0x0F;
if ((tagByte & 0x10) != 0)
v = (readVLong(dis) << 4) | v;
return v;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeFloat(float val) throws IOException {
daos.writeByte(FLOAT);
daos.writeFloat(val);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public boolean writePrimitive(Object val) throws IOException {
if (val == null) {
daos.writeByte(NULL);
return true;
} else if (val instanceof String) {
writeStr((String) val);
return true;
} else if (val instanceof Number) {
if (val instanceof Integer) {
writeInt(((Integer) val).intValue());
return true;
} else if (val instanceof Long) {
writeLong(((Long) val).longValue());
return true;
} else if (val instanceof Float) {
writeFloat(((Float) val).floatValue());
return true;
} else if (val instanceof Double) {
daos.writeByte(DOUBLE);
daos.writeDouble(((Double) val).doubleValue());
return true;
} else if (val instanceof Byte) {
daos.writeByte(BYTE);
daos.writeByte(((Byte) val).intValue());
return true;
} else if (val instanceof Short) {
daos.writeByte(SHORT);
daos.writeShort(((Short) val).intValue());
return true;
}
return false;
} else if (val instanceof Date) {
daos.writeByte(DATE);
daos.writeLong(((Date) val).getTime());
return true;
} else if (val instanceof Boolean) {
if ((Boolean) val) daos.writeByte(BOOL_TRUE);
else daos.writeByte(BOOL_FALSE);
return true;
} else if (val instanceof byte[]) {
writeByteArray((byte[]) val, 0, ((byte[]) val).length);
return true;
} else if (val instanceof ByteBuffer) {
ByteBuffer buf = (ByteBuffer) val;
writeByteArray(buf.array(),buf.position(),buf.limit() - buf.position());
return true;
} else if (val == END_OBJ) {
writeTag(END);
return true;
}
return false;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeMap(Map<?,?> val) throws IOException {
writeTag(MAP, val.size());
for (Map.Entry<?,?> entry : val.entrySet()) {
Object key = entry.getKey();
if (key instanceof String) {
writeExternString((String) key);
} else {
writeVal(key);
}
writeVal(entry.getValue());
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public int readSize(FastInputStream in) throws IOException {
int sz = tagByte & 0x1f;
if (sz == 0x1f) sz += readVInt(in);
return sz;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public static void writeVInt(int i, FastOutputStream out) throws IOException {
while ((i & ~0x7F) != 0) {
out.writeByte((byte) ((i & 0x7f) | 0x80));
i >>>= 7;
}
out.writeByte((byte) i);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public static int readVInt(FastInputStream in) throws IOException {
byte b = in.readByte();
int i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = in.readByte();
i |= (b & 0x7F) << shift;
}
return i;
}
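// Worked VInt example (illustrative): writeVInt() emits seven payload bits per
// byte, least-significant group first, setting the high bit of every byte that
// has a successor. For i = 300 (binary 1_0010_1100):
//
//   first byte: (300 & 0x7f) | 0x80 = 0xAC, then i >>>= 7 leaves 2
//   last byte:  0x02 (high bit clear, so the reader stops)
//
// readVInt() reassembles it: 0x2C | (0x02 << 7) = 300.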
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public static void writeVLong(long i, FastOutputStream out) throws IOException {
while ((i & ~0x7F) != 0) {
out.writeByte((byte) ((i & 0x7f) | 0x80));
i >>>= 7;
}
out.writeByte((byte) i);
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public static long readVLong(FastInputStream in) throws IOException {
byte b = in.readByte();
long i = b & 0x7F;
for (int shift = 7; (b & 0x80) != 0; shift += 7) {
b = in.readByte();
i |= (long) (b & 0x7F) << shift;
}
return i;
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public void writeExternString(String s) throws IOException {
if (s == null) {
writeTag(NULL);
return;
}
Integer idx = stringsMap == null ? null : stringsMap.get(s);
if (idx == null) idx = 0;
writeTag(EXTERN_STRING, idx);
if (idx == 0) {
writeStr(s);
if (stringsMap == null) stringsMap = new HashMap<String, Integer>();
stringsMap.put(s, ++stringsCount);
}
}
// in solrj/src/java/org/apache/solr/common/util/JavaBinCodec.java
public String readExternString(FastInputStream fis) throws IOException {
int idx = readSize(fis);
if (idx != 0) {// idx != 0 is the index of the extern string
return stringsList.get(idx - 1);
} else {// idx == 0 means it has a string value
String s = (String) readVal(fis);
if (stringsList == null) stringsList = new ArrayList<String>();
stringsList.add(s);
return s;
}
}
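// Illustrative trace (inferred from the two methods above) of the extern-string
// dictionary: the first occurrence of a string is written in full under an
// EXTERN_STRING tag with index 0, and both writer and reader append it to their
// dictionaries; every later occurrence is written as just its 1-based index.
//
//   writeExternString("id") -> tag EXTERN_STRING, idx 0, then the string "id";
//                              stringsMap is now {"id" -> 1}
//   writeExternString("id") -> tag EXTERN_STRING, idx 1 (no string body)
//
// Repeated field names in a large response therefore cost only a byte or two
// after their first use.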
// in solrj/src/java/org/apache/solr/common/util/DateUtil.java
public static Calendar formatDate(Date date, Calendar cal, Appendable out) throws IOException {
// using a StringBuilder for numbers can be nice since
// a temporary string isn't used (digits are appended directly
// to the builder's buffer)
StringBuilder sb = out instanceof StringBuilder ? (StringBuilder)out : new StringBuilder();
if (cal==null) cal = Calendar.getInstance(TimeZone.getTimeZone("GMT"), Locale.US);
cal.setTime(date);
int i = cal.get(Calendar.YEAR);
sb.append(i);
sb.append('-');
i = cal.get(Calendar.MONTH) + 1; // 0 based, so add 1
if (i<10) sb.append('0');
sb.append(i);
sb.append('-');
i=cal.get(Calendar.DAY_OF_MONTH);
if (i<10) sb.append('0');
sb.append(i);
sb.append('T');
i=cal.get(Calendar.HOUR_OF_DAY); // 24 hour time format
if (i<10) sb.append('0');
sb.append(i);
sb.append(':');
i=cal.get(Calendar.MINUTE);
if (i<10) sb.append('0');
sb.append(i);
sb.append(':');
i=cal.get(Calendar.SECOND);
if (i<10) sb.append('0');
sb.append(i);
i=cal.get(Calendar.MILLISECOND);
if (i != 0) {
sb.append('.');
if (i<100) sb.append('0');
if (i<10) sb.append('0');
sb.append(i);
// handle canonical format specifying fractional
// seconds shall not end in '0'. Given the slowness of
// integer div/mod, simply checking the last character
// is probably the fastest way to check.
int lastIdx = sb.length()-1;
if (sb.charAt(lastIdx)=='0') {
lastIdx--;
if (sb.charAt(lastIdx)=='0') {
lastIdx--;
}
sb.setLength(lastIdx+1);
}
}
sb.append('Z');
if (out != sb)
out.append(sb);
return cal;
}
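// Hypothetical usage of formatDate() above: the output is Solr's canonical
// ISO-8601 form in UTC, with trailing zeros trimmed from fractional seconds
// and the '.' omitted entirely when the millisecond field is zero.
import java.util.Date;
import org.apache.solr.common.util.DateUtil;

public class FormatDateExample {
  public static void main(String[] args) throws java.io.IOException {
    StringBuilder out = new StringBuilder();
    DateUtil.formatDate(new Date(0L), null, out); // null -> GMT calendar
    System.out.println(out); // prints: 1970-01-01T00:00:00Z
  }
}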
// in solrj/src/java/org/apache/solr/client/solrj/request/DirectXmlRequest.java
@Override
public UpdateResponse process( SolrServer server ) throws SolrServerException, IOException
{
long startTime = System.currentTimeMillis();
UpdateResponse res = new UpdateResponse();
res.setResponse( server.request( this ) );
res.setElapsedTime( System.currentTimeMillis()-startTime );
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/SolrPing.java
@Override
public SolrPingResponse process( SolrServer server ) throws SolrServerException, IOException
{
long startTime = System.currentTimeMillis();
SolrPingResponse res = new SolrPingResponse();
res.setResponse( server.request( this ) );
res.setElapsedTime( System.currentTimeMillis()-startTime );
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/AbstractUpdateRequest.java
@Override
public UpdateResponse process( SolrServer server ) throws SolrServerException, IOException
{
long startTime = System.currentTimeMillis();
UpdateResponse res = new UpdateResponse();
res.setResponse( server.request( this ) );
res.setElapsedTime( System.currentTimeMillis()-startTime );
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java
@Override
public Collection<ContentStream> getContentStreams() throws IOException {
return contentStreams;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/ContentStreamUpdateRequest.java
public void addFile(File file, String contentType) throws IOException {
ContentStreamBase cs = new ContentStreamBase.FileStream(file);
cs.setContentType(contentType);
addContentStream(cs);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java
@Override
public Collection<ContentStream> getContentStreams() throws IOException {
return null;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/FieldAnalysisRequest.java
@Override
public FieldAnalysisResponse process(SolrServer server) throws SolrServerException, IOException {
if (fieldTypes == null && fieldNames == null) {
throw new IllegalStateException("At least one field type or field name need to be specified");
}
if (fieldValue == null) {
throw new IllegalStateException("The field value must be set");
}
long startTime = System.currentTimeMillis();
FieldAnalysisResponse res = new FieldAnalysisResponse();
res.setResponse(server.request(this));
res.setElapsedTime(System.currentTimeMillis() - startTime);
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
public Collection<ContentStream> getContentStreams(SolrRequest req) throws IOException {
if (req instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) req;
if (isEmpty(updateRequest)) return null;
List<ContentStream> l = new ArrayList<ContentStream>();
l.add(new LazyContentStream(updateRequest));
return l;
}
return req.getContentStreams();
}
// in solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
public ContentStream getContentStream(UpdateRequest req) throws IOException {
return new ContentStreamBase.StringStream(req.getXML());
}
// in solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
public void write(SolrRequest request, OutputStream os) throws IOException {
if (request instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request;
OutputStreamWriter writer = new OutputStreamWriter(os, UTF_8);
updateRequest.writeXML(writer);
writer.flush();
}
}
// in solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
public InputStream getStream() throws IOException {
return getDelegate().getStream();
}
// in solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
public Reader getReader() throws IOException {
return getDelegate().getReader();
}
// in solrj/src/java/org/apache/solr/client/solrj/request/RequestWriter.java
public void writeTo(OutputStream os) throws IOException {
write(req, os);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java
@Override
public Collection<ContentStream> getContentStreams() throws IOException {
return ClientUtils.toContentStreams( getXML(), ClientUtils.TEXT_XML );
}
// in solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java
public String getXML() throws IOException {
StringWriter writer = new StringWriter();
writeXML( writer );
writer.flush();
// If action is COMMIT or OPTIMIZE, it is sent with params
String xml = writer.toString();
//System.out.println( "SEND:"+xml );
return (xml.length() > 0) ? xml : null;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequest.java
public void writeXML( Writer writer ) throws IOException {
if( (documents != null && documents.size() > 0) || docIterator != null) {
if( commitWithin > 0 ) {
writer.write("<add commitWithin=\""+commitWithin+"\">");
}
else {
writer.write("<add>");
}
if(documents != null) {
for (SolrInputDocument doc : documents) {
if (doc != null) {
ClientUtils.writeXML(doc, writer);
}
}
}
if (docIterator != null) {
while (docIterator.hasNext()) {
SolrInputDocument doc = docIterator.next();
if (doc != null) {
ClientUtils.writeXML(doc, writer);
}
}
}
writer.write("</add>");
}
// Add the delete commands
boolean deleteI = deleteById != null && deleteById.size() > 0;
boolean deleteQ = deleteQuery != null && deleteQuery.size() > 0;
if( deleteI || deleteQ ) {
if(commitWithin>0) {
writer.append( "<delete commitWithin=\"" + commitWithin + "\">" );
} else {
writer.append( "<delete>" );
}
if( deleteI ) {
for( String id : deleteById ) {
writer.append( "<id>" );
XML.escapeCharData( id, writer );
writer.append( "</id>" );
}
}
if( deleteQ ) {
for( String q : deleteQuery ) {
writer.append( "<query>" );
XML.escapeCharData( q, writer );
writer.append( "</query>" );
}
}
writer.append( "</delete>" );
}
}
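// Hedged sketch of the XML that writeXML() above produces for a request
// carrying one document and one delete-by-id, with commitWithin set to 1000
// (the per-document markup comes from ClientUtils.writeXML and is elided):
//
//   <add commitWithin="1000"><doc>...</doc></add><delete commitWithin="1000"><id>doc1</id></delete>
//
// Ids and queries are passed through XML.escapeCharData, so reserved XML
// characters in them are emitted safely.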
// in solrj/src/java/org/apache/solr/client/solrj/request/LukeRequest.java
@Override
public LukeResponse process( SolrServer server ) throws SolrServerException, IOException
{
long startTime = System.currentTimeMillis();
LukeResponse res = new LukeResponse();
res.setResponse( server.request( this ) );
res.setElapsedTime( System.currentTimeMillis()-startTime );
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequestExt.java
@Override
public Collection<ContentStream> getContentStreams() throws IOException {
return ClientUtils.toContentStreams(getXML(), ClientUtils.TEXT_XML);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequestExt.java
public String getXML() throws IOException {
StringWriter writer = new StringWriter();
writeXML(writer);
writer.flush();
String xml = writer.toString();
return (xml.length() > 0) ? xml : null;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/UpdateRequestExt.java
public void writeXML(Writer writer) throws IOException {
List<List<SolrDoc>> getDocLists = getDocLists(documents);
for (List<SolrDoc> docs : getDocLists) {
if ((docs != null && docs.size() > 0)) {
SolrDoc firstDoc = docs.get(0);
int commitWithin = firstDoc.commitWithin != -1 ? firstDoc.commitWithin : this.commitWithin;
boolean overwrite = firstDoc.overwrite;
if (commitWithin > -1 || !overwrite) {
writer.write("<add commitWithin=\"" + commitWithin + "\" " + "overwrite=\"" + overwrite + "\">");
} else {
writer.write("<add>");
}
if (documents != null) {
for (SolrDoc doc : documents) {
if (doc != null) {
ClientUtils.writeXML(doc.document, writer);
}
}
}
writer.write("</add>");
}
}
// Add the delete commands
boolean deleteI = deleteById != null && deleteById.size() > 0;
boolean deleteQ = deleteQuery != null && deleteQuery.size() > 0;
if (deleteI || deleteQ) {
writer.append("<delete>");
if (deleteI) {
for (Map.Entry<String,Long> entry : deleteById.entrySet()) {
writer.append("<id");
Long version = entry.getValue();
if (version != null) {
writer.append(" version=\"" + version + "\"");
}
writer.append(">");
XML.escapeCharData(entry.getKey(), writer);
writer.append("</id>");
}
}
if (deleteQ) {
for (String q : deleteQuery) {
writer.append("<query>");
XML.escapeCharData(q, writer);
writer.append("</query>");
}
}
writer.append("</delete>");
}
}
// in solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java
@Override
public Collection<ContentStream> getContentStreams() throws IOException {
return ClientUtils.toContentStreams(getXML(), ClientUtils.TEXT_XML);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java
@Override
public DocumentAnalysisResponse process(SolrServer server) throws SolrServerException, IOException {
long startTime = System.currentTimeMillis();
DocumentAnalysisResponse res = new DocumentAnalysisResponse();
res.setResponse(server.request(this));
res.setElapsedTime(System.currentTimeMillis() - startTime);
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/DocumentAnalysisRequest.java
String getXML() throws IOException {
StringWriter writer = new StringWriter();
writer.write("<docs>");
for (SolrInputDocument document : documents) {
ClientUtils.writeXML(document, writer);
}
writer.write("</docs>");
writer.flush();
String xml = writer.toString();
return (xml.length() > 0) ? xml : null;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
public void marshal(UpdateRequest updateRequest, OutputStream os) throws IOException {
NamedList nl = new NamedList();
NamedList params = solrParamsToNamedList(updateRequest.getParams());
if (updateRequest.getCommitWithin() != -1) {
params.add("commitWithin", updateRequest.getCommitWithin());
}
Iterator<SolrInputDocument> docIter = null;
if (updateRequest.getDocuments() != null) {
docIter = updateRequest.getDocuments().iterator();
}
if(updateRequest.getDocIterator() != null){
docIter = updateRequest.getDocIterator();
}
nl.add("params", params);// 0: params
nl.add("delById", updateRequest.getDeleteById());
nl.add("delByQ", updateRequest.getDeleteQuery());
nl.add("docs", docIter);
JavaBinCodec codec = new JavaBinCodec();
codec.marshal(nl, os);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
public UpdateRequest unmarshal(InputStream is, final StreamingUpdateHandler handler) throws IOException {
final UpdateRequest updateRequest = new UpdateRequest();
List<List<NamedList>> doclist;
List<String> delById;
List<String> delByQ;
final NamedList[] namedList = new NamedList[1];
JavaBinCodec codec = new JavaBinCodec() {
// NOTE: this only works because this is an anonymous inner class
// which will only ever be used on a single stream -- if this class
// is ever refactored, this will not work.
private boolean seenOuterMostDocIterator = false;
@Override
public NamedList readNamedList(FastInputStream dis) throws IOException {
int sz = readSize(dis);
NamedList nl = new NamedList();
if (namedList[0] == null) {
namedList[0] = nl;
}
for (int i = 0; i < sz; i++) {
String name = (String) readVal(dis);
Object val = readVal(dis);
nl.add(name, val);
}
return nl;
}
@Override
public List readIterator(FastInputStream fis) throws IOException {
// default behavior for reading any regular Iterator in the stream
if (seenOuterMostDocIterator) return super.readIterator(fis);
// special treatment for first outermost Iterator
// (the list of documents)
seenOuterMostDocIterator = true;
return readOuterMostDocIterator(fis);
}
private List readOuterMostDocIterator(FastInputStream fis) throws IOException {
NamedList params = (NamedList) namedList[0].getVal(0);
updateRequest.setParams(new ModifiableSolrParams(SolrParams.toSolrParams(params)));
if (handler == null) return super.readIterator(fis);
while (true) {
Object o = readVal(fis);
if (o == END_OBJ) break;
SolrInputDocument sdoc = null;
if (o instanceof List) {
sdoc = listToSolrInputDocument((List<NamedList>) o);
} else if (o instanceof NamedList) {
UpdateRequest req = new UpdateRequest();
req.setParams(new ModifiableSolrParams(SolrParams.toSolrParams((NamedList) o)));
handler.update(null, req);
} else {
sdoc = (SolrInputDocument) o;
}
handler.update(sdoc, updateRequest);
}
return Collections.EMPTY_LIST;
}
};
codec.unmarshal(is);
// NOTE: if the update request contains only delete commands the params
// must be loaded now
if(updateRequest.getParams()==null) {
NamedList params = (NamedList) namedList[0].get("params");
if(params!=null) {
updateRequest.setParams(new ModifiableSolrParams(SolrParams.toSolrParams(params)));
}
}
delById = (List<String>) namedList[0].get("delById");
delByQ = (List<String>) namedList[0].get("delByQ");
doclist = (List) namedList[0].get("docs");
if (doclist != null && !doclist.isEmpty()) {
List<SolrInputDocument> solrInputDocs = new ArrayList<SolrInputDocument>();
for (Object o : doclist) {
if (o instanceof List) {
solrInputDocs.add(listToSolrInputDocument((List<NamedList>)o));
} else {
solrInputDocs.add((SolrInputDocument)o);
}
}
updateRequest.add(solrInputDocs);
}
if (delById != null) {
for (String s : delById) {
updateRequest.deleteById(s);
}
}
if (delByQ != null) {
for (String s : delByQ) {
updateRequest.deleteByQuery(s);
}
}
return updateRequest;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
@Override
public NamedList readNamedList(FastInputStream dis) throws IOException {
int sz = readSize(dis);
NamedList nl = new NamedList();
if (namedList[0] == null) {
namedList[0] = nl;
}
for (int i = 0; i < sz; i++) {
String name = (String) readVal(dis);
Object val = readVal(dis);
nl.add(name, val);
}
return nl;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
@Override
public List readIterator(FastInputStream fis) throws IOException {
// default behavior for reading any regular Iterator in the stream
if (seenOuterMostDocIterator) return super.readIterator(fis);
// special treatment for first outermost Iterator
// (the list of documents)
seenOuterMostDocIterator = true;
return readOuterMostDocIterator(fis);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/JavaBinUpdateRequestCodec.java
private List readOuterMostDocIterator(FastInputStream fis) throws IOException {
NamedList params = (NamedList) namedList[0].getVal(0);
updateRequest.setParams(new ModifiableSolrParams(SolrParams.toSolrParams(params)));
if (handler == null) return super.readIterator(fis);
while (true) {
Object o = readVal(fis);
if (o == END_OBJ) break;
SolrInputDocument sdoc = null;
if (o instanceof List) {
sdoc = listToSolrInputDocument((List<NamedList>) o);
} else if (o instanceof NamedList) {
UpdateRequest req = new UpdateRequest();
req.setParams(new ModifiableSolrParams(SolrParams.toSolrParams((NamedList) o)));
handler.update(null, req);
} else {
sdoc = (SolrInputDocument) o;
}
handler.update(sdoc, updateRequest);
}
return Collections.EMPTY_LIST;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
@Override
public Collection<ContentStream> getContentStreams() throws IOException {
return null;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
@Override
public CoreAdminResponse process(SolrServer server) throws SolrServerException, IOException
{
long startTime = System.currentTimeMillis();
CoreAdminResponse res = new CoreAdminResponse();
res.setResponse( server.request( this ) );
res.setElapsedTime( System.currentTimeMillis()-startTime );
return res;
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse reloadCore( String name, SolrServer server ) throws SolrServerException, IOException
{
CoreAdminRequest req = new CoreAdminRequest();
req.setCoreName( name );
req.setAction( CoreAdminAction.RELOAD );
return req.process( server );
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse unloadCore( String name, SolrServer server ) throws SolrServerException, IOException
{
return unloadCore(name, false, server);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse unloadCore( String name, boolean deleteIndex, SolrServer server ) throws SolrServerException, IOException
{
Unload req = new Unload(deleteIndex);
req.setCoreName( name );
return req.process( server );
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse renameCore(String coreName, String newName, SolrServer server ) throws SolrServerException, IOException
{
CoreAdminRequest req = new CoreAdminRequest();
req.setCoreName(coreName);
req.setOtherCoreName(newName);
req.setAction( CoreAdminAction.RENAME );
return req.process( server );
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse getStatus( String name, SolrServer server ) throws SolrServerException, IOException
{
CoreAdminRequest req = new CoreAdminRequest();
req.setCoreName( name );
req.setAction( CoreAdminAction.STATUS );
return req.process( server );
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse createCore( String name, String instanceDir, SolrServer server ) throws SolrServerException, IOException
{
return CoreAdminRequest.createCore(name, instanceDir, server, null, null);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse createCore( String name, String instanceDir, SolrServer server, String configFile, String schemaFile ) throws SolrServerException, IOException
{
CoreAdminRequest.Create req = new CoreAdminRequest.Create();
req.setCoreName( name );
req.setInstanceDir(instanceDir);
if(configFile != null){
req.setConfigName(configFile);
}
if(schemaFile != null){
req.setSchemaName(schemaFile);
}
return req.process( server );
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse persist(String fileName, SolrServer server) throws SolrServerException, IOException
{
CoreAdminRequest.Persist req = new CoreAdminRequest.Persist();
req.setFileName(fileName);
return req.process(server);
}
// in solrj/src/java/org/apache/solr/client/solrj/request/CoreAdminRequest.java
public static CoreAdminResponse mergeIndexes(String name,
String[] indexDirs, String[] srcCores, SolrServer server) throws SolrServerException,
IOException {
CoreAdminRequest.MergeIndexes req = new CoreAdminRequest.MergeIndexes();
req.setCoreName(name);
req.setIndexDirs(Arrays.asList(indexDirs));
req.setSrcCores(Arrays.asList(srcCores));
return req.process(server);
}
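// Hypothetical usage of the CoreAdminRequest convenience helpers above; the
// server URL and core name are illustrative.
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.client.solrj.response.CoreAdminResponse;

public class CoreAdminExample {
  public static void main(String[] args) throws Exception {
    SolrServer admin = new HttpSolrServer("http://localhost:8983/solr");
    CoreAdminResponse status = CoreAdminRequest.getStatus("collection1", admin);
    System.out.println(status.getResponse());
    CoreAdminRequest.reloadCore("collection1", admin);
  }
}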
// in solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java
public void run() {
runnerLock.lock();
// info is ok since this should only happen once for each thread
log.info("starting runner: {}", this);
HttpPost method = null;
HttpResponse response = null;
try {
while (!queue.isEmpty()) {
try {
final UpdateRequest updateRequest = queue.poll(250,
TimeUnit.MILLISECONDS);
if (updateRequest == null)
break;
String contentType = server.requestWriter.getUpdateContentType();
final boolean isXml = ClientUtils.TEXT_XML.equals(contentType);
final ModifiableSolrParams origParams = new ModifiableSolrParams(updateRequest.getParams());
EntityTemplate template = new EntityTemplate(new ContentProducer() {
public void writeTo(OutputStream out) throws IOException {
try {
if (isXml) {
out.write("<stream>".getBytes("UTF-8")); // can be anything
}
UpdateRequest req = updateRequest;
while (req != null) {
SolrParams currentParams = new ModifiableSolrParams(req.getParams());
if (!origParams.toNamedList().equals(currentParams.toNamedList())) {
queue.add(req); // params are different, push back to queue
break;
}
server.requestWriter.write(req, out);
if (isXml) {
// check for commit or optimize
SolrParams params = req.getParams();
if (params != null) {
String fmt = null;
if (params.getBool(UpdateParams.OPTIMIZE, false)) {
fmt = "<optimize waitSearcher=\"%s\" waitFlush=\"%s\" />";
} else if (params.getBool(UpdateParams.COMMIT, false)) {
fmt = "<commit waitSearcher=\"%s\" waitFlush=\"%s\" />";
}
if (fmt != null) {
byte[] content = String.format(fmt,
params.getBool(UpdateParams.WAIT_SEARCHER, false) + "",
params.getBool(UpdateParams.WAIT_FLUSH, false) + "").getBytes("UTF-8");
out.write(content);
}
}
}
out.flush();
req = queue.poll(250, TimeUnit.MILLISECONDS);
}
if (isXml) {
out.write("</stream>".getBytes("UTF-8"));
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
});
// The parser 'wt=' and 'version=' params are used instead of the
// original params
ModifiableSolrParams requestParams = new ModifiableSolrParams(origParams);
requestParams.set(CommonParams.WT, server.parser.getWriterType());
requestParams.set(CommonParams.VERSION, server.parser.getVersion());
method = new HttpPost(server.getBaseURL() + "/update"
+ ClientUtils.toQueryString(requestParams, false));
method.setEntity(template);
method.addHeader("User-Agent", HttpSolrServer.AGENT);
method.addHeader("Content-Type", contentType);
response = server.getHttpClient().execute(method);
int statusCode = response.getStatusLine().getStatusCode();
log.info("Status for: "
+ updateRequest.getDocuments().get(0).getFieldValue("id")
+ " is " + statusCode);
if (statusCode != HttpStatus.SC_OK) {
StringBuilder msg = new StringBuilder();
msg.append(response.getStatusLine().getReasonPhrase());
msg.append("\n\n");
msg.append("\n\n");
msg.append("request: ").append(method.getURI());
handleError(new Exception(msg.toString()));
}
} finally {
try {
if (response != null) {
response.getEntity().getContent().close();
}
} catch (Exception ex) {
  // ignore: failing to drain/close the response body is non-fatal here
}
}
}
} catch (Throwable e) {
handleError(e);
} finally {
// remove it from the list of running things unless we are the last
// runner and the queue is full...
// in which case, the next queue.put() would block and there would be no
// runners to handle it.
// This case has been further handled by using offer instead of put, and
// using a retry loop
// to avoid blocking forever (see request()).
synchronized (runners) {
if (runners.size() == 1 && queue.remainingCapacity() == 0) {
// keep this runner alive
scheduler.execute(this);
} else {
runners.remove(this);
}
}
log.info("finished: {}", this);
runnerLock.unlock();
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java
public void writeTo(OutputStream out) throws IOException {
try {
if (isXml) {
out.write("<stream>".getBytes("UTF-8")); // can be anything
}
UpdateRequest req = updateRequest;
while (req != null) {
SolrParams currentParams = new ModifiableSolrParams(req.getParams());
if (!origParams.toNamedList().equals(currentParams.toNamedList())) {
queue.add(req); // params are different, push back to queue
break;
}
server.requestWriter.write(req, out);
if (isXml) {
// check for commit or optimize
SolrParams params = req.getParams();
if (params != null) {
String fmt = null;
if (params.getBool(UpdateParams.OPTIMIZE, false)) {
fmt = "<optimize waitSearcher=\"%s\" waitFlush=\"%s\" />";
} else if (params.getBool(UpdateParams.COMMIT, false)) {
fmt = "<commit waitSearcher=\"%s\" waitFlush=\"%s\" />";
}
if (fmt != null) {
  // fmt has two %s placeholders, so both waitSearcher and waitFlush must be supplied
  byte[] content = String.format(fmt,
      params.getBool(UpdateParams.WAIT_SEARCHER, false) + "",
      params.getBool(UpdateParams.WAIT_FLUSH, false) + "").getBytes("UTF-8");
  out.write(content);
}
}
}
out.flush();
req = queue.poll(250, TimeUnit.MILLISECONDS);
}
if (isXml) {
out.write("</stream>".getBytes("UTF-8"));
}
} catch (InterruptedException e) {
e.printStackTrace();
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/ConcurrentUpdateSolrServer.java
public NamedList<Object> request(final SolrRequest request)
throws SolrServerException, IOException {
if (!(request instanceof UpdateRequest)) {
return server.request(request);
}
UpdateRequest req = (UpdateRequest) request;
// this happens for commit...
if (req.getDocuments() == null || req.getDocuments().isEmpty()) {
blockUntilFinished();
return server.request(request);
}
SolrParams params = req.getParams();
if (params != null) {
// check if it is waiting for the searcher
if (params.getBool(UpdateParams.WAIT_SEARCHER, false)) {
log.info("blocking for commit/optimize");
blockUntilFinished(); // empty the queue
return server.request(request);
}
}
try {
CountDownLatch tmpLock = lock;
if (tmpLock != null) {
tmpLock.await();
}
boolean success = queue.offer(req);
for (;;) {
synchronized (runners) {
// start a new runner if there are none, or if the queue is more than
// half full and we can still add more runners
if (runners.isEmpty()
    || (queue.remainingCapacity() < queue.size() && runners.size() < threadCount)) {
// We need more runners, so start a new one.
Runner r = new Runner();
runners.add(r);
scheduler.execute(r);
} else {
  // break out of the retry loop if we added the element to the queue
  // successfully, *and* while we are still holding the runners lock
  // to prevent race conditions.
if (success)
break;
}
}
// Retry to add to the queue w/o the runners lock held (else we risk
// temporary deadlock)
// This retry could also fail because
// 1) existing runners were not able to take off any new elements in the
// queue
// 2) the queue was filled back up since our last try
// If we succeed, the queue may have been completely emptied, and all
// runners stopped.
// In all cases, we should loop back to the top to see if we need to
// start more runners.
//
if (!success) {
success = queue.offer(req, 100, TimeUnit.MILLISECONDS);
}
}
} catch (InterruptedException e) {
log.error("interrupted", e);
throw new IOException(e.getLocalizedMessage());
}
// RETURN A DUMMY result
NamedList<Object> dummy = new NamedList<Object>();
dummy.add("NOTE", "the request is processed in a background stream");
return dummy;
}
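// Usage sketch (illustrative addition): documents added through
// ConcurrentUpdateSolrServer are queued and streamed by the Runner threads
// above, so request(...) returns the dummy NamedList rather than a real
// per-document status. Queue size and thread count here are arbitrary.
public static void concurrentUpdateExample() throws SolrServerException, IOException {
  ConcurrentUpdateSolrServer server =
      new ConcurrentUpdateSolrServer("http://localhost:8983/solr", 100, 4);
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "doc-1");
  server.add(doc);             // returns immediately; a Runner streams it
  server.blockUntilFinished(); // drain the queue before trusting results
  server.commit();
}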
// in solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
@Override
public Collection<ContentStream> getContentStreams(SolrRequest req) throws IOException {
if (req instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) req;
if (isNull(updateRequest.getDocuments()) &&
isNull(updateRequest.getDeleteById()) &&
isNull(updateRequest.getDeleteQuery())
&& (updateRequest.getDocIterator() == null) ) {
return null;
}
List<ContentStream> l = new ArrayList<ContentStream>();
l.add(new LazyContentStream(updateRequest));
return l;
} else {
return super.getContentStreams(req);
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
@Override
public ContentStream getContentStream(final UpdateRequest request) throws IOException {
final BAOS baos = new BAOS();
new JavaBinUpdateRequestCodec().marshal(request, baos);
return new ContentStream() {
public String getName() {
return null;
}
public String getSourceInfo() {
return "javabin";
}
public String getContentType() {
return "application/javabin";
}
public Long getSize() // size if we know it, otherwise null
{
return Long.valueOf(baos.size());
}
public InputStream getStream() throws IOException {
return new ByteArrayInputStream(baos.getbuf(), 0, baos.size());
}
public Reader getReader() throws IOException {
throw new RuntimeException("No reader available . this is a binarystream");
}
};
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
public InputStream getStream() throws IOException {
return new ByteArrayInputStream(baos.getbuf(), 0, baos.size());
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
public Reader getReader() throws IOException {
throw new RuntimeException("No reader available . this is a binarystream");
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/BinaryRequestWriter.java
@Override
public void write(SolrRequest request, OutputStream os) throws IOException {
if (request instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request;
new JavaBinUpdateRequestCodec().marshal(updateRequest, os);
}
}
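// Usage sketch (illustrative addition): installing BinaryRequestWriter on an
// HttpSolrServer so updates are marshalled with the javabin codec above
// instead of XML.
public static void binaryWriterExample() throws SolrServerException, IOException {
  HttpSolrServer server = new HttpSolrServer("http://localhost:8983/solr");
  server.setRequestWriter(new BinaryRequestWriter());
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "doc-1");
  server.add(doc); // sent as application/javabin
}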
// in solrj/src/java/org/apache/solr/client/solrj/impl/CloudSolrServer.java
@Override
public NamedList<Object> request(SolrRequest request) throws SolrServerException, IOException {
connect();
// TODO: if you can hash here, you could favor the shard leader
CloudState cloudState = zkStateReader.getCloudState();
SolrParams reqParams = request.getParams();
if (reqParams == null) {
reqParams = new ModifiableSolrParams();
}
String collection = reqParams.get("collection", defaultCollection);
if (collection == null) {
throw new SolrServerException("No collection param specified on request and no default collection has been set.");
}
// Extract each comma separated collection name and store in a List.
List<String> collectionList = StrUtils.splitSmart(collection, ",", true);
// Retrieve slices from the cloud state and, for each collection specified,
// add it to the Map of slices.
Map<String,Slice> slices = new HashMap<String,Slice>();
for (int i = 0; i < collectionList.size(); i++) {
String coll= collectionList.get(i);
ClientUtils.appendMap(coll, slices, cloudState.getSlices(coll));
}
Set<String> liveNodes = cloudState.getLiveNodes();
// IDEA: have versions on various things... like a global cloudState version
// or shardAddressVersion (which only changes when the shards change)
// to allow caching.
// build a map of unique nodes
// TODO: allow filtering by group, role, etc
Map<String,ZkNodeProps> nodes = new HashMap<String,ZkNodeProps>();
List<String> urlList = new ArrayList<String>();
for (Slice slice : slices.values()) {
for (ZkNodeProps nodeProps : slice.getShards().values()) {
ZkCoreNodeProps coreNodeProps = new ZkCoreNodeProps(nodeProps);
String node = coreNodeProps.getNodeName();
if (!liveNodes.contains(coreNodeProps.getNodeName())
|| !coreNodeProps.getState().equals(
ZkStateReader.ACTIVE)) continue;
if (nodes.put(node, nodeProps) == null) {
String url = coreNodeProps.getCoreUrl();
urlList.add(url);
}
}
}
Collections.shuffle(urlList, rand);
//System.out.println("########################## MAKING REQUEST TO " + urlList);
LBHttpSolrServer.Req req = new LBHttpSolrServer.Req(request, urlList);
LBHttpSolrServer.Rsp rsp = lbServer.request(req);
return rsp.getResponse();
}
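// Usage sketch (illustrative addition): request(...) above resolves live,
// active shard URLs from the cloud state and load-balances across them; the
// ZooKeeper address and collection name are hypothetical.
public static void cloudQueryExample() throws Exception {
  CloudSolrServer server = new CloudSolrServer("zkhost1:2181,zkhost2:2181");
  server.setDefaultCollection("collection1");
  QueryResponse rsp = server.query(new SolrQuery("*:*"));
  System.out.println("numFound: " + rsp.getResults().getNumFound());
}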
// in solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java
@Override
public NamedList<Object> processResponse(InputStream body, String encoding) {
try {
JavaBinCodec codec = new JavaBinCodec() {
@Override
public SolrDocument readSolrDocument(FastInputStream dis) throws IOException {
SolrDocument doc = super.readSolrDocument(dis);
callback.streamSolrDocument( doc );
return null;
}
@Override
public SolrDocumentList readSolrDocumentList(FastInputStream dis) throws IOException {
SolrDocumentList solrDocs = new SolrDocumentList();
List list = (List) readVal(dis);
solrDocs.setNumFound((Long) list.get(0));
solrDocs.setStart((Long) list.get(1));
solrDocs.setMaxScore((Float) list.get(2));
callback.streamDocListInfo(
solrDocs.getNumFound(),
solrDocs.getStart(),
solrDocs.getMaxScore() );
// Read the Array
tagByte = dis.readByte();
if( (tagByte >>> 5) != (ARR >>> 5) ) {
throw new RuntimeException( "doclist must have an array" );
}
int sz = readSize(dis);
for (int i = 0; i < sz; i++) {
// must be a SolrDocument
readVal( dis );
}
return solrDocs;
}
};
return (NamedList<Object>) codec.unmarshal(body);
}
catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "parsing error", e);
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java
@Override
public SolrDocument readSolrDocument(FastInputStream dis) throws IOException {
SolrDocument doc = super.readSolrDocument(dis);
callback.streamSolrDocument( doc );
return null;
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/StreamingBinaryResponseParser.java
@Override
public SolrDocumentList readSolrDocumentList(FastInputStream dis) throws IOException {
SolrDocumentList solrDocs = new SolrDocumentList();
List list = (List) readVal(dis);
solrDocs.setNumFound((Long) list.get(0));
solrDocs.setStart((Long) list.get(1));
solrDocs.setMaxScore((Float) list.get(2));
callback.streamDocListInfo(
solrDocs.getNumFound(),
solrDocs.getStart(),
solrDocs.getMaxScore() );
// Read the Array
tagByte = dis.readByte();
if( (tagByte >>> 5) != (ARR >>> 5) ) {
throw new RuntimeException( "doclist must have an array" );
}
int sz = readSize(dis);
for (int i = 0; i < sz; i++) {
// must be a SolrDocument
readVal( dis );
}
return solrDocs;
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpClientUtil.java
@Override
public void process(HttpRequest request, HttpContext context)
throws HttpException, IOException {
if (!request.containsHeader("Accept-Encoding")) {
request.addHeader("Accept-Encoding", "gzip, deflate");
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpClientUtil.java
public void process(final HttpResponse response, final HttpContext context)
throws HttpException, IOException {
HttpEntity entity = response.getEntity();
Header ceheader = entity.getContentEncoding();
if (ceheader != null) {
HeaderElement[] codecs = ceheader.getElements();
for (int i = 0; i < codecs.length; i++) {
if (codecs[i].getName().equalsIgnoreCase("gzip")) {
response
.setEntity(new GzipDecompressingEntity(response.getEntity()));
return;
}
if (codecs[i].getName().equalsIgnoreCase("deflate")) {
response.setEntity(new DeflateDecompressingEntity(response
.getEntity()));
return;
}
}
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpClientUtil.java
public InputStream getContent() throws IOException, IllegalStateException {
return new GZIPInputStream(wrappedEntity.getContent());
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpClientUtil.java
public InputStream getContent() throws IOException, IllegalStateException {
return new InflaterInputStream(wrappedEntity.getContent());
}
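// Usage sketch (illustrative addition, assuming PROP_ALLOW_COMPRESSION is the
// switch that registers the gzip/deflate interceptors above): building a
// client whose responses are transparently decompressed.
public static HttpClient compressingClientExample() {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set(HttpClientUtil.PROP_ALLOW_COMPRESSION, true);
  return HttpClientUtil.createClient(params);
}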
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
@Override
public NamedList<Object> request(final SolrRequest request)
throws SolrServerException, IOException {
ResponseParser responseParser = request.getResponseParser();
if (responseParser == null) {
responseParser = parser;
}
return request(request, responseParser);
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
public NamedList<Object> request(final SolrRequest request,
final ResponseParser processor) throws SolrServerException, IOException {
HttpRequestBase method = null;
InputStream is = null;
SolrParams params = request.getParams();
Collection<ContentStream> streams = requestWriter.getContentStreams(request);
String path = requestWriter.getPath(request);
if (path == null || !path.startsWith("/")) {
path = DEFAULT_PATH;
}
ResponseParser parser = request.getResponseParser();
if (parser == null) {
parser = this.parser;
}
// The parser 'wt=' and 'version=' params are used instead of the original
// params
ModifiableSolrParams wparams = new ModifiableSolrParams(params);
wparams.set(CommonParams.WT, parser.getWriterType());
wparams.set(CommonParams.VERSION, parser.getVersion());
if (invariantParams != null) {
wparams.add(invariantParams);
}
params = wparams;
int tries = maxRetries + 1;
try {
while( tries-- > 0 ) {
// Note: since we aren't doing intermittent time keeping
// ourselves, the potential non-timeout latency could be as
// much as tries-times (plus scheduling effects) the given
// timeAllowed.
try {
if( SolrRequest.METHOD.GET == request.getMethod() ) {
if( streams != null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "GET can't send streams!" );
}
method = new HttpGet( baseUrl + path + ClientUtils.toQueryString( params, false ) );
}
else if( SolrRequest.METHOD.POST == request.getMethod() ) {
String url = baseUrl + path;
boolean isMultipart = ( streams != null && streams.size() > 1 );
LinkedList<NameValuePair> postParams = new LinkedList<NameValuePair>();
if (streams == null || isMultipart) {
HttpPost post = new HttpPost(url);
post.setHeader("Content-Charset", "UTF-8");
if (!this.useMultiPartPost && !isMultipart) {
post.addHeader("Content-Type",
"application/x-www-form-urlencoded; charset=UTF-8");
}
List<FormBodyPart> parts = new LinkedList<FormBodyPart>();
Iterator<String> iter = params.getParameterNamesIterator();
while (iter.hasNext()) {
String p = iter.next();
String[] vals = params.getParams(p);
if (vals != null) {
for (String v : vals) {
if (this.useMultiPartPost || isMultipart) {
parts.add(new FormBodyPart(p, new StringBody(v, Charset.forName("UTF-8"))));
} else {
postParams.add(new BasicNameValuePair(p, v));
}
}
}
}
if (isMultipart) {
for (ContentStream content : streams) {
String contentType = content.getContentType();
if(contentType==null) {
contentType = "application/octet-stream"; // default
}
parts.add(new FormBodyPart(content.getName(),
new InputStreamBody(
content.getStream(),
contentType,
content.getName())));
}
}
if (parts.size() > 0) {
MultipartEntity entity = new MultipartEntity(HttpMultipartMode.STRICT);
for(FormBodyPart p: parts) {
entity.addPart(p);
}
post.setEntity(entity);
} else {
//not using multipart
post.setEntity(new UrlEncodedFormEntity(postParams, "UTF-8"));
}
method = post;
}
// If it has exactly one stream, it is the POST body; put the params in the URL
else {
String pstr = ClientUtils.toQueryString(params, false);
HttpPost post = new HttpPost(url + pstr);
// Single stream as body
// Using a loop just to get the first one
final ContentStream[] contentStream = new ContentStream[1];
for (ContentStream content : streams) {
contentStream[0] = content;
break;
}
// Both branches were identical (LazyContentStream or not), so send the
// stream the same way in either case.
post.setEntity(new InputStreamEntity(contentStream[0].getStream(), -1) {
  @Override
  public Header getContentType() {
    return new BasicHeader("Content-Type", contentStream[0].getContentType());
  }
  @Override
  public boolean isRepeatable() {
    return false;
  }
});
method = post;
}
}
else {
throw new SolrServerException("Unsupported method: "+request.getMethod() );
}
}
catch( NoHttpResponseException r ) {
method = null;
if(is != null) {
is.close();
}
// If out of tries then just rethrow (as normal error).
if (tries < 1) {
throw r;
}
}
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
public UpdateResponse add(Iterator<SolrInputDocument> docIterator)
throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.setDocIterator(docIterator);
return req.process(this);
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/HttpSolrServer.java
public UpdateResponse addBeans(final Iterator<?> beanIterator)
throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.setDocIterator(new Iterator<SolrInputDocument>() {
public boolean hasNext() {
return beanIterator.hasNext();
}
public SolrInputDocument next() {
Object o = beanIterator.next();
if (o == null) return null;
return getBinder().toSolrInputDocument(o);
}
public void remove() {
beanIterator.remove();
}
});
return req.process(this);
}
// in solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java
public Rsp request(Req req) throws SolrServerException, IOException {
Rsp rsp = new Rsp();
Exception ex = null;
List<ServerWrapper> skipped = new ArrayList<ServerWrapper>(req.getNumDeadServersToTry());
for (String serverStr : req.getServers()) {
serverStr = normalize(serverStr);
// if the server is currently a zombie, just skip to the next one
ServerWrapper wrapper = zombieServers.get(serverStr);
if (wrapper != null) {
// System.out.println("ZOMBIE SERVER QUERIED: " + serverStr);
if (skipped.size() < req.getNumDeadServersToTry())
skipped.add(wrapper);
continue;
}
rsp.server = serverStr;
HttpSolrServer server = makeServer(serverStr);
try {
rsp.rsp = server.request(req.getRequest());
return rsp; // SUCCESS
} catch (SolrException e) {
// we retry on 404, 403, 503 or 500 - you can see this on solr shutdown
if (e.code() == 404 || e.code() == 403 || e.code() == 503 || e.code() == 500) {
ex = addZombie(server, e);
} else {
// Server is alive but the request was likely malformed or invalid
throw e;
}
// TODO: consider using below above - currently does cause a problem with distrib updates:
// seems to match up against a failed forward to leader exception as well...
// || e.getMessage().contains("java.net.SocketException")
// || e.getMessage().contains("java.net.ConnectException")
} catch (SocketException e) {
ex = addZombie(server, e);
} catch (SocketTimeoutException e) {
ex = addZombie(server, e);
} catch (SolrServerException e) {
Throwable rootCause = e.getRootCause();
if (rootCause instanceof IOException) {
ex = addZombie(server, e);
} else {
throw e;
}
} catch (Exception e) {
throw new SolrServerException(e);
}
}
// try the servers we previously skipped
for (ServerWrapper wrapper : skipped) {
try {
rsp.rsp = wrapper.solrServer.request(req.getRequest());
zombieServers.remove(wrapper.getKey());
return rsp; // SUCCESS
} catch (SolrException e) {
// we retry on 404, 403, 503 or 500 - you can see this on solr shutdown
if (e.code() == 404 || e.code() == 403 || e.code() == 503 || e.code() == 500) {
ex = e;
// already a zombie, no need to re-add
} else {
// Server is alive but the request was malformed or invalid
zombieServers.remove(wrapper.getKey());
throw e;
}
} catch (SocketException e) {
ex = e;
} catch (SocketTimeoutException e) {
ex = e;
} catch (SolrServerException e) {
Throwable rootCause = e.getRootCause();
if (rootCause instanceof IOException) {
ex = e;
// already a zombie, no need to re-add
} else {
throw e;
}
} catch (Exception e) {
throw new SolrServerException(e);
}
}
if (ex == null) {
throw new SolrServerException("No live SolrServers available to handle this request");
} else {
throw new SolrServerException("No live SolrServers available to handle this request:" + zombieServers.keySet(), ex);
}
}
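// Usage sketch (illustrative addition): the Req/Rsp variant above walks the
// supplied URLs in order, skipping known zombies and retrying them last; the
// server URLs are hypothetical.
public static void lbRequestExample() throws Exception {
  List<String> urls = Arrays.asList("http://host1:8983/solr", "http://host2:8983/solr");
  LBHttpSolrServer lb = new LBHttpSolrServer(urls.toArray(new String[0]));
  LBHttpSolrServer.Req req =
      new LBHttpSolrServer.Req(new QueryRequest(new SolrQuery("*:*")), urls);
  LBHttpSolrServer.Rsp rsp = lb.request(req);
  System.out.println("served by: " + rsp.getServer());
}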
// in solrj/src/java/org/apache/solr/client/solrj/impl/LBHttpSolrServer.java
@Override
public NamedList<Object> request(final SolrRequest request)
throws SolrServerException, IOException {
Exception ex = null;
ServerWrapper[] serverList = aliveServerList;
int maxTries = serverList.length;
Map<String,ServerWrapper> justFailed = null;
for (int attempts=0; attempts<maxTries; attempts++) {
int count = counter.incrementAndGet();
ServerWrapper wrapper = serverList[count % serverList.length];
wrapper.lastUsed = System.currentTimeMillis();
try {
return wrapper.solrServer.request(request);
} catch (SolrException e) {
// Server is alive but the request was malformed or invalid
throw e;
} catch (SolrServerException e) {
if (e.getRootCause() instanceof IOException) {
ex = e;
moveAliveToDead(wrapper);
if (justFailed == null) justFailed = new HashMap<String,ServerWrapper>();
justFailed.put(wrapper.getKey(), wrapper);
} else {
throw e;
}
} catch (Exception e) {
throw new SolrServerException(e);
}
}
// try other standard servers that we didn't try just now
for (ServerWrapper wrapper : zombieServers.values()) {
if (!wrapper.standard || (justFailed != null && justFailed.containsKey(wrapper.getKey()))) continue;
try {
NamedList<Object> rsp = wrapper.solrServer.request(request);
// remove from zombie list *before* adding to alive to avoid a race that could lose a server
zombieServers.remove(wrapper.getKey());
addToAlive(wrapper);
return rsp;
} catch (SolrException e) {
// Server is alive but the request was malformed or invalid
throw e;
} catch (SolrServerException e) {
if (e.getRootCause() instanceof IOException) {
ex = e;
// still dead
} else {
throw e;
}
} catch (Exception e) {
throw new SolrServerException(e);
}
}
if (ex == null) {
throw new SolrServerException("No live SolrServers available to handle this request");
} else {
throw new SolrServerException("No live SolrServers available to handle this request", ex);
}
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse add(Collection<SolrInputDocument> docs) throws SolrServerException, IOException {
return add(docs, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse add(Collection<SolrInputDocument> docs, int commitWithinMs) throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.add(docs);
req.setCommitWithin(commitWithinMs);
return req.process(this);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse addBeans(Collection<?> beans ) throws SolrServerException, IOException {
return addBeans(beans, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse addBeans(Collection<?> beans, int commitWithinMs) throws SolrServerException, IOException {
DocumentObjectBinder binder = this.getBinder();
ArrayList<SolrInputDocument> docs = new ArrayList<SolrInputDocument>(beans.size());
for (Object bean : beans) {
docs.add(binder.toSolrInputDocument(bean));
}
return add(docs, commitWithinMs);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse add(SolrInputDocument doc ) throws SolrServerException, IOException {
return add(doc, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse add(SolrInputDocument doc, int commitWithinMs) throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.add(doc);
req.setCommitWithin(commitWithinMs);
return req.process(this);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse addBean(Object obj) throws IOException, SolrServerException {
return addBean(obj, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse addBean(Object obj, int commitWithinMs) throws IOException, SolrServerException {
return add(getBinder().toSolrInputDocument(obj),commitWithinMs);
}
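// Usage sketch (illustrative addition): a minimal annotated bean for the
// addBean(...) helpers above; the class and field names are hypothetical.
public static class Book {
  @Field("id")
  String id;
  @Field("title")
  String title;
}

public static void addBeanExample(SolrServer server) throws SolrServerException, IOException {
  Book b = new Book();
  b.id = "isbn-1";
  b.title = "SolrJ in Practice";
  server.addBean(b, 5000); // getBinder() maps the annotated fields; commitWithin 5s
}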
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse commit( ) throws SolrServerException, IOException {
return commit(true, true);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse optimize( ) throws SolrServerException, IOException {
return optimize(true, true, 1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse commit( boolean waitFlush, boolean waitSearcher ) throws SolrServerException, IOException {
return new UpdateRequest().setAction( UpdateRequest.ACTION.COMMIT, waitFlush, waitSearcher ).process( this );
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse commit( boolean waitFlush, boolean waitSearcher, boolean softCommit ) throws SolrServerException, IOException {
return new UpdateRequest().setAction( UpdateRequest.ACTION.COMMIT, waitFlush, waitSearcher, softCommit ).process( this );
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse optimize( boolean waitFlush, boolean waitSearcher ) throws SolrServerException, IOException {
return optimize(waitFlush, waitSearcher, 1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse optimize(boolean waitFlush, boolean waitSearcher, int maxSegments ) throws SolrServerException, IOException {
return new UpdateRequest().setAction( UpdateRequest.ACTION.OPTIMIZE, waitFlush, waitSearcher, maxSegments ).process( this );
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse rollback() throws SolrServerException, IOException {
return new UpdateRequest().rollback().process( this );
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse deleteById(String id) throws SolrServerException, IOException {
return deleteById(id, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse deleteById(String id, int commitWithinMs) throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.deleteById(id);
req.setCommitWithin(commitWithinMs);
return req.process(this);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse deleteById(List<String> ids) throws SolrServerException, IOException {
return deleteById(ids, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse deleteById(List<String> ids, int commitWithinMs) throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.deleteById(ids);
req.setCommitWithin(commitWithinMs);
return req.process(this);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse deleteByQuery(String query) throws SolrServerException, IOException {
return deleteByQuery(query, -1);
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public UpdateResponse deleteByQuery(String query, int commitWithinMs) throws SolrServerException, IOException {
UpdateRequest req = new UpdateRequest();
req.deleteByQuery(query);
req.setCommitWithin(commitWithinMs);
return req.process(this);
}
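// Usage sketch (illustrative addition): the commitWithinMs parameter threaded
// through the helpers above bounds how long a change may remain uncommitted.
public static void commitWithinExample(SolrServer server) throws SolrServerException, IOException {
  SolrInputDocument doc = new SolrInputDocument();
  doc.addField("id", "doc-1");
  server.add(doc, 10000);                // commit within 10 seconds
  server.deleteById("stale-doc", 10000); // same bound for the delete
}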
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public SolrPingResponse ping() throws SolrServerException, IOException {
return new SolrPing().process( this );
}
// in solrj/src/java/org/apache/solr/client/solrj/SolrServer.java
public QueryResponse queryAndStreamResponse( SolrParams params, StreamingResponseCallback callback ) throws SolrServerException, IOException
{
ResponseParser parser = new StreamingBinaryResponseParser( callback );
QueryRequest req = new QueryRequest( params );
req.setStreamingResponseCallback( callback );
req.setResponseParser( parser );
return req.process(this);
}
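// Usage sketch (illustrative addition): a callback for queryAndStreamResponse(...)
// above; StreamingBinaryResponseParser hands documents over one at a time
// instead of accumulating them in memory.
public static void streamingQueryExample(SolrServer server) throws SolrServerException, IOException {
  server.queryAndStreamResponse(new SolrQuery("*:*"), new StreamingResponseCallback() {
    @Override
    public void streamDocListInfo(long numFound, long start, Float maxScore) {
      System.out.println("numFound=" + numFound);
    }
    @Override
    public void streamSolrDocument(SolrDocument doc) {
      System.out.println("id=" + doc.getFieldValue("id"));
    }
  });
}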
// in solrj/src/java/org/apache/solr/client/solrj/util/ClientUtils.java
public static void writeXML( SolrInputDocument doc, Writer writer ) throws IOException
{
writer.write("<doc boost=\""+doc.getDocumentBoost()+"\">");
for( SolrInputField field : doc ) {
float boost = field.getBoost();
String name = field.getName();
for( Object v : field ) {
String update = null;
if (v instanceof Map) {
// currently only supports a single value
for (Entry<Object,Object> entry : ((Map<Object,Object>)v).entrySet()) {
update = entry.getKey().toString();
Object fieldVal = entry.getValue();
v = fieldVal;
}
}
if (v instanceof Date) {
v = DateUtil.getThreadLocalDateFormat().format( (Date)v );
} else if (v instanceof byte[]) {
byte[] bytes = (byte[]) v;
v = Base64.byteArrayToBase64(bytes, 0,bytes.length);
} else if (v instanceof ByteBuffer) {
ByteBuffer bytes = (ByteBuffer) v;
v = Base64.byteArrayToBase64(bytes.array(), bytes.position(),bytes.limit() - bytes.position());
}
if (update == null) {
if( boost != 1.0f ) {
XML.writeXML(writer, "field", v.toString(), "name", name, "boost", boost );
} else if (v != null) {
XML.writeXML(writer, "field", v.toString(), "name", name );
}
} else {
if( boost != 1.0f ) {
XML.writeXML(writer, "field", v.toString(), "name", name, "boost", boost, "update", update);
} else if (v != null) {
XML.writeXML(writer, "field", v.toString(), "name", name, "update", update);
}
}
// only write the boost for the first multi-valued field
// otherwise, the used boost is the product of all the boost values
boost = 1.0f;
}
}
writer.write("</doc>");
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public int read() throws IOException {
if (start>=end) return -1;
return buf[start++];
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public int read(CharBuffer cb) throws IOException {
/***
int sz = size();
if (sz<=0) return -1;
if (sz>0) cb.put(buf, start, sz);
return -1;
***/
int sz = size();
if (sz>0) cb.put(buf, start, sz);
start=end;
while (true) {
fill();
int s = size();
if (s==0) return sz==0 ? -1 : sz;
sz += s;
cb.put(buf, start, s);
}
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public int fill() throws IOException {
return 0; // or -1?
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public final Appendable append(CharSequence csq) throws IOException {
return append(csq, 0, csq.length());
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public Appendable append(CharSequence csq, int start, int end) throws IOException {
  write(csq.subSequence(start, end).toString());
  return this; // Appendable contract: return this appendable, not null
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public final Appendable append(char c) throws IOException {
write(c);
return this;
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public Appendable append(CharSequence csq, int start, int end) throws IOException {
return this;
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public int read() throws IOException {
if (start>=end) fill();
return start>=end ? -1 : buf[start++];
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public int read(CharBuffer cb) throws IOException {
  // empty the buffer and then read direct
  int sz = size();
  if (sz>0) cb.put(buf, start, sz); // put(src, offset, length): length is sz, not end
  start = end; // the buffered chars are now consumed
  int sz2 = in.read(cb);
  if (sz2>=0) return sz+sz2;
  return sz>0 ? sz : -1;
}
// in solrj/src/java/org/apache/noggit/CharArr.java
public int fill() throws IOException {
if (start>=end) {
reset();
} else if (start>0) {
System.arraycopy(buf, start, buf, 0, size());
end=size(); start=0;
}
/***
// fill fully or not???
do {
int sz = in.read(buf,end,buf.length-end);
if (sz==-1) return;
end+=sz;
} while (end < buf.length);
***/
int sz = in.read(buf,end,buf.length-end);
if (sz>0) end+=sz;
return sz;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
protected void fill() throws IOException {
if (in!=null) {
gpos += end;
start=0;
int num = in.read(buf,0,buf.length);
end = num>=0 ? num : 0;
}
if (start>=end) eof=true;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private void getMore() throws IOException {
fill();
if (start>=end) {
throw err(null);
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
protected int getChar() throws IOException {
if (start>=end) {
fill();
if (start>=end) return -1;
}
return buf[start++];
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private int getCharNWS() throws IOException {
for (;;) {
int ch = getChar();
if (!(ch==' ' || ch=='\t' || ch=='\n' || ch=='\r')) return ch;
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private void expect(char[] arr) throws IOException {
for (int i=1; i<arr.length; i++) {
int ch = getChar();
if (ch != arr[i]) {
throw err("Expected " + new String(arr));
}
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private long readNumber(int firstChar, boolean isNeg) throws IOException {
out.unsafeWrite(firstChar); // unsafe OK since we know output is big enough
// We build up the number in the negative plane since it's larger (by one) than
// the positive plane.
long v = '0' - firstChar;
// can't overflow a long in 18 decimal digits (i.e. 17 additional after the first).
// we also need 22 additional to handle double so we'll handle in 2 separate loops.
int i;
for (i=0; i<17; i++) {
int ch = getChar();
// TODO: is this switch faster as an if-then-else?
switch(ch) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
v = v*10 - (ch-'0');
out.unsafeWrite(ch);
continue;
case '.':
out.unsafeWrite('.');
valstate = readFrac(out,22-i);
return 0;
case 'e':
case 'E':
out.unsafeWrite(ch);
nstate=0;
valstate = readExp(out,22-i);
return 0;
default:
// return the number, relying on nextEvent() to return an error
// for invalid chars following the number.
if (ch!=-1) --start; // push back last char if not EOF
valstate = LONG;
return isNeg ? v : -v;
}
}
// after this, we could overflow a long and need to do extra checking
boolean overflow = false;
long maxval = isNeg ? Long.MIN_VALUE : -Long.MAX_VALUE;
for (; i<22; i++) {
int ch = getChar();
switch(ch) {
case '0':
case '1':
case '2':
case '3':
case '4':
case '5':
case '6':
case '7':
case '8':
case '9':
if (v < (0x8000000000000000L/10)) overflow=true; // can't multiply by 10 w/o overflowing
v *= 10;
int digit = ch - '0';
if (v < maxval + digit) overflow=true; // can't add digit w/o overflowing
v -= digit;
out.unsafeWrite(ch);
continue;
case '.':
out.unsafeWrite('.');
valstate = readFrac(out,22-i);
return 0;
case 'e':
case 'E':
out.unsafeWrite(ch);
nstate=0;
valstate = readExp(out,22-i);
return 0;
default:
// return the number, relying on nextEvent() to return an error
// for invalid chars following the number.
if (ch!=-1) --start; // push back last char if not EOF
valstate = overflow ? BIGNUMBER : LONG;
return isNeg ? v : -v;
}
}
nstate=0;
valstate = BIGNUMBER;
return 0;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private int readFrac(CharArr arr, int lim) throws IOException {
nstate = HAS_FRACTION; // deliberate set instead of '|'
while(--lim>=0) {
int ch = getChar();
if (ch>='0' && ch<='9') {
arr.write(ch);
} else if (ch=='e' || ch=='E') {
arr.write(ch);
return readExp(arr,lim);
} else {
if (ch!=-1) start--; // back up
return NUMBER;
}
}
return BIGNUMBER;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private int readExp(CharArr arr, int lim) throws IOException {
nstate |= HAS_EXPONENT;
int ch = getChar(); lim--;
if (ch=='+' || ch=='-') {
arr.write(ch);
ch = getChar(); lim--;
}
// make sure at least one digit is read.
if (ch<'0' || ch>'9') {
throw err("missing exponent number");
}
arr.write(ch);
return readExpDigits(arr,lim);
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private int readExpDigits(CharArr arr, int lim) throws IOException {
while (--lim>=0) {
int ch = getChar();
if (ch>='0' && ch<='9') {
arr.write(ch);
} else {
if (ch!=-1) start--; // back up
return NUMBER;
}
}
return BIGNUMBER;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private void continueNumber(CharArr arr) throws IOException {
if (arr != out) arr.write(out);
if ((nstate & HAS_EXPONENT)!=0){
readExpDigits(arr, Integer.MAX_VALUE);
return;
}
if (nstate != 0) {
readFrac(arr, Integer.MAX_VALUE);
return;
}
for(;;) {
int ch = getChar();
if (ch>='0' && ch <='9') {
arr.write(ch);
} else if (ch=='.') {
arr.write(ch);
readFrac(arr,Integer.MAX_VALUE);
return;
} else if (ch=='e' || ch=='E') {
arr.write(ch);
readExp(arr,Integer.MAX_VALUE);
return;
} else {
if (ch!=-1) start--;
return;
}
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private char readEscapedChar() throws IOException {
switch (getChar()) {
case '"' : return '"';
case '\\' : return '\\';
case '/' : return '/';
case 'n' : return '\n';
case 'r' : return '\r';
case 't' : return '\t';
case 'f' : return '\f';
case 'b' : return '\b';
case 'u' :
return (char)(
(hexval(getChar()) << 12)
| (hexval(getChar()) << 8)
| (hexval(getChar()) << 4)
| (hexval(getChar())));
}
throw err("Invalid character escape in string");
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private CharArr readStringChars() throws IOException {
char c=0;
int i;
for (i=start; i<end; i++) {
c = buf[i];
if (c=='"') {
tmp.set(buf,start,i); // directly use input buffer
start=i+1; // advance past last '"'
return tmp;
} else if (c=='\\') {
break;
}
}
out.reset();
readStringChars2(out, i);
return out;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private void readStringChars2(CharArr arr, int middle) throws IOException {
for (;;) {
if (middle>=end) {
arr.write(buf,start,middle-start);
start=middle;
getMore();
middle=start;
}
int ch = buf[middle++];
if (ch=='"') {
int len = middle-start-1;
if (len>0) arr.write(buf,start,len);
start=middle;
return;
} else if (ch=='\\') {
int len = middle-start-1;
if (len>0) arr.write(buf,start,len);
start=middle;
arr.write(readEscapedChar());
middle=start;
}
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private int next(int ch) throws IOException {
for(;;) {
switch (ch) {
case ' ':
case '\t': break;
case '\r':
case '\n': break; // try and keep track of linecounts?
case '"' :
valstate = STRING;
return STRING;
case '{' :
push();
state= DID_OBJSTART;
return OBJECT_START;
case '[':
push();
state=DID_ARRSTART;
return ARRAY_START;
case '0' :
out.reset();
//special case '0'? If next char isn't '.' val=0
ch=getChar();
if (ch=='.') {
start--; ch='0';
readNumber('0',false);
return valstate;
} else if (ch>'9' || ch<'0') {
out.unsafeWrite('0');
if (ch!=-1) start--;
lval = 0;
valstate=LONG;
return LONG;
} else {
throw err("Leading zeros not allowed");
}
case '1' :
case '2' :
case '3' :
case '4' :
case '5' :
case '6' :
case '7' :
case '8' :
case '9' :
out.reset();
lval = readNumber(ch,false);
return valstate;
case '-' :
out.reset();
out.unsafeWrite('-');
ch = getChar();
if (ch<'0' || ch>'9') throw err("expected digit after '-'");
lval = readNumber(ch,true);
return valstate;
case 't':
valstate=BOOLEAN;
// TODO: test performance of this non-branching inline version.
// if ((('r'-getChar())|('u'-getChar())|('e'-getChar())) != 0) err("");
expect(JSONUtil.TRUE_CHARS);
bool=true;
return BOOLEAN;
case 'f':
valstate=BOOLEAN;
expect(JSONUtil.FALSE_CHARS);
bool=false;
return BOOLEAN;
case 'n':
valstate=NULL;
expect(JSONUtil.NULL_CHARS);
return NULL;
case -1:
if (getLevel()>0) throw err("Premature EOF");
return EOF;
default: throw err(null);
}
ch = getChar();
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public int nextEvent() throws IOException {
if (valstate==STRING) {
readStringChars2(devNull,start);
}
else if (valstate==BIGNUMBER) {
continueNumber(devNull);
}
valstate=0;
int ch; // TODO: factor out getCharNWS() to here and check speed
switch (state) {
case 0:
return event = next(getCharNWS());
case DID_OBJSTART:
ch = getCharNWS();
if (ch=='}') {
pop();
return event = OBJECT_END;
}
if (ch != '"') {
throw err("Expected string");
}
state = DID_MEMNAME;
valstate = STRING;
return event = STRING;
case DID_MEMNAME:
ch = getCharNWS();
if (ch!=':') {
throw err("Expected key,value separator ':'");
}
state = DID_MEMVAL; // set state first because it might be pushed...
return event = next(getChar());
case DID_MEMVAL:
ch = getCharNWS();
if (ch=='}') {
pop();
return event = OBJECT_END;
} else if (ch!=',') {
throw err("Expected ',' or '}'");
}
ch = getCharNWS();
if (ch != '"') {
throw err("Expected string");
}
state = DID_MEMNAME;
valstate = STRING;
return event = STRING;
case DID_ARRSTART:
ch = getCharNWS();
if (ch==']') {
pop();
return event = ARRAY_END;
}
state = DID_ARRELEM; // set state first, might be pushed...
return event = next(ch);
case DID_ARRELEM:
ch = getCharNWS();
if (ch==']') {
pop();
return event = ARRAY_END;
} else if (ch!=',') {
throw err("Expected ',' or ']'");
}
// state = DID_ARRELEM;
return event = next(getChar());
}
return 0;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
private void goTo(int what) throws IOException {
if (valstate==what) { valstate=0; return; }
if (valstate==0) {
int ev = nextEvent(); // TODO
if (valstate!=what) {
throw err("type mismatch");
}
valstate=0;
}
else {
throw err("type mismatch");
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public String getString() throws IOException {
return getStringChars().toString();
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public CharArr getStringChars() throws IOException {
goTo(STRING);
return readStringChars();
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public void getString(CharArr output) throws IOException {
goTo(STRING);
readStringChars2(output,start);
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public long getLong() throws IOException {
goTo(LONG);
return lval;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public double getDouble() throws IOException {
return Double.parseDouble(getNumberChars().toString());
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public CharArr getNumberChars() throws IOException {
int ev=0;
if (valstate==0) ev = nextEvent();
if (valstate == LONG || valstate == NUMBER) {
valstate=0;
return out;
}
else if (valstate==BIGNUMBER) {
continueNumber(out);
valstate=0;
return out;
} else {
throw err("Unexpected " + ev);
}
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public void getNumberChars(CharArr output) throws IOException {
int ev=0;
if (valstate==0) ev=nextEvent();
if (valstate == LONG || valstate == NUMBER) output.write(this.out);
else if (valstate==BIGNUMBER) {
continueNumber(output);
} else {
throw err("Unexpected " + ev);
}
valstate=0;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public boolean getBoolean() throws IOException {
goTo(BOOLEAN);
return bool;
}
// in solrj/src/java/org/apache/noggit/JSONParser.java
public void getNull() throws IOException {
goTo(NULL);
}
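// Usage sketch (illustrative addition): driving the pull parser above with
// nextEvent() and the typed getters.
public static void jsonParserExample() throws IOException {
  JSONParser p = new JSONParser("{\"name\":\"solr\",\"count\":42}");
  for (int ev = p.nextEvent(); ev != JSONParser.EOF; ev = p.nextEvent()) {
    if (ev == JSONParser.STRING) {
      System.out.println("string: " + p.getString()); // member names and values
    } else if (ev == JSONParser.LONG) {
      System.out.println("long: " + p.getLong());
    }
  }
}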
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public static Object fromJSON(String json) throws IOException {
JSONParser p = new JSONParser(json);
return getVal(p);
}
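// Usage sketch (illustrative addition): fromJSON(...) above materializes JSON
// into plain Java objects (LinkedHashMap for objects, List for arrays, and
// Long/Double/Boolean/null for scalars).
public static void fromJsonExample() throws IOException {
  Map<?, ?> map = (Map<?, ?>) ObjectBuilder.fromJSON("{\"a\":[1,2,3],\"b\":true}");
  System.out.println(map.get("a")); // [1, 2, 3]
  System.out.println(map.get("b")); // true
}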
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public static Object getVal(JSONParser parser) throws IOException {
return new ObjectBuilder(parser).getVal();
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getVal() throws IOException {
int ev = parser.lastEvent();
switch(ev) {
case JSONParser.STRING: return getString();
case JSONParser.LONG: return getLong();
case JSONParser.NUMBER: return getNumber();
case JSONParser.BIGNUMBER: return getBigNumber();
case JSONParser.BOOLEAN: return getBoolean();
case JSONParser.NULL: return getNull();
case JSONParser.OBJECT_START: return getObject();
case JSONParser.OBJECT_END: return null; // OR ERROR?
case JSONParser.ARRAY_START: return getArray();
case JSONParser.ARRAY_END: return null; // OR ERROR?
case JSONParser.EOF: return null; // OR ERROR?
default: return null; // OR ERROR?
}
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getString() throws IOException {
return parser.getString();
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getLong() throws IOException {
return Long.valueOf(parser.getLong());
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getNumber() throws IOException {
CharArr num = parser.getNumberChars();
String numstr = num.toString();
double d = Double.parseDouble(numstr);
if (!Double.isInfinite(d)) return Double.valueOf(d);
// TODO: use more efficient constructor in Java5
return new BigDecimal(numstr);
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getBigNumber() throws IOException {
CharArr num = parser.getNumberChars();
String numstr = num.toString();
for(int ch; (ch=num.read())!=-1;) {
if (ch=='.' || ch=='e' || ch=='E') return new BigDecimal(numstr);
}
// the remainder of this method was truncated in the dump; in the noggit
// source, integral values (no '.', 'e' or 'E') fall through to BigInteger
return new BigInteger(numstr);
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getBoolean() throws IOException {
return parser.getBoolean();
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getNull() throws IOException {
parser.getNull();
return null;
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object newObject() throws IOException {
return new LinkedHashMap();
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getKey() throws IOException {
return parser.getString();
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public void addKeyVal(Object map, Object key, Object val) throws IOException {
Object prev = ((Map)map).put(key,val);
// TODO: test for repeated value?
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getObject() throws IOException {
Object m = newObject();
for(;;) {
int ev = parser.nextEvent();
if (ev==JSONParser.OBJECT_END) return objectEnd(m);
Object key = getKey();
ev = parser.nextEvent();
Object val = getVal();
addKeyVal(m, key, val);
}
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public void addArrayVal(Object arr, Object val) throws IOException {
((List)arr).add(val);
}
// in solrj/src/java/org/apache/noggit/ObjectBuilder.java
public Object getArray() throws IOException {
Object arr = newArray();
for(;;) {
int ev = parser.nextEvent();
if (ev==JSONParser.ARRAY_END) return endArray(arr);
Object val = getVal();
addArrayVal(arr, val);
}
}
// in contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java
void doAdd(SolrContentHandler handler, AddUpdateCommand template)
throws IOException {
template.solrDoc = handler.newDocument();
processor.processAdd(template);
}
// in contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingDocumentLoader.java
void addDoc(SolrContentHandler handler) throws IOException {
templateAdd.clear();
doAdd(handler, templateAdd);
}
// in contrib/extraction/src/java/org/apache/solr/handler/extraction/ExtractingRequestHandler.java
private TikaConfig getDefaultConfig(ClassLoader classLoader) throws MimeTypeException, IOException {
return new TikaConfig(classLoader);
}
// in contrib/uima/src/java/org/apache/solr/uima/processor/UIMAUpdateRequestProcessor.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
String text = null;
try {
/* get Solr document */
SolrInputDocument solrInputDocument = cmd.getSolrInputDocument();
/* get the fields to analyze */
String[] texts = getTextsToAnalyze(solrInputDocument);
for (int i = 0; i < texts.length; i++) {
text = texts[i];
if (text != null && text.length()>0) {
/* process the text value */
JCas jcas = processText(text);
UIMAToSolrMapper uimaToSolrMapper = new UIMAToSolrMapper(solrInputDocument, jcas);
/* get field mapping from config */
Map<String, Map<String, MapField>> typesAndFeaturesFieldsMap = solrUIMAConfiguration
.getTypesFeaturesFieldsMapping();
/* map type features on fields */
for (String typeFQN : typesAndFeaturesFieldsMap.keySet()) {
uimaToSolrMapper.map(typeFQN, typesAndFeaturesFieldsMap.get(typeFQN));
}
}
}
} catch (Exception e) {
String logField = solrUIMAConfiguration.getLogField();
if(logField == null){
SchemaField uniqueKeyField = solrCore.getSchema().getUniqueKeyField();
if(uniqueKeyField != null){
logField = uniqueKeyField.getName();
}
}
String optionalFieldInfo = logField == null ? "." :
new StringBuilder(". ").append(logField).append("=")
.append((String)cmd.getSolrInputDocument().getField(logField).getValue())
.append(", ").toString();
int len = Math.min(text.length(), 100);
if (solrUIMAConfiguration.isIgnoreErrors()) {
log.warn(new StringBuilder("skip the text processing due to ")
.append(e.getLocalizedMessage()).append(optionalFieldInfo)
.append(" text=\"").append(text.substring(0, len)).append("...\"").toString());
} else {
throw new SolrException(ErrorCode.SERVER_ERROR,
new StringBuilder("processing error: ")
.append(e.getLocalizedMessage()).append(optionalFieldInfo)
.append(" text=\"").append(text.substring(0, len)).append("...\"").toString(), e);
}
}
super.processAdd(cmd);
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2TokenizerFactory.java
public short nextToken() throws IOException {
final boolean hasNextToken = wordTokenFilter.incrementToken();
if (hasNextToken) {
short flags = 0;
final char[] image = term.buffer();
final int length = term.length();
tempCharSequence.reset(image, 0, length);
if (length == 1 && image[0] == ',') {
// ChineseTokenizer seems to convert all punctuation to ','
// characters
flags = ITokenizer.TT_PUNCTUATION;
} else if (numeric.matcher(tempCharSequence).matches()) {
flags = ITokenizer.TT_NUMERIC;
} else {
flags = ITokenizer.TT_TERM;
}
return flags;
}
return ITokenizer.TT_EOF;
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/LuceneCarrot2TokenizerFactory.java
public void reset(Reader input) throws IOException {
try {
sentenceTokenizer.reset(input);
wordTokenFilter = (TokenStream) tokenFilterClass.getConstructor(
TokenStream.class).newInstance(sentenceTokenizer);
term = wordTokenFilter.addAttribute(CharTermAttribute.class);
} catch (Exception e) {
throw ExceptionUtils.wrapAsRuntimeException(e);
}
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
@Override
public IResource[] getAll(final String resource) {
final String resourceName = carrot2ResourcesDir + "/" + resource;
log.debug("Looking for Solr resource: " + resourceName);
InputStream resourceStream = null;
final byte [] asBytes;
try {
resourceStream = resourceLoader.openResource(resourceName);
asBytes = IOUtils.toByteArray(resourceStream);
} catch (RuntimeException e) {
log.debug("Resource not found in Solr's config: " + resourceName
+ ". Using the default " + resource + " from Carrot JAR.");
return new IResource[] {};
} catch (IOException e) {
log.warn("Could not read Solr resource " + resourceName);
return new IResource[] {};
} finally {
if (resourceStream != null) Closeables.closeQuietly(resourceStream);
}
log.info("Loaded Solr resource: " + resourceName);
final IResource foundResource = new IResource() {
@Override
public InputStream open() throws IOException {
return new ByteArrayInputStream(asBytes);
}
@Override
public int hashCode() {
// In case multiple resources are found they will be deduped, but we don't use it in Solr,
// so simply rely on instance equivalence.
return super.hashCode();
}
@Override
public boolean equals(Object obj) {
// In case multiple resources are found they will be deduped, but we don't use it in Solr,
// so simply rely on instance equivalence.
return super.equals(obj);
}
@Override
public String toString() {
return "Solr config resource: " + resourceName;
}
};
return new IResource[] { foundResource };
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
@Override
public InputStream open() throws IOException {
return new ByteArrayInputStream(asBytes);
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/carrot2/CarrotClusteringEngine.java
private List<Document> getDocuments(SolrDocumentList solrDocList, Map<SolrDocument, Integer> docIds,
Query query, final SolrQueryRequest sreq) throws IOException {
SolrHighlighter highlighter = null;
SolrParams solrParams = sreq.getParams();
SolrCore core = sreq.getCore();
String urlField = solrParams.get(CarrotParams.URL_FIELD_NAME, "url");
String titleFieldSpec = solrParams.get(CarrotParams.TITLE_FIELD_NAME, "title");
String snippetFieldSpec = solrParams.get(CarrotParams.SNIPPET_FIELD_NAME, titleFieldSpec);
String languageField = solrParams.get(CarrotParams.LANGUAGE_FIELD_NAME, null);
// Maps Solr field names to Carrot2 custom field names
Map<String, String> customFields = getCustomFieldsMap(solrParams);
// Parse language code map string into a map
Map<String, String> languageCodeMap = Maps.newHashMap();
if (StringUtils.isNotBlank(languageField)) {
for (String pair : solrParams.get(CarrotParams.LANGUAGE_CODE_MAP, "")
.split("[, ]")) {
final String[] split = pair.split(":");
if (split.length == 2 && StringUtils.isNotBlank(split[0]) && StringUtils.isNotBlank(split[1])) {
languageCodeMap.put(split[0], split[1]);
} else {
log.warn("Unsupported format for " + CarrotParams.LANGUAGE_CODE_MAP
+ ": '" + pair + "'. Skipping this mapping.");
}
}
}
// Get the documents
boolean produceSummary = solrParams.getBool(CarrotParams.PRODUCE_SUMMARY, false);
SolrQueryRequest req = null;
String[] snippetFieldAry = null;
if (produceSummary) {
highlighter = HighlightComponent.getHighlighter(core);
if (highlighter != null){
Map<String, Object> args = Maps.newHashMap();
snippetFieldAry = snippetFieldSpec.split("[, ]");
args.put(HighlightParams.FIELDS, snippetFieldAry);
args.put(HighlightParams.HIGHLIGHT, "true");
args.put(HighlightParams.SIMPLE_PRE, ""); //we don't care about actually highlighting the area
args.put(HighlightParams.SIMPLE_POST, "");
args.put(HighlightParams.FRAGSIZE, solrParams.getInt(CarrotParams.SUMMARY_FRAGSIZE, solrParams.getInt(HighlightParams.FRAGSIZE, 100)));
args.put(HighlightParams.SNIPPETS, solrParams.getInt(CarrotParams.SUMMARY_SNIPPETS, solrParams.getInt(HighlightParams.SNIPPETS, 1)));
req = new LocalSolrQueryRequest(core, query.toString(), "", 0, 1, args) {
@Override
public SolrIndexSearcher getSearcher() {
return sreq.getSearcher();
}
};
} else {
log.warn("No highlighter configured, cannot produce summary");
produceSummary = false;
}
}
Iterator<SolrDocument> docsIter = solrDocList.iterator();
List<Document> result = new ArrayList<Document>(solrDocList.size());
float[] scores = {1.0f};
int[] docsHolder = new int[1];
Query theQuery = query;
while (docsIter.hasNext()) {
SolrDocument sdoc = docsIter.next();
String snippet = null;
// TODO: docIds will be null when running distributed search.
// See comment in ClusteringComponent#finishStage().
if (produceSummary && docIds != null) {
docsHolder[0] = docIds.get(sdoc).intValue();
DocList docAsList = new DocSlice(0, 1, docsHolder, scores, 1, 1.0f);
NamedList<Object> highlights = highlighter.doHighlighting(docAsList, theQuery, req, snippetFieldAry);
if (highlights != null && highlights.size() == 1) {//should only be one value given our setup
//should only be one document
@SuppressWarnings("unchecked")
NamedList<String []> tmp = (NamedList<String[]>) highlights.getVal(0);
final StringBuilder sb = new StringBuilder();
for (int j = 0; j < snippetFieldAry.length; j++) {
// Join fragments with a period, so that Carrot2 does not create
// cross-fragment phrases, such phrases rarely make sense.
String [] highlt = tmp.get(snippetFieldAry[j]);
if (highlt != null && highlt.length > 0) {
for (int i = 0; i < highlt.length; i++) {
sb.append(highlt[i]);
sb.append(" . ");
}
}
}
snippet = sb.toString();
}
}
// If summaries not enabled or summary generation failed, use full content.
if (snippet == null) {
snippet = getConcatenated(sdoc, snippetFieldSpec);
}
// Create a Carrot2 document
Document carrotDocument = new Document(getConcatenated(sdoc, titleFieldSpec),
snippet, ObjectUtils.toString(sdoc.getFieldValue(urlField), ""));
// Store Solr id of the document, we need it to map document instances
// found in clusters back to identifiers.
carrotDocument.setField(SOLR_DOCUMENT_ID, sdoc.getFieldValue(idFieldName));
// Set language
if (StringUtils.isNotBlank(languageField)) {
Collection<Object> languages = sdoc.getFieldValues(languageField);
if (languages != null) {
// Use the first Carrot2-supported language
for (Object l : languages) {
String lang = ObjectUtils.toString(l, "");
if (languageCodeMap.containsKey(lang)) {
lang = languageCodeMap.get(lang);
}
// Language detection Library for Java uses dashes to separate
// language variants, such as 'zh-cn', but Carrot2 uses underscores.
if (lang.indexOf('-') > 0) {
lang = lang.replace('-', '_');
}
// If the language is supported by Carrot2, we'll get a non-null value
final LanguageCode carrot2Language = LanguageCode.forISOCode(lang);
if (carrot2Language != null) {
carrotDocument.setLanguage(carrot2Language);
break;
}
}
}
}
// Add custom fields
if (customFields != null) {
for (Entry<String, String> entry : customFields.entrySet()) {
carrotDocument.setField(entry.getValue(), sdoc.getFieldValue(entry.getKey()));
}
}
result.add(carrotDocument);
}
return result;
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
if (!params.getBool(COMPONENT_NAME, false)) {
return;
}
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/ClusteringComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
if (!params.getBool(COMPONENT_NAME, false)) {
return;
}
String name = getClusteringEngineName(rb);
boolean useResults = params.getBool(ClusteringParams.USE_SEARCH_RESULTS, false);
if (useResults) {
SearchClusteringEngine engine = getSearchClusteringEngine(rb);
if (engine != null) {
DocListAndSet results = rb.getResults();
Map<SolrDocument,Integer> docIds = new HashMap<SolrDocument, Integer>(results.docList.size());
SolrDocumentList solrDocList = engine.getSolrDocumentList(results.docList, rb.req, docIds);
Object clusters = engine.cluster(rb.getQuery(), solrDocList, docIds, rb.req);
rb.rsp.add("clusters", clusters);
} else {
log.warn("No engine for: " + name);
}
}
boolean useCollection = params.getBool(ClusteringParams.USE_COLLECTION, false);
if (useCollection) {
DocumentClusteringEngine engine = documentClusteringEngines.get(name);
if (engine != null) {
boolean useDocSet = params.getBool(ClusteringParams.USE_DOC_SET, false);
NamedList nl = null;
//TODO: This likely needs to be made into a background task that runs in an executor
if (useDocSet) {
nl = engine.cluster(rb.getResults().docSet, params);
} else {
nl = engine.cluster(params);
}
rb.rsp.add("clusters", nl);
} else {
log.warn("No engine for " + name);
}
}
}
// in contrib/clustering/src/java/org/apache/solr/handler/clustering/SearchClusteringEngine.java
public SolrDocumentList getSolrDocumentList(DocList docList, SolrQueryRequest sreq,
Map<SolrDocument, Integer> docIds) throws IOException{
return SolrPluginUtils.docListToSolrDocumentList(
docList, sreq.getSearcher(), getFieldsToLoad(sreq), docIds);
}
// in contrib/langid/src/java/org/apache/solr/update/processor/LanguageIdentifierUpdateProcessor.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
if (isEnabled()) {
process(cmd.getSolrInputDocument());
} else {
log.debug("Processor not enabled, not running");
}
super.processAdd(cmd);
}
// in contrib/langid/src/java/org/apache/solr/update/processor/LangDetectLanguageIdentifierUpdateProcessorFactory.java
public static synchronized void loadData() throws IOException, LangDetectException {
if (loaded) {
return;
}
loaded = true;
List<String> profileData = new ArrayList<String>();
Charset encoding = Charset.forName("UTF-8");
for (String language : languages) {
InputStream stream = LangDetectLanguageIdentifierUpdateProcessor.class.getResourceAsStream("langdetect-profiles/" + language);
BufferedReader reader = new BufferedReader(new InputStreamReader(stream, encoding));
profileData.add(new String(IOUtils.toCharArray(reader)));
reader.close();
}
DetectorFactory.loadProfile(profileData);
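// A fixed seed makes the otherwise randomized language detection
// deterministic across runs.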
DetectorFactory.setSeed(0);
}
// in contrib/analysis-extras/src/java/org/apache/solr/schema/ICUCollationField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), true);
}
// in contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/SolrWriter.java
static String getResourceAsString(InputStream in) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream(1024);
byte[] buf = new byte[1024];
int sz = 0;
try {
while ((sz = in.read(buf)) != -1) {
baos.write(buf, 0, sz);
}
} finally {
try {
in.close();
} catch (Exception e) {
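// ignore exceptions on close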
}
}
return new String(baos.toByteArray(), "UTF-8");
}
// in contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathRecordReader.java
private void parse(XMLStreamReader parser,
Handler handler,
Map<String, Object> values,
Stack<Set<String>> stack, // lists of values to purge
boolean recordStarted
) throws IOException, XMLStreamException {
Set<String> valuesAddedinThisFrame = null;
if (isRecord) {
// This Node is a match for an XPATH from a forEach attribute,
// prepare for the clean up that will occur when the record
// is emitted after its END_ELEMENT is matched
recordStarted = true;
valuesAddedinThisFrame = new HashSet<String>();
stack.push(valuesAddedinThisFrame);
} else if (recordStarted) {
// This node is a child of some parent which matched against forEach
// attribute. Continue to add values to an existing record.
valuesAddedinThisFrame = stack.peek();
}
try {
/* The input stream has deposited us at this Node in our tree of
* interesting nodes. Depending on how this node is of interest,
* process further tokens from the input stream and decide what
* we do next
*/
if (attributes != null) {
// we are interested in storing attributes from the input stream
for (Node node : attributes) {
String value = parser.getAttributeValue(null, node.name);
if (value != null || (recordStarted && !isRecord)) {
putText(values, value, node.fieldName, node.multiValued);
valuesAddedinThisFrame.add(node.fieldName);
}
}
}
Set<Node> childrenFound = new HashSet<Node>();
int event = -1;
int flattenedStarts=0; // our tag depth when flattening elements
StringBuilder text = new StringBuilder();
while (true) {
event = parser.next();
if (event == END_ELEMENT) {
if (flattenedStarts > 0) flattenedStarts--;
else {
if (hasText && valuesAddedinThisFrame != null) {
valuesAddedinThisFrame.add(fieldName);
putText(values, text.toString(), fieldName, multiValued);
}
if (isRecord) handler.handle(getDeepCopy(values), forEachPath);
if (childNodes != null && recordStarted && !isRecord && !childrenFound.containsAll(childNodes)) {
// non-record nodes where we have not collected text for ALL
// the child nodes.
for (Node n : childNodes) {
// For the multivalue child nodes where we could have, but
// didn't, collect text. Push a null string into values.
if (!childrenFound.contains(n)) n.putNulls(values);
}
}
return;
}
}
else if (hasText && (event==CDATA || event==CHARACTERS || event==SPACE)) {
text.append(parser.getText());
}
else if (event == START_ELEMENT) {
if ( flatten )
flattenedStarts++;
else
handleStartElement(parser, childrenFound, handler, values, stack, recordStarted);
}
// END_DOCUMENT is least likely to appear and should be
// last in if-then-else skip chain
else if (event == END_DOCUMENT) return;
}
}finally {
if ((isRecord || !recordStarted) && !stack.empty()) {
Set<String> cleanThis = stack.pop();
if (cleanThis != null) {
for (String fld : cleanThis) values.remove(fld);
}
}
}
}
// in contrib/dataimporthandler/src/java/org/apache/solr/handler/dataimport/XPathRecordReader.java
private void handleStartElement(XMLStreamReader parser, Set<Node> childrenFound,
Handler handler, Map<String, Object> values,
Stack<Set<String>> stack, boolean recordStarted)
throws IOException, XMLStreamException {
Node n = getMatchingNode(parser,childNodes);
Map<String, Object> decends=new HashMap<String, Object>();
if (n != null) {
childrenFound.add(n);
n.parse(parser, handler, values, stack, recordStarted);
return;
}
// The stream has diverged from the tree of interesting elements, but
// are there any wildCardNodes ... anywhere in our path from the root?
Node dn = this; // checking our Node first!
do {
if (dn.wildCardNodes != null) {
// Check to see if the stream's tag matches one of the "//" all-
// descendants type expressions for this node.
n = getMatchingNode(parser, dn.wildCardNodes);
if (n != null) {
childrenFound.add(n);
n.parse(parser, handler, values, stack, recordStarted);
break;
}
// add the list of this node's wild descendants to the cache
for (Node nn : dn.wildCardNodes) decends.put(nn.name, nn);
}
dn = dn.wildAncestor; // leap back along the tree toward root
} while (dn != null) ;
if (n == null) {
// we have a START_ELEMENT which is not within the tree of
// interesting nodes. Skip over the contents of this element
// but recursively repeat the above for any START_ELEMENTs
// found within this element.
int count = 1; // we have had our first START_ELEMENT
while (count != 0) {
int token = parser.next();
if (token == START_ELEMENT) {
Node nn = (Node) decends.get(parser.getLocalName());
if (nn != null) {
// We have a //Node which matches the stream's parser.localName
childrenFound.add(nn);
// Parse the contents of this stream element
nn.parse(parser, handler, values, stack, recordStarted);
}
else count++;
}
else if (token == END_ELEMENT) count--;
}
}
}
// in contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
VelocityEngine engine = getEngine(request); // TODO: have HTTP headers available for configuring engine
Template template = getTemplate(engine, request);
VelocityContext context = new VelocityContext();
context.put("request", request);
// Turn the SolrQueryResponse into a SolrResponse.
// QueryResponse has lots of conveniences suitable for a view
// Problem is, which SolrResponse class to use?
// One patch to SOLR-620 solved this by passing in a class name as
// a parameter and using reflection and Solr's class loader to
// create a new instance. But for now the implementation simply
// uses QueryResponse, and if it chokes in a known way, falls back
// to the bare-bones SolrResponseBase.
// TODO: Can this writer know what the handler class is? With echoHandler=true it can get its string name at least
SolrResponse rsp = new QueryResponse();
NamedList<Object> parsedResponse = BinaryResponseWriter.getParsedResponse(request, response);
try {
rsp.setResponse(parsedResponse);
// page only injected if QueryResponse works
context.put("page", new PageTool(request, response)); // page tool only makes sense for a SearchHandler request... *sigh*
} catch (ClassCastException e) {
// known edge case where QueryResponse's extraction assumes "response" is a SolrDocumentList
// (AnalysisRequestHandler emits a "response")
e.printStackTrace();
rsp = new SolrResponseBase();
rsp.setResponse(parsedResponse);
}
context.put("response", rsp);
// Velocity context tools - TODO: make these pluggable
context.put("esc", new EscapeTool());
context.put("date", new ComparisonDateTool());
context.put("list", new ListTool());
context.put("math", new MathTool());
context.put("number", new NumberTool());
context.put("sort", new SortTool());
context.put("engine", engine); // for $engine.resourceExists(...)
String layout_template = request.getParams().get("v.layout");
String json_wrapper = request.getParams().get("v.json");
boolean wrap_response = (layout_template != null) || (json_wrapper != null);
// create output, optionally wrap it into a json object
if (wrap_response) {
StringWriter stringWriter = new StringWriter();
template.merge(context, stringWriter);
if (layout_template != null) {
context.put("content", stringWriter.toString());
stringWriter = new StringWriter();
try {
engine.getTemplate(layout_template + ".vm").merge(context, stringWriter);
} catch (Exception e) {
throw new IOException(e.getMessage());
}
}
if (json_wrapper != null) {
writer.write(request.getParams().get("v.json") + "(");
writer.write(getJSONWrap(stringWriter.toString()));
writer.write(')');
} else { // using a layout, but not JSON wrapping
writer.write(stringWriter.toString());
}
} else {
template.merge(context, writer);
}
}
// in contrib/velocity/src/java/org/apache/solr/response/VelocityResponseWriter.java
private Template getTemplate(VelocityEngine engine, SolrQueryRequest request) throws IOException {
Template template;
String template_name = request.getParams().get("v.template");
String qt = request.getParams().get("qt");
String path = (String) request.getContext().get("path");
if (template_name == null && path != null) {
template_name = path;
} // TODO: path is never null, so qt won't get picked up maybe special case for '/select' to use qt, otherwise use path?
if (template_name == null && qt != null) {
template_name = qt;
}
if (template_name == null) template_name = "index";
try {
template = engine.getTemplate(template_name + ".vm");
} catch (Exception e) {
throw new IOException(e.getMessage());
}
return template;
}
// in core/src/java/org/apache/solr/handler/DocumentAnalysisRequestHandler.java
DocumentAnalysisRequest resolveAnalysisRequest(SolrQueryRequest req) throws IOException, XMLStreamException {
DocumentAnalysisRequest request = new DocumentAnalysisRequest();
SolrParams params = req.getParams();
String query = params.get(AnalysisParams.QUERY, params.get(CommonParams.Q, null));
request.setQuery(query);
boolean showMatch = params.getBool(AnalysisParams.SHOW_MATCH, false);
request.setShowMatch(showMatch);
ContentStream stream = extractSingleContentStream(req);
InputStream is = null;
XMLStreamReader parser = null;
try {
is = stream.getStream();
final String charset = ContentStreamBase.getCharsetFromContentType(stream.getContentType());
parser = (charset == null) ?
inputFactory.createXMLStreamReader(is) : inputFactory.createXMLStreamReader(is, charset);
while (true) {
int event = parser.next();
switch (event) {
case XMLStreamConstants.END_DOCUMENT: {
parser.close();
return request;
}
case XMLStreamConstants.START_ELEMENT: {
String currTag = parser.getLocalName();
if ("doc".equals(currTag)) {
log.trace("Reading doc...");
SolrInputDocument document = readDocument(parser, req.getSchema());
request.addDocument(document);
}
break;
}
}
}
} finally {
if (parser != null) parser.close();
IOUtils.closeQuietly(is);
}
}
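// Illustrative payload scanned by resolveAnalysisRequest: the parser looks
// for <doc> elements (the wrapping element name is assumed here, and the
// field values are made up):
//   <docs>
//     <doc><field name="id">1</field><field name="text">hello</field></doc>
//   </docs>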
// in core/src/java/org/apache/solr/handler/DumpRequestHandler.java
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException
{
// Show params
rsp.add( "params", req.getParams().toNamedList() );
// Write the streams...
if( req.getContentStreams() != null ) {
ArrayList<NamedList<Object>> streams = new ArrayList<NamedList<Object>>();
// Cycle through each stream
for( ContentStream content : req.getContentStreams() ) {
NamedList<Object> stream = new SimpleOrderedMap<Object>();
stream.add( "name", content.getName() );
stream.add( "sourceInfo", content.getSourceInfo() );
stream.add( "size", content.getSize() );
stream.add( "contentType", content.getContentType() );
Reader reader = content.getReader();
try {
stream.add( "stream", IOUtils.toString(reader) );
} finally {
reader.close();
}
streams.add( stream );
}
rsp.add( "streams", streams );
}
rsp.add("context", req.getContext());
}
// in core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
@Override
public boolean incrementToken() throws IOException {
if (tokenIterator.hasNext()) {
clearAttributes();
AttributeSource next = tokenIterator.next();
Iterator<Class<? extends Attribute>> atts = next.getAttributeClassesIterator();
while (atts.hasNext()) // make sure all att impls in the token exist here
addAttribute(atts.next());
next.copyTo(this);
return true;
} else {
return false;
}
}
// in core/src/java/org/apache/solr/handler/AnalysisRequestHandlerBase.java
@Override
public void reset() throws IOException {
super.reset();
tokenIterator = tokens.iterator();
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
NamedList getCommandResponse(NamedList<String> commands) throws IOException {
HttpPost post = new HttpPost(masterUrl);
List<BasicNameValuePair> formparams = new ArrayList<BasicNameValuePair>();
formparams.add(new BasicNameValuePair("wt", "javabin"));
for (Map.Entry<String, String> c : commands) {
formparams.add(new BasicNameValuePair(c.getKey(), c.getValue()));
}
UrlEncodedFormEntity entity = new UrlEncodedFormEntity(formparams, "UTF-8");
post.setEntity(entity);
return getNamedListResponse(post);
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
private NamedList<?> getNamedListResponse(HttpPost method) throws IOException {
InputStream input = null;
NamedList<?> result = null;
try {
HttpResponse response = myHttpClient.execute(method);
int status = response.getStatusLine().getStatusCode();
if (status != HttpStatus.SC_OK) {
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,
"Request failed for the url " + method);
}
input = response.getEntity().getContent();
result = (NamedList<?>)new JavaBinCodec().unmarshal(input);
} finally {
try {
if (input != null) {
input.close();
}
} catch (Exception e) {
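// ignore exceptions on close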
}
}
return result;
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
void fetchFileList(long gen) throws IOException {
HttpPost post = new HttpPost(masterUrl);
List<BasicNameValuePair> formparams = new ArrayList<BasicNameValuePair>();
formparams.add(new BasicNameValuePair("wt", "javabin"));
formparams.add(new BasicNameValuePair(COMMAND, CMD_GET_FILE_LIST));
formparams.add(new BasicNameValuePair(GENERATION, String.valueOf(gen)));
UrlEncodedFormEntity entity = new UrlEncodedFormEntity(formparams, "UTF-8");
post.setEntity(entity);
@SuppressWarnings("unchecked")
NamedList<List<Map<String, Object>>> nl
= (NamedList<List<Map<String, Object>>>) getNamedListResponse(post);
List<Map<String, Object>> f = nl.get(CMD_GET_FILE_LIST);
if (f != null)
filesToDownload = Collections.synchronizedList(f);
else {
filesToDownload = Collections.emptyList();
LOG.error("No files to download for index generation: "+ gen);
}
f = nl.get(CONF_FILES);
if (f != null)
confFilesToDownload = Collections.synchronizedList(f);
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
boolean fetchLatestIndex(SolrCore core, boolean force) throws IOException, InterruptedException {
successfulInstall = false;
replicationStartTime = System.currentTimeMillis();
try {
//get the current 'replicateable' index version in the master
NamedList response = null;
try {
response = getLatestVersion();
} catch (Exception e) {
LOG.error("Master at: " + masterUrl + " is not available. Index fetch failed. Exception: " + e.getMessage());
return false;
}
long latestVersion = (Long) response.get(CMD_INDEX_VERSION);
long latestGeneration = (Long) response.get(GENERATION);
IndexCommit commit;
RefCounted<SolrIndexSearcher> searcherRefCounted = null;
try {
searcherRefCounted = core.getNewestSearcher(false);
if (searcherRefCounted == null) {
SolrException.log(LOG, "No open searcher found - fetch aborted");
return false;
}
commit = searcherRefCounted.get().getIndexReader().getIndexCommit();
} finally {
if (searcherRefCounted != null)
searcherRefCounted.decref();
}
if (latestVersion == 0L) {
if (force && commit.getGeneration() != 0) {
// since we won't get the files for an empty index,
// we just clear ours and commit
core.getUpdateHandler().getSolrCoreState().getIndexWriter(core).deleteAll();
SolrQueryRequest req = new LocalSolrQueryRequest(core,
new ModifiableSolrParams());
core.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
}
//there is nothing to be replicated
successfulInstall = true;
return true;
}
if (!force && IndexDeletionPolicyWrapper.getCommitTimestamp(commit) == latestVersion) {
//master and slave are already in sync just return
LOG.info("Slave in sync with master.");
successfulInstall = true;
return true;
}
LOG.info("Master's generation: " + latestGeneration);
LOG.info("Slave's generation: " + commit.getGeneration());
LOG.info("Starting replication process");
// get the list of files first
fetchFileList(latestGeneration);
// this can happen if the commit point is deleted before we fetch the file list.
if(filesToDownload.isEmpty()) return false;
LOG.info("Number of files in latest index in master: " + filesToDownload.size());
// Create the sync service
fsyncService = Executors.newSingleThreadExecutor();
// use a synchronized list because the list is read by other threads (to show details)
filesDownloaded = Collections.synchronizedList(new ArrayList<Map<String, Object>>());
// if the master's generation is older than the slave's, the two indexes are not compatible:
// a new index directory must be created and all the files copied over
boolean isFullCopyNeeded = IndexDeletionPolicyWrapper.getCommitTimestamp(commit) >= latestVersion || force;
File tmpIndexDir = createTempindexDir(core);
if (isIndexStale())
isFullCopyNeeded = true;
successfulInstall = false;
boolean deleteTmpIdxDir = true;
File indexDir = null ;
try {
indexDir = new File(core.getIndexDir());
downloadIndexFiles(isFullCopyNeeded, tmpIndexDir, latestGeneration);
LOG.info("Total time taken for download : " + ((System.currentTimeMillis() - replicationStartTime) / 1000) + " secs");
Collection<Map<String, Object>> modifiedConfFiles = getModifiedConfFiles(confFilesToDownload);
if (!modifiedConfFiles.isEmpty()) {
downloadConfFiles(confFilesToDownload, latestGeneration);
if (isFullCopyNeeded) {
successfulInstall = modifyIndexProps(tmpIndexDir.getName());
deleteTmpIdxDir = false;
} else {
successfulInstall = copyIndexFiles(tmpIndexDir, indexDir);
}
if (successfulInstall) {
LOG.info("Configuration files are modified, core will be reloaded");
logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);//write to a file time of replication and conf files.
reloadCore();
}
} else {
terminateAndWaitFsyncService();
if (isFullCopyNeeded) {
successfulInstall = modifyIndexProps(tmpIndexDir.getName());
deleteTmpIdxDir = false;
} else {
successfulInstall = copyIndexFiles(tmpIndexDir, indexDir);
}
if (successfulInstall) {
logReplicationTimeAndConfFiles(modifiedConfFiles, successfulInstall);
doCommit();
}
}
replicationStartTime = 0;
return successfulInstall;
} catch (ReplicationHandlerException e) {
LOG.error("User aborted Replication");
return false;
} catch (SolrException e) {
throw e;
} catch (InterruptedException e) {
throw new InterruptedException("Index fetch interrupted");
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Index fetch failed : ", e);
} finally {
if (deleteTmpIdxDir) delTree(tmpIndexDir);
else delTree(indexDir);
}
} finally {
if (!successfulInstall) {
logReplicationTimeAndConfFiles(null, successfulInstall);
}
filesToDownload = filesDownloaded = confFilesDownloaded = confFilesToDownload = null;
replicationStartTime = 0;
fileFetcher = null;
if (fsyncService != null && !fsyncService.isShutdown()) fsyncService.shutdownNow();
fsyncService = null;
stop = false;
fsyncException = null;
}
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
private void doCommit() throws IOException {
SolrQueryRequest req = new LocalSolrQueryRequest(solrCore,
new ModifiableSolrParams());
// reboot the writer on the new index and get a new searcher
solrCore.getUpdateHandler().newIndexWriter();
try {
// first try to open an NRT searcher so that the new
// IndexWriter is registered with the reader
Future[] waitSearcher = new Future[1];
solrCore.getSearcher(true, false, waitSearcher, true);
if (waitSearcher[0] != null) {
try {
waitSearcher[0].get();
} catch (InterruptedException e) {
SolrException.log(LOG,e);
} catch (ExecutionException e) {
SolrException.log(LOG,e);
}
}
// update our commit point to the right dir
solrCore.getUpdateHandler().commit(new CommitUpdateCommand(req, false));
} finally {
req.close();
}
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
private boolean copyIndexFiles(File tmpIdxDir, File indexDir) throws IOException {
String segmentsFile = null;
List<String> copiedfiles = new ArrayList<String>();
for (Map<String, Object> f : filesDownloaded) {
String fname = (String) f.get(NAME);
// the segments file must be copied last
// or else if there is a failure in between the
// index will be corrupted
if (fname.startsWith("segments_")) {
//The segments file must be copied last;
//otherwise, if the copy fails, the index ends up corrupted
segmentsFile = fname;
continue;
}
if (!copyAFile(tmpIdxDir, indexDir, fname, copiedfiles)) return false;
copiedfiles.add(fname);
}
//copy the segments file last
if (segmentsFile != null) {
if (!copyAFile(tmpIdxDir, indexDir, segmentsFile, copiedfiles)) return false;
}
return true;
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
private void copyTmpConfFiles2Conf(File tmpconfDir) throws IOException {
File confDir = new File(solrCore.getResourceLoader().getConfigDir());
for (File file : tmpconfDir.listFiles()) {
File oldFile = new File(confDir, file.getName());
if (oldFile.exists()) {
File backupFile = new File(confDir, oldFile.getName() + "." + getDateAsStr(new Date(oldFile.lastModified())));
boolean status = oldFile.renameTo(backupFile);
if (!status) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unable to rename: " + oldFile + " to: " + backupFile);
}
}
boolean status = file.renameTo(oldFile);
if (!status) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unable to rename: " + file + " to: " + oldFile);
}
}
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
FastInputStream getStream() throws IOException {
post = new HttpPost(masterUrl);
//the method is command=filecontent
List<BasicNameValuePair> formparams = new ArrayList<BasicNameValuePair>();
formparams.add(new BasicNameValuePair(COMMAND, CMD_GET_FILE));
//add the version to download. This is used to reserve the download
formparams.add(new BasicNameValuePair(GENERATION, indexGen.toString()));
if (isConf) {
//set cf instead of file for config file
formparams.add(new BasicNameValuePair(CONF_FILE_SHORT, fileName));
} else {
formparams.add(new BasicNameValuePair(FILE, fileName));
}
if (useInternal) {
formparams.add(new BasicNameValuePair(COMPRESSION, "true"));
}
if (useExternal) {
formparams.add(new BasicNameValuePair("Accept-Encoding", "gzip,deflate"));
}
//use checksum
if (this.includeChecksum)
formparams.add(new BasicNameValuePair(CHECKSUM, "true"));
//wt=filestream this is a custom protocol
formparams.add(new BasicNameValuePair("wt", FILE_STREAM));
// If a download fails and is retried, the offset=<sizedownloaded> ensures that
// the server resumes from that offset
if (bytesDownloaded > 0) {
formparams.add(new BasicNameValuePair(OFFSET, "" + bytesDownloaded));
}
UrlEncodedFormEntity entity = new UrlEncodedFormEntity(formparams, "UTF-8");
post.setEntity(entity);
HttpResponse response = myHttpClient.execute(post);
InputStream is = response.getEntity().getContent();
//wrap it using FastInputStream
if (useInternal) {
is = new InflaterInputStream(is);
} else if (useExternal) {
is = checkCompressed(post, is);
}
return new FastInputStream(is);
}
// in core/src/java/org/apache/solr/handler/SnapPuller.java
private InputStream checkCompressed(AbstractHttpMessage method, InputStream respBody) throws IOException {
Header contentEncodingHeader = method.getFirstHeader("Content-Encoding");
if (contentEncodingHeader != null) {
String contentEncoding = contentEncodingHeader.getValue();
if (contentEncoding.contains("gzip")) {
respBody = new GZIPInputStream(respBody);
} else if (contentEncoding.contains("deflate")) {
respBody = new InflaterInputStream(respBody);
}
} else {
Header contentTypeHeader = method.getFirstHeader("Content-Type");
if (contentTypeHeader != null) {
String contentType = contentTypeHeader.getValue();
if (contentType != null) {
if (contentType.startsWith("application/x-gzip-compressed")) {
respBody = new GZIPInputStream(respBody);
} else if (contentType.startsWith("application/x-deflate")) {
respBody = new InflaterInputStream(respBody);
}
}
}
}
return respBody;
}
// in core/src/java/org/apache/solr/handler/loader/XMLLoader.java
Transformer getTransformer(String xslt, SolrQueryRequest request) throws IOException {
// not the cleanest way to achieve this
// no need to synchronize access to context, right?
// Nothing else happens with it at the same time
final Map<Object,Object> ctx = request.getContext();
Transformer result = (Transformer)ctx.get(CONTEXT_TRANSFORMER_KEY);
if(result==null) {
SolrConfig solrConfig = request.getCore().getSolrConfig();
result = TransformerProvider.instance.getTransformer(solrConfig, xslt, xsltCacheLifetimeSeconds);
result.setErrorListener(xmllog);
ctx.put(CONTEXT_TRANSFORMER_KEY,result);
}
return result;
}
// in core/src/java/org/apache/solr/handler/loader/XMLLoader.java
void processUpdate(SolrQueryRequest req, UpdateRequestProcessor processor, XMLStreamReader parser)
throws XMLStreamException, IOException, FactoryConfigurationError,
InstantiationException, IllegalAccessException,
TransformerConfigurationException {
AddUpdateCommand addCmd = null;
SolrParams params = req.getParams();
while (true) {
int event = parser.next();
switch (event) {
case XMLStreamConstants.END_DOCUMENT:
parser.close();
return;
case XMLStreamConstants.START_ELEMENT:
String currTag = parser.getLocalName();
if (currTag.equals(UpdateRequestHandler.ADD)) {
log.trace("SolrCore.update(add)");
addCmd = new AddUpdateCommand(req);
// First look for commitWithin parameter on the request, will be overwritten for individual <add>'s
addCmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1);
addCmd.overwrite = params.getBool(UpdateParams.OVERWRITE, true);
for (int i = 0; i < parser.getAttributeCount(); i++) {
String attrName = parser.getAttributeLocalName(i);
String attrVal = parser.getAttributeValue(i);
if (UpdateRequestHandler.OVERWRITE.equals(attrName)) {
addCmd.overwrite = StrUtils.parseBoolean(attrVal);
} else if (UpdateRequestHandler.COMMIT_WITHIN.equals(attrName)) {
addCmd.commitWithin = Integer.parseInt(attrVal);
} else {
log.warn("Unknown attribute id in add:" + attrName);
}
}
} else if ("doc".equals(currTag)) {
if(addCmd != null) {
log.trace("adding doc...");
addCmd.clear();
addCmd.solrDoc = readDoc(parser);
processor.processAdd(addCmd);
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unexpected <doc> tag without an <add> tag surrounding it.");
}
} else if (UpdateRequestHandler.COMMIT.equals(currTag) || UpdateRequestHandler.OPTIMIZE.equals(currTag)) {
log.trace("parsing " + currTag);
CommitUpdateCommand cmd = new CommitUpdateCommand(req, UpdateRequestHandler.OPTIMIZE.equals(currTag));
ModifiableSolrParams mp = new ModifiableSolrParams();
for (int i = 0; i < parser.getAttributeCount(); i++) {
String attrName = parser.getAttributeLocalName(i);
String attrVal = parser.getAttributeValue(i);
mp.set(attrName, attrVal);
}
RequestHandlerUtils.validateCommitParams(mp);
SolrParams p = SolrParams.wrapDefaults(mp, req.getParams()); // default to the normal request params for commit options
RequestHandlerUtils.updateCommit(cmd, p);
processor.processCommit(cmd);
} // end commit
else if (UpdateRequestHandler.ROLLBACK.equals(currTag)) {
log.trace("parsing " + currTag);
RollbackUpdateCommand cmd = new RollbackUpdateCommand(req);
processor.processRollback(cmd);
} // end rollback
else if (UpdateRequestHandler.DELETE.equals(currTag)) {
log.trace("parsing delete");
processDelete(req, processor, parser);
} // end delete
break;
}
}
}
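// Illustrative XML handled by processUpdate (standard Solr update syntax;
// field values here are made up):
//   <add commitWithin="10000" overwrite="false">
//     <doc><field name="id">1</field><field name="title">hello</field></doc>
//   </add>
//   <commit waitSearcher="false"/>
//   <rollback/>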
// in core/src/java/org/apache/solr/handler/loader/XMLLoader.java
void processDelete(SolrQueryRequest req, UpdateRequestProcessor processor, XMLStreamReader parser) throws XMLStreamException, IOException {
// Parse the command
DeleteUpdateCommand deleteCmd = new DeleteUpdateCommand(req);
// First look for commitWithin parameter on the request, will be overwritten for individual <delete>'s
SolrParams params = req.getParams();
deleteCmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1);
for (int i = 0; i < parser.getAttributeCount(); i++) {
String attrName = parser.getAttributeLocalName(i);
String attrVal = parser.getAttributeValue(i);
if ("fromPending".equals(attrName)) {
// deprecated
} else if ("fromCommitted".equals(attrName)) {
// deprecated
} else if (UpdateRequestHandler.COMMIT_WITHIN.equals(attrName)) {
deleteCmd.commitWithin = Integer.parseInt(attrVal);
} else {
log.warn("unexpected attribute delete/@" + attrName);
}
}
StringBuilder text = new StringBuilder();
while (true) {
int event = parser.next();
switch (event) {
case XMLStreamConstants.START_ELEMENT:
String mode = parser.getLocalName();
if (!("id".equals(mode) || "query".equals(mode))) {
log.warn("unexpected XML tag /delete/" + mode);
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"unexpected XML tag /delete/" + mode);
}
text.setLength(0);
if ("id".equals(mode)) {
for (int i = 0; i < parser.getAttributeCount(); i++) {
String attrName = parser.getAttributeLocalName(i);
String attrVal = parser.getAttributeValue(i);
if (UpdateRequestHandler.VERSION.equals(attrName)) {
deleteCmd.setVersion(Long.parseLong(attrVal));
}
}
}
break;
case XMLStreamConstants.END_ELEMENT:
String currTag = parser.getLocalName();
if ("id".equals(currTag)) {
deleteCmd.setId(text.toString());
} else if ("query".equals(currTag)) {
deleteCmd.setQuery(text.toString());
} else if ("delete".equals(currTag)) {
return;
} else {
log.warn("unexpected XML tag /delete/" + currTag);
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"unexpected XML tag /delete/" + currTag);
}
processor.processDelete(deleteCmd);
deleteCmd.clear();
break;
// Add everything to the text
case XMLStreamConstants.SPACE:
case XMLStreamConstants.CDATA:
case XMLStreamConstants.CHARACTERS:
text.append(parser.getText());
break;
}
}
}
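// Illustrative XML handled by processDelete (values are made up):
//   <delete commitWithin="5000">
//     <id version="12345">doc-1</id>
//     <query>category:obsolete</query>
//   </delete>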
// in core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
private void parseAndLoadDocs(final SolrQueryRequest req, SolrQueryResponse rsp, InputStream stream,
final UpdateRequestProcessor processor) throws IOException {
UpdateRequest update = null;
JavaBinUpdateRequestCodec.StreamingUpdateHandler handler = new JavaBinUpdateRequestCodec.StreamingUpdateHandler() {
private AddUpdateCommand addCmd = null;
@Override
public void update(SolrInputDocument document, UpdateRequest updateRequest) {
if (document == null) {
// Perhaps commit from the parameters
try {
RequestHandlerUtils.handleCommit(req, processor, updateRequest.getParams(), false);
RequestHandlerUtils.handleRollback(req, processor, updateRequest.getParams(), false);
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ERROR handling commit/rollback");
}
return;
}
if (addCmd == null) {
addCmd = getAddCommand(req, updateRequest.getParams());
}
addCmd.solrDoc = document;
try {
processor.processAdd(addCmd);
addCmd.clear();
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "ERROR adding document " + document);
}
}
};
FastInputStream in = FastInputStream.wrap(stream);
for (; ; ) {
try {
update = new JavaBinUpdateRequestCodec().unmarshal(in, handler);
} catch (EOFException e) {
break; // this is expected
} catch (Exception e) {
log.error("Exception while processing update request", e);
break;
}
if (update.getDeleteById() != null || update.getDeleteQuery() != null) {
delete(req, update, processor);
}
}
}
// in core/src/java/org/apache/solr/handler/loader/JavabinLoader.java
private void delete(SolrQueryRequest req, UpdateRequest update, UpdateRequestProcessor processor) throws IOException {
SolrParams params = update.getParams();
DeleteUpdateCommand delcmd = new DeleteUpdateCommand(req);
if(params != null) {
delcmd.commitWithin = params.getInt(UpdateParams.COMMIT_WITHIN, -1);
}
if(update.getDeleteById() != null) {
for (String s : update.getDeleteById()) {
delcmd.id = s;
processor.processDelete(delcmd);
}
delcmd.id = null;
}
if(update.getDeleteQuery() != null) {
for (String s : update.getDeleteQuery()) {
delcmd.query = s;
processor.processDelete(delcmd);
}
}
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
DeleteUpdateCommand parseDelete() throws IOException {
assertNextEvent( JSONParser.OBJECT_START );
DeleteUpdateCommand cmd = new DeleteUpdateCommand(req);
cmd.commitWithin = commitWithin;
while( true ) {
int ev = parser.nextEvent();
if( ev == JSONParser.STRING ) {
String key = parser.getString();
if( parser.wasKey() ) {
if( "id".equals( key ) ) {
cmd.setId(parser.getString());
}
else if( "query".equals(key) ) {
cmd.setQuery(parser.getString());
}
else if( "commitWithin".equals(key) ) {
cmd.commitWithin = Integer.parseInt(parser.getString());
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown key: "+key+" ["+parser.getPosition()+"]" );
}
}
else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"invalid string: " + key
+" at ["+parser.getPosition()+"]" );
}
}
else if( ev == JSONParser.OBJECT_END ) {
if( cmd.getId() == null && cmd.getQuery() == null ) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Missing id or query for delete ["+parser.getPosition()+"]" );
}
return cmd;
}
else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Got: "+JSONParser.getEventString( ev )
+" at ["+parser.getPosition()+"]" );
}
}
}
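// Illustrative JSON handled by parseDelete (values are made up; note that
// commitWithin is parsed from a string here):
//   {"delete": {"id": "doc-1", "commitWithin": "5000"}}
//   {"delete": {"query": "category:obsolete"}}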
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
RollbackUpdateCommand parseRollback() throws IOException {
assertNextEvent( JSONParser.OBJECT_START );
assertNextEvent( JSONParser.OBJECT_END );
return new RollbackUpdateCommand(req);
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
void parseCommitOptions(CommitUpdateCommand cmd ) throws IOException
{
assertNextEvent( JSONParser.OBJECT_START );
final Map<String,Object> map = (Map)ObjectBuilder.getVal(parser);
// SolrParams currently expects string values...
SolrParams p = new SolrParams() {
@Override
public String get(String param) {
Object o = map.get(param);
return o == null ? null : o.toString();
}
@Override
public String[] getParams(String param) {
return new String[]{get(param)};
}
@Override
public Iterator<String> getParameterNamesIterator() {
return map.keySet().iterator();
}
};
RequestHandlerUtils.validateCommitParams(p);
p = SolrParams.wrapDefaults(p, req.getParams()); // default to the normal request params for commit options
RequestHandlerUtils.updateCommit(cmd, p);
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
AddUpdateCommand parseAdd() throws IOException
{
AddUpdateCommand cmd = new AddUpdateCommand(req);
cmd.commitWithin = commitWithin;
cmd.overwrite = overwrite;
float boost = 1.0f;
while( true ) {
int ev = parser.nextEvent();
if( ev == JSONParser.STRING ) {
if( parser.wasKey() ) {
String key = parser.getString();
if( "doc".equals( key ) ) {
if( cmd.solrDoc != null ) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "multiple docs in same add command" );
}
ev = assertNextEvent( JSONParser.OBJECT_START );
cmd.solrDoc = parseDoc( ev );
}
else if( UpdateRequestHandler.OVERWRITE.equals( key ) ) {
cmd.overwrite = parser.getBoolean(); // reads next boolean
}
else if( UpdateRequestHandler.COMMIT_WITHIN.equals( key ) ) {
cmd.commitWithin = (int)parser.getLong();
}
else if( "boost".equals( key ) ) {
boost = Float.parseFloat( parser.getNumberChars().toString() );
}
else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown key: "+key+" ["+parser.getPosition()+"]" );
}
}
else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Should be a key "
+" at ["+parser.getPosition()+"]" );
}
}
else if( ev == JSONParser.OBJECT_END ) {
if( cmd.solrDoc == null ) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,"missing solr document. "+parser.getPosition() );
}
cmd.solrDoc.setDocumentBoost( boost );
return cmd;
}
else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Got: "+JSONParser.getEventString( ev )
+" at ["+parser.getPosition()+"]" );
}
}
}
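// Illustrative JSON handled by parseAdd (values are made up):
//   {"add": {"doc": {"id": "1", "title": "hello"},
//            "boost": 2.0, "overwrite": false, "commitWithin": 10000}}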
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
void handleAdds() throws IOException
{
while( true ) {
AddUpdateCommand cmd = new AddUpdateCommand(req);
cmd.commitWithin = commitWithin;
cmd.overwrite = overwrite;
int ev = parser.nextEvent();
if (ev == JSONParser.ARRAY_END) break;
assertEvent(ev, JSONParser.OBJECT_START);
cmd.solrDoc = parseDoc(ev);
processor.processAdd(cmd);
}
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
int assertNextEvent(int expected ) throws IOException
{
int got = parser.nextEvent();
assertEvent(got, expected);
return got;
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private SolrInputDocument parseDoc(int ev) throws IOException {
assert ev == JSONParser.OBJECT_START;
SolrInputDocument sdoc = new SolrInputDocument();
for (;;) {
SolrInputField sif = parseField();
if (sif == null) return sdoc;
SolrInputField prev = sdoc.put(sif.getName(), sif);
if (prev != null) {
// blech - repeated keys
sif.addValue(prev.getValue(), prev.getBoost());
}
}
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private SolrInputField parseField() throws IOException {
int ev = parser.nextEvent();
if (ev == JSONParser.OBJECT_END) {
return null;
}
String fieldName = parser.getString();
SolrInputField sif = new SolrInputField(fieldName);
parseFieldValue(sif);
return sif;
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private void parseFieldValue(SolrInputField sif) throws IOException {
int ev = parser.nextEvent();
if (ev == JSONParser.OBJECT_START) {
parseExtendedFieldValue(sif, ev);
} else {
Object val = parseNormalFieldValue(ev);
sif.setValue(val, 1.0f);
}
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private void parseExtendedFieldValue(SolrInputField sif, int ev) throws IOException {
assert ev == JSONParser.OBJECT_START;
float boost = 1.0f;
Object normalFieldValue = null;
Map<String, Object> extendedInfo = null;
for (;;) {
ev = parser.nextEvent();
switch (ev) {
case JSONParser.STRING:
String label = parser.getString();
if ("boost".equals(label)) {
ev = parser.nextEvent();
if( ev != JSONParser.NUMBER &&
ev != JSONParser.LONG &&
ev != JSONParser.BIGNUMBER ) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "boost should have number! "+JSONParser.getEventString(ev) );
}
boost = (float)parser.getDouble();
} else if ("value".equals(label)) {
normalFieldValue = parseNormalFieldValue(parser.nextEvent());
} else {
// If we encounter other unknown map keys, then use a map
if (extendedInfo == null) {
extendedInfo = new HashMap<String, Object>(2);
}
// for now, the only extended info will be field values
// we could either store this as an Object or a SolrInputField
Object val = parseNormalFieldValue(parser.nextEvent());
extendedInfo.put(label, val);
}
break;
case JSONParser.OBJECT_END:
if (extendedInfo != null) {
if (normalFieldValue != null) {
extendedInfo.put("value",normalFieldValue);
}
sif.setValue(extendedInfo, boost);
} else {
sif.setValue(normalFieldValue, boost);
}
return;
default:
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing JSON extended field value. Unexpected "+JSONParser.getEventString(ev) );
}
}
}
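// Illustrative JSON handled by parseExtendedFieldValue (values are made up):
//   {"title": {"value": "hello", "boost": 1.5}}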
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private Object parseNormalFieldValue(int ev) throws IOException {
if (ev == JSONParser.ARRAY_START) {
List<Object> val = parseArrayFieldValue(ev);
return val;
} else {
Object val = parseSingleFieldValue(ev);
return val;
}
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private Object parseSingleFieldValue(int ev) throws IOException {
switch (ev) {
case JSONParser.STRING:
return parser.getString();
case JSONParser.LONG:
case JSONParser.NUMBER:
case JSONParser.BIGNUMBER:
return parser.getNumberChars().toString();
case JSONParser.BOOLEAN:
return Boolean.toString(parser.getBoolean()); // for legacy reasons, single values are expected to be strings
case JSONParser.NULL:
parser.getNull();
return null;
case JSONParser.ARRAY_START:
return parseArrayFieldValue(ev);
default:
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing JSON field value. Unexpected "+JSONParser.getEventString(ev) );
}
}
// in core/src/java/org/apache/solr/handler/loader/JsonLoader.java
private List<Object> parseArrayFieldValue(int ev) throws IOException {
assert ev == JSONParser.ARRAY_START;
ArrayList<Object> lst = new ArrayList<Object>(2);
for (;;) {
ev = parser.nextEvent();
if (ev == JSONParser.ARRAY_END) {
return lst;
}
Object val = parseSingleFieldValue(ev);
lst.add(val);
}
}
// in core/src/java/org/apache/solr/handler/loader/CSVLoader.java
@Override
void addDoc(int line, String[] vals) throws IOException {
templateAdd.clear();
SolrInputDocument doc = new SolrInputDocument();
doAdd(line, vals, doc, templateAdd);
}
// in core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java
@Override
public void load(SolrQueryRequest req, SolrQueryResponse rsp, ContentStream stream, UpdateRequestProcessor processor) throws IOException {
errHeader = "CSVLoader: input=" + stream.getSourceInfo();
Reader reader = null;
try {
reader = stream.getReader();
if (skipLines>0) {
if (!(reader instanceof BufferedReader)) {
reader = new BufferedReader(reader);
}
BufferedReader r = (BufferedReader)reader;
for (int i=0; i<skipLines; i++) {
r.readLine();
}
}
CSVParser parser = new CSVParser(reader, strategy);
// parse the fieldnames from the header of the file
if (fieldnames==null) {
fieldnames = parser.getLine();
if (fieldnames==null) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Expected fieldnames in CSV input");
}
prepareFields();
}
// read the rest of the CSV file
for(;;) {
int line = parser.getLineNumber(); // for error reporting in MT mode
String[] vals = null;
try {
vals = parser.getLine();
} catch (IOException e) {
//Catch the exception and rethrow it with more line information
input_err("can't read line: " + line, null, line, e);
}
if (vals==null) break;
if (vals.length != fields.length) {
input_err("expected "+fields.length+" values but got "+vals.length, vals, line);
}
addDoc(line,vals);
}
} finally{
if (reader != null) {
IOUtils.closeQuietly(reader);
}
}
}
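// Illustrative CSV input for load() (values made up): unless field names
// were supplied as a parameter, the first non-skipped line is the header:
//   id,title,price
//   1,hello,9.99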
// in core/src/java/org/apache/solr/handler/loader/CSVLoaderBase.java
void doAdd(int line, String[] vals, SolrInputDocument doc, AddUpdateCommand template) throws IOException {
// the line number is passed simply for error reporting in MT mode.
// first, create the lucene document
for (int i=0; i<vals.length; i++) {
if (fields[i]==null) continue; // ignore this field
String val = vals[i];
adders[i].add(doc, line, i, val);
}
// add any literals
for (SchemaField sf : literals.keySet()) {
String fn = sf.getName();
String val = literals.get(sf);
doc.addField(fn, val);
}
template.solrDoc = doc;
processor.processAdd(template);
}
// in core/src/java/org/apache/solr/handler/ReplicationHandler.java
private void registerFileStreamResponseWriter() {
core.registerResponseWriter(FILE_STREAM, new BinaryQueryResponseWriter() {
public void write(OutputStream out, SolrQueryRequest request, SolrQueryResponse resp) throws IOException {
FileStream stream = (FileStream) resp.getValues().get(FILE_STREAM);
stream.write(out);
}
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
throw new RuntimeException("This is a binary writer , Cannot write to a characterstream");
}
public String getContentType(SolrQueryRequest request, SolrQueryResponse response) {
return "application/octet-stream";
}
public void init(NamedList args) { /*no op*/ }
});
}
// in core/src/java/org/apache/solr/handler/ReplicationHandler.java
public void write(OutputStream out, SolrQueryRequest request, SolrQueryResponse resp) throws IOException {
FileStream stream = (FileStream) resp.getValues().get(FILE_STREAM);
stream.write(out);
}
// in core/src/java/org/apache/solr/handler/ReplicationHandler.java
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
throw new RuntimeException("This is a binary writer , Cannot write to a characterstream");
}
// in core/src/java/org/apache/solr/handler/ReplicationHandler.java
public void write(OutputStream out) throws IOException {
String fileName = params.get(FILE);
String cfileName = params.get(CONF_FILE_SHORT);
String sOffset = params.get(OFFSET);
String sLen = params.get(LEN);
String compress = params.get(COMPRESSION);
String sChecksum = params.get(CHECKSUM);
String sGen = params.get(GENERATION);
if (sGen != null) indexGen = Long.parseLong(sGen);
if (Boolean.parseBoolean(compress)) {
fos = new FastOutputStream(new DeflaterOutputStream(out));
} else {
fos = new FastOutputStream(out);
}
FileInputStream inputStream = null;
int packetsWritten = 0;
try {
long offset = -1;
int len = -1;
//check if checksum is requested
boolean useChecksum = Boolean.parseBoolean(sChecksum);
if (sOffset != null)
offset = Long.parseLong(sOffset);
if (sLen != null)
len = Integer.parseInt(sLen);
if (fileName == null && cfileName == null) {
//no filename do nothing
writeNothing();
}
File file = null;
if (cfileName != null) {
//if it is a conf file, read from the config directory
file = new File(core.getResourceLoader().getConfigDir(), cfileName);
} else {
//else read from the index directory
file = new File(core.getIndexDir(), fileName);
}
if (file.exists() && file.canRead()) {
inputStream = new FileInputStream(file);
FileChannel channel = inputStream.getChannel();
//if offset is mentioned move the pointer to that point
if (offset != -1)
channel.position(offset);
byte[] buf = new byte[(len == -1 || len > PACKET_SZ) ? PACKET_SZ : len];
Checksum checksum = null;
if (useChecksum)
checksum = new Adler32();
ByteBuffer bb = ByteBuffer.wrap(buf);
while (true) {
bb.clear();
long bytesRead = channel.read(bb);
if (bytesRead <= 0) {
writeNothing();
fos.close();
break;
}
fos.writeInt((int) bytesRead);
if (useChecksum) {
checksum.reset();
checksum.update(buf, 0, (int) bytesRead);
fos.writeLong(checksum.getValue());
}
fos.write(buf, 0, (int) bytesRead);
fos.flush();
if (indexGen != null && (packetsWritten % 5 == 0)) {
//after every 5 packets reserve the commitpoint for some time
delPolicy.setReserveDuration(indexGen, reserveCommitDuration);
}
packetsWritten++;
}
} else {
writeNothing();
}
} catch (IOException e) {
LOG.warn("Exception while writing response for params: " + params, e);
} finally {
IOUtils.closeQuietly(inputStream);
}
}
// in core/src/java/org/apache/solr/handler/ReplicationHandler.java
private void writeNothing() throws IOException {
fos.writeInt(0);
fos.flush();
}
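// Packet framing used by the FILE_STREAM response (derived from write()
// above): each packet is [int length][optional long Adler32 checksum]
// [length bytes], repeated; a packet with length 0 (writeNothing)
// terminates the stream. The checksum long is present only when
// CHECKSUM=true was requested.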
// in core/src/java/org/apache/solr/handler/RequestHandlerUtils.java
public static boolean handleCommit(SolrQueryRequest req, UpdateRequestProcessor processor, SolrParams params, boolean force ) throws IOException
{
if( params == null) {
params = new MapSolrParams( new HashMap<String, String>() );
}
boolean optimize = params.getBool( UpdateParams.OPTIMIZE, false );
boolean commit = params.getBool( UpdateParams.COMMIT, false );
boolean softCommit = params.getBool( UpdateParams.SOFT_COMMIT, false );
boolean prepareCommit = params.getBool( UpdateParams.PREPARE_COMMIT, false );
if( optimize || commit || softCommit || prepareCommit || force ) {
CommitUpdateCommand cmd = new CommitUpdateCommand(req, optimize );
updateCommit(cmd, params);
processor.processCommit( cmd );
return true;
}
return false;
}
// in core/src/java/org/apache/solr/handler/RequestHandlerUtils.java
public static void updateCommit(CommitUpdateCommand cmd, SolrParams params) throws IOException
{
if( params == null ) return;
cmd.openSearcher = params.getBool( UpdateParams.OPEN_SEARCHER, cmd.openSearcher );
cmd.waitSearcher = params.getBool( UpdateParams.WAIT_SEARCHER, cmd.waitSearcher );
cmd.softCommit = params.getBool( UpdateParams.SOFT_COMMIT, cmd.softCommit );
cmd.expungeDeletes = params.getBool( UpdateParams.EXPUNGE_DELETES, cmd.expungeDeletes );
cmd.maxOptimizeSegments = params.getInt( UpdateParams.MAX_OPTIMIZE_SEGMENTS, cmd.maxOptimizeSegments );
cmd.prepareCommit = params.getBool( UpdateParams.PREPARE_COMMIT, cmd.prepareCommit );
}
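// Illustrative request exercising these options (assuming the usual
// literal names of the UpdateParams constants):
//   /update?commit=true&waitSearcher=false&softCommit=true&expungeDeletes=true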
// in core/src/java/org/apache/solr/handler/RequestHandlerUtils.java
public static boolean handleRollback(SolrQueryRequest req, UpdateRequestProcessor processor, SolrParams params, boolean force ) throws IOException
{
if( params == null ) {
params = new MapSolrParams( new HashMap<String, String>() );
}
boolean rollback = params.getBool( UpdateParams.ROLLBACK, false );
if( rollback || force ) {
RollbackUpdateCommand cmd = new RollbackUpdateCommand(req);
processor.processRollback( cmd );
return true;
}
return false;
}
// in core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
public DocListAndSet getMoreLikeThis( int id, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
{
Document doc = reader.document(id);
rawMLTQuery = mlt.like(id);
boostedMLTQuery = getBoostedQuery( rawMLTQuery );
if( terms != null ) {
fillInterestingTermsFromMLTQuery( rawMLTQuery, terms );
}
// exclude current document from results
realMLTQuery = new BooleanQuery();
realMLTQuery.add(boostedMLTQuery, BooleanClause.Occur.MUST);
realMLTQuery.add(
new TermQuery(new Term(uniqueKeyField.getName(), uniqueKeyField.getType().storedToIndexed(doc.getField(uniqueKeyField.getName())))),
BooleanClause.Occur.MUST_NOT);
DocListAndSet results = new DocListAndSet();
if (this.needDocSet) {
results = searcher.getDocListAndSet(realMLTQuery, filters, null, start, rows, flags);
} else {
results.docList = searcher.getDocList(realMLTQuery, filters, null, start, rows, flags);
}
return results;
}
// in core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
public DocListAndSet getMoreLikeThis( Reader reader, int start, int rows, List<Query> filters, List<InterestingTerm> terms, int flags ) throws IOException
{
// analyzing with the first field: previous (stupid) behavior
rawMLTQuery = mlt.like(reader, mlt.getFieldNames()[0]);
boostedMLTQuery = getBoostedQuery( rawMLTQuery );
if( terms != null ) {
fillInterestingTermsFromMLTQuery( boostedMLTQuery, terms );
}
DocListAndSet results = new DocListAndSet();
if (this.needDocSet) {
results = searcher.getDocListAndSet( boostedMLTQuery, filters, null, start, rows, flags);
} else {
results.docList = searcher.getDocList( boostedMLTQuery, filters, null, start, rows, flags);
}
return results;
}
// in core/src/java/org/apache/solr/handler/MoreLikeThisHandler.java
@Deprecated
public NamedList<DocList> getMoreLikeThese( DocList docs, int rows, int flags ) throws IOException
{
IndexSchema schema = searcher.getSchema();
NamedList<DocList> mlt = new SimpleOrderedMap<DocList>();
DocIterator iterator = docs.iterator();
while( iterator.hasNext() ) {
int id = iterator.nextDoc();
DocListAndSet sim = getMoreLikeThis( id, 0, rows, null, null, flags );
String name = schema.printableUniqueKey( reader.document( id ) );
mlt.add(name, sim.docList);
}
return mlt;
}
// in core/src/java/org/apache/solr/handler/component/TermsComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
if (params.getBool(TermsParams.TERMS, false)) {
rb.doTerms = true;
}
// TODO: temporary... this should go in a different component.
String shards = params.get(ShardParams.SHARDS);
if (shards != null) {
rb.isDistrib = true;
if (params.get(ShardParams.SHARDS_QT) == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shards.qt parameter specified");
}
List<String> lst = StrUtils.splitSmart(shards, ",", true);
rb.shards = lst.toArray(new String[lst.size()]);
}
}
// in core/src/java/org/apache/solr/handler/component/TermsComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
if (!params.getBool(TermsParams.TERMS, false)) return;
String[] fields = params.getParams(TermsParams.TERMS_FIELD);
NamedList<Object> termsResult = new SimpleOrderedMap<Object>();
rb.rsp.add("terms", termsResult);
if (fields == null || fields.length==0) return;
int limit = params.getInt(TermsParams.TERMS_LIMIT, 10);
if (limit < 0) {
limit = Integer.MAX_VALUE;
}
String lowerStr = params.get(TermsParams.TERMS_LOWER);
String upperStr = params.get(TermsParams.TERMS_UPPER);
boolean upperIncl = params.getBool(TermsParams.TERMS_UPPER_INCLUSIVE, false);
boolean lowerIncl = params.getBool(TermsParams.TERMS_LOWER_INCLUSIVE, true);
boolean sort = !TermsParams.TERMS_SORT_INDEX.equals(
params.get(TermsParams.TERMS_SORT, TermsParams.TERMS_SORT_COUNT));
int freqmin = params.getInt(TermsParams.TERMS_MINCOUNT, 1);
int freqmax = params.getInt(TermsParams.TERMS_MAXCOUNT, UNLIMITED_MAX_COUNT);
if (freqmax<0) {
freqmax = Integer.MAX_VALUE;
}
String prefix = params.get(TermsParams.TERMS_PREFIX_STR);
String regexp = params.get(TermsParams.TERMS_REGEXP_STR);
Pattern pattern = regexp != null ? Pattern.compile(regexp, resolveRegexpFlags(params)) : null;
boolean raw = params.getBool(TermsParams.TERMS_RAW, false);
final AtomicReader indexReader = rb.req.getSearcher().getAtomicReader();
Fields lfields = indexReader.fields();
for (String field : fields) {
NamedList<Integer> fieldTerms = new NamedList<Integer>();
termsResult.add(field, fieldTerms);
Terms terms = lfields == null ? null : lfields.terms(field);
if (terms == null) {
// no terms for this field
continue;
}
FieldType ft = raw ? null : rb.req.getSchema().getFieldTypeNoEx(field);
if (ft==null) ft = new StrField();
// prefix must currently be text
BytesRef prefixBytes = prefix==null ? null : new BytesRef(prefix);
BytesRef upperBytes = null;
if (upperStr != null) {
upperBytes = new BytesRef();
ft.readableToIndexed(upperStr, upperBytes);
}
BytesRef lowerBytes;
if (lowerStr == null) {
// If no lower bound was specified, use the prefix
lowerBytes = prefixBytes;
} else {
if (raw) {
// TODO: how to handle binary? perhaps we don't for "raw"... or if the field exists
// perhaps we detect if the FieldType is non-character and expect hex if so?
lowerBytes = new BytesRef(lowerStr);
} else {
lowerBytes = new BytesRef();
ft.readableToIndexed(lowerStr, lowerBytes);
}
}
TermsEnum termsEnum = terms.iterator(null);
BytesRef term = null;
if (lowerBytes != null) {
if (termsEnum.seekCeil(lowerBytes, true) == TermsEnum.SeekStatus.END) {
termsEnum = null;
} else {
term = termsEnum.term();
//Only advance the enum if we are excluding the lower bound and the lower Term actually matches
if (!lowerIncl && term.equals(lowerBytes)) {
term = termsEnum.next();
}
}
} else {
// position termsEnum on first term
term = termsEnum.next();
}
int i = 0;
BoundedTreeSet<CountPair<BytesRef, Integer>> queue = (sort ? new BoundedTreeSet<CountPair<BytesRef, Integer>>(limit) : null);
CharsRef external = new CharsRef();
while (term != null && (i<limit || sort)) {
boolean externalized = false; // did we fill in "external" yet for this term?
// stop if the prefix doesn't match
if (prefixBytes != null && !StringHelper.startsWith(term, prefixBytes)) break;
if (pattern != null) {
// indexed text or external text?
// TODO: support "raw" mode?
ft.indexedToReadable(term, external);
externalized = true;
if (!pattern.matcher(external).matches()) {
term = termsEnum.next();
continue;
}
}
if (upperBytes != null) {
int upperCmp = term.compareTo(upperBytes);
// stop if we are past the upper term, or equal to it when the upper bound is exclusive
if (upperCmp>0 || (upperCmp==0 && !upperIncl)) break;
}
// This is a good term in the range. Check if mincount/maxcount conditions are satisfied.
int docFreq = termsEnum.docFreq();
if (docFreq >= freqmin && docFreq <= freqmax) {
// add the term to the list
if (sort) {
queue.add(new CountPair<BytesRef, Integer>(BytesRef.deepCopyOf(term), docFreq));
} else {
// TODO: handle raw somehow
if (!externalized) {
ft.indexedToReadable(term, external);
}
fieldTerms.add(external.toString(), docFreq);
i++;
}
}
term = termsEnum.next();
}
if (sort) {
for (CountPair<BytesRef, Integer> item : queue) {
if (i >= limit) break;
ft.indexedToReadable(item.key, external);
fieldTerms.add(external.toString(), item.val);
i++;
}
}
}
}
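// Illustrative sketch (not part of the Solr source): the request parameters that
// drive the process() method above. Field and bound values are placeholders; the
// terms.* names match the TermsParams constants read by the code.
import org.apache.solr.common.params.ModifiableSolrParams;

public class TermsRequestExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("terms", true);         // enables the component (rb.doTerms)
    p.set("terms.fl", "name");    // field(s) whose terms to enumerate
    p.set("terms.prefix", "ap");  // only terms starting with "ap"
    p.set("terms.mincount", 2);   // skip terms with docFreq < 2
    p.set("terms.limit", 10);     // return at most 10 terms
    p.set("terms.sort", "count"); // count (default) or index order
    System.out.println(p);        // e.g. terms=true&terms.fl=name&...
  }
}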
// in core/src/java/org/apache/solr/handler/component/TermsComponent.java
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
if (!rb.doTerms) {
return ResponseBuilder.STAGE_DONE;
}
if (rb.stage == ResponseBuilder.STAGE_EXECUTE_QUERY) {
TermsHelper th = rb._termsHelper;
if (th == null) {
th = rb._termsHelper = new TermsHelper();
th.init(rb.req.getParams());
}
ShardRequest sreq = createShardQuery(rb.req.getParams());
rb.addRequest(this, sreq);
}
if (rb.stage < ResponseBuilder.STAGE_EXECUTE_QUERY) {
return ResponseBuilder.STAGE_EXECUTE_QUERY;
} else {
return ResponseBuilder.STAGE_DONE;
}
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
private Map<String, ElevationObj> loadElevationMap(Config cfg) throws IOException {
XPath xpath = XPathFactory.newInstance().newXPath();
Map<String, ElevationObj> map = new HashMap<String, ElevationObj>();
NodeList nodes = (NodeList) cfg.evaluate("elevate/query", XPathConstants.NODESET);
for (int i = 0; i < nodes.getLength(); i++) {
Node node = nodes.item(i);
String qstr = DOMUtil.getAttr(node, "text", "missing query 'text'");
NodeList children = null;
try {
children = (NodeList) xpath.evaluate("doc", node, XPathConstants.NODESET);
} catch (XPathExpressionException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"query requires '<doc .../>' child");
}
ArrayList<String> include = new ArrayList<String>();
ArrayList<String> exclude = new ArrayList<String>();
for (int j = 0; j < children.getLength(); j++) {
Node child = children.item(j);
String id = DOMUtil.getAttr(child, "id", "missing 'id'");
String e = DOMUtil.getAttr(child, EXCLUDE, null);
if (e != null) {
if (Boolean.valueOf(e)) {
exclude.add(id);
continue;
}
}
include.add(id);
}
ElevationObj elev = new ElevationObj(qstr, include, exclude);
if (map.containsKey(elev.analyzed)) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Boosting query defined twice for query: '" + elev.text + "' (" + elev.analyzed + "')");
}
map.put(elev.analyzed, elev);
}
return map;
}
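// Illustrative sketch (not part of the Solr source): the XML shape that
// loadElevationMap() above parses -- elevate/query nodes with a "text" attribute
// and <doc> children whose exclude attribute (assuming the EXCLUDE constant is the
// literal "exclude") routes the id to the exclude list. Query text and ids are
// placeholders.
String elevateXml =
    "<elevate>\n" +
    "  <query text=\"ipod\">\n" +
    "    <doc id=\"doc1\" />\n" +                  // elevated to the top
    "    <doc id=\"doc2\" exclude=\"true\" />\n" + // filtered out (or just marked)
    "  </query>\n" +
    "</elevate>";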
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
void setTopQueryResults(IndexReader reader, String query, String[] ids, String[] ex) throws IOException {
if (ids == null) {
ids = new String[0];
}
if (ex == null) {
ex = new String[0];
}
Map<String, ElevationObj> elev = elevationCache.get(reader);
if (elev == null) {
elev = new HashMap<String, ElevationObj>();
elevationCache.put(reader, elev);
}
ElevationObj obj = new ElevationObj(query, Arrays.asList(ids), Arrays.asList(ex));
elev.put(obj.analyzed, obj);
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
String getAnalyzedQuery(String query) throws IOException {
if (analyzer == null) {
return query;
}
StringBuilder norm = new StringBuilder();
TokenStream tokens = analyzer.tokenStream("", new StringReader(query));
tokens.reset();
CharTermAttribute termAtt = tokens.addAttribute(CharTermAttribute.class);
while (tokens.incrementToken()) {
norm.append(termAtt.buffer(), 0, termAtt.length());
}
tokens.end();
tokens.close();
return norm.toString();
}
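// Illustrative sketch (not part of the Solr source): getAnalyzedQuery() joins the
// analyzed tokens with no separator, so with a whitespace/lowercase analyzer both
// "AAA Bbb" and "aaa bbb" normalize to the same key, "aaabbb". A minimal pure-Java
// mimic of that behavior (the real method uses the component's configured analyzer):
public class AnalyzedQueryExample {
  static String normalize(String query) {
    StringBuilder norm = new StringBuilder();
    for (String tok : query.split("\\s+")) {
      norm.append(tok.toLowerCase(java.util.Locale.ROOT)); // stands in for the analyzer chain
    }
    return norm.toString();
  }
  public static void main(String[] args) {
    System.out.println(normalize("AAA Bbb")); // prints aaabbb
  }
}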
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
// A runtime param can skip
if (!params.getBool(QueryElevationParams.ENABLE, true)) {
return;
}
boolean exclusive = params.getBool(QueryElevationParams.EXCLUSIVE, false);
// A runtime parameter can alter the config value for forceElevation
boolean force = params.getBool(QueryElevationParams.FORCE_ELEVATION, forceElevation);
boolean markExcludes = params.getBool(QueryElevationParams.MARK_EXCLUDES, false);
Query query = rb.getQuery();
String qstr = rb.getQueryString();
if (query == null || qstr == null) {
return;
}
qstr = getAnalyzedQuery(qstr);
IndexReader reader = req.getSearcher().getIndexReader();
ElevationObj booster = null;
try {
booster = getElevationMap(reader, req.getCore()).get(qstr);
} catch (Exception ex) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Error loading elevation", ex);
}
if (booster != null) {
rb.req.getContext().put(BOOSTED, booster.ids);
// Change the query to insert forced documents
if (exclusive) {
//we only want these results
rb.setQuery(booster.include);
} else {
BooleanQuery newq = new BooleanQuery(true);
newq.add(query, BooleanClause.Occur.SHOULD);
newq.add(booster.include, BooleanClause.Occur.SHOULD);
if (booster.exclude != null) {
if (!markExcludes) {
for (TermQuery tq : booster.exclude) {
newq.add(new BooleanClause(tq, BooleanClause.Occur.MUST_NOT));
}
} else {
//we are only going to mark items as excluded, not actually exclude them. This works
//with the EditorialMarkerFactory
rb.req.getContext().put(EXCLUDED, booster.excludeIds);
for (TermQuery tq : booster.exclude) {
newq.add(new BooleanClause(tq, BooleanClause.Occur.SHOULD));
}
}
}
rb.setQuery(newq);
}
ElevationComparatorSource comparator = new ElevationComparatorSource(booster);
// if the sort is 'score desc' use a custom sorting method to
// insert documents in their proper place
SortSpec sortSpec = rb.getSortSpec();
if (sortSpec.getSort() == null) {
sortSpec.setSort(new Sort(new SortField[]{
new SortField("_elevate_", comparator, true),
new SortField(null, SortField.Type.SCORE, false)
}));
} else {
// Check if the sort is based on score
boolean modify = false;
SortField[] current = sortSpec.getSort().getSort();
ArrayList<SortField> sorts = new ArrayList<SortField>(current.length + 1);
// Perhaps force it to always sort by score
if (force && current[0].getType() != SortField.Type.SCORE) {
sorts.add(new SortField("_elevate_", comparator, true));
modify = true;
}
for (SortField sf : current) {
if (sf.getType() == SortField.Type.SCORE) {
sorts.add(new SortField("_elevate_", comparator, !sf.getReverse()));
modify = true;
}
sorts.add(sf);
}
if (modify) {
sortSpec.setSort(new Sort(sorts.toArray(new SortField[sorts.size()])));
}
}
}
// Add debugging information
if (rb.isDebug()) {
List<String> match = null;
if (booster != null) {
// Extract the elevated terms into a list
match = new ArrayList<String>(booster.priority.size());
for (Object o : booster.include.clauses()) {
TermQuery tq = (TermQuery) ((BooleanClause) o).getQuery();
match.add(tq.getTerm().text());
}
}
SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<Object>();
dbg.add("q", qstr);
dbg.add("match", match);
if (rb.isDebugQuery()) {
rb.addDebugInfo("queryBoosting", dbg);
}
}
}
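// Illustrative sketch (not part of the Solr source): the runtime parameters that
// prepare() above consults; names match the QueryElevationParams constants, and
// the query text is a placeholder.
import org.apache.solr.common.params.ModifiableSolrParams;

public class ElevationParamsExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("q", "ipod");             // must match an elevate.xml query text
    p.set("enableElevation", true); // a runtime param can skip elevation
    p.set("forceElevation", true);  // elevate even when sorting by non-score fields
    p.set("markExcludes", false);   // true: mark excluded docs instead of dropping them
    p.set("exclusive", false);      // true: return only the elevated docs
    System.out.println(p);
  }
}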
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
// Do nothing -- the real work is modifying the input query
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public FieldComparator<Integer> newComparator(String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
return new FieldComparator<Integer>() {
private final int[] values = new int[numHits];
private int bottomVal;
private TermsEnum termsEnum;
private DocsEnum docsEnum;
Set<String> seen = new HashSet<String>(elevations.ids.size());
@Override
public int compare(int slot1, int slot2) {
return values[slot1] - values[slot2]; // values will be small enough that there is no overflow concern
}
@Override
public void setBottom(int slot) {
bottomVal = values[slot];
}
private int docVal(int doc) throws IOException {
if (ordSet.size() > 0) {
int slot = ordSet.find(doc);
if (slot >= 0) {
BytesRef id = termValues[slot];
Integer prio = elevations.priority.get(id);
return prio == null ? 0 : prio.intValue();
}
}
return 0;
}
@Override
public int compareBottom(int doc) throws IOException {
return bottomVal - docVal(doc);
}
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = docVal(doc);
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// Convert the ids to Lucene doc ids; ordSet and termValues need to be the same size as the number of elevation docs we have
ordSet.clear();
Fields fields = context.reader().fields();
if (fields == null) return this;
Terms terms = fields.terms(idField);
if (terms == null) return this;
termsEnum = terms.iterator(termsEnum);
BytesRef term = new BytesRef();
Bits liveDocs = context.reader().getLiveDocs();
for (String id : elevations.ids) {
term.copyChars(id);
if (!seen.contains(id) && termsEnum.seekExact(term, false)) {
docsEnum = termsEnum.docs(liveDocs, docsEnum, false);
if (docsEnum != null) {
int docId = docsEnum.nextDoc();
if (docId == DocIdSetIterator.NO_MORE_DOCS ) continue; // must have been deleted
termValues[ordSet.put(docId)] = BytesRef.deepCopyOf(term);
seen.add(id);
assert docsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
}
}
}
return this;
}
@Override
public Integer value(int slot) {
return values[slot];
}
@Override
public int compareDocToValue(int doc, Integer valueObj) throws IOException {
final int value = valueObj.intValue();
final int docValue = docVal(doc);
return docValue - value; // values will be small enough that there is no overflow concern
}
};
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
private int docVal(int doc) throws IOException {
if (ordSet.size() > 0) {
int slot = ordSet.find(doc);
if (slot >= 0) {
BytesRef id = termValues[slot];
Integer prio = elevations.priority.get(id);
return prio == null ? 0 : prio.intValue();
}
}
return 0;
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public int compareBottom(int doc) throws IOException {
return bottomVal - docVal(doc);
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = docVal(doc);
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
// Convert the ids to Lucene doc ids; ordSet and termValues need to be the same size as the number of elevation docs we have
ordSet.clear();
Fields fields = context.reader().fields();
if (fields == null) return this;
Terms terms = fields.terms(idField);
if (terms == null) return this;
termsEnum = terms.iterator(termsEnum);
BytesRef term = new BytesRef();
Bits liveDocs = context.reader().getLiveDocs();
for (String id : elevations.ids) {
term.copyChars(id);
if (!seen.contains(id) && termsEnum.seekExact(term, false)) {
docsEnum = termsEnum.docs(liveDocs, docsEnum, false);
if (docsEnum != null) {
int docId = docsEnum.nextDoc();
if (docId == DocIdSetIterator.NO_MORE_DOCS ) continue; // must have been deleted
termValues[ordSet.put(docId)] = BytesRef.deepCopyOf(term);
seen.add(id);
assert docsEnum.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
}
}
}
return this;
}
// in core/src/java/org/apache/solr/handler/component/QueryElevationComponent.java
@Override
public int compareDocToValue(int doc, Integer valueObj) throws IOException {
final int value = valueObj.intValue();
final int docValue = docVal(doc);
return docValue - value; // values will be small enough that there is no overflow concern
}
// in core/src/java/org/apache/solr/handler/component/SpellCheckComponent.java
private Collection<Token> getTokens(String q, Analyzer analyzer) throws IOException {
Collection<Token> result = new ArrayList<Token>();
assert analyzer != null;
TokenStream ts = analyzer.tokenStream("", new StringReader(q));
ts.reset();
// TODO: support custom attributes
CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
OffsetAttribute offsetAtt = ts.addAttribute(OffsetAttribute.class);
TypeAttribute typeAtt = ts.addAttribute(TypeAttribute.class);
FlagsAttribute flagsAtt = ts.addAttribute(FlagsAttribute.class);
PayloadAttribute payloadAtt = ts.addAttribute(PayloadAttribute.class);
PositionIncrementAttribute posIncAtt = ts.addAttribute(PositionIncrementAttribute.class);
while (ts.incrementToken()){
Token token = new Token();
token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
token.setOffset(offsetAtt.startOffset(), offsetAtt.endOffset());
token.setType(typeAtt.type());
token.setFlags(flagsAtt.getFlags());
token.setPayload(payloadAtt.getPayload());
token.setPositionIncrement(posIncAtt.getPositionIncrement());
result.add(token);
}
ts.end();
ts.close();
return result;
}
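// Illustrative sketch (not part of the Solr source): feeding getTokens() above a
// concrete analyzer. WhitespaceAnalyzer and the Version constant are assumptions
// that depend on the Lucene release in use; getTokens() is private, so a snippet
// like this would live inside the component.
Analyzer analyzer = new WhitespaceAnalyzer(Version.LUCENE_40);
for (Token t : getTokens("hello wrold", analyzer)) {
  // each Token carries the copied term, offsets, type, flags, payload and posInc
  System.out.println(t + " @ " + t.startOffset() + "-" + t.endOffset());
}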
// in core/src/java/org/apache/solr/handler/component/DebugComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException
{
}
// in core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
public SimpleOrderedMap<List<NamedList<Object>>> process(ResponseBuilder rb, SolrParams params, String[] pivots) throws IOException {
if (!rb.doFacets || pivots == null)
return null;
int minMatch = params.getInt( FacetParams.FACET_PIVOT_MINCOUNT, 1 );
SimpleOrderedMap<List<NamedList<Object>>> pivotResponse = new SimpleOrderedMap<List<NamedList<Object>>>();
for (String pivot : pivots) {
String[] fields = pivot.split(","); // only support two levels for now
if( fields.length < 2 ) {
throw new SolrException( ErrorCode.BAD_REQUEST,
"Pivot Facet needs at least two fields: "+pivot );
}
DocSet docs = rb.getResults().docSet;
String field = fields[0];
String subField = fields[1];
Deque<String> fnames = new LinkedList<String>();
for( int i=fields.length-1; i>1; i-- ) {
fnames.push( fields[i] );
}
SimpleFacets sf = getFacetImplementation(rb.req, rb.getResults().docSet, rb.req.getParams());
NamedList<Integer> superFacets = sf.getTermCounts(field);
pivotResponse.add(pivot, doPivots(superFacets, field, subField, fnames, rb, docs, minMatch));
}
return pivotResponse;
}
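// Illustrative sketch (not part of the Solr source): a pivot facet request handled
// by process() above. Field names are placeholders; facet.pivot takes a
// comma-separated field list of at least two fields, and facet.pivot.mincount is
// the minMatch threshold used by doPivots().
import org.apache.solr.common.params.ModifiableSolrParams;

public class PivotFacetExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("q", "*:*");
    p.set("facet", true);
    p.set("facet.pivot", "cat,inStock"); // parent field first, then sub-field(s)
    p.set("facet.pivot.mincount", 1);    // prune branches below this count
    System.out.println(p);
  }
}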
// in core/src/java/org/apache/solr/handler/component/PivotFacetHelper.java
protected List<NamedList<Object>> doPivots( NamedList<Integer> superFacets, String field, String subField, Deque<String> fnames, ResponseBuilder rb, DocSet docs, int minMatch ) throws IOException
{
SolrIndexSearcher searcher = rb.req.getSearcher();
// TODO: optimize to avoid converting to an external string and then having to convert back to internal below
SchemaField sfield = searcher.getSchema().getField(field);
FieldType ftype = sfield.getType();
String nextField = fnames.poll();
List<NamedList<Object>> values = new ArrayList<NamedList<Object>>( superFacets.size() );
for (Map.Entry<String, Integer> kv : superFacets) {
// Only sub-facet if parent facet has positive count - still may not be any values for the sub-field though
if (kv.getValue() >= minMatch ) {
// don't reuse the same BytesRef each time since we will be constructing Term
// objects that will most likely be cached.
BytesRef termval = new BytesRef();
ftype.readableToIndexed(kv.getKey(), termval);
SimpleOrderedMap<Object> pivot = new SimpleOrderedMap<Object>();
pivot.add( "field", field );
pivot.add( "value", ftype.toObject(sfield, termval) );
pivot.add( "count", kv.getValue() );
if( subField == null ) {
values.add( pivot );
}
else {
Query query = new TermQuery(new Term(field, termval));
DocSet subset = searcher.getDocSet(query, docs);
SimpleFacets sf = getFacetImplementation(rb.req, subset, rb.req.getParams());
NamedList<Integer> nl = sf.getTermCounts(subField);
if (nl.size() >= minMatch ) {
pivot.add( "pivot", doPivots( nl, subField, nextField, fnames, rb, subset, minMatch ) );
values.add( pivot ); // only add response if there are some counts
}
}
}
}
// put the field back on the list
fnames.push( nextField );
return values;
}
// in core/src/java/org/apache/solr/handler/component/QueryComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException
{
SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
SolrQueryResponse rsp = rb.rsp;
// Set field flags
ReturnFields returnFields = new ReturnFields( req );
rsp.setReturnFields( returnFields );
int flags = 0;
if (returnFields.wantsScore()) {
flags |= SolrIndexSearcher.GET_SCORES;
}
rb.setFieldFlags( flags );
String defType = params.get(QueryParsing.DEFTYPE,QParserPlugin.DEFAULT_QTYPE);
// get it from the response builder to give a different component a chance
// to set it.
String queryString = rb.getQueryString();
if (queryString == null) {
// this is the normal way it's set.
queryString = params.get( CommonParams.Q );
rb.setQueryString(queryString);
}
try {
QParser parser = QParser.getParser(rb.getQueryString(), defType, req);
Query q = parser.getQuery();
if (q == null) {
// normalize a null query to a query that matches nothing
q = new BooleanQuery();
}
rb.setQuery( q );
rb.setSortSpec( parser.getSort(true) );
rb.setQparser(parser);
rb.setScoreDoc(parser.getPaging());
String[] fqs = req.getParams().getParams(CommonParams.FQ);
if (fqs!=null && fqs.length!=0) {
List<Query> filters = rb.getFilters();
if (filters==null) {
filters = new ArrayList<Query>(fqs.length);
}
for (String fq : fqs) {
if (fq != null && fq.trim().length()!=0) {
QParser fqp = QParser.getParser(fq, null, req);
filters.add(fqp.getQuery());
}
}
// only set the filters if they are not empty otherwise
// fq=&someotherParam= will trigger all docs filter for every request
// if filter cache is disabled
if (!filters.isEmpty()) {
rb.setFilters( filters );
}
}
} catch (ParseException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
boolean grouping = params.getBool(GroupParams.GROUP, false);
if (!grouping) {
return;
}
SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
SolrIndexSearcher searcher = rb.req.getSearcher();
GroupingSpecification groupingSpec = new GroupingSpecification();
rb.setGroupingSpec(groupingSpec);
//TODO: move weighting of sort
Sort groupSort = searcher.weightSort(cmd.getSort());
if (groupSort == null) {
groupSort = Sort.RELEVANCE;
}
// groupSort defaults to sort
String groupSortStr = params.get(GroupParams.GROUP_SORT);
//TODO: move weighting of sort
Sort sortWithinGroup = groupSortStr == null ? groupSort : searcher.weightSort(QueryParsing.parseSort(groupSortStr, req));
if (sortWithinGroup == null) {
sortWithinGroup = Sort.RELEVANCE;
}
groupingSpec.setSortWithinGroup(sortWithinGroup);
groupingSpec.setGroupSort(groupSort);
String formatStr = params.get(GroupParams.GROUP_FORMAT, Grouping.Format.grouped.name());
Grouping.Format responseFormat;
try {
responseFormat = Grouping.Format.valueOf(formatStr);
} catch (IllegalArgumentException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, String.format("Illegal %s parameter", GroupParams.GROUP_FORMAT));
}
groupingSpec.setResponseFormat(responseFormat);
groupingSpec.setFields(params.getParams(GroupParams.GROUP_FIELD));
groupingSpec.setQueries(params.getParams(GroupParams.GROUP_QUERY));
groupingSpec.setFunctions(params.getParams(GroupParams.GROUP_FUNC));
groupingSpec.setGroupOffset(params.getInt(GroupParams.GROUP_OFFSET, 0));
groupingSpec.setGroupLimit(params.getInt(GroupParams.GROUP_LIMIT, 1));
groupingSpec.setOffset(rb.getSortSpec().getOffset());
groupingSpec.setLimit(rb.getSortSpec().getCount());
groupingSpec.setIncludeGroupCount(params.getBool(GroupParams.GROUP_TOTAL_COUNT, false));
groupingSpec.setMain(params.getBool(GroupParams.GROUP_MAIN, false));
groupingSpec.setNeedScore((cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0);
groupingSpec.setTruncateGroups(params.getBool(GroupParams.GROUP_TRUNCATE, false));
}
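// Illustrative sketch (not part of the Solr source): the grouping parameters that
// prepare() above folds into the GroupingSpecification; names match the GroupParams
// constants and the field name is a placeholder.
import org.apache.solr.common.params.ModifiableSolrParams;

public class GroupingParamsExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("q", "*:*");
    p.set("group", true);             // enables grouping
    p.set("group.field", "manu");     // one group per field value
    p.set("group.limit", 3);          // docs returned per group
    p.set("group.sort", "price asc"); // sort within each group
    p.set("group.ngroups", true);     // include the total group count
    p.set("group.main", false);       // true: flatten groups into a normal doc list
    System.out.println(p);
  }
}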
// in core/src/java/org/apache/solr/handler/component/QueryComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException
{
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
SolrParams params = req.getParams();
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
SolrIndexSearcher searcher = req.getSearcher();
if (rb.getQueryCommand().getOffset() < 0) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
}
// -1 as flag if not set.
long timeAllowed = (long)params.getInt( CommonParams.TIME_ALLOWED, -1 );
// Optional: This could also be implemented by the top-level searcher sending
// a filter that lists the ids... that would be transparent to
// the request handler, but would be more expensive (and would preserve score
// too if desired).
String ids = params.get(ShardParams.IDS);
if (ids != null) {
SchemaField idField = req.getSchema().getUniqueKeyField();
List<String> idArr = StrUtils.splitSmart(ids, ",", true);
int[] luceneIds = new int[idArr.size()];
int docs = 0;
for (int i=0; i<idArr.size(); i++) {
int id = req.getSearcher().getFirstMatch(
new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
if (id >= 0)
luceneIds[docs++] = id;
}
DocListAndSet res = new DocListAndSet();
res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
if (rb.isNeedDocSet()) {
// TODO: create a cache for this!
List<Query> queries = new ArrayList<Query>();
queries.add(rb.getQuery());
List<Query> filters = rb.getFilters();
if (filters != null) queries.addAll(filters);
res.docSet = searcher.getDocSet(queries);
}
rb.setResults(res);
ResultContext ctx = new ResultContext();
ctx.docs = rb.getResults().docList;
ctx.query = null; // anything?
rsp.add("response", ctx);
return;
}
SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
cmd.setTimeAllowed(timeAllowed);
SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();
//
// grouping / field collapsing
//
GroupingSpecification groupingSpec = rb.getGroupingSpec();
if (groupingSpec != null) {
try {
boolean needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
if (params.getBool(GroupParams.GROUP_DISTRIBUTED_FIRST, false)) {
CommandHandler.Builder topsGroupsActionBuilder = new CommandHandler.Builder()
.setQueryCommand(cmd)
.setNeedDocSet(false) // Order matters here
.setIncludeHitCount(true)
.setSearcher(searcher);
for (String field : groupingSpec.getFields()) {
topsGroupsActionBuilder.addCommandField(new SearchGroupsFieldCommand.Builder()
.setField(searcher.getSchema().getField(field))
.setGroupSort(groupingSpec.getGroupSort())
.setTopNGroups(cmd.getOffset() + cmd.getLen())
.setIncludeGroupCount(groupingSpec.isIncludeGroupCount())
.build()
);
}
CommandHandler commandHandler = topsGroupsActionBuilder.build();
commandHandler.execute();
SearchGroupsResultTransformer serializer = new SearchGroupsResultTransformer(searcher);
rsp.add("firstPhase", commandHandler.processResult(result, serializer));
rsp.add("totalHitCount", commandHandler.getTotalHitCount());
rb.setResult(result);
return;
} else if (params.getBool(GroupParams.GROUP_DISTRIBUTED_SECOND, false)) {
CommandHandler.Builder secondPhaseBuilder = new CommandHandler.Builder()
.setQueryCommand(cmd)
.setTruncateGroups(groupingSpec.isTruncateGroups() && groupingSpec.getFields().length > 0)
.setSearcher(searcher);
for (String field : groupingSpec.getFields()) {
String[] topGroupsParam = params.getParams(GroupParams.GROUP_DISTRIBUTED_TOPGROUPS_PREFIX + field);
if (topGroupsParam == null) {
topGroupsParam = new String[0];
}
List<SearchGroup<BytesRef>> topGroups = new ArrayList<SearchGroup<BytesRef>>(topGroupsParam.length);
for (String topGroup : topGroupsParam) {
SearchGroup<BytesRef> searchGroup = new SearchGroup<BytesRef>();
if (!topGroup.equals(TopGroupsShardRequestFactory.GROUP_NULL_VALUE)) {
searchGroup.groupValue = new BytesRef(searcher.getSchema().getField(field).getType().readableToIndexed(topGroup));
}
topGroups.add(searchGroup);
}
secondPhaseBuilder.addCommandField(
new TopGroupsFieldCommand.Builder()
.setField(searcher.getSchema().getField(field))
.setGroupSort(groupingSpec.getGroupSort())
.setSortWithinGroup(groupingSpec.getSortWithinGroup())
.setFirstPhaseGroups(topGroups)
.setMaxDocPerGroup(groupingSpec.getGroupOffset() + groupingSpec.getGroupLimit())
.setNeedScores(needScores)
.setNeedMaxScore(needScores)
.build()
);
}
for (String query : groupingSpec.getQueries()) {
secondPhaseBuilder.addCommandField(new QueryCommand.Builder()
.setDocsToCollect(groupingSpec.getOffset() + groupingSpec.getLimit())
.setSort(groupingSpec.getGroupSort())
.setQuery(query, rb.req)
.setDocSet(searcher)
.build()
);
}
CommandHandler commandHandler = secondPhaseBuilder.build();
commandHandler.execute();
TopGroupsResultTransformer serializer = new TopGroupsResultTransformer(rb);
rsp.add("secondPhase", commandHandler.processResult(result, serializer));
rb.setResult(result);
return;
}
int maxDocsPercentageToCache = params.getInt(GroupParams.GROUP_CACHE_PERCENTAGE, 0);
boolean cacheSecondPassSearch = maxDocsPercentageToCache >= 1 && maxDocsPercentageToCache <= 100;
Grouping.TotalCount defaultTotalCount = groupingSpec.isIncludeGroupCount() ?
Grouping.TotalCount.grouped : Grouping.TotalCount.ungrouped;
int limitDefault = cmd.getLen(); // this is normally from "rows"
Grouping grouping =
new Grouping(searcher, result, cmd, cacheSecondPassSearch, maxDocsPercentageToCache, groupingSpec.isMain());
grouping.setSort(groupingSpec.getGroupSort())
.setGroupSort(groupingSpec.getSortWithinGroup())
.setDefaultFormat(groupingSpec.getResponseFormat())
.setLimitDefault(limitDefault)
.setDefaultTotalCount(defaultTotalCount)
.setDocsPerGroupDefault(groupingSpec.getGroupLimit())
.setGroupOffsetDefault(groupingSpec.getGroupOffset())
.setGetGroupedDocSet(groupingSpec.isTruncateGroups());
if (groupingSpec.getFields() != null) {
for (String field : groupingSpec.getFields()) {
grouping.addFieldCommand(field, rb.req);
}
}
if (groupingSpec.getFunctions() != null) {
for (String groupByStr : groupingSpec.getFunctions()) {
grouping.addFunctionCommand(groupByStr, rb.req);
}
}
if (groupingSpec.getQueries() != null) {
for (String groupByStr : groupingSpec.getQueries()) {
grouping.addQueryCommand(groupByStr, rb.req);
}
}
if (rb.doHighlights || rb.isDebug() || params.getBool(MoreLikeThisParams.MLT, false)) {
// we need a single list of the returned docs
cmd.setFlags(SolrIndexSearcher.GET_DOCLIST);
}
grouping.execute();
if (grouping.isSignalCacheWarning()) {
rsp.add(
"cacheWarning",
String.format("Cache limit of %d percent relative to maxdoc has exceeded. Please increase cache size or disable caching.", maxDocsPercentageToCache)
);
}
rb.setResult(result);
if (grouping.mainResult != null) {
ResultContext ctx = new ResultContext();
ctx.docs = grouping.mainResult;
ctx.query = null; // TODO? add the query?
rsp.add("response", ctx);
rsp.getToLog().add("hits", grouping.mainResult.matches());
} else if (!grouping.getCommands().isEmpty()) { // Can never be empty since grouping.execute() checks for this.
rsp.add("grouped", result.groupedResults);
rsp.getToLog().add("hits", grouping.getCommands().get(0).getMatches());
}
return;
} catch (ParseException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
}
// normal search result
searcher.search(result,cmd);
rb.setResult( result );
ResultContext ctx = new ResultContext();
ctx.docs = rb.getResults().docList;
ctx.query = rb.getQuery();
rsp.add("response", ctx);
rsp.getToLog().add("hits", rb.getResults().docList.matches());
doFieldSortValues(rb, searcher);
doPrefetch(rb);
}
// in core/src/java/org/apache/solr/handler/component/QueryComponent.java
protected void doFieldSortValues(ResponseBuilder rb, SolrIndexSearcher searcher) throws IOException
{
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
final CharsRef spare = new CharsRef();
// The query cache doesn't currently store sort field values, and SolrIndexSearcher doesn't
// currently have an option to return sort field values. Because of this, we
// take the documents given and re-derive the sort values.
boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES,false);
if(fsv){
Sort sort = searcher.weightSort(rb.getSortSpec().getSort());
SortField[] sortFields = sort==null ? new SortField[]{SortField.FIELD_SCORE} : sort.getSort();
NamedList<Object[]> sortVals = new NamedList<Object[]>(); // order is important for the sort fields
Field field = new StringField("dummy", ""); // a dummy Field
IndexReaderContext topReaderContext = searcher.getTopReaderContext();
AtomicReaderContext[] leaves = topReaderContext.leaves();
AtomicReaderContext currentLeaf = null;
if (leaves.length==1) {
// if there is a single segment, use that subReader and avoid looking up each time
currentLeaf = leaves[0];
leaves=null;
}
DocList docList = rb.getResults().docList;
// sort ids from lowest to highest so we can access them in order
int nDocs = docList.size();
long[] sortedIds = new long[nDocs];
DocIterator it = rb.getResults().docList.iterator();
for (int i=0; i<nDocs; i++) {
sortedIds[i] = (((long)it.nextDoc()) << 32) | i;
}
Arrays.sort(sortedIds);
for (SortField sortField: sortFields) {
SortField.Type type = sortField.getType();
if (type==SortField.Type.SCORE || type==SortField.Type.DOC) continue;
FieldComparator comparator = null;
String fieldname = sortField.getField();
FieldType ft = fieldname==null ? null : req.getSchema().getFieldTypeNoEx(fieldname);
Object[] vals = new Object[nDocs];
int lastIdx = -1;
int idx = 0;
for (long idAndPos : sortedIds) {
int doc = (int)(idAndPos >>> 32);
int position = (int)idAndPos;
if (leaves != null) {
idx = ReaderUtil.subIndex(doc, leaves);
currentLeaf = leaves[idx];
if (idx != lastIdx) {
// we switched segments. invalidate comparator.
comparator = null;
}
}
if (comparator == null) {
comparator = sortField.getComparator(1,0);
comparator = comparator.setNextReader(currentLeaf);
}
doc -= currentLeaf.docBase; // adjust for what segment this is in
comparator.copy(0, doc);
Object val = comparator.value(0);
// Sortable float, double, int, long types all just use a string
// comparator. For these, we need to put the type into a readable
// format. One reason for this is that XML can't represent all
// string values (or even all unicode code points).
// indexedToReadable() should be a no-op and
// thus be harmless anyway (for all current uses, anyway)
if (val instanceof String) {
field.setStringValue((String)val);
val = ft.toObject(field);
}
// Must do the same conversion when sorting by a
// String field in Lucene, which returns the terms
// data as BytesRef:
if (val instanceof BytesRef) {
UnicodeUtil.UTF8toUTF16((BytesRef)val, spare);
field.setStringValue(spare.toString());
val = ft.toObject(field);
}
vals[position] = val;
}
sortVals.add(fieldname, vals);
}
rsp.add("sort_values", sortVals);
}
}
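// Illustrative sketch (not part of the Solr source): the id/position packing used
// by doFieldSortValues() above. The Lucene docid occupies the high 32 bits and the
// original DocList position the low 32 bits, so sorting the longs visits documents
// in docid order while remembering where each one sat in the response.
public class SortedIdsPackingExample {
  public static void main(String[] args) {
    int docid = 42, position = 7;
    long packed = (((long) docid) << 32) | position;
    int doc = (int) (packed >>> 32); // recover the docid: 42
    int pos = (int) packed;          // recover the position: 7
    System.out.println(doc + " / " + pos);
  }
}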
// in core/src/java/org/apache/solr/handler/component/QueryComponent.java
protected void doPrefetch(ResponseBuilder rb) throws IOException
{
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
//pre-fetch returned documents
if (!req.getParams().getBool(ShardParams.IS_SHARD,false) && rb.getResults().docList != null && rb.getResults().docList.size()<=50) {
SolrPluginUtils.optimizePreFetchDocs(rb, rb.getResults().docList, rb.getQuery(), req, rsp);
}
}
// in core/src/java/org/apache/solr/handler/component/QueryComponent.java
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
if (rb.grouping()) {
return groupedDistributedProcess(rb);
} else {
return regularDistributedProcess(rb);
}
}
// in core/src/java/org/apache/solr/handler/component/FacetComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException
{
if (rb.req.getParams().getBool(FacetParams.FACET,false)) {
rb.setNeedDocSet( true );
rb.doFacets = true;
}
}
// in core/src/java/org/apache/solr/handler/component/FacetComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException
{
if (rb.doFacets) {
SolrParams params = rb.req.getParams();
SimpleFacets f = new SimpleFacets(rb.req,
rb.getResults().docSet,
params,
rb );
NamedList<Object> counts = f.getFacetCounts();
String[] pivots = params.getParams( FacetParams.FACET_PIVOT );
if( pivots != null && pivots.length > 0 ) {
NamedList v = pivotHelper.process(rb, params, pivots);
if( v != null ) {
counts.add( PIVOT_KEY, v );
}
}
// TODO ???? add this directly to the response, or to the builder?
rb.rsp.add( "facet_counts", counts );
}
}
// in core/src/java/org/apache/solr/handler/component/FacetComponent.java
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
if (!rb.doFacets) {
return ResponseBuilder.STAGE_DONE;
}
if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
// Overlap facet refinement requests (to those shards from which we need counts
// for particular facet values), where possible, with the requests to get fields
// (because we know that is the only other required phase).
// We do this in distributedProcess so we can look at all of the
// requests in the outgoing queue at once.
for (int shardNum=0; shardNum<rb.shards.length; shardNum++) {
List<String> refinements = null;
for (DistribFieldFacet dff : rb._facetInfo.facets.values()) {
if (!dff.needRefinements) continue;
List<String> refList = dff._toRefine[shardNum];
if (refList == null || refList.size()==0) continue;
String key = dff.getKey(); // reuse the same key that was used for the main facet
String termsKey = key + "__terms";
String termsVal = StrUtils.join(refList, ',');
String facetCommand;
// add terms into the original facet.field command
// do it via parameter reference to avoid another layer of encoding.
String termsKeyEncoded = QueryParsing.encodeLocalParamVal(termsKey);
if (dff.localParams != null) {
facetCommand = commandPrefix+termsKeyEncoded + " " + dff.facetStr.substring(2);
} else {
facetCommand = commandPrefix+termsKeyEncoded+'}'+dff.field;
}
if (refinements == null) {
refinements = new ArrayList<String>();
}
refinements.add(facetCommand);
refinements.add(termsKey);
refinements.add(termsVal);
}
if (refinements == null) continue;
String shard = rb.shards[shardNum];
ShardRequest refine = null;
boolean newRequest = false;
// try to find a request that is already going out to that shard.
// If nshards becomes too great, we may want to move to hashing for better
// scalability.
for (ShardRequest sreq : rb.outgoing) {
if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS)!=0
&& sreq.shards != null
&& sreq.shards.length==1
&& sreq.shards[0].equals(shard))
{
refine = sreq;
break;
}
}
if (refine == null) {
// we didn't find any other suitable requests going out to that shard, so
// create one ourselves.
newRequest = true;
refine = new ShardRequest();
refine.shards = new String[]{rb.shards[shardNum]};
refine.params = new ModifiableSolrParams(rb.req.getParams());
// don't request any documents
refine.params.remove(CommonParams.START);
refine.params.set(CommonParams.ROWS,"0");
}
refine.purpose |= ShardRequest.PURPOSE_REFINE_FACETS;
refine.params.set(FacetParams.FACET, "true");
refine.params.remove(FacetParams.FACET_FIELD);
refine.params.remove(FacetParams.FACET_QUERY);
for (int i=0; i<refinements.size();) {
String facetCommand=refinements.get(i++);
String termsKey=refinements.get(i++);
String termsVal=refinements.get(i++);
refine.params.add(FacetParams.FACET_FIELD, facetCommand);
refine.params.set(termsKey, termsVal);
}
if (newRequest) {
rb.addRequest(this, refine);
}
}
}
return ResponseBuilder.STAGE_DONE;
}
// in core/src/java/org/apache/solr/handler/component/HighlightComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
rb.doHighlights = highlighter.isHighlightingEnabled(params);
if(rb.doHighlights){
String hlq = params.get(HighlightParams.Q);
if(hlq != null){
try {
QParser parser = QParser.getParser(hlq, null, rb.req);
rb.setHighlightQuery(parser.getHighlightQuery());
} catch (ParseException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
}
}
}
// in core/src/java/org/apache/solr/handler/component/HighlightComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
if (rb.doHighlights) {
SolrQueryRequest req = rb.req;
SolrParams params = req.getParams();
String[] defaultHighlightFields; //TODO: get from builder by default?
if (rb.getQparser() != null) {
defaultHighlightFields = rb.getQparser().getDefaultHighlightFields();
} else {
defaultHighlightFields = params.getParams(CommonParams.DF);
}
Query highlightQuery = rb.getHighlightQuery();
if(highlightQuery==null) {
if (rb.getQparser() != null) {
try {
highlightQuery = rb.getQparser().getHighlightQuery();
rb.setHighlightQuery( highlightQuery );
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
} else {
highlightQuery = rb.getQuery();
rb.setHighlightQuery( highlightQuery );
}
}
if(highlightQuery != null) {
boolean rewrite = !(Boolean.valueOf(params.get(HighlightParams.USE_PHRASE_HIGHLIGHTER, "true")) &&
Boolean.valueOf(params.get(HighlightParams.HIGHLIGHT_MULTI_TERM, "true")));
highlightQuery = rewrite ? highlightQuery.rewrite(req.getSearcher().getIndexReader()) : highlightQuery;
}
// No highlighting if there is no query -- consider q.alt=*:*
if( highlightQuery != null ) {
NamedList sumData = highlighter.doHighlighting(
rb.getResults().docList,
highlightQuery,
req, defaultHighlightFields );
if(sumData != null) {
// TODO ???? add this directly to the response?
rb.rsp.add("highlighting", sumData);
}
}
}
}
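// Illustrative sketch (not part of the Solr source): the highlighting parameters
// consulted by prepare() and process() above; names match HighlightParams and the
// field list is a placeholder.
import org.apache.solr.common.params.ModifiableSolrParams;

public class HighlightParamsExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("q", "solr");
    p.set("hl", true);                      // enables highlighting
    p.set("hl.fl", "title,body");           // fields to highlight
    p.set("hl.q", "lucene");                // optional override of the main query
    p.set("hl.usePhraseHighlighter", true); // when both of these are true, the
    p.set("hl.highlightMultiTerm", true);   // query rewrite above is skipped
    System.out.println(p);
  }
}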
// in core/src/java/org/apache/solr/handler/component/SearchComponent.java
public int distributedProcess(ResponseBuilder rb) throws IOException {
return ResponseBuilder.STAGE_DONE;
}
// in core/src/java/org/apache/solr/handler/component/StatsComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
if (rb.req.getParams().getBool(StatsParams.STATS,false)) {
rb.setNeedDocSet( true );
rb.doStats = true;
}
}
// in core/src/java/org/apache/solr/handler/component/StatsComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
if (rb.doStats) {
SolrParams params = rb.req.getParams();
SimpleStats s = new SimpleStats(rb.req,
rb.getResults().docSet,
params );
// TODO ???? add this directly to the response, or to the builder?
rb.rsp.add( "stats", s.getStatsCounts() );
}
}
// in core/src/java/org/apache/solr/handler/component/StatsComponent.java
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
return ResponseBuilder.STAGE_DONE;
}
// in core/src/java/org/apache/solr/handler/component/StatsComponent.java
public NamedList<Object> getStatsCounts() throws IOException {
NamedList<Object> res = new SimpleOrderedMap<Object>();
res.add("stats_fields", getStatsFields());
return res;
}
// in core/src/java/org/apache/solr/handler/component/StatsComponent.java
public NamedList<Object> getStatsFields() throws IOException {
NamedList<Object> res = new SimpleOrderedMap<Object>();
String[] statsFs = params.getParams(StatsParams.STATS_FIELD);
boolean isShard = params.getBool(ShardParams.IS_SHARD, false);
if (null != statsFs) {
for (String f : statsFs) {
String[] facets = params.getFieldParams(f, StatsParams.STATS_FACET);
if (facets == null) {
facets = new String[0]; // make sure it is something...
}
SchemaField sf = searcher.getSchema().getField(f);
FieldType ft = sf.getType();
NamedList<?> stv;
// Currently, only UnInvertedField can deal with multi-part trie fields
String prefix = TrieField.getMainValuePrefix(ft);
if (sf.multiValued() || ft.multiValuedFieldCache() || prefix!=null) {
//use UnInvertedField for multivalued fields
UnInvertedField uif = UnInvertedField.getUnInvertedField(f, searcher);
stv = uif.getStats(searcher, docs, facets).getStatsValues();
} else {
stv = getFieldCacheStats(f, facets);
}
if (isShard || (Long) stv.get("count") > 0) {
res.add(f, stv);
} else {
res.add(f, null);
}
}
}
return res;
}
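// Illustrative sketch (not part of the Solr source): a stats request served by the
// code above. stats.field selects the field, and the per-field f.<field>.stats.facet
// form is how getFieldParams() picks up facets for a single stats field. Field
// names are placeholders.
import org.apache.solr.common.params.ModifiableSolrParams;

public class StatsParamsExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("q", "*:*");
    p.set("stats", true);                // enables the component
    p.set("stats.field", "price");       // field to compute stats over
    p.set("f.price.stats.facet", "cat"); // break price stats down by cat
    System.out.println(p);
  }
}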
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
// Set field flags
ReturnFields returnFields = new ReturnFields( rb.req );
rb.rsp.setReturnFields( returnFields );
}
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException
{
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
SolrParams params = req.getParams();
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
String val = params.get("getVersions");
if (val != null) {
processGetVersions(rb);
return;
}
val = params.get("getUpdates");
if (val != null) {
processGetUpdates(rb);
return;
}
String id[] = params.getParams("id");
String ids[] = params.getParams("ids");
if (id == null && ids == null) {
return;
}
String[] allIds = id==null ? new String[0] : id;
if (ids != null) {
List<String> lst = new ArrayList<String>();
for (String s : allIds) {
lst.add(s);
}
for (String idList : ids) {
lst.addAll( StrUtils.splitSmart(idList, ",", true) );
}
allIds = lst.toArray(new String[lst.size()]);
}
SchemaField idField = req.getSchema().getUniqueKeyField();
FieldType fieldType = idField.getType();
SolrDocumentList docList = new SolrDocumentList();
UpdateLog ulog = req.getCore().getUpdateHandler().getUpdateLog();
RefCounted<SolrIndexSearcher> searcherHolder = null;
DocTransformer transformer = rsp.getReturnFields().getTransformer();
if (transformer != null) {
TransformContext context = new TransformContext();
context.req = req;
transformer.setContext(context);
}
try {
SolrIndexSearcher searcher = null;
BytesRef idBytes = new BytesRef();
for (String idStr : allIds) {
fieldType.readableToIndexed(idStr, idBytes);
if (ulog != null) {
Object o = ulog.lookup(idBytes);
if (o != null) {
// should currently be a List<Oper,Ver,Doc/Id>
List entry = (List)o;
assert entry.size() >= 3;
int oper = (Integer)entry.get(0) & UpdateLog.OPERATION_MASK;
switch (oper) {
case UpdateLog.ADD:
SolrDocument doc = toSolrDoc((SolrInputDocument)entry.get(entry.size()-1), req.getSchema());
if(transformer!=null) {
transformer.transform(doc, -1); // unknown docID
}
docList.add(doc);
break;
case UpdateLog.DELETE:
break;
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
}
continue;
}
}
// didn't find it in the update log, so it should be in the newest searcher opened
if (searcher == null) {
searcherHolder = req.getCore().getRealtimeSearcher();
searcher = searcherHolder.get();
}
// SolrCore.verbose("RealTimeGet using searcher ", searcher);
int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
if (docid < 0) continue;
Document luceneDocument = searcher.doc(docid);
SolrDocument doc = toSolrDoc(luceneDocument, req.getSchema());
if( transformer != null ) {
transformer.transform(doc, docid);
}
docList.add(doc);
}
} finally {
if (searcherHolder != null) {
searcherHolder.decref();
}
}
// if the client specified a single id=foo, then respond with a single "doc":{...};
// otherwise use a standard doclist
if (ids == null && allIds.length <= 1) {
// if the doc was not found, then use a value of null.
rsp.add("doc", docList.size() > 0 ? docList.get(0) : null);
} else {
docList.setNumFound(docList.size());
rsp.add("response", docList);
}
}
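// Illustrative sketch (not part of the Solr source): requests served by the
// process() method above. A single id yields a "doc" entry (or null when absent);
// a comma-separated ids list yields a standard "response" doclist. The handler
// path and ids are placeholders.
//   /get?id=doc1            -> { "doc": {...} }
//   /get?ids=doc1,doc2,doc3 -> { "response": { "numFound": ..., "docs": [...] } }
import org.apache.solr.common.params.ModifiableSolrParams;

public class RealTimeGetExample {
  public static void main(String[] args) {
    ModifiableSolrParams p = new ModifiableSolrParams();
    p.set("qt", "/get");       // real-time get handler
    p.set("ids", "doc1,doc2"); // uncommitted docs are served from the update log
    System.out.println(p);
  }
}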
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
public static SolrInputDocument getInputDocument(SolrCore core, BytesRef idBytes) throws IOException {
SolrInputDocument sid = null;
RefCounted<SolrIndexSearcher> searcherHolder = null;
try {
SolrIndexSearcher searcher = null;
UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
if (ulog != null) {
Object o = ulog.lookup(idBytes);
if (o != null) {
// should currently be a List<Oper,Ver,Doc/Id>
List entry = (List)o;
assert entry.size() >= 3;
int oper = (Integer)entry.get(0) & UpdateLog.OPERATION_MASK;
switch (oper) {
case UpdateLog.ADD:
sid = (SolrInputDocument)entry.get(entry.size()-1);
break;
case UpdateLog.DELETE:
return null;
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown Operation! " + oper);
}
}
}
if (sid == null) {
// didn't find it in the update log, so it should be in the newest searcher opened
if (searcher == null) {
searcherHolder = core.getRealtimeSearcher();
searcher = searcherHolder.get();
}
// SolrCore.verbose("RealTimeGet using searcher ", searcher);
SchemaField idField = core.getSchema().getUniqueKeyField();
int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
if (docid < 0) return null;
Document luceneDocument = searcher.doc(docid);
sid = toSolrInputDocument(luceneDocument, core.getSchema());
}
} finally {
if (searcherHolder != null) {
searcherHolder.decref();
}
}
return sid;
}
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
if (rb.stage < ResponseBuilder.STAGE_GET_FIELDS)
return ResponseBuilder.STAGE_GET_FIELDS;
if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
return createSubRequests(rb);
}
return ResponseBuilder.STAGE_DONE;
}
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
public int createSubRequests(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
String id1[] = params.getParams("id");
String ids[] = params.getParams("ids");
if (id1 == null && ids == null) {
return ResponseBuilder.STAGE_DONE;
}
List<String> allIds = new ArrayList<String>();
if (id1 != null) {
for (String s : id1) {
allIds.add(s);
}
}
if (ids != null) {
for (String s : ids) {
allIds.addAll( StrUtils.splitSmart(s, ",", true) );
}
}
// TODO: handle collection=...?
ZkController zkController = rb.req.getCore().getCoreDescriptor().getCoreContainer().getZkController();
// if shards=... then use that
if (zkController != null && params.get("shards") == null) {
SchemaField sf = rb.req.getSchema().getUniqueKeyField();
CloudDescriptor cloudDescriptor = rb.req.getCore().getCoreDescriptor().getCloudDescriptor();
String collection = cloudDescriptor.getCollectionName();
CloudState cloudState = zkController.getCloudState();
Map<String, List<String>> shardToId = new HashMap<String, List<String>>();
for (String id : allIds) {
BytesRef br = new BytesRef();
sf.getType().readableToIndexed(id, br);
int hash = Hash.murmurhash3_x86_32(br.bytes, br.offset, br.length, 0);
String shard = cloudState.getShard(hash, collection);
List<String> idsForShard = shardToId.get(shard);
if (idsForShard == null) {
idsForShard = new ArrayList<String>(2);
shardToId.put(shard, idsForShard);
}
idsForShard.add(id);
}
for (Map.Entry<String,List<String>> entry : shardToId.entrySet()) {
String shard = entry.getKey();
String shardIdList = StrUtils.join(entry.getValue(), ',');
ShardRequest sreq = new ShardRequest();
sreq.purpose = 1;
// sreq.shards = new String[]{shard}; // TODO: would be nice if this would work...
sreq.shards = sliceToShards(rb, collection, shard);
sreq.actualShards = sreq.shards;
sreq.params = new ModifiableSolrParams();
sreq.params.set(ShardParams.SHARDS_QT,"/get"); // TODO: how to avoid hardcoding this and hit the same handler?
sreq.params.set("distrib",false);
sreq.params.set("ids", shardIdList);
rb.addRequest(this, sreq);
}
} else {
String shardIdList = StrUtils.join(allIds, ',');
ShardRequest sreq = new ShardRequest();
sreq.purpose = 1;
sreq.shards = null; // ALL
sreq.actualShards = sreq.shards;
sreq.params = new ModifiableSolrParams();
sreq.params.set(ShardParams.SHARDS_QT,"/get"); // TODO: how to avoid hardcoding this and hit the same handler?
sreq.params.set("distrib",false);
sreq.params.set("ids", shardIdList);
rb.addRequest(this, sreq);
}
return ResponseBuilder.STAGE_DONE;
}
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
public void processGetVersions(ResponseBuilder rb) throws IOException
{
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
SolrParams params = req.getParams();
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
int nVersions = params.getInt("getVersions", -1);
if (nVersions == -1) return;
String sync = params.get("sync");
if (sync != null) {
processSync(rb, nVersions, sync);
return;
}
UpdateLog ulog = req.getCore().getUpdateHandler().getUpdateLog();
if (ulog == null) return;
UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates();
try {
rb.rsp.add("versions", recentUpdates.getVersions(nVersions));
} finally {
recentUpdates.close(); // cache this somehow?
}
}
// in core/src/java/org/apache/solr/handler/component/RealTimeGetComponent.java
public void processGetUpdates(ResponseBuilder rb) throws IOException
{
SolrQueryRequest req = rb.req;
SolrQueryResponse rsp = rb.rsp;
SolrParams params = req.getParams();
if (!params.getBool(COMPONENT_NAME, true)) {
return;
}
String versionsStr = params.get("getUpdates");
if (versionsStr == null) return;
UpdateLog ulog = req.getCore().getUpdateHandler().getUpdateLog();
if (ulog == null) return;
List<String> versions = StrUtils.splitSmart(versionsStr, ",", true);
// TODO: get this from cache instead of rebuilding?
UpdateLog.RecentUpdates recentUpdates = ulog.getRecentUpdates();
List<Object> updates = new ArrayList<Object>(versions.size());
long minVersion = Long.MAX_VALUE;
try {
for (String versionStr : versions) {
long version = Long.parseLong(versionStr);
try {
Object o = recentUpdates.lookup(version);
if (o == null) continue;
if (version > 0) {
minVersion = Math.min(minVersion, version);
}
// TODO: do any kind of validation here?
updates.add(o);
} catch (SolrException e) {
log.warn("Exception reading log for updates", e);
} catch (ClassCastException e) {
log.warn("Exception reading log for updates", e);
}
}
// Must return all delete-by-query commands that occur after the first add requested
// since they may apply.
updates.addAll( recentUpdates.getDeleteByQuery(minVersion));
rb.rsp.add("updates", updates);
} finally {
recentUpdates.close(); // cache this somehow?
}
}
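// Illustrative sketch (not part of the Solr source): the peer-sync style requests
// handled by processGetVersions()/processGetUpdates() above. The version numbers
// are placeholders; in practice one node asks a peer for its recent versions, then
// fetches the updates it is missing.
import org.apache.solr.common.params.ModifiableSolrParams;

public class GetVersionsExample {
  public static void main(String[] args) {
    ModifiableSolrParams versions = new ModifiableSolrParams();
    versions.set("qt", "/get");
    versions.set("getVersions", 100); // the 100 most recent versions from the ulog
    ModifiableSolrParams updates = new ModifiableSolrParams();
    updates.set("qt", "/get");
    updates.set("getUpdates", "1632742000000,1632742000001"); // fetch these versions
    System.out.println(versions + " / " + updates);
  }
}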
// in core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException
{
}
// in core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException
{
SolrParams p = rb.req.getParams();
if( p.getBool( MoreLikeThisParams.MLT, false ) ) {
SolrIndexSearcher searcher = rb.req.getSearcher();
NamedList<DocList> sim = getMoreLikeThese( rb, searcher,
rb.getResults().docList, rb.getFieldFlags() );
// TODO ???? add this directly to the response?
rb.rsp.add( "moreLikeThis", sim );
}
}
// in core/src/java/org/apache/solr/handler/component/MoreLikeThisComponent.java
NamedList<DocList> getMoreLikeThese( ResponseBuilder rb, SolrIndexSearcher searcher,
DocList docs, int flags ) throws IOException {
SolrParams p = rb.req.getParams();
IndexSchema schema = searcher.getSchema();
MoreLikeThisHandler.MoreLikeThisHelper mltHelper
= new MoreLikeThisHandler.MoreLikeThisHelper( p, searcher );
NamedList<DocList> mlt = new SimpleOrderedMap<DocList>();
DocIterator iterator = docs.iterator();
SimpleOrderedMap<Object> dbg = null;
if( rb.isDebug() ){
dbg = new SimpleOrderedMap<Object>();
}
while( iterator.hasNext() ) {
int id = iterator.nextDoc();
int rows = p.getInt( MoreLikeThisParams.DOC_COUNT, 5 );
DocListAndSet sim = mltHelper.getMoreLikeThis( id, 0, rows, null, null, flags );
String name = schema.printableUniqueKey( searcher.doc( id ) );
mlt.add(name, sim.docList);
if( dbg != null ){
SimpleOrderedMap<Object> docDbg = new SimpleOrderedMap<Object>();
docDbg.add( "rawMLTQuery", mltHelper.getRawMLTQuery().toString() );
docDbg.add( "boostedMLTQuery", mltHelper.getBoostedMLTQuery().toString() );
docDbg.add( "realMLTQuery", mltHelper.getRealMLTQuery().toString() );
SimpleOrderedMap<Object> explains = new SimpleOrderedMap<Object>();
DocIterator mltIte = sim.docList.iterator();
while( mltIte.hasNext() ){
int mltid = mltIte.nextDoc();
String key = schema.printableUniqueKey( searcher.doc( mltid ) );
explains.add( key, searcher.explain( mltHelper.getRealMLTQuery(), mltid ) );
}
docDbg.add( "explain", explains );
dbg.add( name, docDbg );
}
}
// add debug information
if( dbg != null ){
rb.addDebugInfo( "moreLikeThis", dbg );
}
return mlt;
}
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public void process(ResponseBuilder rb) throws IOException {
SolrParams params = rb.req.getParams();
if (!params.getBool(COMPONENT_NAME, false)) {
return;
}
NamedList<Object> termVectors = new NamedList<Object>();
rb.rsp.add(TERM_VECTORS, termVectors);
FieldOptions allFields = new FieldOptions();
//figure out what options we have, and try to get the appropriate vector
allFields.termFreq = params.getBool(TermVectorParams.TF, false);
allFields.positions = params.getBool(TermVectorParams.POSITIONS, false);
allFields.offsets = params.getBool(TermVectorParams.OFFSETS, false);
allFields.docFreq = params.getBool(TermVectorParams.DF, false);
allFields.tfIdf = params.getBool(TermVectorParams.TF_IDF, false);
//boolean cacheIdf = params.getBool(TermVectorParams.IDF, false);
//short cut to all values.
if (params.getBool(TermVectorParams.ALL, false)) {
allFields.termFreq = true;
allFields.positions = true;
allFields.offsets = true;
allFields.docFreq = true;
allFields.tfIdf = true;
}
String fldLst = params.get(TermVectorParams.FIELDS);
if (fldLst == null) {
fldLst = params.get(CommonParams.FL);
}
//use this to validate our fields
IndexSchema schema = rb.req.getSchema();
//Build up our per field mapping
Map<String, FieldOptions> fieldOptions = new HashMap<String, FieldOptions>();
NamedList<List<String>> warnings = new NamedList<List<String>>();
List<String> noTV = new ArrayList<String>();
List<String> noPos = new ArrayList<String>();
List<String> noOff = new ArrayList<String>();
//we have specific fields to retrieve
if (fldLst != null) {
String [] fields = SolrPluginUtils.split(fldLst);
for (String field : fields) {
SchemaField sf = schema.getFieldOrNull(field);
if (sf != null) {
if (sf.storeTermVector()) {
FieldOptions option = fieldOptions.get(field);
if (option == null) {
option = new FieldOptions();
option.fieldName = field;
fieldOptions.put(field, option);
}
//get the per field mappings
option.termFreq = params.getFieldBool(field, TermVectorParams.TF, allFields.termFreq);
option.docFreq = params.getFieldBool(field, TermVectorParams.DF, allFields.docFreq);
option.tfIdf = params.getFieldBool(field, TermVectorParams.TF_IDF, allFields.tfIdf);
//Validate these are even an option
option.positions = params.getFieldBool(field, TermVectorParams.POSITIONS, allFields.positions);
if (option.positions && !sf.storeTermPositions()){
noPos.add(field);
}
option.offsets = params.getFieldBool(field, TermVectorParams.OFFSETS, allFields.offsets);
if (option.offsets && !sf.storeTermOffsets()){
noOff.add(field);
}
} else {//field doesn't have term vectors
noTV.add(field);
}
} else {
//field doesn't exist
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "undefined field: " + field);
}
}
} //else, deal with all fields
boolean hasWarnings = false;
if (!noTV.isEmpty()) {
warnings.add("noTermVectors", noTV);
hasWarnings = true;
}
if (!noPos.isEmpty()) {
warnings.add("noPositions", noPos);
hasWarnings = true;
}
if (!noOff.isEmpty()) {
warnings.add("noOffsets", noOff);
hasWarnings = true;
}
if (hasWarnings) {
termVectors.add("warnings", warnings);
}
DocListAndSet listAndSet = rb.getResults();
List<Integer> docIds = getInts(params.getParams(TermVectorParams.DOC_IDS));
Iterator<Integer> iter;
if (docIds != null && !docIds.isEmpty()) {
iter = docIds.iterator();
} else {
DocList list = listAndSet.docList;
iter = list.iterator();
}
SolrIndexSearcher searcher = rb.req.getSearcher();
IndexReader reader = searcher.getIndexReader();
//the TVMapper is a TermVectorMapper which can be used to optimize loading of Term Vectors
SchemaField keyField = schema.getUniqueKeyField();
String uniqFieldName = null;
if (keyField != null) {
uniqFieldName = keyField.getName();
}
// Only load the unique key field, to get each document's key value
final String finalUniqFieldName = uniqFieldName;
final List<String> uniqValues = new ArrayList<String>();
// TODO: is this required to be single-valued? if so, we should STOP
// once we find it...
final StoredFieldVisitor getUniqValue = new StoredFieldVisitor() {
@Override
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
uniqValues.add(value);
}
@Override
public void intField(FieldInfo fieldInfo, int value) throws IOException {
uniqValues.add(Integer.toString(value));
}
@Override
public void longField(FieldInfo fieldInfo, long value) throws IOException {
uniqValues.add(Long.toString(value));
}
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
return (fieldInfo.name.equals(finalUniqFieldName)) ? Status.YES : Status.NO;
}
};
TermsEnum termsEnum = null;
while (iter.hasNext()) {
Integer docId = iter.next();
NamedList<Object> docNL = new NamedList<Object>();
termVectors.add("doc-" + docId, docNL);
if (keyField != null) {
reader.document(docId, getUniqValue);
String uniqVal = null;
if (uniqValues.size() != 0) {
uniqVal = uniqValues.get(0);
uniqValues.clear();
docNL.add("uniqueKey", uniqVal);
termVectors.add("uniqueKeyFieldName", uniqFieldName);
}
}
if (!fieldOptions.isEmpty()) {
for (Map.Entry<String, FieldOptions> entry : fieldOptions.entrySet()) {
final String field = entry.getKey();
final Terms vector = reader.getTermVector(docId, field);
if (vector != null) {
termsEnum = vector.iterator(termsEnum);
mapOneVector(docNL, entry.getValue(), reader, docId, termsEnum, field);
}
}
} else {
// extract all fields
final Fields vectors = reader.getTermVectors(docId);
final FieldsEnum fieldsEnum = vectors.iterator();
String field;
while((field = fieldsEnum.next()) != null) {
Terms terms = fieldsEnum.terms();
if (terms != null) {
termsEnum = terms.iterator(termsEnum);
mapOneVector(docNL, allFields, reader, docId, termsEnum, field);
}
}
}
}
}
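// Example (not from the Solr source): request parameters the process() method
// above reads; "title" and "body" are assumed field names.
//
//   /select?q=*:*&tv=true&tv.tf=true&tv.df=true&tv.tf_idf=true
//           &tv.fl=title,body&f.title.tv.positions=true
//
// tv.all=true flips every option on at once, per the shortcut block above, and
// per-field overrides use the f.<field>.tv.* form read by getFieldBool().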
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
uniqValues.add(value);
}
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public void intField(FieldInfo fieldInfo, int value) throws IOException {
uniqValues.add(Integer.toString(value));
}
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public void longField(FieldInfo fieldInfo, long value) throws IOException {
uniqValues.add(Long.toString(value));
}
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public Status needsField(FieldInfo fieldInfo) throws IOException {
return (fieldInfo.name.equals(finalUniqFieldName)) ? Status.YES : Status.NO;
}
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
private void mapOneVector(NamedList<Object> docNL, FieldOptions fieldOptions, IndexReader reader, int docID, TermsEnum termsEnum, String field) throws IOException {
NamedList<Object> fieldNL = new NamedList<Object>();
docNL.add(field, fieldNL);
BytesRef text;
DocsAndPositionsEnum dpEnum = null;
while((text = termsEnum.next()) != null) {
String term = text.utf8ToString();
NamedList<Object> termInfo = new NamedList<Object>();
fieldNL.add(term, termInfo);
final int freq = (int) termsEnum.totalTermFreq();
if (fieldOptions.termFreq == true) {
termInfo.add("tf", freq);
}
dpEnum = termsEnum.docsAndPositions(null, dpEnum, fieldOptions.offsets);
boolean useOffsets = fieldOptions.offsets;
if (dpEnum == null) {
useOffsets = false;
dpEnum = termsEnum.docsAndPositions(null, dpEnum, false);
}
boolean usePositions = false;
if (dpEnum != null) {
dpEnum.nextDoc();
usePositions = fieldOptions.positions;
}
NamedList<Number> theOffsets = null;
if (useOffsets) {
theOffsets = new NamedList<Number>();
termInfo.add("offsets", theOffsets);
}
NamedList<Integer> positionsNL = null;
if (usePositions || theOffsets != null) {
for (int i = 0; i < freq; i++) {
final int pos = dpEnum.nextPosition();
if (usePositions && pos >= 0) {
if (positionsNL == null) {
positionsNL = new NamedList<Integer>();
termInfo.add("positions", positionsNL);
}
positionsNL.add("position", pos);
}
if (theOffsets != null) {
theOffsets.add("start", dpEnum.startOffset());
theOffsets.add("end", dpEnum.endOffset());
}
}
}
if (fieldOptions.docFreq) {
termInfo.add("df", getDocFreq(reader, field, text));
}
if (fieldOptions.tfIdf) {
double tfIdfVal = ((double) freq) / getDocFreq(reader, field, text);
termInfo.add("tf-idf", tfIdfVal);
}
}
}
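// Worked example (not from the source): for a term with tf=4 in the current
// document and df=2 across the index, the "tf-idf" entry written above is
// 4.0 / 2 = 2.0 -- a plain tf/df ratio, not the log-weighted tf-idf of IR
// textbooks.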
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public int distributedProcess(ResponseBuilder rb) throws IOException {
int result = ResponseBuilder.STAGE_DONE;
if (rb.stage == ResponseBuilder.STAGE_GET_FIELDS) {
// Go ask each shard for its vectors
// for each shard, collect the documents for that shard.
HashMap<String, Collection<ShardDoc>> shardMap = new HashMap<String, Collection<ShardDoc>>();
for (ShardDoc sdoc : rb.resultIds.values()) {
Collection<ShardDoc> shardDocs = shardMap.get(sdoc.shard);
if (shardDocs == null) {
shardDocs = new ArrayList<ShardDoc>();
shardMap.put(sdoc.shard, shardDocs);
}
shardDocs.add(sdoc);
}
// Now create a request for each shard to retrieve the stored fields
for (Collection<ShardDoc> shardDocs : shardMap.values()) {
ShardRequest sreq = new ShardRequest();
sreq.purpose = ShardRequest.PURPOSE_GET_FIELDS;
sreq.shards = new String[]{shardDocs.iterator().next().shard};
sreq.params = new ModifiableSolrParams();
// add original params
sreq.params.add(rb.req.getParams());
sreq.params.remove(CommonParams.Q);//remove the query
ArrayList<String> ids = new ArrayList<String>(shardDocs.size());
for (ShardDoc shardDoc : shardDocs) {
ids.add(shardDoc.id.toString());
}
sreq.params.add(TermVectorParams.DOC_IDS, StrUtils.join(ids, ','));
rb.addRequest(this, sreq);
}
result = ResponseBuilder.STAGE_DONE;
}
return result;
}
// in core/src/java/org/apache/solr/handler/component/TermVectorComponent.java
@Override
public void prepare(ResponseBuilder rb) throws IOException {
}
// in core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException, KeeperException, InterruptedException
{
CoreContainer coreContainer = req.getCore().getCoreDescriptor().getCoreContainer();
if (coreContainer.isZooKeeperAware()) {
showFromZooKeeper(req, rsp, coreContainer);
} else {
showFromFileSystem(req, rsp);
}
}
// in core/src/java/org/apache/solr/handler/admin/ShowFileRequestHandler.java
private void showFromFileSystem(SolrQueryRequest req, SolrQueryResponse rsp)
throws IOException {
File adminFile = null;
final SolrResourceLoader loader = req.getCore().getResourceLoader();
File configdir = new File( loader.getConfigDir() );
if (!configdir.exists()) {
// TODO: maybe we should just open it this way to start with?
try {
configdir = new File( loader.getClassLoader().getResource(loader.getConfigDir()).toURI() );
} catch (URISyntaxException e) {
throw new SolrException( ErrorCode.FORBIDDEN, "Can not access configuration directory!");
}
}
String fname = req.getParams().get("file", null);
if( fname == null ) {
adminFile = configdir;
}
else {
fname = fname.replace( '\\', '/' ); // normalize slashes
if( hiddenFiles.contains( fname.toUpperCase(Locale.ENGLISH) ) ) {
throw new SolrException( ErrorCode.FORBIDDEN, "Can not access: "+fname );
}
if( fname.indexOf( ".." ) >= 0 ) {
throw new SolrException( ErrorCode.FORBIDDEN, "Invalid path: "+fname );
}
adminFile = new File( configdir, fname );
}
// Make sure the file exists, is readable and is not a hidden file
if( !adminFile.exists() ) {
throw new SolrException( ErrorCode.BAD_REQUEST, "Can not find: "+adminFile.getName()
+ " ["+adminFile.getAbsolutePath()+"]" );
}
if( !adminFile.canRead() || adminFile.isHidden() ) {
throw new SolrException( ErrorCode.BAD_REQUEST, "Can not show: "+adminFile.getName()
+ " ["+adminFile.getAbsolutePath()+"]" );
}
// Show a directory listing
if( adminFile.isDirectory() ) {
int basePath = configdir.getAbsolutePath().length() + 1;
NamedList<SimpleOrderedMap<Object>> files = new SimpleOrderedMap<SimpleOrderedMap<Object>>();
for( File f : adminFile.listFiles() ) {
String path = f.getAbsolutePath().substring( basePath );
path = path.replace( '\\', '/' ); // normalize slashes
if( hiddenFiles.contains( path.toUpperCase(Locale.ENGLISH) ) ) {
continue; // don't show 'hidden' files
}
if( f.isHidden() || f.getName().startsWith( "." ) ) {
continue; // skip hidden system files...
}
SimpleOrderedMap<Object> fileInfo = new SimpleOrderedMap<Object>();
files.add( path, fileInfo );
if( f.isDirectory() ) {
fileInfo.add( "directory", true );
}
else {
// TODO? content type
fileInfo.add( "size", f.length() );
}
fileInfo.add( "modified", new Date( f.lastModified() ) );
}
rsp.add( "files", files );
}
else {
// Include the file contents
//The file logic depends on RawResponseWriter, so force its use.
ModifiableSolrParams params = new ModifiableSolrParams( req.getParams() );
params.set( CommonParams.WT, "raw" );
req.setParams(params);
ContentStreamBase content = new ContentStreamBase.FileStream( adminFile );
content.setContentType( req.getParams().get( USE_CONTENT_TYPE ) );
rsp.add(RawResponseWriter.CONTENT, content);
}
rsp.setHttpCaching(false);
}
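// Example (not from the Solr source), assuming the handler is registered at
// /admin/file and the usual conf/ file names:
//
//   /admin/file                                       -> directory listing of conf/
//   /admin/file?file=schema.xml&contentType=text/xml  -> raw file contents (wt forced to "raw")
//   /admin/file?file=../solr.xml                      -> 403 FORBIDDEN, per the ".." check above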
// in core/src/java/org/apache/solr/handler/admin/PropertiesRequestHandler.java
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException
{
Object props = null;
String name = req.getParams().get( "name" );
if( name != null ) {
NamedList<String> p = new SimpleOrderedMap<String>();
p.add( name, System.getProperty(name) );
props = p;
}
else {
props = System.getProperties();
}
rsp.add( "system.properties", props );
rsp.setHttpCaching(false);
}
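// Example (not from the Solr source), assuming the handler is registered at
// /admin/properties:
//
//   /admin/properties                    -> all JVM system properties
//   /admin/properties?name=java.version  -> just the one named property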
// in core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
private static SimpleOrderedMap<Object> getDocumentFieldsInfo( Document doc, int docId, IndexReader reader,
IndexSchema schema ) throws IOException
{
final CharsRef spare = new CharsRef();
SimpleOrderedMap<Object> finfo = new SimpleOrderedMap<Object>();
for( Object o : doc.getFields() ) {
Field field = (Field)o;
SimpleOrderedMap<Object> f = new SimpleOrderedMap<Object>();
SchemaField sfield = schema.getFieldOrNull( field.name() );
FieldType ftype = (sfield==null)?null:sfield.getType();
f.add( "type", (ftype==null)?null:ftype.getTypeName() );
f.add( "schema", getFieldFlags( sfield ) );
f.add( "flags", getFieldFlags( field ) );
Term t = new Term(field.name(), ftype!=null ? ftype.storedToIndexed(field) : field.stringValue());
f.add( "value", (ftype==null)?null:ftype.toExternal( field ) );
// TODO: this really should be "stored"
f.add( "internal", field.stringValue() ); // may be a binary number
BytesRef bytes = field.binaryValue();
if (bytes != null) {
f.add( "binary", Base64.byteArrayToBase64(bytes.bytes, bytes.offset, bytes.length));
}
f.add( "boost", field.boost() );
f.add( "docFreq", t.text()==null ? 0 : reader.docFreq( t ) ); // this can be 0 for non-indexed fields
// If we have a term vector, return that
if( field.fieldType().storeTermVectors() ) {
try {
Terms v = reader.getTermVector( docId, field.name() );
if( v != null ) {
SimpleOrderedMap<Integer> tfv = new SimpleOrderedMap<Integer>();
final TermsEnum termsEnum = v.iterator(null);
BytesRef text;
while((text = termsEnum.next()) != null) {
final int freq = (int) termsEnum.totalTermFreq();
UnicodeUtil.UTF8toUTF16(text, spare);
tfv.add(spare.toString(), freq);
}
f.add( "termVector", tfv );
}
}
catch( Exception ex ) {
log.warn( "error writing term vector", ex );
}
}
finfo.add( field.name(), f );
}
return finfo;
}
// in core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
private static Document getFirstLiveDoc(AtomicReader reader, String fieldName, Terms terms) throws IOException {
DocsEnum docsEnum = null;
TermsEnum termsEnum = terms.iterator(null);
BytesRef text;
// Deal with the chance that the first bunch of terms are in deleted documents. Is there a better way?
for (int idx = 0; idx < 1000 && docsEnum == null; ++idx) {
text = termsEnum.next();
if (text == null) { // Ran off the end of the terms enum without finding any live docs with that field in them.
return null;
}
Term term = new Term(fieldName, text);
docsEnum = reader.termDocsEnum(reader.getLiveDocs(),
term.field(),
new BytesRef(term.text()),
false);
if (docsEnum != null) {
int docId;
if ((docId = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
return reader.document(docId);
}
}
}
return null;
}
// in core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader, boolean detail) throws IOException {
// "detail" is accepted for back-compat but ignored
return getIndexInfo(reader);
}
// in core/src/java/org/apache/solr/handler/admin/LukeRequestHandler.java
public static SimpleOrderedMap<Object> getIndexInfo(DirectoryReader reader) throws IOException {
Directory dir = reader.directory();
SimpleOrderedMap<Object> indexInfo = new SimpleOrderedMap<Object>();
indexInfo.add("numDocs", reader.numDocs());
indexInfo.add("maxDoc", reader.maxDoc());
indexInfo.add("version", reader.getVersion()); // TODO? Is this different then: IndexReader.getCurrentVersion( dir )?
indexInfo.add("segmentCount", reader.getSequentialSubReaders().length);
indexInfo.add("current", reader.isCurrent() );
indexInfo.add("hasDeletions", reader.hasDeletions() );
indexInfo.add("directory", dir );
indexInfo.add("userData", reader.getIndexCommit().getUserData());
String s = reader.getIndexCommit().getUserData().get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
if (s != null) {
indexInfo.add("lastModified", new Date(Long.parseLong(s)));
}
return indexInfo;
}
// in core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
@Override
public void handleRequestBody(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException
{
SimpleOrderedMap<Object> system = new SimpleOrderedMap<Object>();
rsp.add( "system", system );
ThreadMXBean tmbean = ManagementFactory.getThreadMXBean();
// Thread Count
SimpleOrderedMap<Object> nl = new SimpleOrderedMap<Object>();
nl.add( "current",tmbean.getThreadCount() );
nl.add( "peak", tmbean.getPeakThreadCount() );
nl.add( "daemon", tmbean.getDaemonThreadCount() );
system.add( "threadCount", nl );
// Deadlocks
ThreadInfo[] tinfos;
long[] tids = tmbean.findMonitorDeadlockedThreads();
if (tids != null) {
tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE);
NamedList<SimpleOrderedMap<Object>> lst = new NamedList<SimpleOrderedMap<Object>>();
for (ThreadInfo ti : tinfos) {
if (ti != null) {
lst.add( "thread", getThreadInfo( ti, tmbean ) );
}
}
system.add( "deadlocks", lst );
}
// Now show all the threads....
tids = tmbean.getAllThreadIds();
tinfos = tmbean.getThreadInfo(tids, Integer.MAX_VALUE);
NamedList<SimpleOrderedMap<Object>> lst = new NamedList<SimpleOrderedMap<Object>>();
for (ThreadInfo ti : tinfos) {
if (ti != null) {
lst.add( "thread", getThreadInfo( ti, tmbean ) );
}
}
system.add( "threadDump", lst );
rsp.setHttpCaching(false);
}
// in core/src/java/org/apache/solr/handler/admin/ThreadDumpHandler.java
private static SimpleOrderedMap<Object> getThreadInfo( ThreadInfo ti, ThreadMXBean tmbean ) throws IOException
{
SimpleOrderedMap<Object> info = new SimpleOrderedMap<Object>();
long tid = ti.getThreadId();
info.add( "id", tid );
info.add( "name", ti.getThreadName() );
info.add( "state", ti.getThreadState().toString() );
if (ti.getLockName() != null) {
info.add( "lock", ti.getLockName() );
}
if (ti.isSuspended()) {
info.add( "suspended", true );
}
if (ti.isInNative()) {
info.add( "native", true );
}
if (tmbean.isThreadCpuTimeSupported()) {
info.add( "cpuTime", formatNanos(tmbean.getThreadCpuTime(tid)) );
info.add( "userTime", formatNanos(tmbean.getThreadUserTime(tid)) );
}
if (ti.getLockOwnerName() != null) {
SimpleOrderedMap<Object> owner = new SimpleOrderedMap<Object>();
owner.add( "name", ti.getLockOwnerName() );
owner.add( "id", ti.getLockOwnerId() );
info.add( "owner", owner ); // attach the lock owner to the thread info
}
// Add the stack trace
int i=0;
String[] trace = new String[ti.getStackTrace().length];
for( StackTraceElement ste : ti.getStackTrace()) {
trace[i++] = ste.toString();
}
info.add( "stackTrace", trace );
return info;
}
// in core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
protected boolean handleMergeAction(SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
SolrParams params = req.getParams();
String cname = params.required().get(CoreAdminParams.CORE);
SolrCore core = coreContainer.getCore(cname);
SolrQueryRequest wrappedReq = null;
SolrCore[] sourceCores = null;
RefCounted<SolrIndexSearcher>[] searchers = null;
// stores readers created from indexDir param values
DirectoryReader[] readersToBeClosed = null;
Directory[] dirsToBeReleased = null;
if (core != null) {
try {
String[] dirNames = params.getParams(CoreAdminParams.INDEX_DIR);
if (dirNames == null || dirNames.length == 0) {
String[] sources = params.getParams("srcCore");
if (sources == null || sources.length == 0)
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,
"At least one indexDir or srcCore must be specified");
sourceCores = new SolrCore[sources.length];
for (int i = 0; i < sources.length; i++) {
String source = sources[i];
SolrCore srcCore = coreContainer.getCore(source);
if (srcCore == null)
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Core: " + source + " does not exist");
sourceCores[i] = srcCore;
}
} else {
readersToBeClosed = new DirectoryReader[dirNames.length];
dirsToBeReleased = new Directory[dirNames.length];
DirectoryFactory dirFactory = core.getDirectoryFactory();
for (int i = 0; i < dirNames.length; i++) {
Directory dir = dirFactory.get(dirNames[i], core.getSolrConfig().indexConfig.lockType);
dirsToBeReleased[i] = dir;
// TODO: why doesn't this use the IR factory? what is going on here?
readersToBeClosed[i] = DirectoryReader.open(dir);
}
}
DirectoryReader[] readers = null;
if (readersToBeClosed != null) {
readers = readersToBeClosed;
} else {
readers = new DirectoryReader[sourceCores.length];
searchers = new RefCounted[sourceCores.length];
for (int i = 0; i < sourceCores.length; i++) {
SolrCore solrCore = sourceCores[i];
// record the searchers so that we can decref
searchers[i] = solrCore.getSearcher();
readers[i] = searchers[i].get().getIndexReader();
}
}
UpdateRequestProcessorChain processorChain =
core.getUpdateProcessingChain(params.get(UpdateParams.UPDATE_CHAIN));
wrappedReq = new LocalSolrQueryRequest(core, req.getParams());
UpdateRequestProcessor processor =
processorChain.createProcessor(wrappedReq, rsp);
processor.processMergeIndexes(new MergeIndexesCommand(readers, req));
} finally {
if (searchers != null) {
for (RefCounted<SolrIndexSearcher> searcher : searchers) {
if (searcher != null) searcher.decref();
}
}
if (sourceCores != null) {
for (SolrCore solrCore : sourceCores) {
if (solrCore != null) solrCore.close();
}
}
if (readersToBeClosed != null) IOUtils.closeWhileHandlingException(readersToBeClosed);
if (dirsToBeReleased != null) {
for (Directory dir : dirsToBeReleased) {
DirectoryFactory dirFactory = core.getDirectoryFactory();
dirFactory.release(dir);
}
}
if (wrappedReq != null) wrappedReq.close();
core.close();
}
}
return coreContainer.isPersistent();
}
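// Example (not from the Solr source): the two request shapes handled above;
// core names and index paths are assumptions.
//
//   /admin/cores?action=mergeindexes&core=target&srcCore=coreA&srcCore=coreB
//   /admin/cores?action=mergeindexes&core=target&indexDir=/data/idx1&indexDir=/data/idx2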
// in core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
protected void handleRequestRecoveryAction(SolrQueryRequest req,
SolrQueryResponse rsp) throws IOException {
final SolrParams params = req.getParams();
String cname = params.get(CoreAdminParams.CORE);
if (cname == null) {
cname = "";
}
SolrCore core = null;
try {
core = coreContainer.getCore(cname);
if (core != null) {
core.getUpdateHandler().getSolrCoreState().doRecovery(coreContainer, cname);
} else {
SolrException.log(log, "Cound not find core to call recovery:" + cname);
}
} finally {
// no recoveryStrat close for now
if (core != null) {
core.close();
}
}
}
// in core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
protected void handleWaitForStateAction(SolrQueryRequest req,
SolrQueryResponse rsp) throws IOException, InterruptedException {
final SolrParams params = req.getParams();
String cname = params.get(CoreAdminParams.CORE);
if (cname == null) {
cname = "";
}
String nodeName = params.get("nodeName");
String coreNodeName = params.get("coreNodeName");
String waitForState = params.get("state");
Boolean checkLive = params.getBool("checkLive");
int pauseFor = params.getInt("pauseFor", 0);
String state = null;
boolean live = false;
int retry = 0;
while (true) {
SolrCore core = null;
try {
core = coreContainer.getCore(cname);
if (core == null && retry == 30) {
throw new SolrException(ErrorCode.BAD_REQUEST, "core not found:"
+ cname);
}
if (core != null) {
// wait until we are sure the recovering node is ready
// to accept updates
CloudDescriptor cloudDescriptor = core.getCoreDescriptor()
.getCloudDescriptor();
CloudState cloudState = coreContainer.getZkController()
.getCloudState();
String collection = cloudDescriptor.getCollectionName();
Slice slice = cloudState.getSlice(collection,
cloudDescriptor.getShardId());
if (slice != null) {
ZkNodeProps nodeProps = slice.getShards().get(coreNodeName);
if (nodeProps != null) {
state = nodeProps.get(ZkStateReader.STATE_PROP);
live = cloudState.liveNodesContain(nodeName);
if (state.equals(waitForState)) { // nodeProps is known non-null here
if (checkLive == null) {
break;
} else if (checkLive && live) {
break;
} else if (!checkLive && !live) {
break;
}
}
}
}
}
if (retry++ == 30) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"I was asked to wait on state " + waitForState + " for "
+ nodeName
+ " but I still do not see the request state. I see state: "
+ state + " live:" + live);
}
} finally {
if (core != null) {
core.close();
}
}
Thread.sleep(1000);
}
// small safety net for any updates that started with state that
// kept it from sending the update to be buffered -
// pause for a while to let any outstanding updates finish
// System.out.println("I saw state:" + state + " sleep for " + pauseFor +
// " live:" + live);
Thread.sleep(pauseFor);
// solrcloud_debug
// try {;
// LocalSolrQueryRequest r = new LocalSolrQueryRequest(core, new
// ModifiableSolrParams());
// CommitUpdateCommand commitCmd = new CommitUpdateCommand(r, false);
// commitCmd.softCommit = true;
// core.getUpdateHandler().commit(commitCmd);
// RefCounted<SolrIndexSearcher> searchHolder =
// core.getNewestSearcher(false);
// SolrIndexSearcher searcher = searchHolder.get();
// try {
// System.out.println(core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName()
// + " to replicate "
// + searcher.search(new MatchAllDocsQuery(), 1).totalHits + " gen:" +
// core.getDeletionPolicy().getLatestCommit().getGeneration() + " data:" +
// core.getDataDir());
// } finally {
// searchHolder.decref();
// }
// } catch (Exception e) {
//
// }
}
// in core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
protected void handleDistribUrlAction(SolrQueryRequest req,
SolrQueryResponse rsp) throws IOException, InterruptedException, SolrServerException {
// TODO: finish this and tests
SolrParams params = req.getParams();
final ModifiableSolrParams newParams = new ModifiableSolrParams(params);
newParams.remove("action");
SolrParams required = params.required();
final String subAction = required.get("subAction");
String collection = required.get("collection");
newParams.set(CoreAdminParams.ACTION, subAction);
SolrCore core = req.getCore();
ZkController zkController = core.getCoreDescriptor().getCoreContainer()
.getZkController();
CloudState cloudState = zkController.getCloudState();
Map<String,Slice> slices = cloudState.getCollectionStates().get(collection);
for (Map.Entry<String,Slice> entry : slices.entrySet()) {
Slice slice = entry.getValue();
Map<String,ZkNodeProps> shards = slice.getShards();
Set<Map.Entry<String,ZkNodeProps>> shardEntries = shards.entrySet();
for (Map.Entry<String,ZkNodeProps> shardEntry : shardEntries) {
final ZkNodeProps node = shardEntry.getValue();
if (cloudState.liveNodesContain(node.get(ZkStateReader.NODE_NAME_PROP))) {
newParams.set(CoreAdminParams.CORE, node.get(ZkStateReader.CORE_NAME_PROP));
String replica = node.get(ZkStateReader.BASE_URL_PROP);
ShardRequest sreq = new ShardRequest();
newParams.set("qt", "/admin/cores");
sreq.purpose = 1;
// TODO: this sucks
if (replica.startsWith("http://"))
replica = replica.substring(7);
sreq.shards = new String[]{replica};
sreq.actualShards = sreq.shards;
sreq.params = newParams;
shardHandler.submit(sreq, replica, sreq.params);
}
}
}
ShardResponse srsp;
do {
srsp = shardHandler.takeCompletedOrError();
if (srsp != null) {
Throwable e = srsp.getException();
if (e != null) {
log.error("Error talking to shard: " + srsp.getShard(), e);
}
}
} while(srsp != null);
}
// in core/src/java/org/apache/solr/handler/admin/CoreAdminHandler.java
protected NamedList<Object> getCoreStatus(CoreContainer cores, String cname) throws IOException {
NamedList<Object> info = new SimpleOrderedMap<Object>();
SolrCore core = cores.getCore(cname);
if (core != null) {
try {
info.add("name", core.getName());
info.add("isDefaultCore", core.getName().equals(cores.getDefaultCoreName()));
info.add("instanceDir", normalizePath(core.getResourceLoader().getInstanceDir()));
info.add("dataDir", normalizePath(core.getDataDir()));
info.add("config", core.getConfigResource());
info.add("schema", core.getSchemaResource());
info.add("startTime", new Date(core.getStartTime()));
info.add("uptime", System.currentTimeMillis() - core.getStartTime());
RefCounted<SolrIndexSearcher> searcher = core.getSearcher();
try {
SimpleOrderedMap<Object> indexInfo = LukeRequestHandler.getIndexInfo(searcher.get().getIndexReader());
long size = getIndexSize(core);
indexInfo.add("sizeInBytes", size);
indexInfo.add("size", NumberUtils.readableSize(size));
info.add("index", indexInfo);
} finally {
searcher.decref();
}
} finally {
core.close();
}
}
return info;
}
// in core/src/java/org/apache/solr/handler/SnapShooter.java
public void copyFiles(Collection<String> files, File destDir) throws IOException {
for (String indexFile : files) {
File source = new File(solrCore.getIndexDir(), indexFile);
copyFile(source, new File(destDir, source.getName()), true);
}
}
// in core/src/java/org/apache/solr/handler/SnapShooter.java
public void copyFile(File source, File destination, boolean preserveFileDate)
throws IOException {
// check source exists
if (!source.exists()) {
String message = "File " + source + " does not exist";
throw new FileNotFoundException(message);
}
// does the destination directory exist?
if (destination.getParentFile() != null
&& !destination.getParentFile().exists()) {
destination.getParentFile().mkdirs();
}
// make sure we can write to destination
if (destination.exists() && !destination.canWrite()) {
String message = "Unable to open file " + destination + " for writing.";
throw new IOException(message);
}
FileInputStream input = null;
FileOutputStream output = null;
try {
input = new FileInputStream(source);
output = new FileOutputStream(destination);
int count = 0;
int n = 0;
int rcnt = 0;
while (-1 != (n = input.read(buffer))) {
output.write(buffer, 0, n);
count += n;
rcnt++;
/***
// reserve every 4.6875 MB
if (rcnt == 150) {
rcnt = 0;
delPolicy.setReserveDuration(indexCommit.getVersion(), reserveTime);
}
***/
}
} finally {
try {
IOUtils.closeQuietly(input);
} finally {
IOUtils.closeQuietly(output);
}
}
if (source.length() != destination.length()) {
String message = "Failed to copy full contents from " + source + " to "
+ destination;
throw new IOException(message);
}
if (preserveFileDate) {
// file copy should preserve file date
destination.setLastModified(source.lastModified());
}
}
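// Sketch (not from the source): on Java 7+, the manual stream loop above can be
// replaced with java.nio.file.Files; COPY_ATTRIBUTES carries the last-modified
// time, mirroring the preserveFileDate branch.
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

class NioCopySketch {
  static void copy(File source, File destination) throws IOException {
    Files.copy(source.toPath(), destination.toPath(),
        StandardCopyOption.REPLACE_EXISTING,   // overwrite, as copyFile() allows
        StandardCopyOption.COPY_ATTRIBUTES);   // preserves lastModified, among others
  }
}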
// in core/src/java/org/apache/solr/analysis/SynonymFilterFactory.java
private SynonymMap loadSolrSynonyms(ResourceLoader loader, boolean dedup, Analyzer analyzer) throws IOException, ParseException {
final boolean expand = getBoolean("expand", true);
String synonyms = args.get("synonyms");
if (synonyms == null)
throw new InitializationException("Missing required argument 'synonyms'.");
CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
.onMalformedInput(CodingErrorAction.REPORT)
.onUnmappableCharacter(CodingErrorAction.REPORT);
SolrSynonymParser parser = new SolrSynonymParser(dedup, expand, analyzer);
File synonymFile = new File(synonyms);
if (synonymFile.exists()) {
decoder.reset();
parser.add(new InputStreamReader(loader.openResource(synonyms), decoder));
} else {
List<String> files = StrUtils.splitFileNames(synonyms);
for (String file : files) {
decoder.reset();
parser.add(new InputStreamReader(loader.openResource(file), decoder));
}
}
return parser.build();
}
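// Example (not from the source): the Solr synonym format this parser accepts.
//
//   # comment lines start with '#'
//   couch,sofa,divan        # with expand=true each term maps to all three
//   teh => the              # explicit one-way mapping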
// in core/src/java/org/apache/solr/analysis/SynonymFilterFactory.java
private SynonymMap loadWordnetSynonyms(ResourceLoader loader, boolean dedup, Analyzer analyzer) throws IOException, ParseException {
final boolean expand = getBoolean("expand", true);
String synonyms = args.get("synonyms");
if (synonyms == null)
throw new InitializationException("Missing required argument 'synonyms'.");
CharsetDecoder decoder = Charset.forName("UTF-8").newDecoder()
.onMalformedInput(CodingErrorAction.REPORT)
.onUnmappableCharacter(CodingErrorAction.REPORT);
WordnetSynonymParser parser = new WordnetSynonymParser(dedup, expand, analyzer);
File synonymFile = new File(synonyms);
if (synonymFile.exists()) {
decoder.reset();
parser.add(new InputStreamReader(loader.openResource(synonyms), decoder));
} else {
List<String> files = StrUtils.splitFileNames(synonyms);
for (String file : files) {
decoder.reset();
parser.add(new InputStreamReader(loader.openResource(file), decoder));
}
}
return parser.build();
}
// in core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
@Override
public void reset(Reader input) throws IOException {
try {
super.reset(input);
input = super.input;
char[] buf = new char[32];
int len = input.read(buf);
this.startOfs = correctOffset(0);
this.endOfs = correctOffset(len);
String v = new String(buf, 0, len);
try {
switch (type) {
case INTEGER:
ts.setIntValue(Integer.parseInt(v));
break;
case FLOAT:
ts.setFloatValue(Float.parseFloat(v));
break;
case LONG:
ts.setLongValue(Long.parseLong(v));
break;
case DOUBLE:
ts.setDoubleValue(Double.parseDouble(v));
break;
case DATE:
ts.setLongValue(dateField.parseMath(null, v).getTime());
break;
default:
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unknown type for trie field");
}
} catch (NumberFormatException nfe) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Invalid Number: " + v);
}
} catch (IOException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to create TrieIndexTokenizer", e);
}
}
// in core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
@Override
public void close() throws IOException {
super.close();
ts.close();
}
// in core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
@Override
public void reset() throws IOException {
super.reset();
ts.reset();
}
// in core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
@Override
public boolean incrementToken() throws IOException {
if (ts.incrementToken()) {
ofsAtt.setOffset(startOfs, endOfs);
return true;
}
return false;
}
// in core/src/java/org/apache/solr/analysis/TrieTokenizerFactory.java
@Override
public void end() throws IOException {
ts.end();
ofsAtt.setOffset(endOfs, endOfs);
}
// in core/src/java/org/apache/solr/analysis/ReversedWildcardFilter.java
@Override
public boolean incrementToken() throws IOException {
if( save != null ) {
// clearAttributes(); // not currently necessary
restoreState(save);
save = null;
return true;
}
if (!input.incrementToken()) return false;
// pass through zero-length terms
int oldLen = termAtt.length();
if (oldLen ==0) return true;
int origPosIncrement = posAtt.getPositionIncrement(); // this is a position increment, not an offset
if (withOriginal == true){
posAtt.setPositionIncrement(0);
save = captureState();
}
char [] buffer = termAtt.resizeBuffer(oldLen + 1);
buffer[oldLen] = markerChar;
reverse(buffer, 0, oldLen + 1);
posAtt.setPositionIncrement(origPosIncrement);
termAtt.copyBuffer(buffer, 0, oldLen +1);
return true;
}
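// Worked example (not from the source): with withOriginal=true and the
// factory's usual '\u0001' marker, an input token "apple" becomes two tokens
// at the same position: first "\u0001elppa" (marker appended, then the whole
// buffer reversed), then the saved original "apple" with position increment 0.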
// in core/src/java/org/apache/solr/analysis/TokenizerChain.java
@Override
protected void reset(Reader reader) throws IOException {
// the tokenizers are currently reset by the indexing process, so only
// the tokenizer needs to be reset.
Reader r = initReader(reader);
super.reset(r);
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
public static void main(String[] args) throws IOException {
Reader in = new LegacyHTMLStripCharFilter(
CharReader.get(new InputStreamReader(System.in)));
int ch;
while ( (ch=in.read()) != -1 ) System.out.print((char)ch);
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int next() throws IOException {
int len = pushed.length();
if (len>0) {
int ch = pushed.charAt(len-1);
pushed.setLength(len-1);
return ch;
}
numRead++;
return input.read();
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int nextSkipWS() throws IOException {
int ch=next();
while(isSpace(ch)) ch=next();
return ch;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int peek() throws IOException {
int len = pushed.length();
if (len>0) {
return pushed.charAt(len-1);
}
numRead++;
int ch = input.read();
push(ch);
return ch;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private void saveState() throws IOException {
lastMark = numRead;
input.mark(readAheadLimit);
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private void restoreState() throws IOException {
input.reset();
pushed.setLength(0);
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readNumericEntity() throws IOException {
// "&#" has already been read at this point
int eaten = 2;
// is this decimal, hex, or nothing at all.
int ch = next();
int base=10;
boolean invalid=false;
sb.setLength(0);
if (isDigit(ch)) {
// decimal character entity
sb.append((char)ch);
for (int i=0; i<10; i++) {
ch = next();
if (isDigit(ch)) {
sb.append((char)ch);
} else {
break;
}
}
} else if (ch=='x') {
eaten++;
// hex character entity
base=16;
sb.setLength(0);
for (int i=0; i<10; i++) {
ch = next();
if (isHex(ch)) {
sb.append((char)ch);
} else {
break;
}
}
} else {
return MISMATCH;
}
// In older HTML, an entity may not have always been terminated
// with a semicolon. We'll also treat EOF or whitespace as terminating
// the entity.
try {
if (ch==';' || ch==-1) {
// do not count the eaten ';' as whitespace, since we do output a char here
numWhitespace = sb.length() + eaten;
return Integer.parseInt(sb.toString(), base);
}
// if whitespace terminated the entity, we need to return
// that whitespace on the next call to read().
if (isSpace(ch)) {
push(ch);
numWhitespace = sb.length() + eaten;
return Integer.parseInt(sb.toString(), base);
}
} catch (NumberFormatException e) {
return MISMATCH;
}
// Not an entity...
return MISMATCH;
}
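// Worked examples (not from the source), with "&#" already consumed by the
// caller:
//
//   "&#65;"  -> decimal entity, base 10, returns 65   ('A')
//   "&#x41;" -> hex entity ('x' bumps eaten), base 16, returns 0x41 ('A')
//   "&#65 "  -> whitespace terminates; the space is pushed back for read()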
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readEntity() throws IOException {
int ch = next();
if (ch=='#') return readNumericEntity();
//read an entity reference
// for an entity reference, require the ';' for safety.
// otherwise we may try and convert part of some company
// names to an entity. "Alpha&Beta Corp" for instance.
//
// TODO: perhaps I should special case some of the
// more common ones like &amp; to make the ';' optional...
sb.setLength(0);
sb.append((char)ch);
for (int i=0; i< safeReadAheadLimit; i++) {
ch=next();
if (Character.isLetter(ch)) {
sb.append((char)ch);
} else {
break;
}
}
if (ch==';') {
String entity=sb.toString();
Character entityChar = entityTable.get(entity);
if (entityChar!=null) {
numWhitespace = entity.length() + 1 ;
return entityChar.charValue();
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readBang(boolean inScript) throws IOException {
// at this point, "<!" has been read
int ret = readComment(inScript);
if (ret==MATCH) return MATCH;
if ((numRead - lastMark) < safeReadAheadLimit || peek() == '>' ) {
int ch = next();
if (ch=='>') return MATCH;
// if it starts with <! and isn't a comment,
// simply read until ">"
//since we did readComment already, it may be the case that we are already deep into the read ahead buffer
//so, we may need to abort sooner
while ((numRead - lastMark) < safeReadAheadLimit) {
ch = next();
if (ch=='>') {
return MATCH;
}
else if (ch<0) {
return MISMATCH;
}
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readComment(boolean inScript) throws IOException {
// at this point "<!" has been read
int ch = next();
if (ch!='-') {
// not a comment
push(ch);
return MISMATCH;
}
ch = next();
if (ch!='-') {
// not a comment
push(ch);
push('-');
return MISMATCH;
}
/*two extra calls to next() here, so make sure we don't read past our mark*/
while ((numRead - lastMark) < safeReadAheadLimit -3 ) {
ch = next();
if (ch<0) return MISMATCH;
if (ch=='-') {
ch = next();
if (ch<0) return MISMATCH;
if (ch!='-') {
push(ch);
continue;
}
ch = next();
if (ch<0) return MISMATCH;
if (ch!='>') {
push(ch);
push('-');
continue;
}
return MATCH;
} else if ((ch=='\'' || ch=='"') && inScript) {
push(ch);
int ret=readScriptString();
// if this wasn't a string, there's not much we can do
// at this point without having a stack of stream states in
// order to "undo" just the latest.
} else if (ch=='<') {
eatSSI();
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readTag() throws IOException {
// at this point '<' has already been read
int ch = next();
if (!isAlpha(ch)) {
push(ch);
return MISMATCH;
}
sb.setLength(0);
sb.append((char)ch);
while((numRead - lastMark) < safeReadAheadLimit) {
ch = next();
if (isIdChar(ch)) {
sb.append((char)ch);
} else if (ch=='/') {
// Hmmm, a tag can close with "/>" as well as "/ >"
// read end tag '/>' or '/ >', etc
return nextSkipWS()=='>' ? MATCH : MISMATCH;
} else {
break;
}
}
if (escapedTags!=null && escapedTags.contains(sb.toString())){
//if this is a reservedTag, then keep it
return MISMATCH;
}
// After the tag id, there needs to be either whitespace or
// '>'
if ( !(ch=='>' || isSpace(ch)) ) {
return MISMATCH;
}
if (ch!='>') {
// process attributes
while ((numRead - lastMark) < safeReadAheadLimit) {
ch=next();
if (isSpace(ch)) {
continue;
} else if (isFirstIdChar(ch)) {
push(ch);
int ret = readAttr2();
if (ret==MISMATCH) return ret;
} else if (ch=='/') {
// read end tag '/>' or '/ >', etc
return nextSkipWS()=='>' ? MATCH : MISMATCH;
} else if (ch=='>') {
break;
} else {
return MISMATCH;
}
}
if ((numRead - lastMark) >= safeReadAheadLimit){
return MISMATCH;//exit out if we exceeded the buffer
}
}
// We only get to this point after we have read the
// entire tag. Now let's see if it's a special tag.
String name=sb.toString();
if (name.equalsIgnoreCase("script") || name.equalsIgnoreCase("style")) {
// The content of script and style elements is
// CDATA in HTML 4 but PCDATA in XHTML.
/* From HTML4:
Although the STYLE and SCRIPT elements use CDATA for their data model,
for these elements, CDATA must be handled differently by user agents.
Markup and entities must be treated as raw text and passed to the application
as is. The first occurrence of the character sequence "</" (end-tag open
delimiter) is treated as terminating the end of the element's content. In
valid documents, this would be the end tag for the element.
*/
// discard everything until endtag is hit (except
// if it occurs in a comment.
// reset the stream mark to here, since we know that we successfully matched
// a tag, and if we can't find the end tag, this is where we will want
// to roll back to.
saveState();
pushed.setLength(0);
return findEndTag();
}
return MATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
int findEndTag() throws IOException {
while ((numRead - lastMark) < safeReadAheadLimit) {
int ch = next();
if (ch=='<') {
ch = next();
// skip looking for end-tag in comments
if (ch=='!') {
int ret = readBang(true);
if (ret==MATCH) continue;
// yikes... what now? It wasn't a comment, but I can't get
// back to the state I was at. Just continue from where I
// am I guess...
continue;
}
// did we match "</"
if (ch!='/') {
push(ch);
continue;
}
int ret = readName(false);
if (ret==MISMATCH) return MISMATCH;
ch=nextSkipWS();
if (ch!='>') return MISMATCH;
return MATCH;
} else if (ch=='\'' || ch=='"') {
// read javascript string to avoid a false match.
push(ch);
int ret = readScriptString();
// what to do about a non-match (non-terminated string?)
// play it safe and index the rest of the data I guess...
if (ret==MISMATCH) return MISMATCH;
} else if (ch<0) {
return MISMATCH;
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readScriptString() throws IOException {
int quoteChar = next();
if (quoteChar!='\'' && quoteChar!='"') return MISMATCH;
while((numRead - lastMark) < safeReadAheadLimit) {
int ch = next();
if (ch==quoteChar) return MATCH;
else if (ch=='\\') {
ch=next();
} else if (ch<0) {
return MISMATCH;
} else if (ch=='<') {
eatSSI();
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readName(boolean checkEscaped) throws IOException {
StringBuilder builder = (checkEscaped && escapedTags!=null) ? new StringBuilder() : null;
int ch = next();
if (builder!=null) builder.append((char)ch);
if (!isFirstIdChar(ch)) return MISMATCH;
ch = next();
if (builder!=null) builder.append((char)ch);
while(isIdChar(ch)) {
ch=next();
if (builder!=null) builder.append((char)ch);
}
if (ch!=-1) {
push(ch);
}
//strip off the trailing >
if (builder!=null && escapedTags.contains(builder.substring(0, builder.length() - 1))){
return MISMATCH;
}
return MATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readAttr2() throws IOException {
if ((numRead - lastMark < safeReadAheadLimit)) {
int ch = next();
if (!isFirstIdChar(ch)) return MISMATCH;
ch = next();
while(isIdChar(ch) && ((numRead - lastMark) < safeReadAheadLimit)){
ch=next();
}
if (isSpace(ch)) ch = nextSkipWS();
// attributes may not have a value at all!
// if (ch != '=') return MISMATCH;
if (ch != '=') {
push(ch);
return MATCH;
}
int quoteChar = nextSkipWS();
if (quoteChar=='"' || quoteChar=='\'') {
while ((numRead - lastMark) < safeReadAheadLimit) {
ch = next();
if (ch<0) return MISMATCH;
else if (ch=='<') {
eatSSI();
}
else if (ch==quoteChar) {
return MATCH;
//} else if (ch=='<') {
// return MISMATCH;
}
}
} else {
// unquoted attribute
while ((numRead - lastMark) < safeReadAheadLimit) {
ch = next();
if (ch<0) return MISMATCH;
else if (isSpace(ch)) {
push(ch);
return MATCH;
} else if (ch=='>') {
push(ch);
return MATCH;
} else if (ch=='<') {
eatSSI();
}
}
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int eatSSI() throws IOException {
// at this point, only a "<" was read.
// on a mismatch, push back the last char so that if it was
// a quote that closes the attribute, it will be re-read and matched.
int ch = next();
if (ch!='!') {
push(ch);
return MISMATCH;
}
ch=next();
if (ch!='-') {
push(ch);
return MISMATCH;
}
ch=next();
if (ch!='-') {
push(ch);
return MISMATCH;
}
ch=next();
if (ch!='#') {
push(ch);
return MISMATCH;
}
push('#'); push('-'); push('-');
return readComment(false);
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
private int readProcessingInstruction() throws IOException {
// "<?" has already been read
while ((numRead - lastMark) < safeReadAheadLimit) {
int ch = next();
if (ch=='?' && peek()=='>') {
next();
return MATCH;
} else if (ch==-1) {
return MISMATCH;
}
}
return MISMATCH;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
@Override
public int read() throws IOException {
// TODO: Do we ever want to preserve CDATA sections?
// where do we have to worry about them?
// <![ CDATA [ unescaped markup ]]>
if (numWhitespace > 0){
numEaten += numWhitespace;
addOffCorrectMap(numReturned, numEaten);
numWhitespace = 0;
}
numReturned++;
//do not limit this one by the READAHEAD
while(true) {
int lastNumRead = numRead;
int ch = next();
switch (ch) {
case '&':
saveState();
ch = readEntity();
if (ch>=0) return ch;
if (ch==MISMATCH) {
restoreState();
return '&';
}
break;
case '<':
saveState();
ch = next();
int ret = MISMATCH;
if (ch=='!') {
ret = readBang(false);
} else if (ch=='/') {
ret = readName(true);
if (ret==MATCH) {
ch=nextSkipWS();
ret= ch=='>' ? MATCH : MISMATCH;
}
} else if (isAlpha(ch)) {
push(ch);
ret = readTag();
} else if (ch=='?') {
ret = readProcessingInstruction();
}
// matched something to be discarded, so break
// from this case and continue in the loop
if (ret==MATCH) {
//break;//was
//return whitespace from
numWhitespace = (numRead - lastNumRead) - 1;//tack on the -1 since we are returning a space right now
return ' ';
}
// didn't match any HTML constructs, so roll back
// the stream state and just return '<'
restoreState();
return '<';
default: return ch;
}
}
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
@Override
public int read(char cbuf[], int off, int len) throws IOException {
int i=0;
for (i=0; i<len; i++) {
int ch = read();
if (ch==-1) break;
cbuf[off++] = (char)ch;
}
if (i==0) {
if (len==0) return 0;
return -1;
}
return i;
}
// in core/src/java/org/apache/solr/analysis/LegacyHTMLStripCharFilter.java
@Override
public void close() throws IOException {
input.close();
}
// in core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
@Override
public NamedList<Object> request(SolrRequest request) throws SolrServerException, IOException
{
String path = request.getPath();
if( path == null || !path.startsWith( "/" ) ) {
path = "/select";
}
// Check for cores action
SolrCore core = coreContainer.getCore( coreName );
if( core == null ) {
throw new SolrException( SolrException.ErrorCode.SERVER_ERROR,
"No such core: " + coreName );
}
SolrParams params = request.getParams();
if( params == null ) {
params = new ModifiableSolrParams();
}
// Extract the handler from the path or params
SolrRequestHandler handler = core.getRequestHandler( path );
if( handler == null ) {
if( "/select".equals( path ) || "/select/".equalsIgnoreCase( path) ) {
String qt = params.get( CommonParams.QT );
handler = core.getRequestHandler( qt );
if( handler == null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "unknown handler: "+qt);
}
}
// Perhaps the path is to manage the cores
if( handler == null &&
coreContainer != null &&
path.equals( coreContainer.getAdminPath() ) ) {
handler = coreContainer.getMultiCoreHandler();
}
}
if( handler == null ) {
core.close();
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "unknown handler: "+path );
}
SolrQueryRequest req = null;
try {
req = _parser.buildRequestFrom( core, params, request.getContentStreams() );
req.getContext().put( "path", path );
SolrQueryResponse rsp = new SolrQueryResponse();
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
core.execute( handler, req, rsp );
if( rsp.getException() != null ) {
if(rsp.getException() instanceof SolrException) {
throw rsp.getException();
}
throw new SolrServerException( rsp.getException() );
}
// Check if this should stream results
if( request.getStreamingResponseCallback() != null ) {
try {
final StreamingResponseCallback callback = request.getStreamingResponseCallback();
BinaryResponseWriter.Resolver resolver =
new BinaryResponseWriter.Resolver( req, rsp.getReturnFields())
{
@Override
public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOException {
// write an empty list...
SolrDocumentList docs = new SolrDocumentList();
docs.setNumFound( ctx.docs.matches() );
docs.setStart( ctx.docs.offset() );
docs.setMaxScore( ctx.docs.maxScore() );
codec.writeSolrDocumentList( docs );
// This will transform and write each document; the codec overrides below stream them
writeResultsBody( ctx, codec );
}
};
ByteArrayOutputStream out = new ByteArrayOutputStream();
new JavaBinCodec(resolver) {
@Override
public void writeSolrDocument(SolrDocument doc) throws IOException {
callback.streamSolrDocument( doc );
//super.writeSolrDocument( doc, fields );
}
@Override
public void writeSolrDocumentList(SolrDocumentList docs) throws IOException {
if( docs.size() > 0 ) {
SolrDocumentList tmp = new SolrDocumentList();
tmp.setMaxScore( docs.getMaxScore() );
tmp.setNumFound( docs.getNumFound() );
tmp.setStart( docs.getStart() );
docs = tmp;
}
callback.streamDocListInfo( docs.getNumFound(), docs.getStart(), docs.getMaxScore() );
super.writeSolrDocumentList(docs);
}
}.marshal(rsp.getValues(), out);
InputStream in = new ByteArrayInputStream(out.toByteArray());
return (NamedList<Object>) new JavaBinCodec(resolver).unmarshal(in);
}
catch (Exception ex) {
throw new RuntimeException(ex);
}
}
// Now write it out
NamedList<Object> normalized = getParsedResponse(req, rsp);
return normalized;
}
catch( IOException iox ) {
throw iox;
}
catch( SolrException sx ) {
throw sx;
}
catch( Exception ex ) {
throw new SolrServerException( ex );
}
finally {
if (req != null) req.close();
core.close();
SolrRequestInfo.clearRequestInfo();
}
}
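// Example (not from the Solr source): a minimal embedded setup that routes
// queries through the request() method above. solr.solr.home and the core
// name "collection1" are assumptions.
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.embedded.EmbeddedSolrServer;
import org.apache.solr.core.CoreContainer;

public class EmbeddedExample {
  public static void main(String[] args) throws Exception {
    CoreContainer container = new CoreContainer.Initializer().initialize(); // reads solr.solr.home
    EmbeddedSolrServer server = new EmbeddedSolrServer(container, "collection1");
    try {
      long hits = server.query(new SolrQuery("*:*")).getResults().getNumFound();
      System.out.println(hits + " documents");
    } finally {
      container.shutdown();
    }
  }
}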
// in core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
@Override
public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOException {
// write an empty list...
SolrDocumentList docs = new SolrDocumentList();
docs.setNumFound( ctx.docs.matches() );
docs.setStart( ctx.docs.offset() );
docs.setMaxScore( ctx.docs.maxScore() );
codec.writeSolrDocumentList( docs );
// This will transform and write each document; the codec overrides below stream them
writeResultsBody( ctx, codec );
}
// in core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
@Override
public void writeSolrDocument(SolrDocument doc) throws IOException {
callback.streamSolrDocument( doc );
//super.writeSolrDocument( doc, fields );
}
// in core/src/java/org/apache/solr/client/solrj/embedded/EmbeddedSolrServer.java
@Override
public void writeSolrDocumentList(SolrDocumentList docs) throws IOException {
if( docs.size() > 0 ) {
SolrDocumentList tmp = new SolrDocumentList();
tmp.setMaxScore( docs.getMaxScore() );
tmp.setNumFound( docs.getNumFound() );
tmp.setStart( docs.getStart() );
docs = tmp;
}
callback.streamDocListInfo( docs.getNumFound(), docs.getStart(), docs.getMaxScore() );
super.writeSolrDocumentList(docs);
}
// in core/src/java/org/apache/solr/client/solrj/embedded/JettySolrRunner.java
@Override
public void service(HttpServletRequest req, HttpServletResponse res)
throws IOException {
res.sendError(404, "Can not find: " + req.getRequestURI());
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void close() throws IOException {
writer.flushBuffer();
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void indent() throws IOException {
if (doIndent) indent(level);
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void indent(int lev) throws IOException {
writer.write(indentChars, 0, Math.min((lev<<1)+1, indentChars.length));
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public final void writeVal(String name, Object val) throws IOException {
// if there get to be enough types, perhaps hashing on the type
// to get a handler might be faster (but types must be exact to do that...)
// go in order of most common to least common
if (val==null) {
writeNull(name);
} else if (val instanceof String) {
writeStr(name, val.toString(), true);
// micro-optimization... using toString() avoids a cast first
} else if (val instanceof IndexableField) {
IndexableField f = (IndexableField)val;
SchemaField sf = schema.getFieldOrNull( f.name() );
if( sf != null ) {
sf.getType().write(this, name, f);
}
else {
writeStr(name, f.stringValue(), true);
}
} else if (val instanceof Number) {
if (val instanceof Integer) {
writeInt(name, val.toString());
} else if (val instanceof Long) {
writeLong(name, val.toString());
} else if (val instanceof Float) {
// we pass the float instead of using toString() because
// it may need special formatting. same for double.
writeFloat(name, ((Float)val).floatValue());
} else if (val instanceof Double) {
writeDouble(name, ((Double)val).doubleValue());
} else if (val instanceof Short) {
writeInt(name, val.toString());
} else if (val instanceof Byte) {
writeInt(name, val.toString());
} else {
// default... for debugging only
writeStr(name, val.getClass().getName() + ':' + val.toString(), true);
}
} else if (val instanceof Boolean) {
writeBool(name, val.toString());
} else if (val instanceof Date) {
writeDate(name,(Date)val);
} else if (val instanceof Document) {
SolrDocument doc = toSolrDocument( (Document)val );
DocTransformer transformer = returnFields.getTransformer();
if( transformer != null ) {
TransformContext context = new TransformContext();
context.req = req;
transformer.setContext(context);
transformer.transform(doc, -1);
}
writeSolrDocument(name, doc, returnFields, 0 );
} else if (val instanceof SolrDocument) {
writeSolrDocument(name, (SolrDocument)val, returnFields, 0);
} else if (val instanceof ResultContext) {
// requires access to IndexReader
writeDocuments(name, (ResultContext)val, returnFields);
} else if (val instanceof DocList) {
// Should not happen normally
ResultContext ctx = new ResultContext();
ctx.docs = (DocList)val;
writeDocuments(name, ctx, returnFields);
// }
// else if (val instanceof DocSet) {
// how do we know what fields to read?
// todo: have a DocList/DocSet wrapper that
// restricts the fields to write...?
} else if (val instanceof SolrDocumentList) {
writeSolrDocumentList(name, (SolrDocumentList)val, returnFields);
} else if (val instanceof Map) {
writeMap(name, (Map)val, false, true);
} else if (val instanceof NamedList) {
writeNamedList(name, (NamedList)val);
} else if (val instanceof Iterable) {
writeArray(name,((Iterable)val).iterator());
} else if (val instanceof Object[]) {
writeArray(name,(Object[])val);
} else if (val instanceof Iterator) {
writeArray(name,(Iterator)val);
} else if (val instanceof byte[]) {
byte[] arr = (byte[])val;
writeByteArr(name, arr, 0, arr.length);
} else if (val instanceof BytesRef) {
BytesRef arr = (BytesRef)val;
writeByteArr(name, arr.bytes, arr.offset, arr.length);
} else {
// default... for debugging only
writeStr(name, val.getClass().getName() + ':' + val.toString(), true);
}
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public final void writeSolrDocumentList(String name, SolrDocumentList docs, ReturnFields returnFields) throws IOException
{
writeStartDocumentList(name, docs.getStart(), docs.size(), docs.getNumFound(), docs.getMaxScore() );
for( int i=0; i<docs.size(); i++ ) {
writeSolrDocument( null, docs.get(i), returnFields, i );
}
writeEndDocumentList();
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public final void writeDocuments(String name, ResultContext res, ReturnFields fields ) throws IOException {
DocList ids = res.docs;
TransformContext context = new TransformContext();
context.query = res.query;
context.wantsScores = fields.wantsScore() && ids.hasScores();
context.req = req;
writeStartDocumentList(name, ids.offset(), ids.size(), ids.matches(),
context.wantsScores ? new Float(ids.maxScore()) : null );
DocTransformer transformer = fields.getTransformer();
context.searcher = req.getSearcher();
context.iterator = ids.iterator();
if( transformer != null ) {
transformer.setContext( context );
}
int sz = ids.size();
Set<String> fnames = fields.getLuceneFieldNames();
for (int i=0; i<sz; i++) {
int id = context.iterator.nextDoc();
Document doc = context.searcher.doc(id, fnames);
SolrDocument sdoc = toSolrDocument( doc );
if( transformer != null ) {
transformer.transform( sdoc, id);
}
writeSolrDocument( null, sdoc, fields, i );
}
if( transformer != null ) {
transformer.setContext( null );
}
writeEndDocumentList();
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeArray(String name, Object[] val) throws IOException {
writeArray(name, Arrays.asList(val).iterator());
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeInt(String name, int val) throws IOException {
writeInt(name,Integer.toString(val));
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeLong(String name, long val) throws IOException {
writeLong(name,Long.toString(val));
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeBool(String name, boolean val) throws IOException {
writeBool(name,Boolean.toString(val));
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeFloat(String name, float val) throws IOException {
String s = Float.toString(val);
// If it's not a normal number, write the value as a string instead.
// The following test also handles NaN since comparisons are always false.
if (val > Float.NEGATIVE_INFINITY && val < Float.POSITIVE_INFINITY) {
writeFloat(name,s);
} else {
writeStr(name,s,false);
}
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeDouble(String name, double val) throws IOException {
String s = Double.toString(val);
// If it's not a normal number, write the value as a string instead.
// The following test also handles NaN since comparisons are always false.
if (val > Double.NEGATIVE_INFINITY && val < Double.POSITIVE_INFINITY) {
writeDouble(name,s);
} else {
writeStr(name,s,false);
}
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeDate(String name, Date val) throws IOException {
writeDate(name, DateField.formatExternal(val));
}
// in core/src/java/org/apache/solr/response/TextResponseWriter.java
public void writeByteArr(String name, byte[] buf, int offset, int len) throws IOException {
writeStr(name, Base64.byteArrayToBase64(buf, offset, len), false);
}
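// --- Illustrative sketch (not from the Solr source): writeByteArr defers to
// Solr's own Base64 helper, which takes an (offset, length) slice. The
// modern-JDK equivalent (java.util.Base64, Java 8+) has no slice overload,
// so the window must be copied out first:
import java.util.Arrays;
import java.util.Base64;

static String base64Slice(byte[] buf, int offset, int len) {
// materialize the [offset, offset+len) window, then encode it
return Base64.getEncoder().encodeToString(Arrays.copyOfRange(buf, offset, offset + len));
}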
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
JSONWriter w = new JSONWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeResponse() throws IOException {
if(wrapperFunction!=null) {
writer.write(wrapperFunction + "(");
}
Boolean omitHeader = req.getParams().getBool(CommonParams.OMIT_HEADER);
if(omitHeader != null && omitHeader) rsp.getValues().remove("responseHeader");
writeNamedList(null, rsp.getValues());
if(wrapperFunction!=null) {
writer.write(')');
}
if (doIndent) writer.write('\n'); // ending with a newline looks much better from the command line
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected void writeKey(String fname, boolean needsEscaping) throws IOException {
writeStr(null, fname, needsEscaping);
writer.write(':');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected void writeNamedListAsMapMangled(String name, NamedList val) throws IOException {
int sz = val.size();
writeMapOpener(sz);
incLevel();
// In JSON objects (maps) we can't have null keys or duplicates...
// map null to "" and append a qualifier to duplicates.
//
// a=123,a=456 will be mapped to {a=123, a__1=456}
// Disad: this is ambiguous since a real key could be called a__1
//
// Another possible mapping could aggregate multiple keys to an array:
// a=123,a=456 maps to a=[123,456]
// Disad: this is ambiguous with a real single value that happens to be an array
//
// Both of these mappings have ambiguities.
HashMap<String,Integer> repeats = new HashMap<String,Integer>(4);
boolean first=true;
for (int i=0; i<sz; i++) {
String key = val.getName(i);
if (key==null) key="";
if (first) {
first=false;
repeats.put(key,0);
} else {
writeMapSeparator();
Integer repeatCount = repeats.get(key);
if (repeatCount==null) {
repeats.put(key,0);
} else {
String newKey = key;
int newCount = repeatCount;
do { // avoid generated key clashing with a real key
newKey = key + ' ' + (++newCount);
repeatCount = repeats.get(newKey);
} while (repeatCount != null);
repeats.put(key,newCount);
key = newKey;
}
}
indent();
writeKey(key, true);
writeVal(key,val.getVal(i));
}
decLevel();
writeMapCloser();
}
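// --- Illustrative sketch (not from the Solr source): the dedup loop above in
// isolation. Given a list of keys, a repeated key gets ' ' + counter appended,
// skipping generated names that collide with a real key seen so far:
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

static List<String> mangleKeys(List<String> keys) {
HashMap<String,Integer> repeats = new HashMap<>();
List<String> out = new ArrayList<>();
for (String key : keys) {
if (key == null) key = "";   // JSON objects cannot have null keys
Integer repeatCount = repeats.get(key);
if (repeatCount == null) {
repeats.put(key, 0);
} else {
String newKey = key;
int newCount = repeatCount;
do { // avoid the generated key clashing with a real key
newKey = key + ' ' + (++newCount);
} while (repeats.containsKey(newKey));
repeats.put(key, newCount);
key = newKey;
}
out.add(key);
}
return out;
}
// mangleKeys(List.of("a", "a", "b")) -> ["a", "a 1", "b"]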
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected void writeNamedListAsMapWithDups(String name, NamedList val) throws IOException {
int sz = val.size();
writeMapOpener(sz);
incLevel();
for (int i=0; i<sz; i++) {
if (i!=0) {
writeMapSeparator();
}
String key = val.getName(i);
if (key==null) key="";
indent();
writeKey(key, true);
writeVal(key,val.getVal(i));
}
decLevel();
writeMapCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected void writeNamedListAsArrMap(String name, NamedList val) throws IOException {
int sz = val.size();
indent();
writeArrayOpener(sz);
incLevel();
boolean first=true;
for (int i=0; i<sz; i++) {
String key = val.getName(i);
if (first) {
first=false;
} else {
writeArraySeparator();
}
indent();
if (key==null) {
writeVal(null,val.getVal(i));
} else {
writeMapOpener(1);
writeKey(key, true);
writeVal(key,val.getVal(i));
writeMapCloser();
}
}
decLevel();
writeArrayCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected void writeNamedListAsArrArr(String name, NamedList val) throws IOException {
int sz = val.size();
indent();
writeArrayOpener(sz);
incLevel();
boolean first=true;
for (int i=0; i<sz; i++) {
String key = val.getName(i);
if (first) {
first=false;
} else {
writeArraySeparator();
}
indent();
/*** if key is null, just write value???
if (key==null) {
writeVal(null,val.getVal(i));
} else {
***/
writeArrayOpener(1);
incLevel();
if (key==null) {
writeNull(null);
} else {
writeStr(null, key, true);
}
writeArraySeparator();
writeVal(key,val.getVal(i));
decLevel();
writeArrayCloser();
}
decLevel();
writeArrayCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected void writeNamedListAsFlat(String name, NamedList val) throws IOException {
int sz = val.size();
writeArrayOpener(sz);
incLevel();
for (int i=0; i<sz; i++) {
if (i!=0) {
writeArraySeparator();
}
String key = val.getName(i);
indent();
if (key==null) {
writeNull(null);
} else {
writeStr(null, key, true);
}
writeArraySeparator();
writeVal(key, val.getVal(i));
}
decLevel();
writeArrayCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeNamedList(String name, NamedList val) throws IOException {
if (val instanceof SimpleOrderedMap) {
writeNamedListAsMapWithDups(name,val);
} else if (namedListStyle==JSON_NL_FLAT) {
writeNamedListAsFlat(name,val);
} else if (namedListStyle==JSON_NL_MAP){
writeNamedListAsMapWithDups(name,val);
} else if (namedListStyle==JSON_NL_ARROFARR) {
writeNamedListAsArrArr(name,val);
} else if (namedListStyle==JSON_NL_ARROFMAP) {
writeNamedListAsArrMap(name,val);
}
}
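// --- Note (not from the Solr source): for a NamedList of ("a",1),("a",2) the
// four styles dispatched above render roughly as:
//   json.nl=flat   -> ["a",1,"a",2]
//   json.nl=map    -> {"a":1,"a":2}      (duplicate keys are kept as-is)
//   json.nl=arrarr -> [["a",1],["a",2]]
//   json.nl=arrmap -> [{"a":1},{"a":2}]
// A SimpleOrderedMap is always rendered as a map, regardless of the style.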
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeSolrDocument(String name, SolrDocument doc, ReturnFields returnFields, int idx) throws IOException {
if( idx > 0 ) {
writeArraySeparator();
}
indent();
writeMapOpener(doc.size());
incLevel();
boolean first=true;
for (String fname : doc.getFieldNames()) {
if (!returnFields.wantsField(fname)) {
continue;
}
if (first) {
first=false;
}
else {
writeMapSeparator();
}
indent();
writeKey(fname, true);
Object val = doc.getFieldValue(fname);
if (val instanceof Collection) {
writeVal(fname, val);
} else {
// if multivalued field, write single value as an array
SchemaField sf = schema.getFieldOrNull(fname);
if (sf != null && sf.multiValued()) {
writeArrayOpener(-1); // no trivial way to determine array size
writeVal(fname, val);
writeArrayCloser();
} else {
writeVal(fname, val);
}
}
}
decLevel();
writeMapCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeStartDocumentList(String name,
long start, int size, long numFound, Float maxScore) throws IOException
{
writeMapOpener((maxScore==null) ? 3 : 4);
incLevel();
writeKey("numFound",false);
writeLong(null,numFound);
writeMapSeparator();
writeKey("start",false);
writeLong(null,start);
if (maxScore!=null) {
writeMapSeparator();
writeKey("maxScore",false);
writeFloat(null,maxScore);
}
writeMapSeparator();
// indent();
writeKey("docs",false);
writeArrayOpener(size);
incLevel();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeEndDocumentList() throws IOException
{
decLevel();
writeArrayCloser();
decLevel();
indent();
writeMapCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeMapOpener(int size) throws IOException, IllegalArgumentException {
writer.write('{');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeMapSeparator() throws IOException {
writer.write(',');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeMapCloser() throws IOException {
writer.write('}');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeArrayOpener(int size) throws IOException, IllegalArgumentException {
writer.write('[');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeArraySeparator() throws IOException {
writer.write(',');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
public void writeArrayCloser() throws IOException {
writer.write(']');
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
// it might be more efficient to use a stringbuilder or write substrings
// if writing chars to the stream is slow.
if (needsEscaping) {
/* http://www.ietf.org/internet-drafts/draft-crockford-jsonorg-json-04.txt
All Unicode characters may be placed within
the quotation marks except for the characters which must be
escaped: quotation mark, reverse solidus, and the control
characters (U+0000 through U+001F).
*/
writer.write('"');
for (int i=0; i<val.length(); i++) {
char ch = val.charAt(i);
if ((ch > '#' && ch != '\\' && ch < '\u2028') || ch == ' ') { // fast path
writer.write(ch);
continue;
}
switch(ch) {
case '"':
case '\\':
writer.write('\\');
writer.write(ch);
break;
case '\r': writer.write('\\'); writer.write('r'); break;
case '\n': writer.write('\\'); writer.write('n'); break;
case '\t': writer.write('\\'); writer.write('t'); break;
case '\b': writer.write('\\'); writer.write('b'); break;
case '\f': writer.write('\\'); writer.write('f'); break;
case '\u2028': // fallthrough
case '\u2029':
unicodeEscape(writer,ch);
break;
// case '/':
default: {
if (ch <= 0x1F) {
unicodeEscape(writer,ch);
} else {
writer.write(ch);
}
}
}
}
writer.write('"');
} else {
writer.write('"');
writer.write(val);
writer.write('"');
}
}
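// --- Illustrative sketch (not from the Solr source): the same escaping rules
// as a standalone method -- quote and backslash escaped, the named control
// escapes, \u00XX for the remaining control characters, and the JavaScript
// line separators U+2028/U+2029 (legal in JSON, illegal in JS string literals):
static String jsonEscape(String val) {
StringBuilder sb = new StringBuilder(val.length() + 2).append('"');
for (int i = 0; i < val.length(); i++) {
char ch = val.charAt(i);
switch (ch) {
case '"': case '\\': sb.append('\\').append(ch); break;
case '\r': sb.append("\\r"); break;
case '\n': sb.append("\\n"); break;
case '\t': sb.append("\\t"); break;
case '\b': sb.append("\\b"); break;
case '\f': sb.append("\\f"); break;
default:
if (ch <= 0x1F || ch == '\u2028' || ch == '\u2029') {
sb.append(String.format("\\u%04x", (int) ch));
} else {
sb.append(ch);
}
}
}
return sb.append('"').toString();
}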
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeMap(String name, Map val, boolean excludeOuter, boolean isFirstVal) throws IOException {
if (!excludeOuter) {
writeMapOpener(val.size());
incLevel();
isFirstVal=true;
}
boolean doIndent = excludeOuter || val.size() > 1;
for (Map.Entry entry : (Set<Map.Entry>)val.entrySet()) {
Object e = entry.getKey();
String k = e==null ? "" : e.toString();
Object v = entry.getValue();
if (isFirstVal) {
isFirstVal=false;
} else {
writeMapSeparator();
}
if (doIndent) indent();
writeKey(k,true);
writeVal(k,v);
}
if (!excludeOuter) {
decLevel();
writeMapCloser();
}
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeArray(String name, Iterator val) throws IOException {
writeArrayOpener(-1); // no trivial way to determine array size
incLevel();
boolean first=true;
while( val.hasNext() ) {
if( !first ) indent();
writeVal(null, val.next());
if( val.hasNext() ) {
writeArraySeparator();
}
first=false;
}
decLevel();
writeArrayCloser();
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeNull(String name) throws IOException {
writer.write("null");
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeInt(String name, String val) throws IOException {
writer.write(val);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeLong(String name, String val) throws IOException {
writer.write(val);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeBool(String name, String val) throws IOException {
writer.write(val);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeFloat(String name, String val) throws IOException {
writer.write(val);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeDouble(String name, String val) throws IOException {
writer.write(val);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeDate(String name, String val) throws IOException {
writeStr(name, val, false);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
protected static void unicodeEscape(Appendable out, int ch) throws IOException {
out.append('\\');
out.append('u');
out.append(hexdigits[(ch>>>12) ]);
out.append(hexdigits[(ch>>>8) & 0xf]);
out.append(hexdigits[(ch>>>4) & 0xf]);
out.append(hexdigits[(ch) & 0xf]);
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeFloat(String name, float val) throws IOException {
if (Float.isNaN(val)) {
writer.write(getNaN());
} else if (Float.isInfinite(val)) {
if (val < 0.0f)
writer.write('-');
writer.write(getInf());
} else {
writeFloat(name, Float.toString(val));
}
}
// in core/src/java/org/apache/solr/response/JSONResponseWriter.java
@Override
public void writeDouble(String name, double val) throws IOException {
if (Double.isNaN(val)) {
writer.write(getNaN());
} else if (Double.isInfinite(val)) {
if (val < 0.0)
writer.write('-');
writer.write(getInf());
} else {
writeDouble(name, Double.toString(val));
}
}
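// --- Note (not from the Solr source): JSON itself has no literal for NaN or
// Infinity, so getNaN()/getInf() act as hooks that let each dialect writer
// emit a token its consumer can actually parse -- e.g. a Python writer could
// return something like float('NaN') and float('Inf').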
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
PHPWriter w = new PHPWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeNamedList(String name, NamedList val) throws IOException {
writeNamedListAsMapMangled(name,val);
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeMapOpener(int size) throws IOException {
writer.write("array(");
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeMapCloser() throws IOException {
writer.write(')');
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeArrayOpener(int size) throws IOException {
writer.write("array(");
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeArrayCloser() throws IOException {
writer.write(')');
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeNull(String name) throws IOException {
writer.write("null");
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
protected void writeKey(String fname, boolean needsEscaping) throws IOException {
writeStr(null, fname, needsEscaping);
writer.write('=');
writer.write('>');
}
// in core/src/java/org/apache/solr/response/PHPResponseWriter.java
@Override
public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
if (needsEscaping) {
writer.write('\'');
for (int i=0; i<val.length(); i++) {
char ch = val.charAt(i);
switch (ch) {
case '\'':
case '\\': writer.write('\\'); writer.write(ch); break;
default:
writer.write(ch);
}
}
writer.write('\'');
} else {
writer.write('\'');
writer.write(val);
writer.write('\'');
}
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
CSVWriter w = new CSVWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
public void freeze() throws IOException {
if (cw.size() > 0) {
flush();
result = cw.getInternalBuf();
resultLen = cw.size();
} else {
result = buf;
resultLen = pos;
}
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
public void writeResponse() throws IOException {
SolrParams params = req.getParams();
strategy = new CSVStrategy(',', '"', CSVStrategy.COMMENTS_DISABLED, CSVStrategy.ESCAPE_DISABLED, false, false, false, true);
CSVStrategy strat = strategy;
String sep = params.get(CSV_SEPARATOR);
if (sep!=null) {
if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid separator:'"+sep+"'");
strat.setDelimiter(sep.charAt(0));
}
String nl = params.get(CSV_NEWLINE);
if (nl!=null) {
if (nl.length()==0) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid newline:'"+nl+"'");
strat.setPrinterNewline(nl);
}
String encapsulator = params.get(CSV_ENCAPSULATOR);
String escape = params.get(CSV_ESCAPE);
if (encapsulator!=null) {
if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid encapsulator:'"+encapsulator+"'");
strat.setEncapsulator(encapsulator.charAt(0));
}
if (escape!=null) {
if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid escape:'"+escape+"'");
strat.setEscape(escape.charAt(0));
if (encapsulator == null) {
strat.setEncapsulator( CSVStrategy.ENCAPSULATOR_DISABLED);
}
}
if (strat.getEscape() == '\\') {
// If the escape is the standard backslash, then also enable
// unicode escapes (it's harmless since 'u' would not otherwise
// be escaped).
strat.setUnicodeEscapeInterpretation(true);
}
printer = new CSVPrinter(writer, strategy);
CSVStrategy mvStrategy = new CSVStrategy(strategy.getDelimiter(), CSVStrategy.ENCAPSULATOR_DISABLED, CSVStrategy.COMMENTS_DISABLED, '\\', false, false, false, false);
strat = mvStrategy;
sep = params.get(MV_SEPARATOR);
if (sep!=null) {
if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv separator:'"+sep+"'");
strat.setDelimiter(sep.charAt(0));
}
encapsulator = params.get(MV_ENCAPSULATOR);
escape = params.get(MV_ESCAPE);
if (encapsulator!=null) {
if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv encapsulator:'"+encapsulator+"'");
strat.setEncapsulator(encapsulator.charAt(0));
if (escape == null) {
strat.setEscape(CSVStrategy.ESCAPE_DISABLED);
}
}
escape = params.get(MV_ESCAPE);
if (escape!=null) {
if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv escape:'"+escape+"'");
strat.setEscape(escape.charAt(0));
// encapsulator will already be disabled if it wasn't specified
}
Collection<String> fields = returnFields.getLuceneFieldNames();
Object responseObj = rsp.getValues().get("response");
boolean returnOnlyStored = false;
if (fields==null) {
if (responseObj instanceof SolrDocumentList) {
// get the list of fields from the SolrDocumentList
fields = new LinkedHashSet<String>();
for (SolrDocument sdoc: (SolrDocumentList)responseObj) {
fields.addAll(sdoc.getFieldNames());
}
} else {
// get the list of fields from the index
fields = req.getSearcher().getFieldNames();
}
if (returnFields.wantsScore()) {
fields.add("score");
} else {
fields.remove("score");
}
returnOnlyStored = true;
}
CSVSharedBufPrinter csvPrinterMV = new CSVSharedBufPrinter(mvWriter, mvStrategy);
for (String field : fields) {
if (!returnFields.wantsField(field)) {
continue;
}
if (field.equals("score")) {
CSVField csvField = new CSVField();
csvField.name = "score";
csvFields.put("score", csvField);
continue;
}
SchemaField sf = schema.getFieldOrNull(field);
if (sf == null) {
FieldType ft = new StrField();
sf = new SchemaField(field, ft);
}
// Return only stored fields, unless an explicit field list is specified
if (returnOnlyStored && sf != null && !sf.stored()) {
continue;
}
// check for per-field overrides
sep = params.get("f." + field + '.' + CSV_SEPARATOR);
encapsulator = params.get("f." + field + '.' + CSV_ENCAPSULATOR);
escape = params.get("f." + field + '.' + CSV_ESCAPE);
CSVSharedBufPrinter csvPrinter = csvPrinterMV;
if (sep != null || encapsulator != null || escape != null) {
// create a new strategy + printer if there were any per-field overrides
strat = (CSVStrategy)mvStrategy.clone();
if (sep!=null) {
if (sep.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv separator:'"+sep+"'");
strat.setDelimiter(sep.charAt(0));
}
if (encapsulator!=null) {
if (encapsulator.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv encapsulator:'"+encapsulator+"'");
strat.setEncapsulator(encapsulator.charAt(0));
if (escape == null) {
strat.setEscape(CSVStrategy.ESCAPE_DISABLED);
}
}
if (escape!=null) {
if (escape.length()!=1) throw new SolrException( SolrException.ErrorCode.BAD_REQUEST,"Invalid mv escape:'"+escape+"'");
strat.setEscape(escape.charAt(0));
if (encapsulator == null) {
strat.setEncapsulator(CSVStrategy.ENCAPSULATOR_DISABLED);
}
}
csvPrinter = new CSVSharedBufPrinter(mvWriter, strat);
}
CSVField csvField = new CSVField();
csvField.name = field;
csvField.sf = sf;
csvField.mvPrinter = csvPrinter;
csvFields.put(field, csvField);
}
NullValue = params.get(CSV_NULL, "");
if (params.getBool(CSV_HEADER, true)) {
for (CSVField csvField : csvFields.values()) {
printer.print(csvField.name);
}
printer.println();
}
if (responseObj instanceof ResultContext ) {
writeDocuments(null, (ResultContext)responseObj, returnFields );
}
else if (responseObj instanceof DocList) {
ResultContext ctx = new ResultContext();
ctx.docs = (DocList)responseObj;
writeDocuments(null, ctx, returnFields );
} else if (responseObj instanceof SolrDocumentList) {
writeSolrDocumentList(null, (SolrDocumentList)responseObj, returnFields );
}
}
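// --- Illustrative sketch (not from the Solr source): the "must be exactly one
// character" validation repeated above, factored into a helper (hypothetical
// name; the writer inlines each check):
static char singleChar(String paramName, String value) {
if (value.length() != 1) {
throw new IllegalArgumentException("Invalid " + paramName + ":'" + value + "'");
}
return value.charAt(0);
}
// Per-field overrides are looked up as "f." + field + '.' + param, so e.g.
// f.tags.csv.separator=| replaces the global separator for field "tags" only.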
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void close() throws IOException {
if (printer != null) printer.flush();
super.close();
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeNamedList(String name, NamedList val) throws IOException {
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
public void writeStartDocumentList(String name,
long start, int size, long numFound, Float maxScore) throws IOException
{
// nothing
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
public void writeEndDocumentList() throws IOException
{
// nothing
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeSolrDocument(String name, SolrDocument doc, ReturnFields returnFields, int idx ) throws IOException {
if (tmpList == null) {
tmpList = new ArrayList(1);
tmpList.add(null);
}
for (CSVField csvField : csvFields.values()) {
Object val = doc.getFieldValue(csvField.name);
int nVals = val instanceof Collection ? ((Collection)val).size() : (val==null ? 0 : 1);
if (nVals == 0) {
writeNull(csvField.name);
continue;
}
if ((csvField.sf != null && csvField.sf.multiValued()) || nVals > 1) {
Collection values;
// normalize to a collection
if (val instanceof Collection) {
values = (Collection)val;
} else {
tmpList.set(0, val);
values = tmpList;
}
mvWriter.reset();
csvField.mvPrinter.reset();
// switch the printer to use the multi-valued one
CSVPrinter tmp = printer;
printer = csvField.mvPrinter;
for (Object fval : values) {
writeVal(csvField.name, fval);
}
printer = tmp; // restore the original printer
mvWriter.freeze();
printer.print(mvWriter.getFrozenBuf(), 0, mvWriter.getFrozenSize(), true);
} else {
// normalize to first value
if (val instanceof Collection) {
Collection values = (Collection)val;
val = values.iterator().next();
}
writeVal(csvField.name, val);
}
}
printer.println();
}
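// --- Illustrative sketch (not from the Solr source): the printer-swap trick
// above in miniature -- render all values of a multivalued field into a side
// buffer with the mv separator, then hand that buffer to the main printer as
// a single (escapable) CSV cell. Names here are illustrative.
import java.io.StringWriter;

static String joinMultiValue(Iterable<?> values, char mvSep) {
StringWriter side = new StringWriter();
boolean first = true;
for (Object v : values) {
if (!first) side.write(mvSep);
side.write(String.valueOf(v));
first = false;
}
return side.toString(); // the caller prints this as one field
}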
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
printer.print(val, needsEscaping);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeMap(String name, Map val, boolean excludeOuter, boolean isFirstVal) throws IOException {
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeArray(String name, Iterator val) throws IOException {
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeNull(String name) throws IOException {
printer.print(NullValue);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeInt(String name, String val) throws IOException {
printer.print(val, false);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeLong(String name, String val) throws IOException {
printer.print(val, false);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeBool(String name, String val) throws IOException {
printer.print(val, false);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeFloat(String name, String val) throws IOException {
printer.print(val, false);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeDouble(String name, String val) throws IOException {
printer.print(val, false);
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeDate(String name, Date val) throws IOException {
StringBuilder sb = new StringBuilder(25);
cal = DateUtil.formatDate(val, cal, sb);
writeDate(name, sb.toString());
}
// in core/src/java/org/apache/solr/response/CSVResponseWriter.java
@Override
public void writeDate(String name, String val) throws IOException {
printer.print(val, false);
}
// in core/src/java/org/apache/solr/response/PythonResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
PythonWriter w = new PythonWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/PythonResponseWriter.java
@Override
public void writeNull(String name) throws IOException {
writer.write("None");
}
// in core/src/java/org/apache/solr/response/PythonResponseWriter.java
@Override
public void writeBool(String name, boolean val) throws IOException {
writer.write(val ? "True" : "False");
}
// in core/src/java/org/apache/solr/response/PythonResponseWriter.java
@Override
public void writeBool(String name, String val) throws IOException {
writeBool(name,val.charAt(0)=='t');
}
// in core/src/java/org/apache/solr/response/PythonResponseWriter.java
@Override
public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
if (!needsEscaping) {
writer.write('\'');
writer.write(val);
writer.write('\'');
return;
}
// use python unicode strings...
// python doesn't tolerate newlines in strings in its eval(), so we must escape them.
StringBuilder sb = new StringBuilder(val.length());
boolean needUnicode=false;
for (int i=0; i<val.length(); i++) {
char ch = val.charAt(i);
switch(ch) {
case '\'':
case '\\': sb.append('\\'); sb.append(ch); break;
case '\r': sb.append("\\r"); break;
case '\n': sb.append("\\n"); break;
case '\t': sb.append("\\t"); break;
default:
// we don't strictly have to escape these chars, but it will probably increase
// portability to stick to visible ascii
if (ch<' ' || ch>127) {
unicodeEscape(sb, ch);
needUnicode=true;
} else {
sb.append(ch);
}
}
}
if (needUnicode) {
writer.write('u');
}
writer.write('\'');
writer.append(sb);
writer.write('\'');
}
// in core/src/java/org/apache/solr/response/RubyResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
RubyWriter w = new RubyWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/RubyResponseWriter.java
@Override
public void writeNull(String name) throws IOException {
writer.write("nil");
}
// in core/src/java/org/apache/solr/response/RubyResponseWriter.java
@Override
protected void writeKey(String fname, boolean needsEscaping) throws IOException {
writeStr(null, fname, needsEscaping);
writer.write('=');
writer.write('>');
}
// in core/src/java/org/apache/solr/response/RubyResponseWriter.java
@Override
public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
// Ruby doesn't do unicode escapes... so let the servlet container write raw UTF-8
// bytes into the string.
//
// Use single quoted strings for safety since no evaluation is done within them.
// Also, there are very few escapes recognized in a single quoted string, so
// only escape the backslash and single quote.
writer.write('\'');
if (needsEscaping) {
for (int i=0; i<val.length(); i++) {
char ch = val.charAt(i);
if (ch=='\'' || ch=='\\') {
writer.write('\\');
}
writer.write(ch);
}
} else {
writer.write(val);
}
writer.write('\'');
}
// in core/src/java/org/apache/solr/response/XSLTResponseWriter.java
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
final Transformer t = getTransformer(request);
// capture the output of the XMLWriter
final CharArrayWriter w = new CharArrayWriter();
XMLWriter.writeResponse(w,request,response);
// and write transformed result to our writer
final Reader r = new BufferedReader(new CharArrayReader(w.toCharArray()));
final StreamSource source = new StreamSource(r);
final StreamResult result = new StreamResult(writer);
try {
t.transform(source, result);
} catch(TransformerException te) {
final IOException ioe = new IOException("XSLT transformation error");
ioe.initCause(te);
throw ioe;
}
}
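// --- Illustrative sketch (not from the Solr source): the same
// capture-then-transform flow with plain JAXP. The initCause() dance above
// predates Java 6's IOException(String, Throwable) constructor:
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;
import java.io.StringWriter;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

static String transform(String xml, Reader stylesheet) throws IOException {
try {
Transformer t = TransformerFactory.newInstance()
.newTransformer(new StreamSource(stylesheet));
StringWriter out = new StringWriter();
t.transform(new StreamSource(new StringReader(xml)), new StreamResult(out));
return out.toString();
} catch (TransformerException te) {
throw new IOException("XSLT transformation error", te);
}
}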
// in core/src/java/org/apache/solr/response/XSLTResponseWriter.java
protected Transformer getTransformer(SolrQueryRequest request) throws IOException {
final String xslt = request.getParams().get(CommonParams.TR,null);
if(xslt==null) {
throw new IOException("'" + CommonParams.TR + "' request parameter is required to use the XSLTResponseWriter");
}
// not the cleanest way to achieve this
SolrConfig solrConfig = request.getCore().getSolrConfig();
// no need to synchronize access to context, right?
// Nothing else happens with it at the same time
final Map<Object,Object> ctx = request.getContext();
Transformer result = (Transformer)ctx.get(CONTEXT_TRANSFORMER_KEY);
if(result==null) {
result = TransformerProvider.instance.getTransformer(solrConfig, xslt,xsltCacheLifetimeSeconds.intValue());
result.setErrorListener(xmllog);
ctx.put(CONTEXT_TRANSFORMER_KEY,result);
}
return result;
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
public static void writeResponse(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
XMLWriter xmlWriter = null;
try {
xmlWriter = new XMLWriter(writer, req, rsp);
xmlWriter.writeResponse();
} finally {
xmlWriter.close();
}
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
public void writeResponse() throws IOException {
writer.write(XML_START1);
String stylesheet = req.getParams().get("stylesheet");
if (stylesheet != null && stylesheet.length() > 0) {
writer.write(XML_STYLESHEET);
XML.escapeAttributeValue(stylesheet, writer);
writer.write(XML_STYLESHEET_END);
}
/***
String noSchema = req.getParams().get("noSchema");
// todo - change when schema becomes available?
if (false && noSchema == null)
writer.write(XML_START2_SCHEMA);
else
writer.write(XML_START2_NOSCHEMA);
***/
writer.write(XML_START2_NOSCHEMA);
// dump response values
NamedList<?> lst = rsp.getValues();
Boolean omitHeader = req.getParams().getBool(CommonParams.OMIT_HEADER);
if(omitHeader != null && omitHeader) lst.remove("responseHeader");
int sz = lst.size();
int start=0;
for (int i=start; i<sz; i++) {
writeVal(lst.getName(i),lst.getVal(i));
}
writer.write("\n</response>\n");
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
private void writeAttr(String name, String val) throws IOException {
writeAttr(name, val, true);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
public void writeAttr(String name, String val, boolean escape) throws IOException{
if (val != null) {
writer.write(' ');
writer.write(name);
writer.write("=\"");
if(escape){
XML.escapeAttributeValue(val, writer);
} else {
writer.write(val);
}
writer.write('"');
}
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
void startTag(String tag, String name, boolean closeTag) throws IOException {
if (doIndent) indent();
writer.write('<');
writer.write(tag);
if (name!=null) {
writeAttr("name", name);
if (closeTag) {
writer.write("/>");
} else {
writer.write(">");
}
} else {
if (closeTag) {
writer.write("/>");
} else {
writer.write('>');
}
}
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeStartDocumentList(String name,
long start, int size, long numFound, Float maxScore) throws IOException
{
if (doIndent) indent();
writer.write("<result");
writeAttr("name",name);
writeAttr("numFound",Long.toString(numFound));
writeAttr("start",Long.toString(start));
if(maxScore!=null) {
writeAttr("maxScore",Float.toString(maxScore));
}
writer.write(">");
incLevel();
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeSolrDocument(String name, SolrDocument doc, ReturnFields returnFields, int idx ) throws IOException {
startTag("doc", name, false);
incLevel();
for (String fname : doc.getFieldNames()) {
if (!returnFields.wantsField(fname)) {
continue;
}
Object val = doc.getFieldValue(fname);
if( "_explain_".equals( fname ) ) {
System.out.println( val );
}
writeVal(fname, val);
}
decLevel();
writer.write("</doc>");
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeEndDocumentList() throws IOException
{
decLevel();
if (doIndent) indent();
writer.write("</result>");
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeNamedList(String name, NamedList val) throws IOException {
int sz = val.size();
startTag("lst", name, sz<=0);
incLevel();
for (int i=0; i<sz; i++) {
writeVal(val.getName(i),val.getVal(i));
}
decLevel();
if (sz > 0) {
if (doIndent) indent();
writer.write("</lst>");
}
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeMap(String name, Map map, boolean excludeOuter, boolean isFirstVal) throws IOException {
int sz = map.size();
if (!excludeOuter) {
startTag("lst", name, sz<=0);
incLevel();
}
for (Map.Entry entry : (Set<Map.Entry>)map.entrySet()) {
Object k = entry.getKey();
Object v = entry.getValue();
// if (sz<indentThreshold) indent();
writeVal( null == k ? null : k.toString(), v);
}
if (!excludeOuter) {
decLevel();
if (sz > 0) {
if (doIndent) indent();
writer.write("</lst>");
}
}
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeArray(String name, Object[] val) throws IOException {
writeArray(name, Arrays.asList(val).iterator());
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeArray(String name, Iterator iter) throws IOException {
if( iter.hasNext() ) {
startTag("arr", name, false );
incLevel();
while( iter.hasNext() ) {
writeVal(null, iter.next());
}
decLevel();
if (doIndent) indent();
writer.write("</arr>");
}
else {
startTag("arr", name, true );
}
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeNull(String name) throws IOException {
writePrim("null",name,"",false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeStr(String name, String val, boolean escape) throws IOException {
writePrim("str",name,val,escape);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeInt(String name, String val) throws IOException {
writePrim("int",name,val,false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeLong(String name, String val) throws IOException {
writePrim("long",name,val,false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeBool(String name, String val) throws IOException {
writePrim("bool",name,val,false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeFloat(String name, String val) throws IOException {
writePrim("float",name,val,false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeFloat(String name, float val) throws IOException {
writeFloat(name,Float.toString(val));
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeDouble(String name, String val) throws IOException {
writePrim("double",name,val,false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeDouble(String name, double val) throws IOException {
writeDouble(name,Double.toString(val));
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
@Override
public void writeDate(String name, String val) throws IOException {
writePrim("date",name,val,false);
}
// in core/src/java/org/apache/solr/response/XMLWriter.java
private void writePrim(String tag, String name, String val, boolean escape) throws IOException {
int contentLen = val==null ? 0 : val.length();
startTag(tag, name, contentLen==0);
if (contentLen==0) return;
if (escape) {
XML.escapeCharData(val,writer);
} else {
writer.write(val,0,contentLen);
}
writer.write('<');
writer.write('/');
writer.write(tag);
writer.write('>');
}
// in core/src/java/org/apache/solr/response/XMLResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
XMLWriter w = new XMLWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/transform/DocTransformers.java
@Override
public void transform(SolrDocument doc, int docid) throws IOException {
for( DocTransformer a : children ) {
a.transform( doc, docid);
}
}
// in core/src/java/org/apache/solr/response/BinaryResponseWriter.java
public void write(OutputStream out, SolrQueryRequest req, SolrQueryResponse response) throws IOException {
Resolver resolver = new Resolver(req, response.getReturnFields());
Boolean omitHeader = req.getParams().getBool(CommonParams.OMIT_HEADER);
if (omitHeader != null && omitHeader) response.getValues().remove("responseHeader");
JavaBinCodec codec = new JavaBinCodec(resolver);
codec.marshal(response.getValues(), out);
}
// in core/src/java/org/apache/solr/response/BinaryResponseWriter.java
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
throw new RuntimeException("This is a binary writer , Cannot write to a characterstream");
}
// in core/src/java/org/apache/solr/response/BinaryResponseWriter.java
public Object resolve(Object o, JavaBinCodec codec) throws IOException {
if (o instanceof ResultContext) {
writeResults((ResultContext) o, codec);
return null; // null means we completely handled it
}
if (o instanceof DocList) {
ResultContext ctx = new ResultContext();
ctx.docs = (DocList) o;
writeResults(ctx, codec);
return null; // null means we completely handled it
}
if( o instanceof IndexableField ) {
if(schema == null) schema = solrQueryRequest.getSchema();
IndexableField f = (IndexableField)o;
SchemaField sf = schema.getFieldOrNull(f.name());
try {
o = getValue(sf, f);
}
catch (Exception e) {
LOG.warn("Error reading a field : " + o, e);
}
}
if (o instanceof SolrDocument) {
// Remove any fields that were not requested.
// This typically happens when distributed search adds
// extra fields to an internal request
SolrDocument doc = (SolrDocument)o;
Iterator<Map.Entry<String, Object>> i = doc.iterator();
while ( i.hasNext() ) {
String fname = i.next().getKey();
if ( !returnFields.wantsField( fname ) ) {
i.remove();
}
}
return doc;
}
return o;
}
// in core/src/java/org/apache/solr/response/BinaryResponseWriter.java
protected void writeResultsBody( ResultContext res, JavaBinCodec codec ) throws IOException
{
DocList ids = res.docs;
int sz = ids.size();
codec.writeTag(JavaBinCodec.ARR, sz);
if(searcher == null) searcher = solrQueryRequest.getSearcher();
if(schema == null) schema = solrQueryRequest.getSchema();
DocTransformer transformer = returnFields.getTransformer();
TransformContext context = new TransformContext();
context.query = res.query;
context.wantsScores = returnFields.wantsScore() && ids.hasScores();
context.req = solrQueryRequest;
context.searcher = searcher;
if( transformer != null ) {
transformer.setContext( context );
}
Set<String> fnames = returnFields.getLuceneFieldNames();
context.iterator = ids.iterator();
for (int i = 0; i < sz; i++) {
int id = context.iterator.nextDoc();
Document doc = searcher.doc(id, fnames);
SolrDocument sdoc = getDoc(doc);
if( transformer != null ) {
transformer.transform(sdoc, id);
}
codec.writeSolrDocument(sdoc);
}
if( transformer != null ) {
transformer.setContext( null );
}
}
// in core/src/java/org/apache/solr/response/BinaryResponseWriter.java
public void writeResults(ResultContext ctx, JavaBinCodec codec) throws IOException {
codec.writeTag(JavaBinCodec.SOLRDOCLST);
boolean wantsScores = returnFields.wantsScore() && ctx.docs.hasScores();
List l = new ArrayList(3);
l.add((long) ctx.docs.matches());
l.add((long) ctx.docs.offset());
Float maxScore = null;
if (wantsScores) {
maxScore = ctx.docs.maxScore();
}
l.add(maxScore);
codec.writeArray(l);
// this is a separate function so that streaming responses can use just that part
writeResultsBody( ctx, codec );
}
// in core/src/java/org/apache/solr/response/RawResponseWriter.java
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException
{
Object obj = response.getValues().get( CONTENT );
if( obj != null && (obj instanceof ContentStream ) ) {
// copy the contents to the writer...
ContentStream content = (ContentStream)obj;
Reader reader = content.getReader();
try {
IOUtils.copy( reader, writer );
} finally {
reader.close();
}
}
else {
getBaseWriter( request ).write( writer, request, response );
}
}
// in core/src/java/org/apache/solr/response/RawResponseWriter.java
public void write(OutputStream out, SolrQueryRequest request,
SolrQueryResponse response) throws IOException {
Object obj = response.getValues().get( CONTENT );
if( obj != null && (obj instanceof ContentStream ) ) {
// copy the contents to the writer...
ContentStream content = (ContentStream)obj;
java.io.InputStream in = content.getStream();
try {
IOUtils.copy( in, out );
} finally {
in.close();
}
}
else {
//getBaseWriter( request ).write( writer, request, response );
throw new IOException("did not find a CONTENT object");
}
}
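// --- Illustrative sketch (not from the Solr source): the IOUtils.copy +
// try/finally pattern above, as it would look on JDK 9+ with
// try-with-resources and InputStream.transferTo:
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

static void copyStream(InputStream in, OutputStream out) throws IOException {
try (InputStream source = in) {
source.transferTo(out); // copies in chunks; leaves out open
}
}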
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
public void write(Writer writer, SolrQueryRequest req, SolrQueryResponse rsp) throws IOException {
PHPSerializedWriter w = new PHPSerializedWriter(writer, req, rsp);
try {
w.writeResponse();
} finally {
w.close();
}
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeResponse() throws IOException {
Boolean omitHeader = req.getParams().getBool(CommonParams.OMIT_HEADER);
if(omitHeader != null && omitHeader) rsp.getValues().remove("responseHeader");
writeNamedList(null, rsp.getValues());
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeNamedList(String name, NamedList val) throws IOException {
writeNamedListAsMapMangled(name,val);
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
public void writeStartDocumentList(String name,
long start, int size, long numFound, Float maxScore) throws IOException
{
writeMapOpener((maxScore==null) ? 3 : 4);
writeKey("numFound",false);
writeLong(null,numFound);
writeKey("start",false);
writeLong(null,start);
if (maxScore!=null) {
writeKey("maxScore",false);
writeFloat(null,maxScore);
}
writeKey("docs",false);
writeArrayOpener(size);
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
public void writeEndDocumentList() throws IOException
{
writeArrayCloser(); // doc list
writeMapCloser();
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeSolrDocument(String name, SolrDocument doc, ReturnFields returnFields, int idx) throws IOException
{
writeKey(idx, false);
LinkedHashMap <String,Object> single = new LinkedHashMap<String, Object>();
LinkedHashMap <String,Object> multi = new LinkedHashMap<String, Object>();
for (String fname : doc.getFieldNames()) {
if(!returnFields.wantsField(fname)){
continue;
}
Object val = doc.getFieldValue(fname);
if (val instanceof Collection) {
multi.put(fname, val);
}else{
single.put(fname, val);
}
}
writeMapOpener(single.size() + multi.size());
for(String fname: single.keySet()){
Object val = single.get(fname);
writeKey(fname, true);
writeVal(fname, val);
}
for(String fname: multi.keySet()){
writeKey(fname, true);
Object val = multi.get(fname);
if (!(val instanceof Collection)) {
// should never be reached if multivalued fields are stored as a Collection
// so I'm assuming a size of 1 just to wrap the single value
writeArrayOpener(1);
writeVal(fname, val);
writeArrayCloser();
}else{
writeVal(fname, val);
}
}
writeMapCloser();
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeArray(String name, Object[] val) throws IOException {
writeMapOpener(val.length);
for(int i=0; i < val.length; i++) {
writeKey(i, false);
writeVal(String.valueOf(i), val[i]);
}
writeMapCloser();
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeArray(String name, Iterator val) throws IOException {
ArrayList vals = new ArrayList();
while( val.hasNext() ) {
vals.add(val.next());
}
writeArray(name, vals.toArray());
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeMapOpener(int size) throws IOException, IllegalArgumentException {
// negative size value indicates that something has gone wrong
if (size < 0) {
throw new IllegalArgumentException("Map size must not be negative");
}
writer.write("a:"+size+":{");
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeMapSeparator() throws IOException {
/* NOOP */
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeMapCloser() throws IOException {
writer.write('}');
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeArrayOpener(int size) throws IOException, IllegalArgumentException {
// negative size value indicates that something has gone wrong
if (size < 0) {
throw new IllegalArgumentException("Array size must not be negative");
}
writer.write("a:"+size+":{");
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeArraySeparator() throws IOException {
/* NOOP */
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeArrayCloser() throws IOException {
writer.write('}');
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeNull(String name) throws IOException {
writer.write("N;");
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
protected void writeKey(String fname, boolean needsEscaping) throws IOException {
writeStr(null, fname, needsEscaping);
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
void writeKey(int val, boolean needsEscaping) throws IOException {
writeInt(null, String.valueOf(val));
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeBool(String name, boolean val) throws IOException {
writer.write(val ? "b:1;" : "b:0;");
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeBool(String name, String val) throws IOException {
writeBool(name, val.charAt(0) == 't');
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeInt(String name, String val) throws IOException {
writer.write("i:"+val+";");
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeLong(String name, String val) throws IOException {
writeInt(name,val);
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeFloat(String name, String val) throws IOException {
writeDouble(name,val);
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeDouble(String name, String val) throws IOException {
writer.write("d:"+val+";");
}
// in core/src/java/org/apache/solr/response/PHPSerializedResponseWriter.java
@Override
public void writeStr(String name, String val, boolean needsEscaping) throws IOException {
// serialized PHP strings don't need to be escaped at all, however the
// string size reported needs to be the number of bytes rather than chars.
UnicodeUtil.UTF16toUTF8(val, 0, val.length(), utf8);
int nBytes = utf8.length;
writer.write("s:");
writer.write(Integer.toString(nBytes));
writer.write(":\"");
writer.write(val);
writer.write("\";");
}
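// --- Illustrative sketch (not from the Solr source): the byte-vs-char rule
// above with plain JDK calls -- serialized PHP strings have the form
// s:<byte-length>:"<raw chars>"; and the length must count UTF-8 bytes:
import java.nio.charset.StandardCharsets;

static String phpSerializeStr(String val) {
int nBytes = val.getBytes(StandardCharsets.UTF_8).length;
return "s:" + nBytes + ":\"" + val + "\";";
}
// phpSerializeStr("héllo") -> s:6:"héllo";  (6 bytes for 5 chars)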
// in core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
NamedList<Integer> getFacetCounts(Executor executor) throws IOException {
CompletionService<SegFacet> completionService = new ExecutorCompletionService<SegFacet>(executor);
// reuse the translation logic to go from top level set to per-segment set
baseSet = docs.getTopFilter();
final AtomicReaderContext[] leaves = searcher.getTopReaderContext().leaves();
// The list of pending tasks that aren't immediately submitted
// TODO: Is there a completion service, or a delegating executor that can
// limit the number of concurrent tasks submitted to a bigger executor?
LinkedList<Callable<SegFacet>> pending = new LinkedList<Callable<SegFacet>>();
int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads;
for (int i=0; i<leaves.length; i++) {
final SegFacet segFacet = new SegFacet(leaves[i]);
Callable<SegFacet> task = new Callable<SegFacet>() {
public SegFacet call() throws Exception {
segFacet.countTerms();
return segFacet;
}
};
// TODO: if limiting threads, submit by largest segment first?
if (--threads >= 0) {
completionService.submit(task);
} else {
pending.add(task);
}
}
// now merge the per-segment results
PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>(leaves.length) {
@Override
protected boolean lessThan(SegFacet a, SegFacet b) {
return a.tempBR.compareTo(b.tempBR) < 0;
}
};
boolean hasMissingCount=false;
int missingCount=0;
for (int i=0; i<leaves.length; i++) {
SegFacet seg = null;
try {
Future<SegFacet> future = completionService.take();
seg = future.get();
if (!pending.isEmpty()) {
completionService.submit(pending.removeFirst());
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
} catch (ExecutionException e) {
Throwable cause = e.getCause();
if (cause instanceof RuntimeException) {
throw (RuntimeException)cause;
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in per-segment faceting on field: " + fieldName, cause);
}
}
if (seg.startTermIndex < seg.endTermIndex) {
if (seg.startTermIndex==0) {
hasMissingCount=true;
missingCount += seg.counts[0];
seg.pos = 1;
} else {
seg.pos = seg.startTermIndex;
}
if (seg.pos < seg.endTermIndex) {
seg.tenum = seg.si.getTermsEnum();
seg.tenum.seekExact(seg.pos);
seg.tempBR = seg.tenum.term();
queue.add(seg);
}
}
}
FacetCollector collector;
if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
collector = new CountSortedFacetCollector(offset, limit, mincount);
} else {
collector = new IndexSortedFacetCollector(offset, limit, mincount);
}
BytesRef val = new BytesRef();
while (queue.size() > 0) {
SegFacet seg = queue.top();
// make a shallow copy
val.bytes = seg.tempBR.bytes;
val.offset = seg.tempBR.offset;
val.length = seg.tempBR.length;
int count = 0;
do {
count += seg.counts[seg.pos - seg.startTermIndex];
// TODO: OPTIMIZATION...
// if mincount>0 then seg.pos++ can skip ahead to the next non-zero entry.
seg.pos++;
if (seg.pos >= seg.endTermIndex) {
queue.pop();
seg = queue.top();
} else {
seg.tempBR = seg.tenum.next();
seg = queue.updateTop();
}
} while (seg != null && val.compareTo(seg.tempBR) == 0);
boolean stop = collector.collect(val, count);
if (stop) break;
}
NamedList<Integer> res = collector.getFacetCounts();
// convert labels to readable form
FieldType ft = searcher.getSchema().getFieldType(fieldName);
int sz = res.size();
for (int i=0; i<sz; i++) {
res.setName(i, ft.indexedToReadable(res.getName(i)));
}
if (missing) {
if (!hasMissingCount) {
missingCount = SimpleFacets.getFieldMissingCount(searcher,docs,fieldName);
}
res.add(null, missingCount);
}
return res;
}
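// --- Illustrative sketch (not from the Solr source): the per-segment merge
// above is a k-way merge over sorted term streams. A self-contained version
// with a JDK PriorityQueue, counting how many segments carry each term
// (standing in for summing per-segment counts); assumes each stream is
// sorted and duplicate-free:
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

static List<String> mergeCounts(List<Iterator<String>> segs) {
// heap entries: { current head term, source iterator }
PriorityQueue<Object[]> pq = new PriorityQueue<>(
(a, b) -> ((String) a[0]).compareTo((String) b[0]));
for (Iterator<String> it : segs) {
if (it.hasNext()) pq.add(new Object[] { it.next(), it });
}
List<String> merged = new ArrayList<>();
while (!pq.isEmpty()) {
Object[] top = pq.poll();
String term = (String) top[0];
int count = 0;
while (true) { // drain every segment positioned on the same term
count++;
@SuppressWarnings("unchecked")
Iterator<String> it = (Iterator<String>) top[1];
if (it.hasNext()) pq.add(new Object[] { it.next(), it });
if (pq.isEmpty() || !pq.peek()[0].equals(term)) break;
top = pq.poll();
}
merged.add(term + ":" + count);
}
return merged;
}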
// in core/src/java/org/apache/solr/request/PerSegmentSingleValuedFaceting.java
void countTerms() throws IOException {
si = FieldCache.DEFAULT.getTermsIndex(context.reader(), fieldName);
// SolrCore.log.info("reader= " + reader + " FC=" + System.identityHashCode(si));
if (prefix!=null) {
BytesRef prefixRef = new BytesRef(prefix);
startTermIndex = si.binarySearchLookup(prefixRef, tempBR);
if (startTermIndex<0) startTermIndex=-startTermIndex-1;
prefixRef.append(UnicodeUtil.BIG_TERM);
// TODO: we could constrain the lower endpoint if we had a binarySearch method that allowed passing start/end
endTermIndex = si.binarySearchLookup(prefixRef, tempBR);
assert endTermIndex < 0;
endTermIndex = -endTermIndex-1;
} else {
startTermIndex=0;
endTermIndex=si.numOrd();
}
final int nTerms=endTermIndex-startTermIndex;
if (nTerms>0) {
// count collection array only needs to be as big as the number of terms we are
// going to collect counts for.
final int[] counts = this.counts = new int[nTerms];
DocIdSet idSet = baseSet.getDocIdSet(context, null); // this set only includes live docs
DocIdSetIterator iter = idSet.iterator();
////
PackedInts.Reader ordReader = si.getDocToOrd();
int doc;
final Object arr;
if (ordReader.hasArray()) {
arr = ordReader.getArray();
} else {
arr = null;
}
if (arr instanceof int[]) {
int[] ords = (int[]) arr;
if (prefix==null) {
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
counts[ords[doc]]++;
}
} else {
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
int term = ords[doc];
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
} else if (arr instanceof short[]) {
short[] ords = (short[]) arr;
if (prefix==null) {
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
counts[ords[doc] & 0xffff]++;
}
} else {
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
int term = ords[doc] & 0xffff;
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
} else if (arr instanceof byte[]) {
byte[] ords = (byte[]) arr;
if (prefix==null) {
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
counts[ords[doc] & 0xff]++;
}
} else {
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
int term = ords[doc] & 0xff;
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
} else {
if (prefix==null) {
// specialized version when collecting counts for all terms
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
counts[si.getOrd(doc)]++;
}
} else {
// version that adjusts term numbers because we aren't collecting the full range
while ((doc = iter.nextDoc()) < DocIdSetIterator.NO_MORE_DOCS) {
int term = si.getOrd(doc);
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
}
}
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
void parseParams(String type, String param) throws ParseException, IOException {
localParams = QueryParsing.getLocalParams(param, req.getParams());
base = docs;
facetValue = param;
key = param;
threads = -1;
if (localParams == null) return;
// remove local params unless it's a query
if (!FacetParams.FACET_QUERY.equals(type)) { // compare by value, not reference; TODO: cut over to an Enum here
facetValue = localParams.get(CommonParams.VALUE);
}
// reset the default key now that localParams have been removed
key = facetValue;
// allow explicit set of the key
key = localParams.get(CommonParams.OUTPUT_KEY, key);
String threadStr = localParams.get(CommonParams.THREADS);
if (threadStr != null) {
threads = Integer.parseInt(threadStr);
}
// figure out if we need a new base DocSet
String excludeStr = localParams.get(CommonParams.EXCLUDE);
if (excludeStr == null) return;
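// Multi-select faceting: filters tagged with the names listed in the "ex" local
// param are excluded by recomputing the base DocSet from the main query plus only
// the non-excluded filters.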
Map<?,?> tagMap = (Map<?,?>)req.getContext().get("tags");
if (tagMap != null && rb != null) {
List<String> excludeTagList = StrUtils.splitSmart(excludeStr,',');
IdentityHashMap<Query,Boolean> excludeSet = new IdentityHashMap<Query,Boolean>();
for (String excludeTag : excludeTagList) {
Object olst = tagMap.get(excludeTag);
// tagMap has entries of Map<String,List<QParser>>, but subject to change in the future
if (!(olst instanceof Collection)) continue;
for (Object o : (Collection<?>)olst) {
if (!(o instanceof QParser)) continue;
QParser qp = (QParser)o;
excludeSet.put(qp.getQuery(), Boolean.TRUE);
}
}
if (excludeSet.size() == 0) return;
List<Query> qlist = new ArrayList<Query>();
// add the base query
if (!excludeSet.containsKey(rb.getQuery())) {
qlist.add(rb.getQuery());
}
// add the filters
if (rb.getFilters() != null) {
for (Query q : rb.getFilters()) {
if (!excludeSet.containsKey(q)) {
qlist.add(q);
}
}
}
// get the new base docset for this facet
DocSet base = searcher.getDocSet(qlist);
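// With group.truncate, facet counts are computed over group heads only, so the
// freshly computed base set is further reduced to the group-head documents below.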
if (rb.grouping() && rb.getGroupingSpec().isTruncateGroups()) {
Grouping grouping = new Grouping(searcher, null, rb.getQueryCommand(), false, 0, false);
if (rb.getGroupingSpec().getFields().length > 0) {
grouping.addFieldCommand(rb.getGroupingSpec().getFields()[0], req);
} else if (rb.getGroupingSpec().getFunctions().length > 0) {
grouping.addFunctionCommand(rb.getGroupingSpec().getFunctions()[0], req);
} else {
this.base = base;
return;
}
AbstractAllGroupHeadsCollector allGroupHeadsCollector = grouping.getCommands().get(0).createAllGroupCollector();
searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), allGroupHeadsCollector);
int maxDoc = searcher.maxDoc();
FixedBitSet fixedBitSet = allGroupHeadsCollector.retrieveGroupHeads(maxDoc);
long[] bits = fixedBitSet.getBits();
this.base = new BitDocSet(new OpenBitSet(bits, bits.length));
} else {
this.base = base;
}
}
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public NamedList<Integer> getFacetQueryCounts() throws IOException,ParseException {
NamedList<Integer> res = new SimpleOrderedMap<Integer>();
/* Ignore CommonParams.DF - an init param facet.query could assume the
* schema default, with the query param DF intended to only affect Q.
* If the user doesn't want the schema default for facet.query, they
* should be explicit.
*/
// SolrQueryParser qp = searcher.getSchema().getSolrQueryParser(null);
String[] facetQs = params.getParams(FacetParams.FACET_QUERY);
if (null != facetQs && 0 != facetQs.length) {
for (String q : facetQs) {
parseParams(FacetParams.FACET_QUERY, q);
// TODO: slight optimization would prevent double-parsing of any localParams
Query qobj = QParser.getParser(q, null, req).getQuery();
res.add(key, searcher.numDocs(qobj, base));
}
}
return res;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public NamedList<Integer> getTermCounts(String field) throws IOException {
int offset = params.getFieldInt(field, FacetParams.FACET_OFFSET, 0);
int limit = params.getFieldInt(field, FacetParams.FACET_LIMIT, 100);
if (limit == 0) return new NamedList<Integer>();
Integer mincount = params.getFieldInt(field, FacetParams.FACET_MINCOUNT);
if (mincount==null) {
Boolean zeros = params.getFieldBool(field, FacetParams.FACET_ZEROS);
// mincount = (zeros!=null && zeros) ? 0 : 1;
mincount = (zeros!=null && !zeros) ? 1 : 0;
// current default is to include zeros.
}
boolean missing = params.getFieldBool(field, FacetParams.FACET_MISSING, false);
// default to sorting if there is a limit.
String sort = params.getFieldParam(field, FacetParams.FACET_SORT, limit>0 ? FacetParams.FACET_SORT_COUNT : FacetParams.FACET_SORT_INDEX);
String prefix = params.getFieldParam(field,FacetParams.FACET_PREFIX);
NamedList<Integer> counts;
SchemaField sf = searcher.getSchema().getField(field);
FieldType ft = sf.getType();
// determine what type of faceting method to use
String method = params.getFieldParam(field, FacetParams.FACET_METHOD);
boolean enumMethod = FacetParams.FACET_METHOD_enum.equals(method);
// TODO: default to per-segment or not?
boolean per_segment = FacetParams.FACET_METHOD_fcs.equals(method);
if (method == null && ft instanceof BoolField) {
// Always use filters for booleans... we know the number of values is very small.
enumMethod = true;
}
boolean multiToken = sf.multiValued() || ft.multiValuedFieldCache();
if (TrieField.getMainValuePrefix(ft) != null) {
// A TrieField with multiple parts indexed per value... currently only
// UnInvertedField can handle this case, so force its use.
enumMethod = false;
multiToken = true;
}
if (params.getFieldBool(field, GroupParams.GROUP_FACET, false)) {
counts = getGroupedCounts(searcher, base, field, multiToken, offset,limit, mincount, missing, sort, prefix);
} else {
// unless the enum method is explicitly specified, use a counting method.
if (enumMethod) {
counts = getFacetTermEnumCounts(searcher, base, field, offset, limit, mincount,missing,sort,prefix);
} else {
if (multiToken) {
UnInvertedField uif = UnInvertedField.getUnInvertedField(field, searcher);
counts = uif.getCounts(searcher, base, offset, limit, mincount,missing,sort,prefix);
} else {
// TODO: future logic could use filters instead of the fieldcache if
// the number of terms in the field is small enough.
if (per_segment) {
PerSegmentSingleValuedFaceting ps = new PerSegmentSingleValuedFaceting(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
Executor executor = threads == 0 ? directExecutor : facetExecutor;
ps.setNumThreads(threads);
counts = ps.getFacetCounts(executor);
} else {
counts = getFieldCacheCounts(searcher, base, field, offset,limit, mincount, missing, sort, prefix);
}
}
}
}
return counts;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public NamedList<Integer> getGroupedCounts(SolrIndexSearcher searcher,
DocSet base,
String field,
boolean multiToken,
int offset,
int limit,
int mincount,
boolean missing,
String sort,
String prefix) throws IOException {
GroupingSpecification groupingSpecification = rb.getGroupingSpec();
String groupField = groupingSpecification != null ? groupingSpecification.getFields()[0] : null;
if (groupField == null) {
throw new SolrException (
SolrException.ErrorCode.BAD_REQUEST,
"Specify the group.field as parameter or local parameter"
);
}
BytesRef prefixBR = prefix != null ? new BytesRef(prefix) : null;
TermGroupFacetCollector collector = TermGroupFacetCollector.createTermGroupFacetCollector(groupField, field, multiToken, prefixBR, 128);
searcher.search(new MatchAllDocsQuery(), base.getTopFilter(), collector);
boolean orderByCount = sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY);
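// Request offset+limit entries from the merge so the requested page can be sliced
// out afterwards via getFacetEntries(offset, limit).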
TermGroupFacetCollector.GroupedFacetResult result = collector.mergeSegmentResults(offset + limit, mincount, orderByCount);
CharsRef charsRef = new CharsRef();
FieldType facetFieldType = searcher.getSchema().getFieldType(field);
NamedList<Integer> facetCounts = new NamedList<Integer>();
List<TermGroupFacetCollector.FacetEntry> scopedEntries = result.getFacetEntries(offset, limit);
for (TermGroupFacetCollector.FacetEntry facetEntry : scopedEntries) {
facetFieldType.indexedToReadable(facetEntry.getValue(), charsRef);
facetCounts.add(charsRef.toString(), facetEntry.getCount());
}
if (missing) {
facetCounts.add(null, result.getTotalMissingCount());
}
return facetCounts;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public NamedList<Object> getFacetFieldCounts()
throws IOException, ParseException {
NamedList<Object> res = new SimpleOrderedMap<Object>();
String[] facetFs = params.getParams(FacetParams.FACET_FIELD);
if (null != facetFs) {
for (String f : facetFs) {
parseParams(FacetParams.FACET_FIELD, f);
String termList = localParams == null ? null : localParams.get(CommonParams.TERMS);
if (termList != null) {
res.add(key, getListedTermCounts(facetValue, termList));
} else {
res.add(key, getTermCounts(facetValue));
}
}
}
return res;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
private NamedList<Integer> getListedTermCounts(String field, String termList) throws IOException {
FieldType ft = searcher.getSchema().getFieldType(field);
List<String> terms = StrUtils.splitSmart(termList, ",", true);
NamedList<Integer> res = new NamedList<Integer>();
for (String term : terms) {
String internal = ft.toInternal(term);
int count = searcher.numDocs(new TermQuery(new Term(field, internal)), base);
res.add(term, count);
}
return res;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public static int getFieldMissingCount(SolrIndexSearcher searcher, DocSet docs, String fieldName)
throws IOException {
DocSet hasVal = searcher.getDocSet
(new TermRangeQuery(fieldName, null, null, false, false));
return docs.andNotSize(hasVal);
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public static NamedList<Integer> getFieldCacheCounts(SolrIndexSearcher searcher, DocSet docs, String fieldName, int offset, int limit, int mincount, boolean missing, String sort, String prefix) throws IOException {
// TODO: If the number of terms is high compared to docs.size(), and zeros==false,
// we should use an alternate strategy to avoid
// 1) creating another huge int[] for the counts
// 2) looping over that huge int[] looking for the rare non-zeros.
//
// Yet another variation: if docs.size() is small and termvectors are stored,
// then use them instead of the FieldCache.
//
// TODO: this function is too big and could use some refactoring, but
// we also need a facet cache, and refactoring of SimpleFacets instead of
// trying to pass all the various params around.
FieldType ft = searcher.getSchema().getFieldType(fieldName);
NamedList<Integer> res = new NamedList<Integer>();
FieldCache.DocTermsIndex si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), fieldName);
final BytesRef prefixRef;
if (prefix == null) {
prefixRef = null;
} else if (prefix.length()==0) {
prefix = null;
prefixRef = null;
} else {
prefixRef = new BytesRef(prefix);
}
final BytesRef br = new BytesRef();
int startTermIndex, endTermIndex;
if (prefix!=null) {
startTermIndex = si.binarySearchLookup(prefixRef, br);
if (startTermIndex<0) startTermIndex=-startTermIndex-1;
prefixRef.append(UnicodeUtil.BIG_TERM);
endTermIndex = si.binarySearchLookup(prefixRef, br);
assert endTermIndex < 0;
endTermIndex = -endTermIndex-1;
} else {
startTermIndex=0;
endTermIndex=si.numOrd();
}
final int nTerms=endTermIndex-startTermIndex;
int missingCount = -1;
final CharsRef charsRef = new CharsRef(10);
if (nTerms>0 && docs.size() >= mincount) {
// count collection array only needs to be as big as the number of terms we are
// going to collect counts for.
final int[] counts = new int[nTerms];
DocIterator iter = docs.iterator();
PackedInts.Reader ordReader = si.getDocToOrd();
final Object arr;
if (ordReader.hasArray()) {
arr = ordReader.getArray();
} else {
arr = null;
}
if (arr instanceof int[]) {
int[] ords = (int[]) arr;
if (prefix==null) {
while (iter.hasNext()) {
counts[ords[iter.nextDoc()]]++;
}
} else {
while (iter.hasNext()) {
int term = ords[iter.nextDoc()];
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
} else if (arr instanceof short[]) {
short[] ords = (short[]) arr;
if (prefix==null) {
while (iter.hasNext()) {
counts[ords[iter.nextDoc()] & 0xffff]++;
}
} else {
while (iter.hasNext()) {
int term = ords[iter.nextDoc()] & 0xffff;
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
} else if (arr instanceof byte[]) {
byte[] ords = (byte[]) arr;
if (prefix==null) {
while (iter.hasNext()) {
counts[ords[iter.nextDoc()] & 0xff]++;
}
} else {
while (iter.hasNext()) {
int term = ords[iter.nextDoc()] & 0xff;
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
} else {
while (iter.hasNext()) {
int term = si.getOrd(iter.nextDoc());
int arrIdx = term-startTermIndex;
if (arrIdx>=0 && arrIdx<nTerms) counts[arrIdx]++;
}
}
if (startTermIndex == 0) {
missingCount = counts[0];
}
// IDEA: we could also maintain a count of "other"... everything that fell outside
// of the top 'N'
int off=offset;
int lim=limit>=0 ? limit : Integer.MAX_VALUE;
if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
int maxsize = limit>0 ? offset+limit : Integer.MAX_VALUE-1;
maxsize = Math.min(maxsize, nTerms);
LongPriorityQueue queue = new LongPriorityQueue(Math.min(maxsize,1000), maxsize, Long.MIN_VALUE);
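// Candidates are packed into a single long (count in the high 32 bits,
// Integer.MAX_VALUE - termIndex in the low 32 bits) so this primitive queue orders
// by count first and, on ties, by smaller term index.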
int min=mincount-1; // the smallest value in the top 'N' values
for (int i=(startTermIndex==0)?1:0; i<nTerms; i++) {
int c = counts[i];
if (c>min) {
// NOTE: we use c>min rather than c>=min as an optimization because we are going in
// index order, so we already know that the keys are ordered. This can be very
// important if a lot of the counts are repeated (like zero counts would be).
// smaller term numbers sort higher, so subtract the term number instead
long pair = (((long)c)<<32) + (Integer.MAX_VALUE - i);
boolean displaced = queue.insert(pair);
if (displaced) min=(int)(queue.top() >>> 32);
}
}
// if we are deep paging, we don't have to order the highest "offset" counts.
int collectCount = Math.max(0, queue.size() - off);
assert collectCount <= lim;
// the start and end indexes of our list "sorted" (starting with the highest value)
int sortedIdxStart = queue.size() - (collectCount - 1);
int sortedIdxEnd = queue.size() + 1;
final long[] sorted = queue.sort(collectCount);
for (int i=sortedIdxStart; i<sortedIdxEnd; i++) {
long pair = sorted[i];
int c = (int)(pair >>> 32);
int tnum = Integer.MAX_VALUE - (int)pair;
ft.indexedToReadable(si.lookup(startTermIndex+tnum, br), charsRef);
res.add(charsRef.toString(), c);
}
} else {
// add results in index order
int i=(startTermIndex==0)?1:0;
if (mincount<=0) {
// if mincount<=0, then we won't discard any terms and we know exactly
// where to start.
i+=off;
off=0;
}
for (; i<nTerms; i++) {
int c = counts[i];
if (c<mincount || --off>=0) continue;
if (--lim<0) break;
ft.indexedToReadable(si.lookup(startTermIndex+i, br), charsRef);
res.add(charsRef.toString(), c);
}
}
}
if (missing) {
if (missingCount < 0) {
missingCount = getFieldMissingCount(searcher,docs,fieldName);
}
res.add(null, missingCount);
}
return res;
}
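// Editor's illustration (not part of the Solr source): a minimal self-contained
// sketch of the (count, term-index) long packing used by the count-sorted branch
// above, with the corresponding decode steps.
class PackedPairDemo {
  // High 32 bits hold the count; low 32 bits hold (Integer.MAX_VALUE - termIndex),
  // so equal counts break ties in favor of smaller term indexes.
  static long pack(int count, int termIndex) {
    return (((long) count) << 32) + (Integer.MAX_VALUE - termIndex);
  }
  static int unpackCount(long pair) { return (int) (pair >>> 32); }
  static int unpackTermIndex(long pair) { return Integer.MAX_VALUE - (int) pair; }
}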
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing, String sort, String prefix)
throws IOException {
/* :TODO: potential optimization...
* cache the Terms with the highest docFreq and try them first
* don't enum if we get our max from them
*/
// Minimum term docFreq in order to use the filterCache for that term.
int minDfFilterCache = params.getFieldInt(field, FacetParams.FACET_ENUM_CACHE_MINDF, 0);
// make sure we have a set that is fast for random access, if we will use it for that
DocSet fastForRandomSet = docs;
if (minDfFilterCache>0 && docs instanceof SortedIntDocSet) {
SortedIntDocSet sset = (SortedIntDocSet)docs;
fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
}
IndexSchema schema = searcher.getSchema();
AtomicReader r = searcher.getAtomicReader();
FieldType ft = schema.getFieldType(field);
boolean sortByCount = sort.equals("count") || sort.equals("true");
final int maxsize = limit>=0 ? offset+limit : Integer.MAX_VALUE-1;
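// For count-sorted output, the BoundedTreeSet below keeps only the best maxsize
// (term, count) pairs seen while enumerating terms in index order.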
final BoundedTreeSet<CountPair<BytesRef,Integer>> queue = sortByCount ? new BoundedTreeSet<CountPair<BytesRef,Integer>>(maxsize) : null;
final NamedList<Integer> res = new NamedList<Integer>();
int min=mincount-1; // the smallest value in the top 'N' values
int off=offset;
int lim=limit>=0 ? limit : Integer.MAX_VALUE;
BytesRef startTermBytes = null;
if (prefix != null) {
String indexedPrefix = ft.toInternal(prefix);
startTermBytes = new BytesRef(indexedPrefix);
}
Fields fields = r.fields();
Terms terms = fields==null ? null : fields.terms(field);
TermsEnum termsEnum = null;
SolrIndexSearcher.DocsEnumState deState = null;
BytesRef term = null;
if (terms != null) {
termsEnum = terms.iterator(null);
// TODO: OPT: if seek(ord) is supported for this termsEnum, then we could use it for
// facet.offset when sorting by index order.
if (startTermBytes != null) {
if (termsEnum.seekCeil(startTermBytes, true) == TermsEnum.SeekStatus.END) {
termsEnum = null;
} else {
term = termsEnum.term();
}
} else {
// position termsEnum on first term
term = termsEnum.next();
}
}
DocsEnum docsEnum = null;
CharsRef charsRef = new CharsRef(10);
if (docs.size() >= mincount) {
while (term != null) {
if (startTermBytes != null && !StringHelper.startsWith(term, startTermBytes))
break;
int df = termsEnum.docFreq();
// If we are sorting, we can use df>min (rather than >=) since we
// are going in index order. For certain term distributions this can
// make a large difference (for example, many terms with df=1).
if (df>0 && df>min) {
int c;
if (df >= minDfFilterCache) {
// use the filter cache
if (deState==null) {
deState = new SolrIndexSearcher.DocsEnumState();
deState.fieldName = field;
deState.liveDocs = r.getLiveDocs();
deState.termsEnum = termsEnum;
deState.docsEnum = docsEnum;
}
c = searcher.numDocs(docs, deState);
docsEnum = deState.docsEnum;
} else {
// iterate over TermDocs to calculate the intersection
// TODO: specialize when base docset is a bitset or hash set (skipDocs)? or does it matter for this?
// TODO: do this per-segment for better efficiency (MultiDocsEnum just uses base class impl)
// TODO: would passing deleted docs lead to better efficiency over checking the fastForRandomSet?
docsEnum = termsEnum.docs(null, docsEnum, false);
c=0;
if (docsEnum instanceof MultiDocsEnum) {
MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs();
int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs();
for (int subindex = 0; subindex<numSubs; subindex++) {
MultiDocsEnum.EnumWithSlice sub = subs[subindex];
if (sub.docsEnum == null) continue;
int base = sub.slice.start;
int docid;
while ((docid = sub.docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (fastForRandomSet.exists(docid+base)) c++;
}
}
} else {
int docid;
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (fastForRandomSet.exists(docid)) c++;
}
}
}
if (sortByCount) {
if (c>min) {
BytesRef termCopy = BytesRef.deepCopyOf(term);
queue.add(new CountPair<BytesRef,Integer>(termCopy, c));
if (queue.size()>=maxsize) min=queue.last().val;
}
} else {
if (c >= mincount && --off<0) {
if (--lim<0) break;
ft.indexedToReadable(term, charsRef);
res.add(charsRef.toString(), c);
}
}
}
term = termsEnum.next();
}
}
if (sortByCount) {
for (CountPair<BytesRef,Integer> p : queue) {
if (--off>=0) continue;
if (--lim<0) break;
ft.indexedToReadable(p.key, charsRef);
res.add(charsRef.toString(), p.val);
}
}
if (missing) {
res.add(null, getFieldMissingCount(searcher,docs,field));
}
return res;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
@Deprecated
public NamedList<Object> getFacetDateCounts()
throws IOException, ParseException {
final NamedList<Object> resOuter = new SimpleOrderedMap<Object>();
final String[] fields = params.getParams(FacetParams.FACET_DATE);
if (null == fields || 0 == fields.length) return resOuter;
for (String f : fields) {
getFacetDateCounts(f, resOuter);
}
return resOuter;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
@Deprecated
public void getFacetDateCounts(String dateFacet, NamedList<Object> resOuter)
throws IOException, ParseException {
final IndexSchema schema = searcher.getSchema();
parseParams(FacetParams.FACET_DATE, dateFacet);
String f = facetValue;
final NamedList<Object> resInner = new SimpleOrderedMap<Object>();
resOuter.add(key, resInner);
final SchemaField sf = schema.getField(f);
if (! (sf.getType() instanceof DateField)) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"Can not date facet on a field which is not a DateField: " + f);
}
final DateField ft = (DateField) sf.getType();
final String startS
= required.getFieldParam(f,FacetParams.FACET_DATE_START);
final Date start;
try {
start = ft.parseMath(null, startS);
} catch (SolrException e) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"date facet 'start' is not a valid Date string: " + startS, e);
}
final String endS
= required.getFieldParam(f,FacetParams.FACET_DATE_END);
Date end; // not final, the hard-end option may change this
try {
end = ft.parseMath(null, endS);
} catch (SolrException e) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"date facet 'end' is not a valid Date string: " + endS, e);
}
if (end.before(start)) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"date facet 'end' comes before 'start': "+endS+" < "+startS);
}
final String gap = required.getFieldParam(f,FacetParams.FACET_DATE_GAP);
final DateMathParser dmp = new DateMathParser();
final int minCount = params.getFieldInt(f,FacetParams.FACET_MINCOUNT, 0);
String[] iStrs = params.getFieldParams(f,FacetParams.FACET_DATE_INCLUDE);
// Legacy support for default of [lower,upper,edge] for date faceting
// this is not handled by FacetRangeInclude.parseParam because
// range faceting has different defaults
final EnumSet<FacetRangeInclude> include =
(null == iStrs || 0 == iStrs.length ) ?
EnumSet.of(FacetRangeInclude.LOWER,
FacetRangeInclude.UPPER,
FacetRangeInclude.EDGE)
: FacetRangeInclude.parseParam(iStrs);
try {
Date low = start;
while (low.before(end)) {
dmp.setNow(low);
String label = ft.toExternal(low);
Date high = dmp.parseMath(gap);
if (end.before(high)) {
if (params.getFieldBool(f,FacetParams.FACET_DATE_HARD_END,false)) {
high = end;
} else {
end = high;
}
}
if (high.before(low)) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"date facet infinite loop (is gap negative?)");
}
final boolean includeLower =
(include.contains(FacetRangeInclude.LOWER) ||
(include.contains(FacetRangeInclude.EDGE) && low.equals(start)));
final boolean includeUpper =
(include.contains(FacetRangeInclude.UPPER) ||
(include.contains(FacetRangeInclude.EDGE) && high.equals(end)));
final int count = rangeCount(sf,low,high,includeLower,includeUpper);
if (count >= minCount) {
resInner.add(label, count);
}
low = high;
}
} catch (java.text.ParseException e) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"date facet 'gap' is not a valid Date Math string: " + gap, e);
}
// explicitly return the gap and end so all the counts
// (including before/after/between) are meaningful - even if mincount
// has removed the neighboring ranges
resInner.add("gap", gap);
resInner.add("start", start);
resInner.add("end", end);
final String[] othersP =
params.getFieldParams(f,FacetParams.FACET_DATE_OTHER);
if (null != othersP && 0 < othersP.length ) {
final Set<FacetRangeOther> others = EnumSet.noneOf(FacetRangeOther.class);
for (final String o : othersP) {
others.add(FacetRangeOther.get(o));
}
// no matter what other values are listed, we don't do
// anything if "none" is specified.
if (! others.contains(FacetRangeOther.NONE) ) {
boolean all = others.contains(FacetRangeOther.ALL);
if (all || others.contains(FacetRangeOther.BEFORE)) {
// include upper bound if "outer" or if first gap doesn't already include it
resInner.add(FacetRangeOther.BEFORE.toString(),
rangeCount(sf,null,start,
false,
(include.contains(FacetRangeInclude.OUTER) ||
(! (include.contains(FacetRangeInclude.LOWER) ||
include.contains(FacetRangeInclude.EDGE))))));
}
if (all || others.contains(FacetRangeOther.AFTER)) {
// include lower bound if "outer" or if last gap doesn't already include it
resInner.add(FacetRangeOther.AFTER.toString(),
rangeCount(sf,end,null,
(include.contains(FacetRangeInclude.OUTER) ||
(! (include.contains(FacetRangeInclude.UPPER) ||
include.contains(FacetRangeInclude.EDGE)))),
false));
}
if (all || others.contains(FacetRangeOther.BETWEEN)) {
resInner.add(FacetRangeOther.BETWEEN.toString(),
rangeCount(sf,start,end,
(include.contains(FacetRangeInclude.LOWER) ||
include.contains(FacetRangeInclude.EDGE)),
(include.contains(FacetRangeInclude.UPPER) ||
include.contains(FacetRangeInclude.EDGE))));
}
}
}
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
public NamedList<Object> getFacetRangeCounts() throws IOException, ParseException {
final NamedList<Object> resOuter = new SimpleOrderedMap<Object>();
final String[] fields = params.getParams(FacetParams.FACET_RANGE);
if (null == fields || 0 == fields.length) return resOuter;
for (String f : fields) {
getFacetRangeCounts(f, resOuter);
}
return resOuter;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
void getFacetRangeCounts(String facetRange, NamedList<Object> resOuter)
throws IOException, ParseException {
final IndexSchema schema = searcher.getSchema();
parseParams(FacetParams.FACET_RANGE, facetRange);
String f = facetValue;
final SchemaField sf = schema.getField(f);
final FieldType ft = sf.getType();
RangeEndpointCalculator<?> calc = null;
if (ft instanceof TrieField) {
final TrieField trie = (TrieField)ft;
switch (trie.getType()) {
case FLOAT:
calc = new FloatRangeEndpointCalculator(sf);
break;
case DOUBLE:
calc = new DoubleRangeEndpointCalculator(sf);
break;
case INTEGER:
calc = new IntegerRangeEndpointCalculator(sf);
break;
case LONG:
calc = new LongRangeEndpointCalculator(sf);
break;
default:
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"Unable to range facet on tried field of unexpected type:" + f);
}
} else if (ft instanceof DateField) {
calc = new DateRangeEndpointCalculator(sf, null);
} else if (ft instanceof SortableIntField) {
calc = new IntegerRangeEndpointCalculator(sf);
} else if (ft instanceof SortableLongField) {
calc = new LongRangeEndpointCalculator(sf);
} else if (ft instanceof SortableFloatField) {
calc = new FloatRangeEndpointCalculator(sf);
} else if (ft instanceof SortableDoubleField) {
calc = new DoubleRangeEndpointCalculator(sf);
} else {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"Unable to range facet on field:" + sf);
}
resOuter.add(key, getFacetRangeCounts(sf, calc));
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
private <T extends Comparable<T>> NamedList getFacetRangeCounts
(final SchemaField sf,
final RangeEndpointCalculator<T> calc) throws IOException {
final String f = sf.getName();
final NamedList<Object> res = new SimpleOrderedMap<Object>();
final NamedList<Integer> counts = new NamedList<Integer>();
res.add("counts", counts);
final T start = calc.getValue(required.getFieldParam(f,FacetParams.FACET_RANGE_START));
// not final, the hard-end option may change this
T end = calc.getValue(required.getFieldParam(f,FacetParams.FACET_RANGE_END));
if (end.compareTo(start) < 0) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"range facet 'end' comes before 'start': "+end+" < "+start);
}
final String gap = required.getFieldParam(f, FacetParams.FACET_RANGE_GAP);
// explicitly return the gap. compute this early so we are more
// likely to catch parse errors before attempting math
res.add("gap", calc.getGap(gap));
final int minCount = params.getFieldInt(f,FacetParams.FACET_MINCOUNT, 0);
final EnumSet<FacetRangeInclude> include = FacetRangeInclude.parseParam
(params.getFieldParams(f,FacetParams.FACET_RANGE_INCLUDE));
T low = start;
while (low.compareTo(end) < 0) {
T high = calc.addGap(low, gap);
if (end.compareTo(high) < 0) {
if (params.getFieldBool(f,FacetParams.FACET_RANGE_HARD_END,false)) {
high = end;
} else {
end = high;
}
}
if (high.compareTo(low) < 0) {
throw new SolrException
(SolrException.ErrorCode.BAD_REQUEST,
"range facet infinite loop (is gap negative? did the math overflow?)");
}
final boolean includeLower =
(include.contains(FacetRangeInclude.LOWER) ||
(include.contains(FacetRangeInclude.EDGE) &&
0 == low.compareTo(start)));
final boolean includeUpper =
(include.contains(FacetRangeInclude.UPPER) ||
(include.contains(FacetRangeInclude.EDGE) &&
0 == high.compareTo(end)));
final String lowS = calc.formatValue(low);
final String highS = calc.formatValue(high);
final int count = rangeCount(sf, lowS, highS,
includeLower,includeUpper);
if (count >= minCount) {
counts.add(lowS, count);
}
low = high;
}
// explicitly return the start and end so all the counts
// (including before/after/between) are meaningful - even if mincount
// has removed the neighboring ranges
res.add("start", start);
res.add("end", end);
final String[] othersP =
params.getFieldParams(f,FacetParams.FACET_RANGE_OTHER);
if (null != othersP && 0 < othersP.length ) {
Set<FacetRangeOther> others = EnumSet.noneOf(FacetRangeOther.class);
for (final String o : othersP) {
others.add(FacetRangeOther.get(o));
}
// no matter what other values are listed, we don't do
// anything if "none" is specified.
if (! others.contains(FacetRangeOther.NONE) ) {
boolean all = others.contains(FacetRangeOther.ALL);
final String startS = calc.formatValue(start);
final String endS = calc.formatValue(end);
if (all || others.contains(FacetRangeOther.BEFORE)) {
// include upper bound if "outer" or if first gap doesn't already include it
res.add(FacetRangeOther.BEFORE.toString(),
rangeCount(sf,null,startS,
false,
(include.contains(FacetRangeInclude.OUTER) ||
(! (include.contains(FacetRangeInclude.LOWER) ||
include.contains(FacetRangeInclude.EDGE))))));
}
if (all || others.contains(FacetRangeOther.AFTER)) {
// include lower bound if "outer" or if last gap doesn't already include it
res.add(FacetRangeOther.AFTER.toString(),
rangeCount(sf,endS,null,
(include.contains(FacetRangeInclude.OUTER) ||
(! (include.contains(FacetRangeInclude.UPPER) ||
include.contains(FacetRangeInclude.EDGE)))),
false));
}
if (all || others.contains(FacetRangeOther.BETWEEN)) {
res.add(FacetRangeOther.BETWEEN.toString(),
rangeCount(sf,startS,endS,
(include.contains(FacetRangeInclude.LOWER) ||
include.contains(FacetRangeInclude.EDGE)),
(include.contains(FacetRangeInclude.UPPER) ||
include.contains(FacetRangeInclude.EDGE))));
}
}
}
return res;
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
protected int rangeCount(SchemaField sf, String low, String high,
boolean iLow, boolean iHigh) throws IOException {
Query rangeQ = sf.getType().getRangeQuery(null, sf,low,high,iLow,iHigh);
return searcher.numDocs(rangeQ ,base);
}
// in core/src/java/org/apache/solr/request/SimpleFacets.java
@Deprecated
protected int rangeCount(SchemaField sf, Date low, Date high,
boolean iLow, boolean iHigh) throws IOException {
Query rangeQ = ((DateField)(sf.getType())).getRangeQuery(null, sf,low,high,iLow,iHigh);
return searcher.numDocs(rangeQ ,base);
}
// in core/src/java/org/apache/solr/request/UnInvertedField.java
@Override
protected void visitTerm(TermsEnum te, int termNum) throws IOException {
if (termNum >= maxTermCounts.length) {
// resize by doubling - for very large number of unique terms, expanding
// by 4K and resultant GC will dominate uninvert times. Resize at end if material
int[] newMaxTermCounts = new int[maxTermCounts.length*2];
System.arraycopy(maxTermCounts, 0, newMaxTermCounts, 0, termNum);
maxTermCounts = newMaxTermCounts;
}
final BytesRef term = te.term();
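// Terms whose docFreq exceeds maxTermDocFreq are treated as "big terms": they are
// kept out of the packed term-number lists and counted later by direct DocSet
// intersection, which is cheaper for very common terms.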
if (te.docFreq() > maxTermDocFreq) {
TopTerm topTerm = new TopTerm();
topTerm.term = BytesRef.deepCopyOf(term);
topTerm.termNum = termNum;
bigTerms.put(topTerm.termNum, topTerm);
if (deState == null) {
deState = new SolrIndexSearcher.DocsEnumState();
deState.fieldName = field;
// deState.termsEnum = te.tenum;
deState.termsEnum = te; // TODO: check for MultiTermsEnum in SolrIndexSearcher could now fail?
deState.docsEnum = docsEnum;
deState.minSetSizeCached = maxTermDocFreq;
}
docsEnum = deState.docsEnum;
DocSet set = searcher.getDocSet(deState);
maxTermCounts[termNum] = set.size();
}
}
// in core/src/java/org/apache/solr/request/UnInvertedField.java
public NamedList<Integer> getCounts(SolrIndexSearcher searcher, DocSet baseDocs, int offset, int limit, Integer mincount, boolean missing, String sort, String prefix) throws IOException {
use.incrementAndGet();
FieldType ft = searcher.getSchema().getFieldType(field);
NamedList<Integer> res = new NamedList<Integer>(); // order is important
DocSet docs = baseDocs;
int baseSize = docs.size();
int maxDoc = searcher.maxDoc();
//System.out.println("GET COUNTS field=" + field + " baseSize=" + baseSize + " minCount=" + mincount + " maxDoc=" + maxDoc + " numTermsInField=" + numTermsInField);
if (baseSize >= mincount) {
final int[] index = this.index;
// tricky: we add one more element than we need because we will reuse this array later
// for ordering term ords before converting to term labels.
final int[] counts = new int[numTermsInField + 1];
//
// If there is a prefix, find its start and end term numbers
//
int startTerm = 0;
int endTerm = numTermsInField; // one past the end
TermsEnum te = getOrdTermsEnum(searcher.getAtomicReader());
if (te != null && prefix != null && prefix.length() > 0) {
final BytesRef prefixBr = new BytesRef(prefix);
if (te.seekCeil(prefixBr, true) == TermsEnum.SeekStatus.END) {
startTerm = numTermsInField;
} else {
startTerm = (int) te.ord();
}
prefixBr.append(UnicodeUtil.BIG_TERM);
if (te.seekCeil(prefixBr, true) == TermsEnum.SeekStatus.END) {
endTerm = numTermsInField;
} else {
endTerm = (int) te.ord();
}
}
/***********
// Alternative 2: get the docSet of the prefix (could take a while) and
// then do the intersection with the baseDocSet first.
if (prefix != null && prefix.length() > 0) {
docs = searcher.getDocSet(new ConstantScorePrefixQuery(new Term(field, ft.toInternal(prefix))), docs);
// The issue with this method is that it returns 0 counts for terms w/o
// the prefix. We can't just filter out those terms later because it may
// mean that we didn't collect enough terms in the queue (in the sorted case).
}
***********/
boolean doNegative = baseSize > maxDoc >> 1 && termInstances > 0
&& startTerm==0 && endTerm==numTermsInField
&& docs instanceof BitDocSet;
if (doNegative) {
OpenBitSet bs = (OpenBitSet)((BitDocSet)docs).getBits().clone();
bs.flip(0, maxDoc);
// TODO: when iterator across negative elements is available, use that
// instead of creating a new bitset and inverting.
docs = new BitDocSet(bs, maxDoc - baseSize);
// simply negating will mean that we have deleted docs in the set.
// that should be OK, as their entries in our table should be empty.
//System.out.println(" NEG");
}
// For the biggest terms, do straight set intersections
for (TopTerm tt : bigTerms.values()) {
//System.out.println(" do big termNum=" + tt.termNum + " term=" + tt.term.utf8ToString());
// TODO: counts could be deferred if sorted==false
if (tt.termNum >= startTerm && tt.termNum < endTerm) {
counts[tt.termNum] = searcher.numDocs(new TermQuery(new Term(field, tt.term)), docs);
//System.out.println(" count=" + counts[tt.termNum]);
} else {
//System.out.println("SKIP term=" + tt.termNum);
}
}
// TODO: we could short-circuit counting altogether for sorted faceting
// where we already have enough terms from the bigTerms
// TODO: we could shrink the size of the collection array, and
// additionally break when the termNumber got above endTerm, but
// it would require two extra conditionals in the inner loop (although
// they would be predictable for the non-prefix case).
// Perhaps a different copy of the code would be warranted.
if (termInstances > 0) {
DocIterator iter = docs.iterator();
while (iter.hasNext()) {
int doc = iter.nextDoc();
//System.out.println("iter doc=" + doc);
int code = index[doc];
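// index[doc] either inlines the term-number deltas directly in the int (low byte != 1)
// or, when the low byte == 1, holds a pointer (code >>> 8) into the tnums byte arrays.
// Both forms store deltas 7 bits per byte with the high bit as a continuation flag;
// a delta of 0 terminates the list.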
if ((code & 0xff)==1) {
//System.out.println(" ptr");
int pos = code>>>8;
int whichArray = (doc >>> 16) & 0xff;
byte[] arr = tnums[whichArray];
int tnum = 0;
for(;;) {
int delta = 0;
for(;;) {
byte b = arr[pos++];
delta = (delta << 7) | (b & 0x7f);
if ((b & 0x80) == 0) break;
}
if (delta == 0) break;
tnum += delta - TNUM_OFFSET;
//System.out.println(" tnum=" + tnum);
counts[tnum]++;
}
} else {
//System.out.println(" inlined");
int tnum = 0;
int delta = 0;
for (;;) {
delta = (delta << 7) | (code & 0x7f);
if ((code & 0x80)==0) {
if (delta==0) break;
tnum += delta - TNUM_OFFSET;
//System.out.println(" tnum=" + tnum);
counts[tnum]++;
delta = 0;
}
code >>>= 8;
}
}
}
}
final CharsRef charsRef = new CharsRef();
int off=offset;
int lim=limit>=0 ? limit : Integer.MAX_VALUE;
if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
int maxsize = limit>0 ? offset+limit : Integer.MAX_VALUE-1;
maxsize = Math.min(maxsize, numTermsInField);
LongPriorityQueue queue = new LongPriorityQueue(Math.min(maxsize,1000), maxsize, Long.MIN_VALUE);
int min=mincount-1; // the smallest value in the top 'N' values
//System.out.println("START=" + startTerm + " END=" + endTerm);
for (int i=startTerm; i<endTerm; i++) {
int c = doNegative ? maxTermCounts[i] - counts[i] : counts[i];
if (c>min) {
// NOTE: we use c>min rather than c>=min as an optimization because we are going in
// index order, so we already know that the keys are ordered. This can be very
// important if a lot of the counts are repeated (like zero counts would be).
// smaller term numbers sort higher, so subtract the term number instead
long pair = (((long)c)<<32) + (Integer.MAX_VALUE - i);
boolean displaced = queue.insert(pair);
if (displaced) min=(int)(queue.top() >>> 32);
}
}
// now select the right page from the results
// if we are deep paging, we don't have to order the highest "offset" counts.
int collectCount = Math.max(0, queue.size() - off);
assert collectCount <= lim;
// the start and end indexes of our list "sorted" (starting with the highest value)
int sortedIdxStart = queue.size() - (collectCount - 1);
int sortedIdxEnd = queue.size() + 1;
final long[] sorted = queue.sort(collectCount);
final int[] indirect = counts; // reuse the counts array for the index into the tnums array
assert indirect.length >= sortedIdxEnd;
for (int i=sortedIdxStart; i<sortedIdxEnd; i++) {
long pair = sorted[i];
int c = (int)(pair >>> 32);
int tnum = Integer.MAX_VALUE - (int)pair;
indirect[i] = i; // store the index for indirect sorting
sorted[i] = tnum; // reuse the "sorted" array to store the term numbers for indirect sorting
// add a null label for now... we'll fill it in later.
res.add(null, c);
}
// now sort the indexes by the term numbers
PrimUtils.sort(sortedIdxStart, sortedIdxEnd, indirect, new PrimUtils.IntComparator() {
@Override
public int compare(int a, int b) {
return (int)sorted[a] - (int)sorted[b];
}
@Override
public boolean lessThan(int a, int b) {
return sorted[a] < sorted[b];
}
@Override
public boolean equals(int a, int b) {
return sorted[a] == sorted[b];
}
});
// convert the term numbers to term values and set
// as the label
//System.out.println("sortStart=" + sortedIdxStart + " end=" + sortedIdxEnd);
for (int i=sortedIdxStart; i<sortedIdxEnd; i++) {
int idx = indirect[i];
int tnum = (int)sorted[idx];
final String label = getReadableValue(getTermValue(te, tnum), ft, charsRef);
//System.out.println(" label=" + label);
res.setName(idx - sortedIdxStart, label);
}
} else {
// add results in index order
int i=startTerm;
if (mincount<=0) {
// if mincount<=0, then we won't discard any terms and we know exactly
// where to start.
i=startTerm+off;
off=0;
}
for (; i<endTerm; i++) {
int c = doNegative ? maxTermCounts[i] - counts[i] : counts[i];
if (c<mincount || --off>=0) continue;
if (--lim<0) break;
final String label = getReadableValue(getTermValue(te, i), ft, charsRef);
res.add(label, c);
}
}
}
if (missing) {
// TODO: a faster solution for this?
res.add(null, SimpleFacets.getFieldMissingCount(searcher, baseDocs, field));
}
return res;
}
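// Editor's illustration (not part of the Solr source): a minimal decoder for the
// byte-level term-number encoding walked by the loops above, under the same
// assumptions (7 data bits per byte, high bit = continuation, zero delta = end).
static java.util.List<Integer> decodeTermNums(byte[] arr, int pos, int tnumOffset) {
  java.util.List<Integer> tnums = new java.util.ArrayList<Integer>();
  int tnum = 0;
  for (;;) {
    int delta = 0;
    for (;;) {
      byte b = arr[pos++];
      delta = (delta << 7) | (b & 0x7f);
      if ((b & 0x80) == 0) break; // high bit clear: last byte of this delta
    }
    if (delta == 0) break;        // zero delta marks the end of the list
    tnum += delta - tnumOffset;   // deltas are stored offset by TNUM_OFFSET
    tnums.add(tnum);
  }
  return tnums;
}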
// in core/src/java/org/apache/solr/request/UnInvertedField.java
public StatsValues getStats(SolrIndexSearcher searcher, DocSet baseDocs, String[] facet) throws IOException {
// this function is ripped off nearly wholesale from the getCounts function, for use
// with multiValued fields within the StatsComponent. It may be useful to find common
// functionality between the two and refactor the code somewhat.
use.incrementAndGet();
SchemaField sf = searcher.getSchema().getField(field);
// FieldType ft = sf.getType();
StatsValues allstats = StatsValuesFactory.createStatsValues(sf);
DocSet docs = baseDocs;
int baseSize = docs.size();
int maxDoc = searcher.maxDoc();
if (baseSize <= 0) return allstats;
DocSet missing = docs.andNot( searcher.getDocSet(new TermRangeQuery(field, null, null, false, false)) );
int i = 0;
final FieldFacetStats[] finfo = new FieldFacetStats[facet.length];
//Initialize facetstats, if facets have been passed in
FieldCache.DocTermsIndex si;
for (String f : facet) {
SchemaField facet_sf = searcher.getSchema().getField(f);
try {
si = FieldCache.DEFAULT.getTermsIndex(searcher.getAtomicReader(), f);
}
catch (IOException e) {
throw new RuntimeException("failed to open field cache for: " + f, e);
}
finfo[i] = new FieldFacetStats(f, si, sf, facet_sf, numTermsInField);
i++;
}
final int[] index = this.index;
final int[] counts = new int[numTermsInField];//keep track of the number of times we see each word in the field for all the documents in the docset
TermsEnum te = getOrdTermsEnum(searcher.getAtomicReader());
boolean doNegative = false;
if (finfo.length == 0) {
//if we're collecting statistics with a facet field, can't do inverted counting
doNegative = baseSize > maxDoc >> 1 && termInstances > 0
&& docs instanceof BitDocSet;
}
if (doNegative) {
OpenBitSet bs = (OpenBitSet) ((BitDocSet) docs).getBits().clone();
bs.flip(0, maxDoc);
// TODO: when iterator across negative elements is available, use that
// instead of creating a new bitset and inverting.
docs = new BitDocSet(bs, maxDoc - baseSize);
// simply negating will mean that we have deleted docs in the set.
// that should be OK, as their entries in our table should be empty.
}
// For the biggest terms, do straight set intersections
for (TopTerm tt : bigTerms.values()) {
// TODO: counts could be deferred if sorted==false
if (tt.termNum >= 0 && tt.termNum < numTermsInField) {
final Term t = new Term(field, tt.term);
if (finfo.length == 0) {
counts[tt.termNum] = searcher.numDocs(new TermQuery(t), docs);
} else {
//COULD BE VERY SLOW
//if we're collecting stats for facet fields, we need to iterate on all matching documents
DocSet bigTermDocSet = searcher.getDocSet(new TermQuery(t)).intersection(docs);
DocIterator iter = bigTermDocSet.iterator();
while (iter.hasNext()) {
int doc = iter.nextDoc();
counts[tt.termNum]++;
for (FieldFacetStats f : finfo) {
f.facetTermNum(doc, tt.termNum);
}
}
}
}
}
if (termInstances > 0) {
DocIterator iter = docs.iterator();
while (iter.hasNext()) {
int doc = iter.nextDoc();
int code = index[doc];
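// Same packed term-number decoding as in getCounts() above; each decoded term
// number additionally feeds the per-facet-field statistics via facetTermNum().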
if ((code & 0xff) == 1) {
int pos = code >>> 8;
int whichArray = (doc >>> 16) & 0xff;
byte[] arr = tnums[whichArray];
int tnum = 0;
for (; ;) {
int delta = 0;
for (; ;) {
byte b = arr[pos++];
delta = (delta << 7) | (b & 0x7f);
if ((b & 0x80) == 0) break;
}
if (delta == 0) break;
tnum += delta - TNUM_OFFSET;
counts[tnum]++;
for (FieldFacetStats f : finfo) {
f.facetTermNum(doc, tnum);
}
}
} else {
int tnum = 0;
int delta = 0;
for (; ;) {
delta = (delta << 7) | (code & 0x7f);
if ((code & 0x80) == 0) {
if (delta == 0) break;
tnum += delta - TNUM_OFFSET;
counts[tnum]++;
for (FieldFacetStats f : finfo) {
f.facetTermNum(doc, tnum);
}
delta = 0;
}
code >>>= 8;
}
}
}
}
// add results in index order
for (i = 0; i < numTermsInField; i++) {
int c = doNegative ? maxTermCounts[i] - counts[i] : counts[i];
if (c == 0) continue;
BytesRef value = getTermValue(te, i);
allstats.accumulate(value, c);
// as we've parsed the termnum into a value, let's also accumulate fieldfacet statistics
for (FieldFacetStats f : finfo) {
f.accumulateTermNum(i, value);
}
}
int c = missing.size();
allstats.addMissing(c);
if (finfo.length > 0) {
for (FieldFacetStats f : finfo) {
Map<String, StatsValues> facetStatsValues = f.facetStatsValues;
FieldType facetType = searcher.getSchema().getFieldType(f.name);
for (Map.Entry<String,StatsValues> entry : facetStatsValues.entrySet()) {
String termLabel = entry.getKey();
int missingCount = searcher.numDocs(new TermQuery(new Term(f.name, facetType.toInternal(termLabel))), missing);
entry.getValue().addMissing(missingCount);
}
allstats.addFacet(f.name, facetStatsValues);
}
}
return allstats;
}
// in core/src/java/org/apache/solr/request/UnInvertedField.java
BytesRef getTermValue(TermsEnum te, int termNum) throws IOException {
//System.out.println("getTermValue termNum=" + termNum + " this=" + this + " numTerms=" + numTermsInField);
if (bigTerms.size() > 0) {
// see if the term is one of our big terms.
TopTerm tt = bigTerms.get(termNum);
if (tt != null) {
//System.out.println(" return big " + tt.term);
return tt.term;
}
}
return lookupTerm(te, termNum);
}
// in core/src/java/org/apache/solr/request/UnInvertedField.java
public static UnInvertedField getUnInvertedField(String field, SolrIndexSearcher searcher) throws IOException {
SolrCache<String,UnInvertedField> cache = searcher.getFieldValueCache();
if (cache == null) {
return new UnInvertedField(field, searcher);
}
UnInvertedField uif = cache.get(field);
if (uif == null) {
synchronized (cache) {
uif = cache.get(field);
if (uif == null) {
uif = new UnInvertedField(field, searcher);
cache.put(field, uif);
}
}
}
return uif;
}
// in core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException {
if( abortErrorMessage != null ) {
((HttpServletResponse)response).sendError( 500, abortErrorMessage );
return;
}
if (this.cores == null) {
((HttpServletResponse)response).sendError( 403, "Server is shutting down" );
return;
}
CoreContainer cores = this.cores;
SolrCore core = null;
SolrQueryRequest solrReq = null;
if( request instanceof HttpServletRequest) {
HttpServletRequest req = (HttpServletRequest)request;
HttpServletResponse resp = (HttpServletResponse)response;
SolrRequestHandler handler = null;
String corename = "";
try {
// put the core container in request attribute
req.setAttribute("org.apache.solr.CoreContainer", cores);
String path = req.getServletPath();
if( req.getPathInfo() != null ) {
// this lets you handle /update/commit when /update is a servlet
path += req.getPathInfo();
}
if( pathPrefix != null && path.startsWith( pathPrefix ) ) {
path = path.substring( pathPrefix.length() );
}
// check for management path
String alternate = cores.getManagementPath();
if (alternate != null && path.startsWith(alternate)) {
path = path.substring(0, alternate.length());
}
// unused feature ?
int idx = path.indexOf( ':' );
if( idx > 0 ) {
// save the portion after the ':' for a 'handler' path parameter
path = path.substring( 0, idx );
}
// Check for the core admin page
if( path.equals( cores.getAdminPath() ) ) {
handler = cores.getMultiCoreHandler();
solrReq = adminRequestParser.parse(null,path, req);
handleAdminRequest(req, response, handler, solrReq);
return;
}
else {
//otherwise, we should find a core from the path
idx = path.indexOf( "/", 1 );
if( idx > 1 ) {
// try to get the corename as a request parameter first
corename = path.substring( 1, idx );
core = cores.getCore(corename);
if (core != null) {
path = path.substring( idx );
}
}
if (core == null) {
if (!cores.isZooKeeperAware() ) {
core = cores.getCore("");
}
}
}
if (core == null && cores.isZooKeeperAware()) {
// we couldn't find the core - let's make sure a collection was not specified instead
core = getCoreByCollection(cores, corename, path);
if (core != null) {
// we found a core, update the path
path = path.substring( idx );
} else {
// try the default core
core = cores.getCore("");
}
// TODO: if we couldn't find it locally, look on other nodes
}
// With a valid core...
if( core != null ) {
final SolrConfig config = core.getSolrConfig();
// get or create/cache the parser for the core
SolrRequestParsers parser = null;
parser = parsers.get(config);
if( parser == null ) {
parser = new SolrRequestParsers(config);
parsers.put(config, parser );
}
// Determine the handler from the url path if not set
// (we might already have selected the cores handler)
if( handler == null && path.length() > 1 ) { // don't match "" or "/" as valid path
handler = core.getRequestHandler( path );
// no handler yet but allowed to handle select; let's check
if( handler == null && parser.isHandleSelect() ) {
if( "/select".equals( path ) || "/select/".equals( path ) ) {
solrReq = parser.parse( core, path, req );
String qt = solrReq.getParams().get( CommonParams.QT );
handler = core.getRequestHandler( qt );
if( handler == null ) {
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "unknown handler: "+qt);
}
if( qt != null && qt.startsWith("/") && (handler instanceof ContentStreamHandlerBase)) {
//For security reasons it's a bad idea to allow a leading '/', ex: /select?qt=/update see SOLR-3161
//There was no restriction from Solr 1.4 thru 3.5 and it's not supported for update handlers.
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "Invalid query type. Do not use /select to access: "+qt);
}
}
}
}
// With a valid handler and a valid core...
if( handler != null ) {
// if not a /select, create the request
if( solrReq == null ) {
solrReq = parser.parse( core, path, req );
}
final Method reqMethod = Method.getMethod(req.getMethod());
HttpCacheHeaderUtil.setCacheControlHeader(config, resp, reqMethod);
// unless we have been explicitly told not to, do cache validation
// if we fail cache validation, execute the query
if (config.getHttpCachingConfig().isNever304() ||
!HttpCacheHeaderUtil.doCacheHeaderValidation(solrReq, req, reqMethod, resp)) {
SolrQueryResponse solrRsp = new SolrQueryResponse();
/* even for HEAD requests, we need to execute the handler to
* ensure we don't get an error (and to make sure the correct
* QueryResponseWriter is selected and we get the correct
* Content-Type)
*/
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(solrReq, solrRsp));
this.execute( req, handler, solrReq, solrRsp );
HttpCacheHeaderUtil.checkHttpCachingVeto(solrRsp, resp, reqMethod);
// add info to http headers
//TODO: See SOLR-232 and SOLR-267.
/*try {
NamedList solrRspHeader = solrRsp.getResponseHeader();
for (int i=0; i<solrRspHeader.size(); i++) {
((javax.servlet.http.HttpServletResponse) response).addHeader(("Solr-" + solrRspHeader.getName(i)), String.valueOf(solrRspHeader.getVal(i)));
}
} catch (ClassCastException cce) {
log.log(Level.WARNING, "exception adding response header log information", cce);
}*/
QueryResponseWriter responseWriter = core.getQueryResponseWriter(solrReq);
writeResponse(solrRsp, response, responseWriter, solrReq, reqMethod);
}
return; // we are done with a valid handler
}
}
log.debug("no handler or core retrieved for " + path + ", follow through...");
}
catch (Throwable ex) {
sendError( core, solrReq, request, (HttpServletResponse)response, ex );
return;
}
finally {
if( solrReq != null ) {
solrReq.close();
}
if (core != null) {
core.close();
}
SolrRequestInfo.clearRequestInfo();
}
}
// Otherwise let the webapp handle the request
chain.doFilter(request, response);
}
// in core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
private void handleAdminRequest(HttpServletRequest req, ServletResponse response, SolrRequestHandler handler,
SolrQueryRequest solrReq) throws IOException {
SolrQueryResponse solrResp = new SolrQueryResponse();
final NamedList<Object> responseHeader = new SimpleOrderedMap<Object>();
solrResp.add("responseHeader", responseHeader);
NamedList toLog = solrResp.getToLog();
toLog.add("webapp", req.getContextPath());
toLog.add("path", solrReq.getContext().get("path"));
toLog.add("params", "{" + solrReq.getParamString() + "}");
handler.handleRequest(solrReq, solrResp);
SolrCore.setResponseHeaderValues(handler, solrReq, solrResp);
StringBuilder sb = new StringBuilder();
for (int i = 0; i < toLog.size(); i++) {
String name = toLog.getName(i);
Object val = toLog.getVal(i);
sb.append(name).append("=").append(val).append(" ");
}
QueryResponseWriter respWriter = SolrCore.DEFAULT_RESPONSE_WRITERS.get(solrReq.getParams().get(CommonParams.WT));
if (respWriter == null) respWriter = SolrCore.DEFAULT_RESPONSE_WRITERS.get("standard");
writeResponse(solrResp, response, respWriter, solrReq, Method.getMethod(req.getMethod()));
}
// in core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
private void writeResponse(SolrQueryResponse solrRsp, ServletResponse response,
QueryResponseWriter responseWriter, SolrQueryRequest solrReq, Method reqMethod)
throws IOException {
// Now write it out
final String ct = responseWriter.getContentType(solrReq, solrRsp);
// don't call setContentType on null
if (null != ct) response.setContentType(ct);
if (solrRsp.getException() != null) {
NamedList info = new SimpleOrderedMap();
int code = getErrorInfo(solrRsp.getException(),info);
solrRsp.add("error", info);
((HttpServletResponse) response).setStatus(code);
}
if (Method.HEAD != reqMethod) {
if (responseWriter instanceof BinaryQueryResponseWriter) {
BinaryQueryResponseWriter binWriter = (BinaryQueryResponseWriter) responseWriter;
binWriter.write(response.getOutputStream(), solrReq, solrRsp);
} else {
String charset = ContentStreamBase.getCharsetFromContentType(ct);
Writer out = (charset == null || charset.equalsIgnoreCase("UTF-8"))
? new OutputStreamWriter(response.getOutputStream(), UTF8)
: new OutputStreamWriter(response.getOutputStream(), charset);
out = new FastWriter(out);
responseWriter.write(out, solrReq, solrRsp);
out.flush();
}
}
//else http HEAD request, nothing to write out, waited this long just to get ContentType
}
// in core/src/java/org/apache/solr/servlet/SolrDispatchFilter.java
protected void sendError(SolrCore core,
SolrQueryRequest req,
ServletRequest request,
HttpServletResponse response,
Throwable ex) throws IOException {
try {
SolrQueryResponse solrResp = new SolrQueryResponse();
if(ex instanceof Exception) {
solrResp.setException((Exception)ex);
}
else {
solrResp.setException(new RuntimeException(ex));
}
if(core==null) {
core = cores.getCore(""); // default core
}
if(req==null) {
req = new SolrQueryRequestBase(core,new ServletSolrParams(request)) {};
}
QueryResponseWriter writer = core.getQueryResponseWriter(req);
writeResponse(solrResp, response, writer, req, Method.GET);
}
catch( Throwable t ) { // This error really does not matter
SimpleOrderedMap info = new SimpleOrderedMap();
int code=getErrorInfo(ex, info);
response.sendError( code, info.toString() );
}
}
// in core/src/java/org/apache/solr/servlet/ZookeeperInfoServlet.java
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
response.setCharacterEncoding("UTF-8");
response.setContentType("application/json");
// This attribute is set by the SolrDispatchFilter
CoreContainer cores = (CoreContainer) request.getAttribute("org.apache.solr.CoreContainer");
String path = request.getParameter("path");
String addr = request.getParameter("addr");
if (addr != null && addr.length() == 0) {
addr = null;
}
String detailS = request.getParameter("detail");
boolean detail = detailS != null && detailS.equals("true");
String dumpS = request.getParameter("dump");
boolean dump = dumpS != null && dumpS.equals("true");
PrintWriter out = response.getWriter();
ZKPrinter printer = new ZKPrinter(response, out, cores.getZkController(), addr);
printer.detail = detail;
printer.dump = dump;
try {
printer.print(path);
} finally {
printer.close();
}
}
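// example (not from the Solr sources): the parameters read by doGet above are
// the same ones printTree below encodes into each node's href; the path value
// here is illustrative.
import java.net.URLEncoder;

public class ZkLinkDemo {
  public static void main(String[] args) throws Exception {
    String href = "zookeeper?detail=true&path=" + URLEncoder.encode("/configs", "UTF-8");
    System.out.println(href); // zookeeper?detail=true&path=%2Fconfigs
  }
}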
// in core/src/java/org/apache/solr/servlet/ZookeeperInfoServlet.java
@Override
public void doPost(HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
doGet(request, response);
}
// in core/src/java/org/apache/solr/servlet/ZookeeperInfoServlet.java
void print(String path) throws IOException {
if (zkClient == null) {
return;
}
// normalize path
if (path == null) {
path = "/";
} else {
path = path.trim(); // trim() returns a new String and must be re-assigned
if (path.length() == 0) {
path = "/";
}
}
if (path.endsWith("/") && path.length() > 1) {
path = path.substring(0, path.length() - 1);
}
int idx = path.lastIndexOf('/');
String parent = idx >= 0 ? path.substring(0, idx) : path;
if (parent.length() == 0) {
parent = "/";
}
CharArr chars = new CharArr();
JSONWriter json = new JSONWriter(chars, 2);
json.startObject();
if (detail) {
if (!printZnode(json, path)) {
return;
}
json.writeValueSeparator();
}
json.writeString("tree");
json.writeNameSeparator();
json.startArray();
if (!printTree(json, path)) {
return; // there was an error
}
json.endArray();
json.endObject();
out.println(chars.toString());
}
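// example (not from the Solr sources): the normalization rules of print()
// extracted into a hypothetical helper, with sample inputs.
public class PathNormalizeDemo {
  static String normalize(String path) {
    if (path == null || path.trim().length() == 0) return "/";
    path = path.trim();
    if (path.endsWith("/") && path.length() > 1) {
      path = path.substring(0, path.length() - 1);
    }
    return path;
  }

  public static void main(String[] args) {
    System.out.println(normalize(null));        // "/"
    System.out.println(normalize("   "));       // "/"
    System.out.println(normalize("/configs/")); // "/configs"
    System.out.println(normalize("/"));         // "/"
  }
}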
// in core/src/java/org/apache/solr/servlet/ZookeeperInfoServlet.java
boolean printTree(JSONWriter json, String path) throws IOException {
String label = path;
if (!fullpath) {
int idx = path.lastIndexOf('/');
label = idx > 0 ? path.substring(idx + 1) : path;
}
json.startObject();
//writeKeyValue(json, "data", label, true );
json.writeString("data");
json.writeNameSeparator();
json.startObject();
writeKeyValue(json, "title", label, true);
json.writeValueSeparator();
json.writeString("attr");
json.writeNameSeparator();
json.startObject();
writeKeyValue(json, "href", "zookeeper?detail=true&path=" + URLEncoder.encode(path, "UTF-8"), true);
json.endObject();
json.endObject();
Stat stat = new Stat();
try {
// Trickily, the call to zkClient.getData fills in the stat variable
byte[] data = zkClient.getData(path, null, stat, true);
if (stat.getEphemeralOwner() != 0) {
writeKeyValue(json, "ephemeral", true, false);
writeKeyValue(json, "version", stat.getVersion(), false);
}
if (dump) {
json.writeValueSeparator();
printZnode(json, path);
}
/*
if (stat.getNumChildren() != 0)
{
writeKeyValue(json, "children_count", stat.getNumChildren(), false );
out.println(", \"children_count\" : \"" + stat.getNumChildren() + "\"");
}
*/
//if (stat.getDataLength() != 0)
if (data != null) {
String str = new BytesRef(data).utf8ToString();
//?? writeKeyValue(json, "content", str, false );
// Unused for now, but left in on the assumption it will be needed later.
// If it is removed, the catch blocks below need to be restructured.
}
} catch (IllegalArgumentException e) {
// path doesn't exist (must have been removed)
writeKeyValue(json, "warning", "(path gone)", false);
} catch (KeeperException e) {
writeKeyValue(json, "warning", e.toString(), false);
log.warn("Keeper Exception", e);
} catch (InterruptedException e) {
writeKeyValue(json, "warning", e.toString(), false);
log.warn("InterruptedException", e);
}
if (stat.getNumChildren() > 0) {
json.writeValueSeparator();
if (indent) {
json.indent();
}
json.writeString("children");
json.writeNameSeparator();
json.startArray();
try {
List<String> children = zkClient.getChildren(path, null, true);
java.util.Collections.sort(children);
boolean first = true;
for (String child : children) {
if (!first) {
json.writeValueSeparator();
}
String childPath = path + (path.endsWith("/") ? "" : "/") + child;
if (!printTree(json, childPath)) {
return false;
}
first = false;
}
} catch (KeeperException e) {
writeError(500, e.toString());
return false;
} catch (InterruptedException e) {
writeError(500, e.toString());
return false;
} catch (IllegalArgumentException e) {
// path doesn't exist (must have been removed)
json.writeString("(children gone)");
}
json.endArray();
}
json.endObject();
return true;
}
// in core/src/java/org/apache/solr/servlet/ZookeeperInfoServlet.java
boolean printZnode(JSONWriter json, String path) throws IOException {
try {
Stat stat = new Stat();
// Trickily, the call to zkClient.getData fills in the stat variable
byte[] data = zkClient.getData(path, null, stat, true);
json.writeString("znode");
json.writeNameSeparator();
json.startObject();
writeKeyValue(json, "path", path, true);
json.writeValueSeparator();
json.writeString("prop");
json.writeNameSeparator();
json.startObject();
writeKeyValue(json, "version", stat.getVersion(), true);
writeKeyValue(json, "aversion", stat.getAversion(), false);
writeKeyValue(json, "children_count", stat.getNumChildren(), false);
writeKeyValue(json, "ctime", time(stat.getCtime()), false);
writeKeyValue(json, "cversion", stat.getCversion(), false);
writeKeyValue(json, "czxid", stat.getCzxid(), false);
writeKeyValue(json, "dataLength", stat.getDataLength(), false);
writeKeyValue(json, "ephemeralOwner", stat.getEphemeralOwner(), false);
writeKeyValue(json, "mtime", time(stat.getMtime()), false);
writeKeyValue(json, "mzxid", stat.getMzxid(), false);
writeKeyValue(json, "pzxid", stat.getPzxid(), false);
json.endObject();
if (data != null) {
writeKeyValue(json, "data", new BytesRef(data).utf8ToString(), false);
}
json.endObject();
} catch (KeeperException e) {
writeError(500, e.toString());
return false;
} catch (InterruptedException e) {
writeError(500, e.toString());
return false;
}
return true;
}
// in core/src/java/org/apache/solr/servlet/SolrRequestParsers.java
public InputStream getStream() throws IOException {
return req.getInputStream();
}
// in core/src/java/org/apache/solr/servlet/SolrRequestParsers.java
public InputStream getStream() throws IOException {
return item.getInputStream();
}
// in core/src/java/org/apache/solr/servlet/LoadAdminUiServlet.java
@Override
public void doGet(HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
response.setCharacterEncoding("UTF-8");
response.setContentType("text/html");
PrintWriter out = response.getWriter();
InputStream in = getServletContext().getResourceAsStream("/admin.html");
if(in != null) {
try {
// This attribute is set by the SolrDispatchFilter
CoreContainer cores = (CoreContainer) request.getAttribute("org.apache.solr.CoreContainer");
String html = IOUtils.toString(in, "UTF-8");
String[] search = new String[] {
"${contextPath}",
"${adminPath}"
};
String[] replace = new String[] {
StringEscapeUtils.escapeJavaScript(request.getContextPath()),
StringEscapeUtils.escapeJavaScript(cores.getAdminPath())
};
out.println( StringUtils.replaceEach(html, search, replace) );
} finally {
IOUtils.closeQuietly(in);
}
} else {
out.println("solr");
}
}
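// example (not from the Solr sources): the placeholder substitution above,
// using the same commons-lang helpers; the context path value is made up.
import org.apache.commons.lang.StringEscapeUtils;
import org.apache.commons.lang.StringUtils;

public class PlaceholderDemo {
  public static void main(String[] args) {
    String html = "<script>var contextPath = '${contextPath}';</script>";
    String out = StringUtils.replaceEach(html,
        new String[] {"${contextPath}"},
        new String[] {StringEscapeUtils.escapeJavaScript("/solr")});
    System.out.println(out); // <script>var contextPath = '\/solr';</script>
  }
}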
// in core/src/java/org/apache/solr/servlet/LoadAdminUiServlet.java
@Override
public void doPost(HttpServletRequest request,
HttpServletResponse response)
throws IOException, ServletException {
doGet(request, response);
}
// in core/src/java/org/apache/solr/servlet/RedirectServlet.java
public void doGet(HttpServletRequest req, HttpServletResponse res)
throws ServletException,IOException {
res.setStatus(code);
res.setHeader("Location", destination);
}
// in core/src/java/org/apache/solr/servlet/RedirectServlet.java
public void doPost(HttpServletRequest req, HttpServletResponse res)
throws ServletException,IOException {
doGet(req,res);
}
// in core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java
public static void sendNotModified(HttpServletResponse res)
throws IOException {
res.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
}
// in core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java
public static void sendPreconditionFailed(HttpServletResponse res)
throws IOException {
res.setStatus(HttpServletResponse.SC_PRECONDITION_FAILED);
}
// in core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java
public static boolean doCacheHeaderValidation(final SolrQueryRequest solrReq,
final HttpServletRequest req,
final Method reqMethod,
final HttpServletResponse resp)
throws IOException {
if (Method.POST==reqMethod || Method.OTHER==reqMethod) {
return false;
}
final long lastMod = HttpCacheHeaderUtil.calcLastModified(solrReq);
final String etag = HttpCacheHeaderUtil.calcEtag(solrReq);
resp.setDateHeader("Last-Modified", lastMod);
resp.setHeader("ETag", etag);
if (checkETagValidators(req, resp, reqMethod, etag)) {
return true;
}
if (checkLastModValidators(req, resp, lastMod)) {
return true;
}
return false;
}
// in core/src/java/org/apache/solr/servlet/cache/HttpCacheHeaderUtil.java
public static boolean checkLastModValidators(final HttpServletRequest req,
final HttpServletResponse resp,
final long lastMod)
throws IOException {
try {
// Check If-Modified-Since first because it is the header most
// commonly used by HTTP clients
final long modifiedSince = req.getDateHeader("If-Modified-Since");
if (modifiedSince != -1L && lastMod <= modifiedSince) {
// Send a "not-modified"
sendNotModified(resp);
return true;
}
final long unmodifiedSince = req.getDateHeader("If-Unmodified-Since");
if (unmodifiedSince != -1L && lastMod > unmodifiedSince) {
// Send a "precondition failed"
sendPreconditionFailed(resp);
return true;
}
} catch (IllegalArgumentException iae) {
// one of our date headers was not formatted properly; ignore it
/* NOOP */
}
return false;
}
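// example (not from the Solr sources): the two validator decisions above,
// evaluated with illustrative epoch-millisecond timestamps.
public class LastModValidatorDemo {
  public static void main(String[] args) {
    long lastMod = 1000000L;

    // If-Modified-Since at or after lastMod -> resource unchanged -> 304
    long modifiedSince = 1000000L;
    System.out.println(modifiedSince != -1L && lastMod <= modifiedSince); // true

    // If-Unmodified-Since before lastMod -> resource has changed -> 412
    long unmodifiedSince = 999999L;
    System.out.println(unmodifiedSince != -1L && lastMod > unmodifiedSince); // true
  }
}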
// in core/src/java/org/apache/solr/schema/BCDIntField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeInt(name,toExternal(f));
}
// in core/src/java/org/apache/solr/schema/SortableIntField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String sval = f.stringValue();
writer.writeInt(name, NumberUtils.SortableStr2int(sval,0,sval.length()));
}
// in core/src/java/org/apache/solr/schema/SortableIntField.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final int def = defVal;
return new DocTermsIndexDocValues(this, readerContext, field) {
private final BytesRef spare = new BytesRef();
@Override
protected String toTerm(String readableValue) {
return NumberUtils.int2sortableStr(readableValue);
}
@Override
public float floatVal(int doc) {
return (float)intVal(doc);
}
@Override
public int intVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? def : NumberUtils.SortableStr2int(termsIndex.lookup(ord, spare),0,3);
}
@Override
public long longVal(int doc) {
return (long)intVal(doc);
}
@Override
public double doubleVal(int doc) {
return (double)intVal(doc);
}
@Override
public String strVal(int doc) {
return Integer.toString(intVal(doc));
}
@Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}
@Override
public Object objectVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? null : NumberUtils.SortableStr2int(termsIndex.lookup(ord, spare));
}
@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {
private final MutableValueInt mval = new MutableValueInt();
@Override
public MutableValue getValue() {
return mval;
}
@Override
public void fillValue(int doc) {
int ord=termsIndex.getOrd(doc);
if (ord == 0) {
mval.value = def;
mval.exists = false;
} else {
mval.value = NumberUtils.SortableStr2int(termsIndex.lookup(ord, spare),0,3);
mval.exists = true;
}
}
};
}
};
}
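// example (not from the Solr sources): the "sortable string" encoding used
// above keeps numeric order under plain lexicographic comparison. This assumes
// the int overload of NumberUtils.int2sortableStr.
import org.apache.solr.util.NumberUtils;

public class SortableStrDemo {
  public static void main(String[] args) {
    String a = NumberUtils.int2sortableStr(-5);
    String b = NumberUtils.int2sortableStr(42);
    System.out.println(a.compareTo(b) < 0);                            // true: -5 sorts before 42
    System.out.println(NumberUtils.SortableStr2int(b, 0, b.length())); // 42
  }
}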
// in core/src/java/org/apache/solr/schema/StrField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), true);
}
// in core/src/java/org/apache/solr/schema/DoubleField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String s = f.stringValue();
// these values may be from a legacy lucene index, which may
// not be properly formatted in some output formats, or may
// incorrectly have a zero length.
if (s.length()==0) {
// zero length value means someone mistakenly indexed the value
// instead of simply leaving it out. Write a null value instead of a numeric.
writer.writeNull(name);
return;
}
try {
double val = Double.parseDouble(s);
writer.writeDouble(name, val);
} catch (NumberFormatException e){
// can't parse - write out the contents as a string so nothing is lost and
// clients don't get a parse error.
writer.writeStr(name, s, true);
}
}
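// example (not from the Solr sources): the three branches of write() above,
// driven by made-up inputs. IntField, FloatField, ByteField, ShortField and
// LongField below follow the identical empty/parse/fallback shape.
public class LegacyNumericDemo {
  public static void main(String[] args) {
    for (String s : new String[] {"", "3.14", "3.14abc"}) {
      if (s.length() == 0) {                    // -> writer.writeNull(name)
        System.out.println("null");
        continue;
      }
      try {                                     // -> writer.writeDouble(name, val)
        System.out.println(Double.parseDouble(s));
      } catch (NumberFormatException e) {       // -> writer.writeStr(name, s, true)
        System.out.println("\"" + s + "\"");
      }
    }
  }
}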
// in core/src/java/org/apache/solr/schema/RandomSortField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException { }
// in core/src/java/org/apache/solr/schema/RandomSortField.java
@Override
public FieldComparator<Integer> newComparator(final String fieldname, final int numHits, int sortPos, boolean reversed) throws IOException {
return new FieldComparator<Integer>() {
int seed;
private final int[] values = new int[numHits];
int bottomVal;
@Override
public int compare(int slot1, int slot2) {
return values[slot1] - values[slot2]; // values will be positive... no overflow possible.
}
@Override
public void setBottom(int slot) {
bottomVal = values[slot];
}
@Override
public int compareBottom(int doc) throws IOException {
return bottomVal - hash(doc+seed);
}
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = hash(doc+seed);
}
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
seed = getSeed(fieldname, context);
return this;
}
@Override
public Integer value(int slot) {
return values[slot];
}
@Override
public int compareDocToValue(int doc, Integer valueObj) {
// values will be positive... no overflow possible.
return hash(doc+seed) - valueObj.intValue();
}
};
}
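// example (not from the Solr sources): a toy illustration of hash-based
// "random" ordering as used by the comparator above. mix() is a hypothetical
// stand-in for RandomSortField.hash(), whose mixing function is not shown here.
public class RandomOrderDemo {
  static int mix(int x) {          // keeps results positive, like hash() above
    x ^= (x >>> 16);
    x *= 0x85ebca6b;
    x ^= (x >>> 13);
    return x & 0x7fffffff;
  }

  public static void main(String[] args) {
    int seed = 12345;              // derived from (fieldname, reader) in Solr
    for (int doc = 0; doc < 3; doc++) {
      // stable for a given seed, but different fields/readers give new orders
      System.out.println(doc + " -> " + mix(doc + seed));
    }
  }
}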
// in core/src/java/org/apache/solr/schema/RandomSortField.java
@Override
public int compareBottom(int doc) throws IOException {
return bottomVal - hash(doc+seed);
}
// in core/src/java/org/apache/solr/schema/RandomSortField.java
@Override
public void copy(int slot, int doc) throws IOException {
values[slot] = hash(doc+seed);
}
// in core/src/java/org/apache/solr/schema/RandomSortField.java
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
seed = getSeed(fieldname, context);
return this;
}
// in core/src/java/org/apache/solr/schema/RandomSortField.java
@Override
public FunctionValues getValues(Map context, final AtomicReaderContext readerContext) throws IOException {
return new IntDocValues(this) {
private final int seed = getSeed(field, readerContext);
@Override
public int intVal(int doc) {
return hash(doc+seed);
}
};
}
// in core/src/java/org/apache/solr/schema/DateField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeDate(name, toExternal(f));
}
// in core/src/java/org/apache/solr/schema/DateField.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
return new DocTermsIndexDocValues(this, readerContext, field) {
@Override
protected String toTerm(String readableValue) {
// needed for frange queries to work properly
return ft.toInternal(readableValue);
}
@Override
public float floatVal(int doc) {
return (float)intVal(doc);
}
@Override
public int intVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord;
}
@Override
public long longVal(int doc) {
return (long)intVal(doc);
}
@Override
public double doubleVal(int doc) {
return (double)intVal(doc);
}
@Override
public String strVal(int doc) {
int ord=termsIndex.getOrd(doc);
if (ord == 0) {
return null;
} else {
final BytesRef br = termsIndex.lookup(ord, spare);
return ft.indexedToReadable(br, spareChars).toString();
}
}
@Override
public Object objectVal(int doc) {
int ord=termsIndex.getOrd(doc);
if (ord == 0) {
return null;
} else {
final BytesRef br = termsIndex.lookup(ord, new BytesRef());
return ft.toObject(null, br);
}
}
@Override
public String toString(int doc) {
return description() + '=' + intVal(doc);
}
};
}
// in core/src/java/org/apache/solr/schema/CurrencyField.java
public void write(XMLWriter xmlWriter, String name, IndexableField field) throws IOException {
xmlWriter.writeStr(name, field.stringValue(), false);
}
// in core/src/java/org/apache/solr/schema/CurrencyField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField field) throws IOException {
writer.writeStr(name, field.stringValue(), false);
}
// in core/src/java/org/apache/solr/schema/CurrencyField.java
public FunctionValues getValues(Map context, AtomicReaderContext reader) throws IOException {
final FunctionValues amounts = amountValues.getValues(context, reader);
final FunctionValues currencies = currencyValues.getValues(context, reader);
return new FunctionValues() {
private final int MAX_CURRENCIES_TO_CACHE = 256;
private final int[] fractionDigitCache = new int[MAX_CURRENCIES_TO_CACHE];
private final String[] currencyOrdToCurrencyCache = new String[MAX_CURRENCIES_TO_CACHE];
private final double[] exchangeRateCache = new double[MAX_CURRENCIES_TO_CACHE];
private int targetFractionDigits = -1;
private int targetCurrencyOrd = -1;
private boolean initializedCache;
private String getDocCurrencyCode(int doc, int currencyOrd) {
if (currencyOrd < MAX_CURRENCIES_TO_CACHE) {
String currency = currencyOrdToCurrencyCache[currencyOrd];
if (currency == null) {
currencyOrdToCurrencyCache[currencyOrd] = currency = currencies.strVal(doc);
}
if (currency == null) {
currency = defaultCurrency;
}
if (targetCurrencyOrd == -1 && currency.equals(targetCurrencyCode)) {
targetCurrencyOrd = currencyOrd;
}
return currency;
} else {
return currencies.strVal(doc);
}
}
public long longVal(int doc) {
if (!initializedCache) {
for (int i = 0; i < fractionDigitCache.length; i++) {
fractionDigitCache[i] = -1;
}
initializedCache = true;
}
long amount = amounts.longVal(doc);
int currencyOrd = currencies.ordVal(doc);
if (currencyOrd == targetCurrencyOrd) {
return amount;
}
double exchangeRate;
int sourceFractionDigits;
if (targetFractionDigits == -1) {
targetFractionDigits = Currency.getInstance(targetCurrencyCode).getDefaultFractionDigits();
}
if (currencyOrd < MAX_CURRENCIES_TO_CACHE) {
exchangeRate = exchangeRateCache[currencyOrd];
if (exchangeRate <= 0.0) {
String sourceCurrencyCode = getDocCurrencyCode(doc, currencyOrd);
exchangeRate = exchangeRateCache[currencyOrd] = provider.getExchangeRate(sourceCurrencyCode, targetCurrencyCode);
}
sourceFractionDigits = fractionDigitCache[currencyOrd];
if (sourceFractionDigits == -1) {
String sourceCurrencyCode = getDocCurrencyCode(doc, currencyOrd);
sourceFractionDigits = fractionDigitCache[currencyOrd] = Currency.getInstance(sourceCurrencyCode).getDefaultFractionDigits();
}
} else {
String sourceCurrencyCode = getDocCurrencyCode(doc, currencyOrd);
exchangeRate = provider.getExchangeRate(sourceCurrencyCode, targetCurrencyCode);
sourceFractionDigits = Currency.getInstance(sourceCurrencyCode).getDefaultFractionDigits();
}
return CurrencyValue.convertAmount(exchangeRate, sourceFractionDigits, amount, targetFractionDigits);
}
public int intVal(int doc) {
return (int) longVal(doc);
}
public double doubleVal(int doc) {
return (double) longVal(doc);
}
public float floatVal(int doc) {
return (float) longVal(doc);
}
public String strVal(int doc) {
return Long.toString(longVal(doc));
}
public String toString(int doc) {
return name() + '(' + amounts.toString(doc) + ',' + currencies.toString(doc) + ')';
}
};
}
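// example (not from the Solr sources): a stripped-down sketch of the
// ord-keyed caching above. The provider call is simulated, and 256 mirrors
// MAX_CURRENCIES_TO_CACHE.
public class OrdCacheDemo {
  static final double[] rateCache = new double[256];   // 0.0 means "not cached"

  static double rateFor(int currencyOrd) {
    if (currencyOrd < rateCache.length) {
      double r = rateCache[currencyOrd];
      if (r <= 0.0) {                 // first sight of this ordinal: fetch and cache
        r = rateCache[currencyOrd] = fetchRate(currencyOrd);
      }
      return r;
    }
    return fetchRate(currencyOrd);    // beyond the cache: always fetch
  }

  static double fetchRate(int ord) { return 1.25; }    // stand-in for the provider

  public static void main(String[] args) {
    System.out.println(rateFor(3));   // fetches and caches
    System.out.println(rateFor(3));   // served from the cache
  }
}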
// in core/src/java/org/apache/solr/schema/SortableFloatField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String sval = f.stringValue();
writer.writeFloat(name, NumberUtils.SortableStr2float(sval));
}
// in core/src/java/org/apache/solr/schema/SortableFloatField.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final float def = defVal;
return new DocTermsIndexDocValues(this, readerContext, field) {
private final BytesRef spare = new BytesRef();
@Override
protected String toTerm(String readableValue) {
return NumberUtils.float2sortableStr(readableValue);
}
@Override
public float floatVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? def : NumberUtils.SortableStr2float(termsIndex.lookup(ord, spare));
}
@Override
public int intVal(int doc) {
return (int)floatVal(doc);
}
@Override
public long longVal(int doc) {
return (long)floatVal(doc);
}
@Override
public double doubleVal(int doc) {
return (double)floatVal(doc);
}
@Override
public String strVal(int doc) {
return Float.toString(floatVal(doc));
}
@Override
public String toString(int doc) {
return description() + '=' + floatVal(doc);
}
@Override
public Object objectVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? null : NumberUtils.SortableStr2float(termsIndex.lookup(ord, spare));
}
@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {
private final MutableValueFloat mval = new MutableValueFloat();
@Override
public MutableValue getValue() {
return mval;
}
@Override
public void fillValue(int doc) {
int ord=termsIndex.getOrd(doc);
if (ord == 0) {
mval.value = def;
mval.exists = false;
} else {
mval.value = NumberUtils.SortableStr2float(termsIndex.lookup(ord, spare));
mval.exists = true;
}
}
};
}
};
}
// in core/src/java/org/apache/solr/schema/IntField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String s = f.stringValue();
// these values may be from a legacy lucene index, which may
// not be properly formatted in some output formats, or may
// incorrectly have a zero length.
if (s.length()==0) {
// zero length value means someone mistakenly indexed the value
// instead of simply leaving it out. Write a null value instead of a numeric.
writer.writeNull(name);
return;
}
try {
int val = Integer.parseInt(s);
writer.writeInt(name, val);
} catch (NumberFormatException e){
// can't parse - write out the contents as a string so nothing is lost and
// clients don't get a parse error.
writer.writeStr(name, s, true);
}
}
// in core/src/java/org/apache/solr/schema/FloatField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String s = f.stringValue();
// these values may be from a legacy lucene index, which may
// not be properly formatted in some output formats, or may
// incorrectly have a zero length.
if (s.length()==0) {
// zero length value means someone mistakenly indexed the value
// instead of simply leaving it out. Write a null value instead of a numeric.
writer.writeNull(name);
return;
}
try {
float fval = Float.parseFloat(s);
writer.writeFloat(name, fval);
} catch (NumberFormatException e){
// can't parse - write out the contents as a string so nothing is lost and
// clients don't get a parse error.
writer.writeStr(name, s, true);
}
}
// in core/src/java/org/apache/solr/schema/TrieField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeVal(name, toObject(f));
}
// in core/src/java/org/apache/solr/schema/BoolField.java
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
Tokenizer tokenizer = new Tokenizer(reader) {
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
boolean done = false;
@Override
public void reset(Reader input) throws IOException {
done = false;
super.reset(input);
}
@Override
public boolean incrementToken() throws IOException {
clearAttributes();
if (done) return false;
done = true;
int ch = input.read();
if (ch==-1) return false;
termAtt.copyBuffer(
((ch=='t' || ch=='T' || ch=='1') ? TRUE_TOKEN : FALSE_TOKEN)
,0,1);
return true;
}
};
return new TokenStreamComponents(tokenizer);
}
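// example (not from the Solr sources): the classification rule the tokenizer
// above applies to the first character of a field value; the sample values
// are invented.
public class BoolTokenDemo {
  static char classify(String value) {
    char ch = value.charAt(0);
    return (ch == 't' || ch == 'T' || ch == '1') ? 'T' : 'F';
  }

  public static void main(String[] args) {
    System.out.println(classify("true"));   // T
    System.out.println(classify("1"));      // T
    System.out.println(classify("false"));  // F
    System.out.println(classify("yes"));    // F -- only t/T/1 count as true
  }
}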
// in core/src/java/org/apache/solr/schema/BoolField.java
@Override
public void reset(Reader input) throws IOException {
done = false;
super.reset(input);
}
// in core/src/java/org/apache/solr/schema/BoolField.java
@Override
public boolean incrementToken() throws IOException {
clearAttributes();
if (done) return false;
done = true;
int ch = input.read();
if (ch==-1) return false;
termAtt.copyBuffer(
((ch=='t' || ch=='T' || ch=='1') ? TRUE_TOKEN : FALSE_TOKEN)
,0,1);
return true;
}
// in core/src/java/org/apache/solr/schema/BoolField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeBool(name, f.stringValue().charAt(0) == 'T');
}
// in core/src/java/org/apache/solr/schema/BoolField.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FieldCache.DocTermsIndex sindex = FieldCache.DEFAULT.getTermsIndex(readerContext.reader(), field);
// figure out what ord maps to true
int nord = sindex.numOrd();
BytesRef br = new BytesRef();
int tord = -1;
for (int i=1; i<nord; i++) {
sindex.lookup(i, br);
if (br.length==1 && br.bytes[br.offset]=='T') {
tord = i;
break;
}
}
final int trueOrd = tord;
return new BoolDocValues(this) {
@Override
public boolean boolVal(int doc) {
return sindex.getOrd(doc) == trueOrd;
}
@Override
public boolean exists(int doc) {
return sindex.getOrd(doc) != 0;
}
@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {
private final MutableValueBool mval = new MutableValueBool();
@Override
public MutableValue getValue() {
return mval;
}
@Override
public void fillValue(int doc) {
int ord = sindex.getOrd(doc);
mval.value = (ord == trueOrd);
mval.exists = (ord != 0);
}
};
}
};
}
// in core/src/java/org/apache/solr/schema/BinaryField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, toBase64String(toObject(f)), false);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), false);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
return bboxQuery != null ? bboxQuery.rewrite(reader) : this;
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public float getValueForNormalization() throws IOException {
queryWeight = getBoost();
return queryWeight * queryWeight;
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
return new SpatialScorer(context, acceptDocs, this, queryWeight);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
return ((SpatialScorer)scorer(context, true, true, context.reader().getLiveDocs())).explain(doc);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public int nextDoc() throws IOException {
for(;;) {
++doc;
if (doc>=maxDoc) {
return doc=NO_MORE_DOCS;
}
if (acceptDocs != null && !acceptDocs.get(doc)) continue;
if (!match()) continue;
return doc;
}
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public int advance(int target) throws IOException {
// this will work even if target==NO_MORE_DOCS
doc=target-1;
return nextDoc();
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public float score() throws IOException {
double dist = (doc == lastDistDoc) ? lastDist : dist(latVals.doubleVal(doc), lonVals.doubleVal(doc));
return (float)(dist * qWeight);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
public Explanation explain(int doc) throws IOException {
advance(doc);
boolean matched = this.doc == doc;
this.doc = doc;
float sc = matched ? score() : 0;
double dist = dist(latVals.doubleVal(doc), lonVals.doubleVal(doc));
String description = SpatialDistanceQuery.this.toString();
Explanation result = new ComplexExplanation
(this.doc == doc, sc, description + " product of:");
// result.addDetail(new Explanation((float)dist, "hsin("+latVals.explain(doc)+","+lonVals.explain(doc)));
result.addDetail(new Explanation((float)dist, "hsin("+latVals.doubleVal(doc)+","+lonVals.doubleVal(doc)+")"));
result.addDetail(new Explanation(getBoost(), "boost"));
result.addDetail(new Explanation(weight.queryNorm,"queryNorm"));
return result;
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public void collect(int doc) throws IOException {
spatialScorer.doc = doc;
if (spatialScorer.match()) delegate.collect(doc);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
maxdoc = context.reader().maxDoc();
spatialScorer = new SpatialScorer(context, null, weight, 1.0f);
super.setNextReader(context);
}
// in core/src/java/org/apache/solr/schema/LatLonType.java
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
// if we were supposed to use bboxQuery, then we should have been rewritten using that query
assert bboxQuery == null;
return new SpatialWeight(searcher);
}
// in core/src/java/org/apache/solr/schema/ByteField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String s = f.stringValue();
// these values may be from a legacy lucene index, which may
// not be properly formatted in some output formats, or may
// incorrectly have a zero length.
if (s.length()==0) {
// zero length value means someone mistakenly indexed the value
// instead of simply leaving it out. Write a null value instead of a numeric.
writer.writeNull(name);
return;
}
try {
byte val = Byte.parseByte(s);
writer.writeInt(name, val);
} catch (NumberFormatException e){
// can't parse - write out the contents as a string so nothing is lost and
// clients don't get a parse error.
writer.writeStr(name, s, true);
}
}
// in core/src/java/org/apache/solr/schema/PointType.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), false);
}
// in core/src/java/org/apache/solr/schema/UUIDField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f)
throws IOException {
writer.writeStr(name, f.stringValue(), false);
}
// in core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
@Override
public ParseResult parse(Reader reader, AttributeSource parent) throws IOException {
ParseResult res = new ParseResult();
StringBuilder sb = new StringBuilder();
char[] buf = new char[128];
int cnt;
while ((cnt = reader.read(buf)) > 0) {
sb.append(buf, 0, cnt);
}
String val = sb.toString();
// empty string - accept even without version number
if (val.length() == 0) {
return res;
}
// first consume the version
int idx = val.indexOf(' ');
if (idx == -1) {
throw new IOException("Missing VERSION token");
}
String version = val.substring(0, idx);
if (!VERSION.equals(version)) {
throw new IOException("Unknown VERSION " + version);
}
val = val.substring(idx + 1);
// then consume the optional stored part
int tsStart = 0;
boolean hasStored = false;
StringBuilder storedBuf = new StringBuilder();
if (val.charAt(0) == '=') {
hasStored = true;
if (val.length() > 1) {
for (int i = 1; i < val.length(); i++) {
char c = val.charAt(i);
if (c == '\\') {
if (i < val.length() - 1) {
c = val.charAt(++i);
if (c == '=') { // we recognize only \= escape in the stored part
storedBuf.append('=');
} else {
storedBuf.append('\\');
storedBuf.append(c);
continue;
}
} else {
storedBuf.append(c);
continue;
}
} else if (c == '=') {
// end of stored text
tsStart = i + 1;
break;
} else {
storedBuf.append(c);
}
}
if (tsStart == 0) { // missing end-of-stored marker
throw new IOException("Missing end marker of stored part");
}
} else {
throw new IOException("Unexpected end of stored field");
}
}
if (hasStored) {
res.str = storedBuf.toString();
}
Tok tok = new Tok();
StringBuilder attName = new StringBuilder();
StringBuilder attVal = new StringBuilder();
// parser state
S s = S.UNDEF;
int lastPos = 0;
for (int i = tsStart; i < val.length(); i++) {
char c = val.charAt(i);
if (c == ' ') {
// collect leftovers
switch (s) {
case VALUE :
if (attVal.length() == 0) {
throw new IOException("Unexpected character '" + c + "' at position " + i + " - empty value of attribute.");
}
if (attName.length() > 0) {
tok.attr.put(attName.toString(), attVal.toString());
}
break;
case NAME: // attr name without a value ?
if (attName.length() > 0) {
throw new IOException("Unexpected character '" + c + "' at position " + i + " - missing attribute value.");
} else {
// accept missing att name and value
}
break;
case TOKEN:
case UNDEF:
// do nothing, advance to next token
}
attName.setLength(0);
attVal.setLength(0);
if (!tok.isEmpty() || s == S.NAME) {
AttributeSource.State state = createState(parent, tok, lastPos);
if (state != null) res.states.add(state.clone());
}
// reset tok
s = S.UNDEF;
tok.reset();
// skip
lastPos++;
continue;
}
StringBuilder tgt = null;
switch (s) {
case TOKEN:
tgt = tok.token;
break;
case NAME:
tgt = attName;
break;
case VALUE:
tgt = attVal;
break;
case UNDEF:
tgt = tok.token;
s = S.TOKEN;
}
if (c == '\\') {
if (s == S.TOKEN) lastPos++;
if (i >= val.length() - 1) { // end
tgt.append(c);
continue;
} else {
c = val.charAt(++i);
switch (c) {
case '\\' :
case '=' :
case ',' :
case ' ' :
tgt.append(c);
break;
case 'n':
tgt.append('\n');
break;
case 'r':
tgt.append('\r');
break;
case 't':
tgt.append('\t');
break;
default:
tgt.append('\\');
tgt.append(c);
lastPos++;
}
}
} else {
// state switch
if (c == ',') {
if (s == S.TOKEN) {
s = S.NAME;
} else if (s == S.VALUE) { // end of value, start of next attr
if (attVal.length() == 0) {
throw new IOException("Unexpected character '" + c + "' at position " + i + " - empty value of attribute.");
}
if (attName.length() > 0 && attVal.length() > 0) {
tok.attr.put(attName.toString(), attVal.toString());
}
// reset
attName.setLength(0);
attVal.setLength(0);
s = S.NAME;
} else {
throw new IOException("Unexpected character '" + c + "' at position " + i + " - missing attribute value.");
}
} else if (c == '=') {
if (s == S.NAME) {
s = S.VALUE;
} else {
throw new IOException("Unexpected character '" + c + "' at position " + i + " - empty value of attribute.");
}
} else {
tgt.append(c);
if (s == S.TOKEN) lastPos++;
}
}
}
// collect leftovers
if (!tok.isEmpty() || s == S.NAME || s == S.VALUE) {
// remaining attrib?
if (s == S.VALUE) {
if (attName.length() > 0 && attVal.length() > 0) {
tok.attr.put(attName.toString(), attVal.toString());
}
}
AttributeSource.State state = createState(parent, tok, lastPos);
if (state != null) res.states.add(state.clone());
}
return res;
}
// in core/src/java/org/apache/solr/schema/SimplePreAnalyzedParser.java
public String toFormattedString(Field f) throws IOException {
StringBuilder sb = new StringBuilder();
sb.append(VERSION + " ");
if (f.fieldType().stored()) {
String s = f.stringValue();
if (s != null) {
// escape the equals sign as \= ; the literal String.replace is used because
// replaceAll would treat the backslash in the replacement as a Matcher escape and drop it
s = s.replace("=", "\\=");
sb.append('=');
sb.append(s);
sb.append('=');
}
}
TokenStream ts = f.tokenStreamValue();
if (ts != null) {
StringBuilder tok = new StringBuilder();
boolean next = false;
while (ts.incrementToken()) {
if (next) {
sb.append(' ');
} else {
next = true;
}
tok.setLength(0);
Iterator<Class<? extends Attribute>> it = ts.getAttributeClassesIterator();
String cTerm = null;
String tTerm = null;
while (it.hasNext()) {
Class<? extends Attribute> cl = it.next();
if (!ts.hasAttribute(cl)) {
continue;
}
Attribute att = ts.getAttribute(cl);
if (cl.isAssignableFrom(CharTermAttribute.class)) {
CharTermAttribute catt = (CharTermAttribute)att;
cTerm = escape(catt.buffer(), catt.length());
} else if (cl.isAssignableFrom(TermToBytesRefAttribute.class)) {
TermToBytesRefAttribute tatt = (TermToBytesRefAttribute)att;
char[] tTermChars = tatt.getBytesRef().utf8ToString().toCharArray();
tTerm = escape(tTermChars, tTermChars.length);
} else {
if (tok.length() > 0) tok.append(',');
if (cl.isAssignableFrom(FlagsAttribute.class)) {
tok.append("f=" + Integer.toHexString(((FlagsAttribute)att).getFlags()));
} else if (cl.isAssignableFrom(OffsetAttribute.class)) {
tok.append("s=" + ((OffsetAttribute)att).startOffset() + ",e=" + ((OffsetAttribute)att).endOffset());
} else if (cl.isAssignableFrom(PayloadAttribute.class)) {
Payload p = ((PayloadAttribute)att).getPayload();
if (p != null && p.length() > 0) {
tok.append("p=" + bytesToHex(p.getData(), p.getOffset(), p.length()));
} else if (tok.length() > 0) {
tok.setLength(tok.length() - 1); // remove the last comma
}
} else if (cl.isAssignableFrom(PositionIncrementAttribute.class)) {
tok.append("i=" + ((PositionIncrementAttribute)att).getPositionIncrement());
} else if (cl.isAssignableFrom(TypeAttribute.class)) {
tok.append("y=" + escape(((TypeAttribute)att).type()));
} else {
tok.append(cl.getName() + "=" + escape(att.toString()));
}
}
}
String term = null;
if (cTerm != null) {
term = cTerm;
} else {
term = tTerm;
}
if (term != null && term.length() > 0) {
if (tok.length() > 0) {
tok.insert(0, term + ",");
} else {
tok.insert(0, term);
}
}
sb.append(tok);
}
}
return sb.toString();
}
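// example (not from the Solr sources): a value in the "simple" pre-analyzed
// format that parse() above accepts and toFormattedString() produces. This
// assumes VERSION is "1"; the stored text and token offsets are invented.
public class PreAnalyzedFormatDemo {
  public static void main(String[] args) {
    // VERSION, optional =stored= section (with \= escapes inside), then
    // space-separated tokens; attributes are comma-separated key=value pairs
    // (s/e offsets, i position increment, y type, f flags, p payload hex).
    String formatted = "1 =stored text= hello,s=0,e=5,i=1 world,s=6,e=11,i=1";
    System.out.println(formatted);
  }
}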
// in core/src/java/org/apache/solr/schema/LongField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String s = f.stringValue();
// these values may be from a legacy lucene index, which may
// not be properly formatted in some output formats, or may
// incorrectly have a zero length.
if (s.length()==0) {
// zero length value means someone mistakenly indexed the value
// instead of simply leaving it out. Write a null value instead of a numeric.
writer.writeNull(name);
return;
}
try {
long val = Long.parseLong(s);
writer.writeLong(name, val);
} catch (NumberFormatException e){
// can't parse - write out the contents as a string so nothing is lost and
// clients don't get a parse error.
writer.writeStr(name, s, true);
}
}
// in core/src/java/org/apache/solr/schema/GeoHashField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f)
throws IOException {
writer.writeStr(name, toExternal(f), false);
}
// in core/src/java/org/apache/solr/schema/ExternalFileField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
throw new UnsupportedOperationException();
}
// in core/src/java/org/apache/solr/schema/ShortField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String s = f.stringValue();
// these values may be from a legacy lucene index, which may
// not be properly formatted in some output formats, or may
// incorrectly have a zero length.
if (s.length()==0) {
// zero length value means someone mistakenly indexed the value
// instead of simply leaving it out. Write a null value instead of a numeric.
writer.writeNull(name);
return;
}
try {
short val = Short.parseShort(s);
writer.writeInt(name, val);
} catch (NumberFormatException e){
// can't parse - write out the contents as a string so nothing is lost and
// clients don't get a parse error.
writer.writeStr(name, s, true);
}
}
// in core/src/java/org/apache/solr/schema/SchemaField.java
public void write(TextResponseWriter writer, String name, IndexableField val) throws IOException {
// name is passed in because it may be null if name should not be used.
type.write(writer,name,val);
}
// in core/src/java/org/apache/solr/schema/StrFieldSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
return new DocTermsIndexDocValues(this, readerContext, field) {
@Override
protected String toTerm(String readableValue) {
return readableValue;
}
@Override
public int ordVal(int doc) {
return termsIndex.getOrd(doc);
}
@Override
public int numOrd() {
return termsIndex.numOrd();
}
@Override
public Object objectVal(int doc) {
return strVal(doc);
}
@Override
public String toString(int doc) {
return description() + '=' + strVal(doc);
}
};
}
// in core/src/java/org/apache/solr/schema/PreAnalyzedField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f)
throws IOException {
writer.writeStr(name, f.stringValue(), true);
}
// in core/src/java/org/apache/solr/schema/PreAnalyzedField.java
public String toFormattedString(Field f) throws IOException {
return parser.toFormattedString(f);
}
// in core/src/java/org/apache/solr/schema/PreAnalyzedField.java
public final boolean incrementToken() throws IOException {
// lazy init the iterator
if (it == null) {
it = cachedStates.iterator();
}
if (!it.hasNext()) {
return false;
}
AttributeSource.State state = (State) it.next();
restoreState(state.clone());
return true;
}
// in core/src/java/org/apache/solr/schema/PreAnalyzedField.java
@Override
public void reset(Reader input) throws IOException {
super.reset(input);
cachedStates.clear();
stringValue = null;
binaryValue = null;
ParseResult res = parser.parse(input, this);
if (res != null) {
stringValue = res.str;
binaryValue = res.bin;
if (res.states != null) {
cachedStates.addAll(res.states);
}
}
}
// in core/src/java/org/apache/solr/schema/TextField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), true);
}
// in core/src/java/org/apache/solr/schema/TrieDateField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
wrappedField.write(writer, name, f);
}
// in core/src/java/org/apache/solr/schema/SortableLongField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String sval = f.stringValue();
writer.writeLong(name, NumberUtils.SortableStr2long(sval,0,sval.length()));
}
// in core/src/java/org/apache/solr/schema/SortableLongField.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final long def = defVal;
return new DocTermsIndexDocValues(this, readerContext, field) {
private final BytesRef spare = new BytesRef();
@Override
protected String toTerm(String readableValue) {
return NumberUtils.long2sortableStr(readableValue);
}
@Override
public float floatVal(int doc) {
return (float)longVal(doc);
}
@Override
public int intVal(int doc) {
return (int)longVal(doc);
}
@Override
public long longVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? def : NumberUtils.SortableStr2long(termsIndex.lookup(ord, spare),0,5);
}
@Override
public double doubleVal(int doc) {
return (double)longVal(doc);
}
@Override
public String strVal(int doc) {
return Long.toString(longVal(doc));
}
@Override
public Object objectVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? null : NumberUtils.SortableStr2long(termsIndex.lookup(ord, spare));
}
@Override
public String toString(int doc) {
return description() + '=' + longVal(doc);
}
@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {
private final MutableValueLong mval = new MutableValueLong();
@Override
public MutableValue getValue() {
return mval;
}
@Override
public void fillValue(int doc) {
int ord=termsIndex.getOrd(doc);
if (ord == 0) {
mval.value = def;
mval.exists = false;
} else {
mval.value = NumberUtils.SortableStr2long(termsIndex.lookup(ord, spare),0,5);
mval.exists = true;
}
}
};
}
};
}
// in core/src/java/org/apache/solr/schema/SortableDoubleField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
String sval = f.stringValue();
writer.writeDouble(name, NumberUtils.SortableStr2double(sval));
}
// in core/src/java/org/apache/solr/schema/SortableDoubleField.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final double def = defVal;
return new DocTermsIndexDocValues(this, readerContext, field) {
private final BytesRef spare = new BytesRef();
@Override
protected String toTerm(String readableValue) {
return NumberUtils.double2sortableStr(readableValue);
}
@Override
public float floatVal(int doc) {
return (float)doubleVal(doc);
}
@Override
public int intVal(int doc) {
return (int)doubleVal(doc);
}
@Override
public long longVal(int doc) {
return (long)doubleVal(doc);
}
@Override
public double doubleVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? def : NumberUtils.SortableStr2double(termsIndex.lookup(ord, spare));
}
@Override
public String strVal(int doc) {
return Double.toString(doubleVal(doc));
}
@Override
public Object objectVal(int doc) {
int ord=termsIndex.getOrd(doc);
return ord==0 ? null : NumberUtils.SortableStr2double(termsIndex.lookup(ord, spare));
}
@Override
public String toString(int doc) {
return description() + '=' + doubleVal(doc);
}
@Override
public ValueFiller getValueFiller() {
return new ValueFiller() {
private final MutableValueDouble mval = new MutableValueDouble();
@Override
public MutableValue getValue() {
return mval;
}
@Override
public void fillValue(int doc) {
int ord=termsIndex.getOrd(doc);
if (ord == 0) {
mval.value = def;
mval.exists = false;
} else {
mval.value = NumberUtils.SortableStr2double(termsIndex.lookup(ord, spare));
mval.exists = true;
}
}
};
}
};
}
// in core/src/java/org/apache/solr/schema/CollationField.java
@Override
public void write(TextResponseWriter writer, String name, IndexableField f) throws IOException {
writer.writeStr(name, f.stringValue(), true);
}
// in core/src/java/org/apache/solr/schema/JsonPreAnalyzedParser.java
@Override
public String toFormattedString(Field f) throws IOException {
Map<String,Object> map = new HashMap<String,Object>();
map.put(VERSION_KEY, VERSION);
if (f.fieldType().stored()) {
String stringValue = f.stringValue();
if (stringValue != null) {
map.put(STRING_KEY, stringValue);
}
BytesRef binaryValue = f.binaryValue();
if (binaryValue != null) {
map.put(BINARY_KEY, Base64.byteArrayToBase64(binaryValue.bytes, binaryValue.offset, binaryValue.length));
}
}
TokenStream ts = f.tokenStreamValue();
if (ts != null) {
List<Map<String,Object>> tokens = new LinkedList<Map<String,Object>>();
while (ts.incrementToken()) {
Iterator<Class<? extends Attribute>> it = ts.getAttributeClassesIterator();
String cTerm = null;
String tTerm = null;
Map<String,Object> tok = new TreeMap<String,Object>();
while (it.hasNext()) {
Class<? extends Attribute> cl = it.next();
if (!ts.hasAttribute(cl)) {
continue;
}
Attribute att = ts.getAttribute(cl);
if (cl.isAssignableFrom(CharTermAttribute.class)) {
CharTermAttribute catt = (CharTermAttribute)att;
cTerm = new String(catt.buffer(), 0, catt.length());
} else if (cl.isAssignableFrom(TermToBytesRefAttribute.class)) {
TermToBytesRefAttribute tatt = (TermToBytesRefAttribute)att;
tTerm = tatt.getBytesRef().utf8ToString();
} else {
if (cl.isAssignableFrom(FlagsAttribute.class)) {
tok.put(FLAGS_KEY, Integer.toHexString(((FlagsAttribute)att).getFlags()));
} else if (cl.isAssignableFrom(OffsetAttribute.class)) {
tok.put(OFFSET_START_KEY, ((OffsetAttribute)att).startOffset());
tok.put(OFFSET_END_KEY, ((OffsetAttribute)att).endOffset());
} else if (cl.isAssignableFrom(PayloadAttribute.class)) {
Payload p = ((PayloadAttribute)att).getPayload();
if (p != null && p.length() > 0) {
tok.put(PAYLOAD_KEY, Base64.byteArrayToBase64(p.getData(), p.getOffset(), p.length()));
}
} else if (cl.isAssignableFrom(PositionIncrementAttribute.class)) {
tok.put(POSINCR_KEY, ((PositionIncrementAttribute)att).getPositionIncrement());
} else if (cl.isAssignableFrom(TypeAttribute.class)) {
tok.put(TYPE_KEY, ((TypeAttribute)att).type());
} else {
tok.put(cl.getName(), att.toString());
}
}
}
String term = null;
if (cTerm != null) {
term = cTerm;
} else {
term = tTerm;
}
if (term != null && term.length() > 0) {
tok.put(TOKEN_KEY, term);
}
tokens.add(tok);
}
map.put(TOKENS_KEY, tokens);
}
return JSONUtil.toJSON(map, -1);
}
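// example (not from the Solr sources): the shape of the JSON emitted above.
// The concrete key strings ("v", "str", "tokens", "t", "s", "e", "i") are an
// assumption -- the *_KEY constants' values are not shown in this file.
public class JsonPreAnalyzedDemo {
  public static void main(String[] args) {
    String sample = "{\"v\":\"1\",\"str\":\"hello world\","
        + "\"tokens\":[{\"t\":\"hello\",\"s\":0,\"e\":5,\"i\":1},"
        + "{\"t\":\"world\",\"s\":6,\"e\":11,\"i\":1}]}";
    System.out.println(sample);
  }
}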
// in core/src/java/org/apache/solr/schema/FieldType.java
@Override
public TokenStreamComponents createComponents(String fieldName, Reader reader) {
Tokenizer ts = new Tokenizer(reader) {
final char[] cbuf = new char[maxChars];
final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);
@Override
public boolean incrementToken() throws IOException {
clearAttributes();
int n = input.read(cbuf,0,maxChars);
if (n<=0) return false;
String s = toInternal(new String(cbuf,0,n));
termAtt.setEmpty().append(s);
offsetAtt.setOffset(correctOffset(0),correctOffset(n));
return true;
}
};
return new TokenStreamComponents(ts);
}
// in core/src/java/org/apache/solr/schema/FieldType.java
@Override
public boolean incrementToken() throws IOException {
clearAttributes();
int n = input.read(cbuf,0,maxChars);
if (n<=0) return false;
String s = toInternal(new String(cbuf,0,n));
termAtt.setEmpty().append(s);
offsetAtt.setOffset(correctOffset(0),correctOffset(n));
return true;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
public String[][] getAllValues() throws IOException {
ArrayList records = new ArrayList();
String[] values;
String[][] ret = null;
while ((values = getLine()) != null) {
records.add(values);
}
if (records.size() > 0) {
ret = new String[records.size()][];
records.toArray(ret);
}
return ret;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
public String nextValue() throws IOException {
Token tkn = nextToken();
String ret = null;
switch (tkn.type) {
case TT_TOKEN:
case TT_EORECORD:
ret = tkn.content.toString();
break;
case TT_EOF:
ret = null;
break;
case TT_INVALID:
default:
// no token available (or an error occurred)
throw new IOException(
"(line " + getLineNumber()
+ ") invalid parse sequence");
// unreachable: break;
}
return ret;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
public String[] getLine() throws IOException {
String[] ret = EMPTY_STRING_ARRAY;
record.clear();
while (true) {
reusableToken.reset();
nextToken(reusableToken);
switch (reusableToken.type) {
case TT_TOKEN:
record.add(reusableToken.content.toString());
break;
case TT_EORECORD:
record.add(reusableToken.content.toString());
break;
case TT_EOF:
if (reusableToken.isReady) {
record.add(reusableToken.content.toString());
} else {
ret = null;
}
break;
case TT_INVALID:
default:
// error: throw IOException
throw new IOException("(line " + getLineNumber() + ") invalid parse sequence");
// unreachable: break;
}
if (reusableToken.type != TT_TOKEN) {
break;
}
}
if (!record.isEmpty()) {
ret = (String[]) record.toArray(new String[record.size()]);
}
return ret;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
protected Token nextToken() throws IOException {
return nextToken(new Token());
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
protected Token nextToken(Token tkn) throws IOException {
wsBuf.clear(); // reuse
// get the last read char (required for empty line detection)
int lastChar = in.readAgain();
// read the next char and set eol
/* note: unfortunately isEndOfLine may consume a character silently.
* This has no effect outside of the method, so a simple workaround
* is to call 'readAgain' on the stream afterwards.
* (Passing objects instead of primitives, e.g. via JDK 1.5 autoboxing,
* could avoid this.)
*/
int c = in.read();
boolean eol = isEndOfLine(c);
c = in.readAgain();
// empty line detection: eol AND (last char was EOL or beginning)
while (strategy.getIgnoreEmptyLines() && eol
&& (lastChar == '\n'
|| lastChar == ExtendedBufferedReader.UNDEFINED)
&& !isEndOfFile(lastChar)) {
// go one char ahead ...
lastChar = c;
c = in.read();
eol = isEndOfLine(c);
c = in.readAgain();
// reached end of file without any content (empty line at the end)
if (isEndOfFile(c)) {
tkn.type = TT_EOF;
return tkn;
}
}
// did we already reach eof during the last iteration? TT_EOF
if (isEndOfFile(lastChar) || (lastChar != strategy.getDelimiter() && isEndOfFile(c))) {
tkn.type = TT_EOF;
return tkn;
}
// important: make sure a new char gets consumed in each iteration
while (!tkn.isReady && tkn.type != TT_EOF) {
// ignore whitespaces at beginning of a token
while (strategy.getIgnoreLeadingWhitespaces() && isWhitespace(c) && !eol) {
wsBuf.append((char) c);
c = in.read();
eol = isEndOfLine(c);
}
// ok, start of token reached: comment, encapsulated, or token
if (c == strategy.getCommentStart()) {
// ignore everything till end of line and continue (incr linecount)
in.readLine();
tkn = nextToken(tkn.reset());
} else if (c == strategy.getDelimiter()) {
// empty token return TT_TOKEN("")
tkn.type = TT_TOKEN;
tkn.isReady = true;
} else if (eol) {
// empty token return TT_EORECORD("")
//noop: tkn.content.append("");
tkn.type = TT_EORECORD;
tkn.isReady = true;
} else if (c == strategy.getEncapsulator()) {
// consume encapsulated token
encapsulatedTokenLexer(tkn, c);
} else if (isEndOfFile(c)) {
// end of file return TT_EOF()
//noop: tkn.content.append("");
tkn.type = TT_EOF;
tkn.isReady = true;
} else {
// next token must be a simple token
// add removed blanks when not ignoring whitespace chars...
if (!strategy.getIgnoreLeadingWhitespaces()) {
tkn.content.append(wsBuf);
}
simpleTokenLexer(tkn, c);
}
}
return tkn;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
private Token simpleTokenLexer(Token tkn, int c) throws IOException {
for (;;) {
if (isEndOfLine(c)) {
// end of record
tkn.type = TT_EORECORD;
tkn.isReady = true;
break;
} else if (isEndOfFile(c)) {
// end of file
tkn.type = TT_EOF;
tkn.isReady = true;
break;
} else if (c == strategy.getDelimiter()) {
// end of token
tkn.type = TT_TOKEN;
tkn.isReady = true;
break;
} else if (c == '\\' && strategy.getUnicodeEscapeInterpretation() && in.lookAhead() == 'u') {
// interpret unicode escaped chars (like \u0070 -> p)
tkn.content.append((char) unicodeEscapeLexer(c));
} else if (c == strategy.getEscape()) {
tkn.content.append((char)readEscape(c));
} else {
tkn.content.append((char) c);
}
c = in.read();
}
if (strategy.getIgnoreTrailingWhitespaces()) {
tkn.content.trimTrailingWhitespace();
}
return tkn;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
private Token encapsulatedTokenLexer(Token tkn, int c) throws IOException {
// save current line
int startLineNumber = getLineNumber();
// ignore the given delimiter
// assert c == delimiter;
for (;;) {
c = in.read();
if (c == '\\' && strategy.getUnicodeEscapeInterpretation() && in.lookAhead()=='u') {
tkn.content.append((char) unicodeEscapeLexer(c));
} else if (c == strategy.getEscape()) {
tkn.content.append((char)readEscape(c));
} else if (c == strategy.getEncapsulator()) {
if (in.lookAhead() == strategy.getEncapsulator()) {
// double or escaped encapsulator -> add single encapsulator to token
c = in.read();
tkn.content.append((char) c);
} else {
// token finish mark (encapsulator) reached: ignore whitespace till delimiter
for (;;) {
c = in.read();
if (c == strategy.getDelimiter()) {
tkn.type = TT_TOKEN;
tkn.isReady = true;
return tkn;
} else if (isEndOfFile(c)) {
tkn.type = TT_EOF;
tkn.isReady = true;
return tkn;
} else if (isEndOfLine(c)) {
// ok, end of token reached
tkn.type = TT_EORECORD;
tkn.isReady = true;
return tkn;
} else if (!isWhitespace(c)) {
// error invalid char between token and next delimiter
throw new IOException(
"(line " + getLineNumber()
+ ") invalid char between encapsulated token and delimiter"
);
}
}
}
} else if (isEndOfFile(c)) {
// error condition (end of file before end of token)
throw new IOException(
"(startline " + startLineNumber + ") "
+ "eof reached before encapsulated token finished"
);
} else {
// consume character
tkn.content.append((char) c);
}
}
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
protected int unicodeEscapeLexer(int c) throws IOException {
int ret = 0;
// skip the 'u' (c is assumed to be '\\' here) and read 4 hex digits
c = in.read();
code.clear();
try {
for (int i = 0; i < 4; i++) {
c = in.read();
if (isEndOfFile(c) || isEndOfLine(c)) {
throw new NumberFormatException("number too short");
}
code.append((char) c);
}
ret = Integer.parseInt(code.toString(), 16);
} catch (NumberFormatException e) {
throw new IOException(
"(line " + getLineNumber() + ") invalid unicode escape sequence found '"
+ code.toString() + "': " + e.toString());
}
return ret;
}
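// example (not from the Solr sources): the core of the unicode escape
// decoding above -- four hex digits parsed base-16 give the character,
// e.g. \u0070 -> 'p'.
public class UnicodeEscapeDemo {
  public static void main(String[] args) {
    int code = Integer.parseInt("0070", 16);
    System.out.println((char) code); // p
  }
}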
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
private int readEscape(int c) throws IOException {
// assume c is the escape char (normally a backslash)
c = in.read();
int out;
switch (c) {
case 'r': out='\r'; break;
case 'n': out='\n'; break;
case 't': out='\t'; break;
case 'b': out='\b'; break;
case 'f': out='\f'; break;
default : out=c;
}
return out;
}
// in core/src/java/org/apache/solr/internal/csv/CSVParser.java
private boolean isEndOfLine(int c) throws IOException {
// check if we have \r\n...
if (c == '\r') {
if (in.lookAhead() == '\n') {
// note: does not change c outside of this method !!
c = in.read();
}
}
return (c == '\n');
}
// in core/src/java/org/apache/solr/internal/csv/CSVUtils.java
public static String[][] parse(String s) throws IOException {
if (s == null) {
throw new IllegalArgumentException("Null argument not allowed.");
}
String[][] result = (new CSVParser(new StringReader(s))).getAllValues();
if (result == null) {
// since CSVStrategy ignores empty lines an empty array is returned
// (i.e. not "result = new String[][] {{""}};")
result = EMPTY_DOUBLE_STRING_ARRAY;
}
return result;
}
// in core/src/java/org/apache/solr/internal/csv/CSVUtils.java
public static String[] parseLine(String s) throws IOException {
if (s == null) {
throw new IllegalArgumentException("Null argument not allowed.");
}
// uh,jh: make sure that parseLine("").length == 0
if (s.length() == 0) {
return EMPTY_STRING_ARRAY;
}
return (new CSVParser(new StringReader(s))).getLine();
}
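// --- illustrative usage sketch (not part of the Solr source) ---
// How the two helpers above behave, assuming the default CSVStrategy
// (comma delimiter) and org.apache.solr.internal.csv on the classpath:
import java.io.IOException;
import java.util.Arrays;
import org.apache.solr.internal.csv.CSVUtils;

public class CSVUtilsDemo {
    public static void main(String[] args) throws IOException {
        // parse() returns one String[] per record
        String[][] records = CSVUtils.parse("a,b\nc,d");
        System.out.println(Arrays.deepToString(records)); // [[a, b], [c, d]]

        // parseLine() parses a single record; "" yields a zero-length array
        System.out.println(CSVUtils.parseLine("").length); // 0
    }
}
// --- end sketch ---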
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void println() throws IOException {
out.write(strategy.getPrinterNewline());
newLine = true;
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void flush() throws IOException {
out.flush();
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void println(String[] values) throws IOException {
for (int i = 0; i < values.length; i++) {
print(values[i]);
}
println();
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void printlnComment(String comment) throws IOException {
if(this.strategy.isCommentingDisabled()) {
return;
}
if (!newLine) {
println();
}
out.write(this.strategy.getCommentStart());
out.write(' ');
for (int i = 0; i < comment.length(); i++) {
char c = comment.charAt(i);
switch (c) {
case '\r' :
if (i + 1 < comment.length() && comment.charAt(i + 1) == '\n') {
i++;
}
// break intentionally excluded.
case '\n' :
println();
out.write(this.strategy.getCommentStart());
out.write(' ');
break;
default :
out.write(c);
break;
}
}
println();
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void print(char[] value, int offset, int len, boolean checkForEscape) throws IOException {
if (!checkForEscape) {
printSep();
out.write(value, offset, len);
return;
}
if (strategy.getEncapsulator() != CSVStrategy.ENCAPSULATOR_DISABLED) {
printAndEncapsulate(value, offset, len);
} else if (strategy.getEscape() != CSVStrategy.ESCAPE_DISABLED) {
printAndEscape(value, offset, len);
} else {
printSep();
out.write(value, offset, len);
}
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
void printSep() throws IOException {
if (newLine) {
newLine = false;
} else {
out.write(this.strategy.getDelimiter());
}
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
void printAndEscape(char[] value, int offset, int len) throws IOException {
int start = offset;
int pos = offset;
int end = offset + len;
printSep();
char delim = this.strategy.getDelimiter();
char escape = this.strategy.getEscape();
while (pos < end) {
char c = value[pos];
if (c == '\r' || c=='\n' || c==delim || c==escape) {
// write out segment up until this char
int l = pos-start;
if (l>0) {
out.write(value, start, l);
}
if (c=='\n') c='n';
else if (c=='\r') c='r';
out.write(escape);
out.write(c);
start = pos+1; // start on the current char after this one
}
pos++;
}
// write last segment
int l = pos-start;
if (l>0) {
out.write(value, start, l);
}
}
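// --- illustrative sketch (not part of the Solr source) ---
// printAndEscape prefixes the delimiter, the escape char, '\r' and '\n'
// with the escape character, rewriting the two line terminators as 'r'
// and 'n'. The same substitution applied to a String, for concreteness:
public class EscapeDemo {
    static String escape(String value, char delim, char esc) {
        StringBuilder out = new StringBuilder();
        for (char c : value.toCharArray()) {
            if (c == '\r' || c == '\n' || c == delim || c == esc) {
                if (c == '\n') c = 'n';
                else if (c == '\r') c = 'r';
                out.append(esc).append(c); // escape char, then the (rewritten) char
            } else {
                out.append(c);
            }
        }
        return out.toString();
    }

    public static void main(String[] args) {
        // the newline becomes a literal backslash-n, the comma gets escaped
        System.out.println(escape("a,b\nc", ',', '\\')); // a\,b\nc
    }
}
// --- end sketch ---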
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
void printAndEncapsulate(char[] value, int offset, int len) throws IOException {
boolean first = newLine; // is this the first value on this line?
boolean quote = false;
int start = offset;
int pos = offset;
int end = offset + len;
printSep();
char delim = this.strategy.getDelimiter();
char encapsulator = this.strategy.getEncapsulator();
if (len <= 0) {
// always quote an empty token that is the first
// on the line, as it may be the only thing on the
// line. If it were not quoted in that case,
// an empty line has no tokens.
if (first) {
quote = true;
}
} else {
char c = value[pos];
// Hmmm, where did this rule come from?
if (first
&& (c < '0'
|| (c > '9' && c < 'A')
|| (c > 'Z' && c < 'a')
|| (c > 'z'))) {
quote = true;
// } else if (c == ' ' || c == '\f' || c == '\t') {
} else if (c <= '#') {
// Some other chars at the start of a value caused the parser to fail, so for now
// encapsulate if we start in anything less than '#'. We are being conservative
// by including the default comment char too.
quote = true;
} else {
while (pos < end) {
c = value[pos];
if (c=='\n' || c=='\r' || c==encapsulator || c==delim) {
quote = true;
break;
}
pos++;
}
if (!quote) {
pos = end-1;
c = value[pos];
// if (c == ' ' || c == '\f' || c == '\t') {
// Some other chars at the end caused the parser to fail, so for now
// encapsulate if we end in anything less than ' '
if (c <= ' ') {
quote = true;
}
}
}
}
if (!quote) {
// no encapsulation needed - write out the original value
out.write(value, offset, len);
return;
}
// we hit something that needed encapsulation
out.write(encapsulator);
// Pick up where we left off: pos should be positioned on the first character that caused
// the need for encapsulation.
while (pos<end) {
char c = value[pos];
if (c==encapsulator) {
// write out the chunk up until this point
// add 1 to the length to write out the encapsulator also
out.write(value, start, pos-start+1);
// put the next starting position on the encapsulator so we will
// write it out again with the next string (effectively doubling it)
start = pos;
}
pos++;
}
// write the last segment
out.write(value, start, pos-start);
out.write(encapsulator);
}
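// --- illustrative sketch (not part of the Solr source) ---
// The tail of printAndEncapsulate writes each embedded encapsulator twice
// (by restarting the next chunk on the encapsulator itself). The doubling
// rule in isolation:
public class EncapsulateDemo {
    static String encapsulate(String value, char quote) {
        StringBuilder out = new StringBuilder().append(quote);
        for (char c : value.toCharArray()) {
            if (c == quote) out.append(quote); // double embedded quotes
            out.append(c);
        }
        return out.append(quote).toString();
    }

    public static void main(String[] args) {
        System.out.println(encapsulate("say \"hi\"", '"')); // "say ""hi"""
    }
}
// --- end sketch ---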
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void print(String value, boolean checkForEscape) throws IOException {
if (!checkForEscape) {
// write directly from string
printSep();
out.write(value);
return;
}
if (buf.length < value.length()) {
buf = new char[value.length()];
}
value.getChars(0, value.length(), buf, 0);
print(buf, 0, value.length(), checkForEscape);
}
// in core/src/java/org/apache/solr/internal/csv/CSVPrinter.java
public void print(String value) throws IOException {
print(value, true);
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public int read() throws IOException {
// initialize the lookahead
if (lookaheadChar == UNDEFINED) {
lookaheadChar = super.read();
}
lastChar = lookaheadChar;
if (super.ready()) {
lookaheadChar = super.read();
} else {
lookaheadChar = UNDEFINED;
}
if (lastChar == '\n') {
lineCounter++;
}
return lastChar;
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public int read(char[] buf, int off, int len) throws IOException {
// do not claim if len == 0
if (len == 0) {
return 0;
}
// init lookahead, but do not block !!
if (lookaheadChar == UNDEFINED) {
if (ready()) {
lookaheadChar = super.read();
} else {
return -1;
}
}
// the lookahead already hit end of stream
if (lookaheadChar == -1) {
return -1;
}
// continue until the lookaheadChar would block
int cOff = off;
while (len > 0 && ready()) {
if (lookaheadChar == -1) {
// eof stream reached, do not continue
return cOff - off;
} else {
buf[cOff++] = (char) lookaheadChar;
if (lookaheadChar == '\n') {
lineCounter++;
}
lastChar = lookaheadChar;
lookaheadChar = super.read();
len--;
}
}
return cOff - off;
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public String readUntil(char c) throws IOException {
if (lookaheadChar == UNDEFINED) {
lookaheadChar = super.read();
}
line.clear(); // reuse
while (lookaheadChar != c && lookaheadChar != END_OF_STREAM) {
line.append((char) lookaheadChar);
if (lookaheadChar == '\n') {
lineCounter++;
}
lastChar = lookaheadChar;
lookaheadChar = super.read();
}
return line.toString();
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public String readLine() throws IOException {
if (lookaheadChar == UNDEFINED) {
lookaheadChar = super.read();
}
line.clear(); //reuse
// return null if end of stream has been reached
if (lookaheadChar == END_OF_STREAM) {
return null;
}
// do we have a line termination already
char laChar = (char) lookaheadChar;
if (laChar == '\n' || laChar == '\r') {
lastChar = lookaheadChar;
lookaheadChar = super.read();
// ignore '\r\n' as well
if ((char) lookaheadChar == '\n') {
lastChar = lookaheadChar;
lookaheadChar = super.read();
}
lineCounter++;
return line.toString();
}
// create the rest-of-line return and update the lookahead
line.append(laChar);
String restOfLine = super.readLine(); // TODO involves copying
lastChar = lookaheadChar;
lookaheadChar = super.read();
if (restOfLine != null) {
line.append(restOfLine);
}
lineCounter++;
return line.toString();
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public long skip(long n) throws IllegalArgumentException, IOException {
if (lookaheadChar == UNDEFINED) {
lookaheadChar = super.read();
}
// illegal argument
if (n < 0) {
throw new IllegalArgumentException("negative argument not supported");
}
// no skipping
if (n == 0 || lookaheadChar == END_OF_STREAM) {
return 0;
}
// skip and re-read the lookahead char
long skipped = 0;
if (n > 1) {
skipped = super.skip(n - 1);
}
lookaheadChar = super.read();
// fixme uh: we should check the skipped sequence for line-terminations...
lineCounter = Integer.MIN_VALUE;
return skipped + 1;
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public long skipUntil(char c) throws IllegalArgumentException, IOException {
if (lookaheadChar == UNDEFINED) {
lookaheadChar = super.read();
}
long counter = 0;
while (lookaheadChar != c && lookaheadChar != END_OF_STREAM) {
if (lookaheadChar == '\n') {
lineCounter++;
}
lookaheadChar = super.read();
counter++;
}
return counter;
}
// in core/src/java/org/apache/solr/internal/csv/ExtendedBufferedReader.java
public int lookAhead() throws IOException {
if (lookaheadChar == UNDEFINED) {
lookaheadChar = super.read();
}
return lookaheadChar;
}
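// --- illustrative sketch (not part of the Solr source) ---
// ExtendedBufferedReader keeps exactly one character of lookahead so that
// lookAhead() can peek without consuming input. The core of that contract,
// stripped of line counting and the BufferedReader base class:
import java.io.IOException;
import java.io.Reader;
import java.io.StringReader;

public class LookaheadReaderDemo {
    private static final int UNDEFINED = -2;
    private final Reader in;
    private int lookahead = UNDEFINED;

    LookaheadReaderDemo(Reader in) { this.in = in; }

    // peek at the next char without consuming it
    int lookAhead() throws IOException {
        if (lookahead == UNDEFINED) lookahead = in.read();
        return lookahead;
    }

    // consume and return the next char
    int read() throws IOException {
        int c = lookAhead();
        lookahead = UNDEFINED;
        return c;
    }

    public static void main(String[] args) throws IOException {
        LookaheadReaderDemo r = new LookaheadReaderDemo(new StringReader("ab"));
        System.out.println((char) r.lookAhead()); // a (not consumed)
        System.out.println((char) r.read());      // a
        System.out.println((char) r.read());      // b
    }
}
// --- end sketch ---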
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public final int docFreq(Term term) throws IOException {
return reader.docFreq(term);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public void close() throws IOException {
if (debug) {
if (cachingEnabled) {
StringBuilder sb = new StringBuilder();
sb.append("Closing ").append(name);
for (SolrCache cache : cacheList) {
sb.append("\n\t");
sb.append(cache);
}
log.debug(sb.toString());
} else {
log.debug("Closing " + name);
}
}
core.getInfoRegistry().remove(name);
// super.close();
// can't use super.close() since it just calls reader.close() and that may only be called once
// per reader (even if incRef() was previously called).
if (closeReader) reader.decRef();
for (SolrCache cache : cacheList) {
cache.close();
}
directoryFactory.release(getIndexReader().directory());
// do this at the end so it only gets done if there are no exceptions
numCloses.incrementAndGet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public static void initRegenerators(SolrConfig solrConfig) {
if (solrConfig.fieldValueCacheConfig != null && solrConfig.fieldValueCacheConfig.getRegenerator() == null) {
solrConfig.fieldValueCacheConfig.setRegenerator(
new CacheRegenerator() {
public boolean regenerateItem(SolrIndexSearcher newSearcher, SolrCache newCache, SolrCache oldCache, Object oldKey, Object oldVal) throws IOException {
if (oldVal instanceof UnInvertedField) {
UnInvertedField.getUnInvertedField((String)oldKey, newSearcher);
}
return true;
}
}
);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public boolean regenerateItem(SolrIndexSearcher newSearcher, SolrCache newCache, SolrCache oldCache, Object oldKey, Object oldVal) throws IOException {
if (oldVal instanceof UnInvertedField) {
UnInvertedField.getUnInvertedField((String)oldKey, newSearcher);
}
return true;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public boolean regenerateItem(SolrIndexSearcher newSearcher, SolrCache newCache, SolrCache oldCache, Object oldKey, Object oldVal) throws IOException {
newSearcher.cacheDocSet((Query)oldKey, null, false);
return true;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public boolean regenerateItem(SolrIndexSearcher newSearcher, SolrCache newCache, SolrCache oldCache, Object oldKey, Object oldVal) throws IOException {
QueryResultKey key = (QueryResultKey)oldKey;
int nDocs=1;
// request 1 doc and let caching round up to the next window size...
// unless the window size is <=1, in which case we will pick
// the minimum of the number of documents requested last time and
// a reasonable number such as 40.
// TODO: make more configurable later...
if (queryResultWindowSize<=1) {
DocList oldList = (DocList)oldVal;
int oldnDocs = oldList.offset() + oldList.size();
// 40 has factors of 2,4,5,10,20
nDocs = Math.min(oldnDocs,40);
}
int flags=NO_CHECK_QCACHE | key.nc_flags;
QueryCommand qc = new QueryCommand();
qc.setQuery(key.query)
.setFilterList(key.filters)
.setSort(key.sort)
.setLen(nDocs)
.setSupersetMaxDoc(nDocs)
.setFlags(flags);
QueryResult qr = new QueryResult();
newSearcher.getDocListC(qr,qc);
return true;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public QueryResult search(QueryResult qr, QueryCommand cmd) throws IOException {
getDocListC(qr,cmd);
return qr;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void binaryField(FieldInfo fieldInfo, byte[] value, int offset, int length) throws IOException {
doc.add(new StoredField(fieldInfo.name, value));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void stringField(FieldInfo fieldInfo, String value) throws IOException {
final FieldType ft = new FieldType(TextField.TYPE_STORED);
ft.setStoreTermVectors(fieldInfo.hasVectors());
ft.setIndexed(fieldInfo.isIndexed());
ft.setOmitNorms(fieldInfo.omitsNorms());
ft.setIndexOptions(fieldInfo.getIndexOptions());
doc.add(new Field(fieldInfo.name, value, ft));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public Document doc(int i) throws IOException {
return doc(i, (Set<String>)null);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void doc(int n, StoredFieldVisitor visitor) throws IOException {
getIndexReader().document(n, visitor);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public Document doc(int i, Set<String> fields) throws IOException {
Document d;
if (documentCache != null) {
d = documentCache.get(i);
if (d!=null) return d;
}
if(!enableLazyFieldLoading || fields == null) {
d = getIndexReader().document(i);
} else {
final SetNonLazyFieldSelector visitor = new SetNonLazyFieldSelector(fields, getIndexReader(), i);
getIndexReader().document(i, visitor);
d = visitor.doc;
}
if (documentCache != null) {
documentCache.put(i, d);
}
return d;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public void readDocs(Document[] docs, DocList ids) throws IOException {
readDocs(docs, ids, null);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public void readDocs(Document[] docs, DocList ids, Set<String> fields) throws IOException {
DocIterator iter = ids.iterator();
for (int i=0; i<docs.length; i++) {
docs[i] = doc(iter.nextDoc(), fields);
}
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public Sort weightSort(Sort sort) throws IOException {
return (sort != null) ? sort.rewrite(this) : null;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public int getFirstMatch(Term t) throws IOException {
Fields fields = atomicReader.fields();
if (fields == null) return -1;
Terms terms = fields.terms(t.field());
if (terms == null) return -1;
BytesRef termBytes = t.bytes();
final TermsEnum termsEnum = terms.iterator(null);
if (!termsEnum.seekExact(termBytes, false)) {
return -1;
}
DocsEnum docs = termsEnum.docs(atomicReader.getLiveDocs(), null, false);
if (docs == null) return -1;
int id = docs.nextDoc();
return id == DocIdSetIterator.NO_MORE_DOCS ? -1 : id;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public long lookupId(BytesRef idBytes) throws IOException {
String field = schema.getUniqueKeyField().getName();
final AtomicReaderContext[] leaves = leafContexts;
for (int i=0; i<leaves.length; i++) {
final AtomicReaderContext leaf = leaves[i];
final AtomicReader reader = leaf.reader();
final Fields fields = reader.fields();
if (fields == null) continue;
final Bits liveDocs = reader.getLiveDocs();
final DocsEnum docs = reader.termDocsEnum(liveDocs, field, idBytes, false);
if (docs == null) continue;
int id = docs.nextDoc();
if (id == DocIdSetIterator.NO_MORE_DOCS) continue;
assert docs.nextDoc() == DocIdSetIterator.NO_MORE_DOCS;
return (((long)i) << 32) | id;
}
return -1;
}
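// --- illustrative sketch (not part of the Solr source) ---
// lookupId packs the leaf (segment) index into the top 32 bits of the
// returned long and the segment-local docid into the bottom 32. Decoding
// is the mirror image:
public class PackedIdDemo {
    public static void main(String[] args) {
        long packed = (((long) 3) << 32) | 17; // leaf 3, local docid 17
        int leafIndex = (int) (packed >>> 32);
        int localDocId = (int) packed;         // low 32 bits; docids are non-negative
        System.out.println(leafIndex + " " + localDocId); // 3 17
    }
}
// --- end sketch ---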
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public void cacheDocSet(Query query, DocSet optionalAnswer, boolean mustCache) throws IOException {
// Even if the cache is null, still compute the DocSet as it may serve to warm the Lucene
// or OS disk cache.
if (optionalAnswer != null) {
if (filterCache!=null) {
filterCache.put(query,optionalAnswer);
}
return;
}
// Throw away the result, relying on the fact that getDocSet
// will currently always cache what it found. If getDocSet() starts
// using heuristics about what to cache, and mustCache==true, (or if we
// want this method to start using heuristics too) then
// this needs to change.
getDocSet(query);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocSet getDocSet(Query query) throws IOException {
if (query instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery)query;
if (!eq.getCache()) {
if (query instanceof WrappedQuery) {
query = ((WrappedQuery)query).getWrappedQuery();
}
query = QueryUtils.makeQueryable(query);
return getDocSetNC(query, null);
}
}
// Get the absolute value (positive version) of this query. If we
// get back the same reference, we know it's positive.
Query absQ = QueryUtils.getAbs(query);
boolean positive = query==absQ;
if (filterCache != null) {
DocSet absAnswer = filterCache.get(absQ);
if (absAnswer!=null) {
if (positive) return absAnswer;
else return getPositiveDocSet(matchAllDocsQuery).andNot(absAnswer);
}
}
DocSet absAnswer = getDocSetNC(absQ, null);
DocSet answer = positive ? absAnswer : getPositiveDocSet(matchAllDocsQuery).andNot(absAnswer);
if (filterCache != null) {
// cache negative queries as positive
filterCache.put(absQ, absAnswer);
}
return answer;
}
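// --- illustrative sketch (not part of the Solr source; java.util sets
// stand in for Solr's DocSet API) ---
// getDocSet caches only the positive form of a query and derives the
// negative form on demand as allDocs.andNot(positive):
import java.util.Set;
import java.util.TreeSet;

public class NegativeQueryDemo {
    public static void main(String[] args) {
        Set<Integer> allDocs = Set.of(0, 1, 2, 3, 4);
        Set<Integer> positive = Set.of(1, 3); // this is what gets cached

        // negative answer = allDocs \ positive, computed when needed
        Set<Integer> negative = new TreeSet<>(allDocs);
        negative.removeAll(positive);
        System.out.println(negative); // [0, 2, 4]
    }
}
// --- end sketch ---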
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
DocSet getPositiveDocSet(Query q) throws IOException {
DocSet answer;
if (filterCache != null) {
answer = filterCache.get(q);
if (answer!=null) return answer;
}
answer = getDocSetNC(q,null);
if (filterCache != null) filterCache.put(
q,answer);
return answer;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocSet getDocSet(List<Query> queries) throws IOException {
ProcessedFilter pf = getProcessedFilter(null, queries);
if (pf.answer != null) return pf.answer;
DocSetCollector setCollector = new DocSetCollector(maxDoc()>>6, maxDoc());
Collector collector = setCollector;
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(collector);
collector = pf.postFilter;
}
final AtomicReaderContext[] leaves = leafContexts;
for (int i=0; i<leaves.length; i++) {
final AtomicReaderContext leaf = leaves[i];
final AtomicReader reader = leaf.reader();
final Bits liveDocs = reader.getLiveDocs(); // TODO: the filter may already only have liveDocs...
DocIdSet idSet = null;
if (pf.filter != null) {
idSet = pf.filter.getDocIdSet(leaf, liveDocs);
if (idSet == null) continue;
}
DocIdSetIterator idIter = null;
if (idSet != null) {
idIter = idSet.iterator();
if (idIter == null) continue;
}
collector.setNextReader(leaf);
int max = reader.maxDoc();
if (idIter == null) {
for (int docid = 0; docid<max; docid++) {
if (liveDocs != null && !liveDocs.get(docid)) continue;
collector.collect(docid);
}
} else {
for (int docid = -1; (docid = idIter.advance(docid+1)) < max; ) {
collector.collect(docid);
}
}
}
return setCollector.getDocSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public ProcessedFilter getProcessedFilter(DocSet setFilter, List<Query> queries) throws IOException {
ProcessedFilter pf = new ProcessedFilter();
if (queries==null || queries.size()==0) {
if (setFilter != null)
pf.filter = setFilter.getTopFilter();
return pf;
}
DocSet answer=null;
boolean[] neg = new boolean[queries.size()+1];
DocSet[] sets = new DocSet[queries.size()+1];
List<Query> notCached = null;
List<Query> postFilters = null;
int end = 0;
int smallestIndex = -1;
if (setFilter != null) {
answer = sets[end++] = setFilter;
smallestIndex = end;
}
int smallestCount = Integer.MAX_VALUE;
for (Query q : queries) {
if (q instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery)q;
if (!eq.getCache()) {
if (eq.getCost() >= 100 && eq instanceof PostFilter) {
if (postFilters == null) postFilters = new ArrayList<Query>(sets.length-end);
postFilters.add(q);
} else {
if (notCached == null) notCached = new ArrayList<Query>(sets.length-end);
notCached.add(q);
}
continue;
}
}
Query posQuery = QueryUtils.getAbs(q);
sets[end] = getPositiveDocSet(posQuery);
// Negative query if absolute value different from original
if (q==posQuery) {
neg[end] = false;
// keep track of the smallest positive set.
// This optimization is only worth it if size() is cached, which it would
// be if we don't do any set operations.
int sz = sets[end].size();
if (sz<smallestCount) {
smallestCount=sz;
smallestIndex=end;
answer = sets[end];
}
} else {
neg[end] = true;
}
end++;
}
// Are all of our normal cached filters negative?
if (end > 0 && answer==null) {
answer = getPositiveDocSet(matchAllDocsQuery);
}
// do negative queries first to shrink set size
for (int i=0; i<end; i++) {
if (neg[i]) answer = answer.andNot(sets[i]);
}
for (int i=0; i<end; i++) {
if (!neg[i] && i!=smallestIndex) answer = answer.intersection(sets[i]);
}
if (notCached != null) {
Collections.sort(notCached, sortByCost);
List<Weight> weights = new ArrayList<Weight>(notCached.size());
for (Query q : notCached) {
Query qq = QueryUtils.makeQueryable(q);
weights.add(createNormalizedWeight(qq));
}
pf.filter = new FilterImpl(answer, weights);
} else {
if (postFilters == null) {
if (answer == null) {
answer = getPositiveDocSet(matchAllDocsQuery);
}
// "answer" is the only part of the filter, so set it.
pf.answer = answer;
}
if (answer != null) {
pf.filter = answer.getTopFilter();
}
}
if (postFilters != null) {
Collections.sort(postFilters, sortByCost);
for (int i=postFilters.size()-1; i>=0; i--) {
DelegatingCollector prev = pf.postFilter;
pf.postFilter = ((PostFilter)postFilters.get(i)).getFilterCollector(this);
if (prev != null) pf.postFilter.setDelegate(prev);
}
}
return pf;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocSet getDocSet(DocsEnumState deState) throws IOException {
int largestPossible = deState.termsEnum.docFreq();
boolean useCache = filterCache != null && largestPossible >= deState.minSetSizeCached;
TermQuery key = null;
if (useCache) {
key = new TermQuery(new Term(deState.fieldName, BytesRef.deepCopyOf(deState.termsEnum.term())));
DocSet result = filterCache.get(key);
if (result != null) return result;
}
int smallSetSize = maxDoc()>>6;
int scratchSize = Math.min(smallSetSize, largestPossible);
if (deState.scratch == null || deState.scratch.length < scratchSize)
deState.scratch = new int[scratchSize];
final int[] docs = deState.scratch;
int upto = 0;
int bitsSet = 0;
OpenBitSet obs = null;
DocsEnum docsEnum = deState.termsEnum.docs(deState.liveDocs, deState.docsEnum, false);
if (deState.docsEnum == null) {
deState.docsEnum = docsEnum;
}
if (docsEnum instanceof MultiDocsEnum) {
MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs();
int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs();
for (int subindex = 0; subindex<numSubs; subindex++) {
MultiDocsEnum.EnumWithSlice sub = subs[subindex];
if (sub.docsEnum == null) continue;
int base = sub.slice.start;
int docid;
if (largestPossible > docs.length) {
if (obs == null) obs = new OpenBitSet(maxDoc());
while ((docid = sub.docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
obs.fastSet(docid + base);
bitsSet++;
}
} else {
while ((docid = sub.docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
docs[upto++] = docid + base;
}
}
}
} else {
int docid;
if (largestPossible > docs.length) {
if (obs == null) obs = new OpenBitSet(maxDoc());
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
obs.fastSet(docid);
bitsSet++;
}
} else {
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
docs[upto++] = docid;
}
}
}
DocSet result;
if (obs != null) {
for (int i=0; i<upto; i++) {
obs.fastSet(docs[i]);
}
bitsSet += upto;
result = new BitDocSet(obs, bitsSet);
} else {
result = upto==0 ? DocSet.EMPTY : new SortedIntDocSet(Arrays.copyOf(docs, upto));
}
if (useCache) {
filterCache.put(key, result);
}
return result;
}
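// --- illustrative arithmetic (not part of the Solr source) ---
// Both this method and getDocSetNC derive their small-set threshold from
// maxDoc()>>6: sets of up to 1/64th of the index stay sorted int arrays
// (SortedIntDocSet) and anything larger spills into a bit set (BitDocSet):
public class SmallSetThresholdDemo {
    public static void main(String[] args) {
        int maxDoc = 1_000_000;
        int smallSetSize = maxDoc >> 6; // maxDoc / 64
        System.out.println(smallSetSize); // 15625, about 1.6% of the index
    }
}
// --- end sketch ---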
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
protected DocSet getDocSetNC(Query query, DocSet filter) throws IOException {
DocSetCollector collector = new DocSetCollector(maxDoc()>>6, maxDoc());
if (filter==null) {
if (query instanceof TermQuery) {
Term t = ((TermQuery)query).getTerm();
final AtomicReaderContext[] leaves = leafContexts;
for (int i=0; i<leaves.length; i++) {
final AtomicReaderContext leaf = leaves[i];
final AtomicReader reader = leaf.reader();
collector.setNextReader(leaf);
Fields fields = reader.fields();
Terms terms = fields.terms(t.field());
BytesRef termBytes = t.bytes();
Bits liveDocs = reader.getLiveDocs();
DocsEnum docsEnum = null;
if (terms != null) {
final TermsEnum termsEnum = terms.iterator(null);
if (termsEnum.seekExact(termBytes, false)) {
docsEnum = termsEnum.docs(liveDocs, null, false);
}
}
if (docsEnum != null) {
int docid;
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
collector.collect(docid);
}
}
}
} else {
super.search(query,null,collector);
}
return collector.getDocSet();
} else {
Filter luceneFilter = filter.getTopFilter();
super.search(query, luceneFilter, collector);
return collector.getDocSet();
}
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocSet getDocSet(Query query, DocSet filter) throws IOException {
if (filter==null) return getDocSet(query);
if (query instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery)query;
if (!eq.getCache()) {
if (query instanceof WrappedQuery) {
query = ((WrappedQuery)query).getWrappedQuery();
}
query = QueryUtils.makeQueryable(query);
return getDocSetNC(query, filter);
}
}
// Negative query if absolute value different from original
Query absQ = QueryUtils.getAbs(query);
boolean positive = absQ==query;
DocSet first;
if (filterCache != null) {
first = filterCache.get(absQ);
if (first==null) {
first = getDocSetNC(absQ,null);
filterCache.put(absQ,first);
}
return positive ? first.intersection(filter) : filter.andNot(first);
}
// If there isn't a cache, then do a single filtered query if positive.
return positive ? getDocSetNC(absQ,filter) : filter.andNot(getPositiveDocSet(absQ));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocList getDocList(Query query, Query filter, Sort lsort, int offset, int len) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocList();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocList getDocList(Query query, List<Query> filterList, Sort lsort, int offset, int len, int flags) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filterList)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setFlags(flags);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocList();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
private void getDocListC(QueryResult qr, QueryCommand cmd) throws IOException {
DocListAndSet out = new DocListAndSet();
qr.setDocListAndSet(out);
QueryResultKey key=null;
int maxDocRequested = cmd.getOffset() + cmd.getLen();
// check for overflow, and check for # docs in index
if (maxDocRequested < 0 || maxDocRequested > maxDoc()) maxDocRequested = maxDoc();
int supersetMaxDoc= maxDocRequested;
DocList superset = null;
int flags = cmd.getFlags();
Query q = cmd.getQuery();
if (q instanceof ExtendedQuery) {
ExtendedQuery eq = (ExtendedQuery)q;
if (!eq.getCache()) {
flags |= (NO_CHECK_QCACHE | NO_SET_QCACHE | NO_CHECK_FILTERCACHE);
}
}
// we can try and look up the complete query in the cache.
// we can't do that if filter!=null though (we don't want to
// do hashCode() and equals() for a big DocSet).
if (queryResultCache != null && cmd.getFilter()==null
&& (flags & (NO_CHECK_QCACHE|NO_SET_QCACHE)) != ((NO_CHECK_QCACHE|NO_SET_QCACHE)))
{
// all of the current flags can be reused during warming,
// so set all of them on the cache key.
key = new QueryResultKey(q, cmd.getFilterList(), cmd.getSort(), flags);
if ((flags & NO_CHECK_QCACHE)==0) {
superset = queryResultCache.get(key);
if (superset != null) {
// check that the cache entry has scores recorded if we need them
if ((flags & GET_SCORES)==0 || superset.hasScores()) {
// NOTE: subset() returns null if the DocList has fewer docs than
// requested
out.docList = superset.subset(cmd.getOffset(),cmd.getLen());
}
}
if (out.docList != null) {
// found the docList in the cache... now check if we need the docset too.
// OPT: possible future optimization - if the doclist contains all the matches,
// use it to make the docset instead of rerunning the query.
if (out.docSet==null && ((flags & GET_DOCSET)!=0) ) {
if (cmd.getFilterList()==null) {
out.docSet = getDocSet(cmd.getQuery());
} else {
List<Query> newList = new ArrayList<Query>(cmd.getFilterList().size()+1);
newList.add(cmd.getQuery());
newList.addAll(cmd.getFilterList());
out.docSet = getDocSet(newList);
}
}
return;
}
}
// If we are going to generate the result, bump up to the
// next resultWindowSize for better caching.
if ((flags & NO_SET_QCACHE) == 0) {
// handle 0 special case as well as avoid idiv in the common case.
if (maxDocRequested < queryResultWindowSize) {
supersetMaxDoc=queryResultWindowSize;
} else {
supersetMaxDoc = ((maxDocRequested -1)/queryResultWindowSize + 1)*queryResultWindowSize;
if (supersetMaxDoc < 0) supersetMaxDoc=maxDocRequested;
}
} else {
key = null; // we won't be caching the result
}
}
// OK, so now we need to generate an answer.
// One way to do that would be to check if we have an unordered list
// of results for the base query. If so, we can apply the filters and then
// sort by the resulting set. This can only be used if:
// - the sort doesn't contain score
// - we don't want score returned.
// check if we should try and use the filter cache
boolean useFilterCache=false;
if ((flags & (GET_SCORES|NO_CHECK_FILTERCACHE))==0 && useFilterForSortedQuery && cmd.getSort() != null && filterCache != null) {
useFilterCache=true;
SortField[] sfields = cmd.getSort().getSort();
for (SortField sf : sfields) {
if (sf.getType() == SortField.Type.SCORE) {
useFilterCache=false;
break;
}
}
}
// disable useFilterCache optimization temporarily
if (useFilterCache) {
// now actually use the filter cache.
// for large filters that match few documents, this may be
// slower than simply re-executing the query.
if (out.docSet == null) {
out.docSet = getDocSet(cmd.getQuery(),cmd.getFilter());
DocSet bigFilt = getDocSet(cmd.getFilterList());
if (bigFilt != null) out.docSet = out.docSet.intersection(bigFilt);
}
// todo: there could be a sortDocSet that could take a list of
// the filters instead of anding them first...
// perhaps there should be a multi-docset-iterator
superset = sortDocSet(out.docSet,cmd.getSort(),supersetMaxDoc);
out.docList = superset.subset(cmd.getOffset(),cmd.getLen());
} else {
// do it the normal way...
cmd.setSupersetMaxDoc(supersetMaxDoc);
if ((flags & GET_DOCSET)!=0) {
// this currently conflates returning the docset for the base query vs
// the base query and all filters.
DocSet qDocSet = getDocListAndSetNC(qr,cmd);
// cache the docSet matching the query w/o filtering
if (qDocSet!=null && filterCache!=null && !qr.isPartialResults()) filterCache.put(cmd.getQuery(),qDocSet);
} else {
getDocListNC(qr,cmd);
//Parameters: cmd.getQuery(),theFilt,cmd.getSort(),0,supersetMaxDoc,cmd.getFlags(),cmd.getTimeAllowed(),responseHeader);
}
superset = out.docList;
out.docList = superset.subset(cmd.getOffset(),cmd.getLen());
}
// lastly, put the superset in the cache if the size is less than or equal
// to queryResultMaxDocsCached
if (key != null && superset.size() <= queryResultMaxDocsCached && !qr.isPartialResults()) {
queryResultCache.put(key, superset);
}
}
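// --- illustrative arithmetic (not part of the Solr source) ---
// The windowing step in getDocListC rounds the requested doc count up to
// the next multiple of queryResultWindowSize so that nearby pages hit the
// same cache entry. The rounding is the usual integer ceiling trick:
public class WindowRoundingDemo {
    public static void main(String[] args) {
        int window = 20; // stand-in for queryResultWindowSize
        for (int requested : new int[] {1, 20, 21, 45}) {
            int superset = ((requested - 1) / window + 1) * window;
            System.out.println(requested + " -> " + superset);
            // 1 -> 20, 20 -> 20, 21 -> 40, 45 -> 60
        }
    }
}
// --- end sketch ---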
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
private void getDocListNC(QueryResult qr,QueryCommand cmd) throws IOException {
final long timeAllowed = cmd.getTimeAllowed();
int len = cmd.getSupersetMaxDoc();
int last = len;
if (last < 0 || last > maxDoc()) last=maxDoc();
final int lastDocRequested = last;
int nDocsReturned;
int totalHits;
float maxScore;
int[] ids;
float[] scores;
boolean needScores = (cmd.getFlags() & GET_SCORES) != 0;
Query query = QueryUtils.makeQueryable(cmd.getQuery());
ProcessedFilter pf = getProcessedFilter(cmd.getFilter(), cmd.getFilterList());
final Filter luceneFilter = pf.filter;
// handle zero case...
if (lastDocRequested<=0) {
final float[] topscore = new float[] { Float.NEGATIVE_INFINITY };
final int[] numHits = new int[1];
Collector collector;
if (!needScores) {
collector = new Collector () {
@Override
public void setScorer(Scorer scorer) throws IOException {
}
@Override
public void collect(int doc) throws IOException {
numHits[0]++;
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
};
} else {
collector = new Collector() {
Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
numHits[0]++;
float score = scorer.score();
if (score > topscore[0]) topscore[0]=score;
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
@Override
public boolean acceptsDocsOutOfOrder() {
return true;
}
};
}
if( timeAllowed > 0 ) {
collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), timeAllowed);
}
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(collector);
collector = pf.postFilter;
}
try {
super.search(query, luceneFilter, collector);
}
catch( TimeLimitingCollector.TimeExceededException x ) {
log.warn( "Query: " + query + "; " + x.getMessage() );
qr.setPartialResults(true);
}
nDocsReturned=0;
ids = new int[nDocsReturned];
scores = new float[nDocsReturned];
totalHits = numHits[0];
maxScore = totalHits>0 ? topscore[0] : 0.0f;
} else {
TopDocsCollector topCollector;
if (cmd.getSort() == null) {
if(cmd.getScoreDoc() != null) {
topCollector = TopScoreDocCollector.create(len, cmd.getScoreDoc(), true); //create the Collector with InOrderPagingCollector
} else {
topCollector = TopScoreDocCollector.create(len, true);
}
} else {
topCollector = TopFieldCollector.create(weightSort(cmd.getSort()), len, false, needScores, needScores, true);
}
Collector collector = topCollector;
if( timeAllowed > 0 ) {
collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), timeAllowed);
}
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(collector);
collector = pf.postFilter;
}
try {
super.search(query, luceneFilter, collector);
}
catch( TimeLimitingCollector.TimeExceededException x ) {
log.warn( "Query: " + query + "; " + x.getMessage() );
qr.setPartialResults(true);
}
totalHits = topCollector.getTotalHits();
TopDocs topDocs = topCollector.topDocs(0, len);
maxScore = totalHits>0 ? topDocs.getMaxScore() : 0.0f;
nDocsReturned = topDocs.scoreDocs.length;
ids = new int[nDocsReturned];
scores = (cmd.getFlags()&GET_SCORES)!=0 ? new float[nDocsReturned] : null;
for (int i=0; i<nDocsReturned; i++) {
ScoreDoc scoreDoc = topDocs.scoreDocs[i];
ids[i] = scoreDoc.doc;
if (scores != null) scores[i] = scoreDoc.score;
}
}
int sliceLen = Math.min(lastDocRequested,nDocsReturned);
if (sliceLen < 0) sliceLen=0;
qr.setDocList(new DocSlice(0,sliceLen,ids,scores,totalHits,maxScore));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void setScorer(Scorer scorer) throws IOException {
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void collect(int doc) throws IOException {
numHits[0]++;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void collect(int doc) throws IOException {
numHits[0]++;
float score = scorer.score();
if (score > topscore[0]) topscore[0]=score;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
private DocSet getDocListAndSetNC(QueryResult qr,QueryCommand cmd) throws IOException {
int len = cmd.getSupersetMaxDoc();
int last = len;
if (last < 0 || last > maxDoc()) last=maxDoc();
final int lastDocRequested = last;
int nDocsReturned;
int totalHits;
float maxScore;
int[] ids;
float[] scores;
DocSet set;
boolean needScores = (cmd.getFlags() & GET_SCORES) != 0;
int maxDoc = maxDoc();
int smallSetSize = maxDoc>>6;
ProcessedFilter pf = getProcessedFilter(cmd.getFilter(), cmd.getFilterList());
final Filter luceneFilter = pf.filter;
Query query = QueryUtils.makeQueryable(cmd.getQuery());
final long timeAllowed = cmd.getTimeAllowed();
// handle zero case...
if (lastDocRequested<=0) {
final float[] topscore = new float[] { Float.NEGATIVE_INFINITY };
Collector collector;
DocSetCollector setCollector;
if (!needScores) {
collector = setCollector = new DocSetCollector(smallSetSize, maxDoc);
} else {
collector = setCollector = new DocSetDelegateCollector(smallSetSize, maxDoc, new Collector() {
Scorer scorer;
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
@Override
public void collect(int doc) throws IOException {
float score = scorer.score();
if (score > topscore[0]) topscore[0]=score;
}
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
@Override
public boolean acceptsDocsOutOfOrder() {
return false;
}
});
}
if( timeAllowed > 0 ) {
collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), timeAllowed);
}
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(collector);
collector = pf.postFilter;
}
try {
super.search(query, luceneFilter, collector);
}
catch( TimeLimitingCollector.TimeExceededException x ) {
log.warn( "Query: " + query + "; " + x.getMessage() );
qr.setPartialResults(true);
}
set = setCollector.getDocSet();
nDocsReturned = 0;
ids = new int[nDocsReturned];
scores = new float[nDocsReturned];
totalHits = set.size();
maxScore = totalHits>0 ? topscore[0] : 0.0f;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void collect(int doc) throws IOException {
float score = scorer.score();
if (score > topscore[0]) topscore[0]=score;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocList getDocList(Query query, DocSet filter, Sort lsort, int offset, int len) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilter(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocList();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocListAndSet getDocListAndSet(Query query, Query filter, Sort lsort, int offset, int len) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setNeedDocSet(true);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocListAndSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocListAndSet getDocListAndSet(Query query, Query filter, Sort lsort, int offset, int len, int flags) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setFlags(flags)
.setNeedDocSet(true);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocListAndSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocListAndSet getDocListAndSet(Query query, List<Query> filterList, Sort lsort, int offset, int len) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filterList)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setNeedDocSet(true);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocListAndSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocListAndSet getDocListAndSet(Query query, List<Query> filterList, Sort lsort, int offset, int len, int flags) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilterList(filterList)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setFlags(flags)
.setNeedDocSet(true);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocListAndSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocListAndSet getDocListAndSet(Query query, DocSet filter, Sort lsort, int offset, int len) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilter(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setNeedDocSet(true);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocListAndSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public DocListAndSet getDocListAndSet(Query query, DocSet filter, Sort lsort, int offset, int len, int flags) throws IOException {
QueryCommand qc = new QueryCommand();
qc.setQuery(query)
.setFilter(filter)
.setSort(lsort)
.setOffset(offset)
.setLen(len)
.setFlags(flags)
.setNeedDocSet(true);
QueryResult qr = new QueryResult();
search(qr,qc);
return qr.getDocListAndSet();
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
protected DocList sortDocSet(DocSet set, Sort sort, int nDocs) throws IOException {
if (nDocs == 0) {
// SOLR-2923
return new DocSlice(0, 0, new int[0], null, 0, 0f);
}
// bit of a hack to tell if a set is sorted - do it better in the future.
boolean inOrder = set instanceof BitDocSet || set instanceof SortedIntDocSet;
TopDocsCollector topCollector = TopFieldCollector.create(weightSort(sort), nDocs, false, false, false, inOrder);
DocIterator iter = set.iterator();
int base=0;
int end=0;
int readerIndex = 0;
while (iter.hasNext()) {
int doc = iter.nextDoc();
while (doc>=end) {
AtomicReaderContext leaf = leafContexts[readerIndex++];
base = leaf.docBase;
end = base + leaf.reader().maxDoc();
topCollector.setNextReader(leaf);
// we should never need to set the scorer given the settings for the collector
}
topCollector.collect(doc-base);
}
TopDocs topDocs = topCollector.topDocs(0, nDocs);
int nDocsReturned = topDocs.scoreDocs.length;
int[] ids = new int[nDocsReturned];
for (int i=0; i<nDocsReturned; i++) {
ScoreDoc scoreDoc = topDocs.scoreDocs[i];
ids[i] = scoreDoc.doc;
}
return new DocSlice(0,nDocsReturned,ids,null,topDocs.totalHits,0.0f);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public int numDocs(Query a, DocSet b) throws IOException {
// Negative query if absolute value different from original
Query absQ = QueryUtils.getAbs(a);
DocSet positiveA = getPositiveDocSet(absQ);
return a==absQ ? b.intersectionSize(positiveA) : b.andNotSize(positiveA);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public int numDocs(DocSet a, DocsEnumState deState) throws IOException {
// Negative query if absolute value different from original
return a.intersectionSize(getDocSet(deState));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public int numDocs(Query a, Query b) throws IOException {
Query absA = QueryUtils.getAbs(a);
Query absB = QueryUtils.getAbs(b);
DocSet positiveA = getPositiveDocSet(absA);
DocSet positiveB = getPositiveDocSet(absB);
// Negative query if absolute value different from original
if (a==absA) {
if (b==absB) return positiveA.intersectionSize(positiveB);
return positiveA.andNotSize(positiveB);
}
if (b==absB) return positiveB.andNotSize(positiveA);
// if both negative, we need to create a temp DocSet since we
// don't have a counting method that takes three.
DocSet all = getPositiveDocSet(matchAllDocsQuery);
// -a -b == *:*.andNot(a).andNotSize(b) == *:*.andNotSize(a.union(b))
// we use the last form since the intermediate DocSet should normally be smaller.
return all.andNotSize(positiveA.union(positiveB));
}
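// --- illustrative check (not part of the Solr source) ---
// The last branch of numDocs relies on the set identity
// |not-A intersect not-B| = |U| - |A union B|. A quick check on toy sets:
import java.util.Set;
import java.util.TreeSet;

public class NotANotBDemo {
    public static void main(String[] args) {
        Set<Integer> u = Set.of(0, 1, 2, 3, 4, 5);
        Set<Integer> a = Set.of(1, 2);
        Set<Integer> b = Set.of(2, 3);

        Set<Integer> union = new TreeSet<>(a);
        union.addAll(b); // {1, 2, 3}

        // docs in neither a nor b: 0, 4, 5
        System.out.println(u.size() - union.size()); // 3
    }
}
// --- end sketch ---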
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public Document[] readDocs(DocList ids) throws IOException {
Document[] docs = new Document[ids.size()];
readDocs(docs,ids);
return docs;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
public void warm(SolrIndexSearcher old) throws IOException {
// Make sure this is first! filters can help queryResults execute!
long warmingStartTime = System.currentTimeMillis();
// warm the caches in order...
ModifiableSolrParams params = new ModifiableSolrParams();
params.add("warming","true");
for (int i=0; i<cacheList.length; i++) {
if (debug) log.debug("autowarming " + this + " from " + old + "\n\t" + old.cacheList[i]);
SolrQueryRequest req = new LocalSolrQueryRequest(core,params) {
@Override public SolrIndexSearcher getSearcher() { return SolrIndexSearcher.this; }
@Override public void close() { }
};
SolrQueryResponse rsp = new SolrQueryResponse();
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp));
try {
this.cacheList[i].warm(this, old.cacheList[i]);
} finally {
try {
req.close();
} finally {
SolrRequestInfo.clearRequestInfo();
}
}
if (debug) log.debug("autowarming result for " + this + "\n\t" + this.cacheList[i]);
}
warmupTime = System.currentTimeMillis() - warmingStartTime;
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public Explanation explain(Query query, int doc) throws IOException {
return super.explain(QueryUtils.makeQueryable(query), doc);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
DocIdSet sub = topFilter == null ? null : topFilter.getDocIdSet(context, acceptDocs);
if (weights.size() == 0) return sub;
return new FilterSet(sub, context);
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public DocIdSetIterator iterator() throws IOException {
List<DocIdSetIterator> iterators = new ArrayList<DocIdSetIterator>(weights.size()+1);
if (docIdSet != null) {
DocIdSetIterator iter = docIdSet.iterator();
if (iter == null) return null;
iterators.add(iter);
}
for (Weight w : weights) {
Scorer scorer = w.scorer(context, true, false, context.reader().getLiveDocs());
if (scorer == null) return null;
iterators.add(scorer);
}
if (iterators.size()==0) return null;
if (iterators.size()==1) return iterators.get(0);
if (iterators.size()==2) return new DualFilterIterator(iterators.get(0), iterators.get(1));
return new FilterIterator(iterators.toArray(new DocIdSetIterator[iterators.size()]));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public Bits bits() throws IOException {
return null; // don't use random access
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
private int doNext(int doc) throws IOException {
int which=0; // index of the iterator with the highest id
int i=1;
outer: for(;;) {
for (; i<iterators.length; i++) {
if (i == which) continue;
DocIdSetIterator iter = iterators[i];
int next = iter.advance(doc);
if (next != doc) {
doc = next;
which = i;
i = 0;
continue outer;
}
}
return doc;
}
}
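// --- illustrative sketch (not part of the Solr source) ---
// doNext is a leapfrog intersection: every iterator is advanced to the
// current candidate doc until all of them land on the same id. The same
// idea over plain sorted arrays:
import java.util.Arrays;

public class LeapfrogDemo {
    // smallest element >= target, or Integer.MAX_VALUE when exhausted
    static int advance(int[] docs, int target) {
        int i = Arrays.binarySearch(docs, target);
        if (i < 0) i = -i - 1;
        return i < docs.length ? docs[i] : Integer.MAX_VALUE;
    }

    public static void main(String[] args) {
        int[] a = {1, 3, 5, 8, 9};
        int[] b = {2, 3, 8, 10};
        int doc = advance(a, 0);
        while (doc != Integer.MAX_VALUE) {
            int other = advance(b, doc);
            if (other == doc) {
                System.out.println(doc); // prints 3, then 8
                doc = advance(a, doc + 1);
            } else {
                doc = advance(a, other); // leapfrog past the mismatch
            }
        }
    }
}
// --- end sketch ---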
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public int nextDoc() throws IOException {
return doNext(first.nextDoc());
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public int advance(int target) throws IOException {
return doNext(first.advance(target));
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public int nextDoc() throws IOException {
int doc = a.nextDoc();
for(;;) {
int other = b.advance(doc);
if (other == doc) return doc;
doc = a.advance(other);
if (other == doc) return doc;
}
}
// in core/src/java/org/apache/solr/search/SolrIndexSearcher.java
@Override
public int advance(int target) throws IOException {
int doc = a.advance(target);
for(;;) {
int other = b.advance(doc);
if (other == doc) return doc;
doc = a.advance(other);
if (other == doc) return doc;
}
}
// in core/src/java/org/apache/solr/search/LFUCache.java
public void warm(SolrIndexSearcher searcher, SolrCache old) throws IOException {
if (regenerator == null) return;
long warmingStartTime = System.currentTimeMillis();
LFUCache other = (LFUCache) old;
// warm entries
if (autowarmCount != 0) {
int sz = other.size();
if (autowarmCount != -1) sz = Math.min(sz, autowarmCount);
Map items = other.cache.getMostUsedItems(sz);
Map.Entry[] itemsArr = new Map.Entry[items.size()];
int counter = 0;
for (Object mapEntry : items.entrySet()) {
itemsArr[counter++] = (Map.Entry) mapEntry;
}
for (int i = itemsArr.length - 1; i >= 0; i--) {
try {
boolean continueRegen = regenerator.regenerateItem(searcher,
this, old, itemsArr[i].getKey(), itemsArr[i].getValue());
if (!continueRegen) break;
} catch (Throwable e) {
SolrException.log(log, "Error during auto-warming of key:" + itemsArr[i].getKey(), e);
}
}
}
warmupTime = System.currentTimeMillis() - warmingStartTime;
}
// in core/src/java/org/apache/solr/search/DelegatingCollector.java
@Override
public void setScorer(Scorer scorer) throws IOException {
this.scorer = scorer;
delegate.setScorer(scorer);
}
// in core/src/java/org/apache/solr/search/DelegatingCollector.java
@Override
public void collect(int doc) throws IOException {
delegate.collect(doc);
}
// in core/src/java/org/apache/solr/search/DelegatingCollector.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
this.context = context;
this.docBase = context.docBase;
delegate.setNextReader(context);
}
// in core/src/java/org/apache/solr/search/Grouping.java
public void execute() throws IOException {
if (commands.isEmpty()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specify at least one field, function or query to group by.");
}
DocListAndSet out = new DocListAndSet();
qr.setDocListAndSet(out);
SolrIndexSearcher.ProcessedFilter pf = searcher.getProcessedFilter(cmd.getFilter(), cmd.getFilterList());
final Filter luceneFilter = pf.filter;
maxDoc = searcher.maxDoc();
needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
boolean cacheScores = false;
// NOTE: Change this when groupSort can be specified per group
if (!needScores && !commands.isEmpty()) {
if (commands.get(0).groupSort == null) {
cacheScores = true;
} else {
for (SortField field : commands.get(0).groupSort.getSort()) {
if (field.getType() == SortField.Type.SCORE) {
cacheScores = true;
break;
}
}
}
} else if (needScores) {
cacheScores = needScores;
}
getDocSet = (cmd.getFlags() & SolrIndexSearcher.GET_DOCSET) != 0;
getDocList = (cmd.getFlags() & SolrIndexSearcher.GET_DOCLIST) != 0;
query = QueryUtils.makeQueryable(cmd.getQuery());
for (Command cmd : commands) {
cmd.prepare();
}
AbstractAllGroupHeadsCollector<?> allGroupHeadsCollector = null;
List<Collector> collectors = new ArrayList<Collector>(commands.size());
for (Command cmd : commands) {
Collector collector = cmd.createFirstPassCollector();
if (collector != null) {
collectors.add(collector);
}
if (getGroupedDocSet && allGroupHeadsCollector == null) {
collectors.add(allGroupHeadsCollector = cmd.createAllGroupCollector());
}
}
Collector allCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
DocSetCollector setCollector = null;
if (getDocSet && allGroupHeadsCollector == null) {
setCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, allCollectors);
allCollectors = setCollector;
}
CachingCollector cachedCollector = null;
if (cacheSecondPassSearch && allCollectors != null) {
int maxDocsToCache = (int) Math.round(maxDoc * (maxDocsPercentageToCache / 100.0d));
// Only makes sense to cache if we cache more than zero.
// Maybe we should have a minimum and a maximum, that defines the window we would like caching for.
if (maxDocsToCache > 0) {
allCollectors = cachedCollector = CachingCollector.create(allCollectors, cacheScores, maxDocsToCache);
}
}
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(allCollectors);
allCollectors = pf.postFilter;
}
if (allCollectors != null) {
searchWithTimeLimiter(luceneFilter, allCollectors);
}
if (getGroupedDocSet && allGroupHeadsCollector != null) {
FixedBitSet fixedBitSet = allGroupHeadsCollector.retrieveGroupHeads(maxDoc);
long[] bits = fixedBitSet.getBits();
OpenBitSet openBitSet = new OpenBitSet(bits, bits.length);
qr.setDocSet(new BitDocSet(openBitSet));
} else if (getDocSet) {
qr.setDocSet(setCollector.getDocSet());
}
collectors.clear();
for (Command cmd : commands) {
Collector collector = cmd.createSecondPassCollector();
if (collector != null)
collectors.add(collector);
}
if (!collectors.isEmpty()) {
Collector secondPhaseCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
if (collectors.size() > 0) {
if (cachedCollector != null) {
if (cachedCollector.isCached()) {
cachedCollector.replay(secondPhaseCollectors);
} else {
signalCacheWarning = true;
logger.warn(String.format("The grouping cache is active, but not used because it exceeded the max cache limit of %d percent", maxDocsPercentageToCache));
logger.warn("Please increase cache size or disable group caching.");
searchWithTimeLimiter(luceneFilter, secondPhaseCollectors);
}
} else {
if (pf.postFilter != null) {
pf.postFilter.setLastDelegate(secondPhaseCollectors);
secondPhaseCollectors = pf.postFilter;
}
searchWithTimeLimiter(luceneFilter, secondPhaseCollectors);
}
}
}
for (Command cmd : commands) {
cmd.finish();
}
qr.groupedResults = grouped;
if (getDocList) {
int sz = idSet.size();
int[] ids = new int[sz];
int idx = 0;
for (int val : idSet) {
ids[idx++] = val;
}
qr.setDocList(new DocSlice(0, sz, ids, null, maxMatches, maxScore));
}
}
// in core/src/java/org/apache/solr/search/Grouping.java
private void searchWithTimeLimiter(final Filter luceneFilter, Collector collector) throws IOException {
if (cmd.getTimeAllowed() > 0) {
if (timeLimitingCollector == null) {
timeLimitingCollector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), cmd.getTimeAllowed());
} else {
/*
* This is so the same timer can be used for grouping's multiple phases.
* We don't want to create a new TimeLimitingCollector for each phase because that would
* reset the timer for each phase. If time runs out during the first phase, the
* second phase should timeout quickly.
*/
timeLimitingCollector.setCollector(collector);
}
collector = timeLimitingCollector;
}
try {
searcher.search(query, luceneFilter, collector);
} catch (TimeLimitingCollector.TimeExceededException x) {
logger.warn( "Query: " + query + "; " + x.getMessage() );
qr.setPartialResults(true);
}
}
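The comment inside searchWithTimeLimiter is the key detail: both grouping phases share one TimeLimitingCollector, so the timeAllowed budget spans the whole request instead of resetting per phase. A hedged sketch of that reuse pattern, using only the Lucene collector API already seen in this file (method and parameter names are illustrative):
// Illustrative sketch, not Solr source.
void twoPhaseSearch(IndexSearcher searcher, Query query, Collector firstPhase,
                    Collector secondPhase, long timeAllowedMs) throws IOException {
  TimeLimitingCollector tlc = new TimeLimitingCollector(
      firstPhase, TimeLimitingCollector.getGlobalCounter(), timeAllowedMs);
  searcher.search(query, tlc);    // phase one consumes part of the budget
  tlc.setCollector(secondPhase);  // same timer: the deadline is not reset
  searcher.search(query, tlc);    // phase two fails fast if time already ran out
}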
// in core/src/java/org/apache/solr/search/Grouping.java
protected Collector createSecondPassCollector() throws IOException {
return null;
}
// in core/src/java/org/apache/solr/search/Grouping.java
public AbstractAllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
return null;
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected void prepare() throws IOException {
actualGroupsToFind = getMax(offset, numGroups, maxDoc);
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected Collector createFirstPassCollector() throws IOException {
// OK, we don't want groups, but we do want a total count
if (actualGroupsToFind <= 0) {
fallBackCollector = new TotalHitCountCollector();
return fallBackCollector;
}
sort = sort == null ? Sort.RELEVANCE : sort;
firstPass = new TermFirstPassGroupingCollector(groupBy, sort, actualGroupsToFind);
return firstPass;
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected Collector createSecondPassCollector() throws IOException {
if (actualGroupsToFind <= 0) {
allGroupsCollector = new TermAllGroupsCollector(groupBy);
return totalCount == TotalCount.grouped ? allGroupsCollector : null;
}
topGroups = format == Format.grouped ? firstPass.getTopGroups(offset, false) : firstPass.getTopGroups(0, false);
if (topGroups == null) {
if (totalCount == TotalCount.grouped) {
allGroupsCollector = new TermAllGroupsCollector(groupBy);
fallBackCollector = new TotalHitCountCollector();
return MultiCollector.wrap(allGroupsCollector, fallBackCollector);
} else {
fallBackCollector = new TotalHitCountCollector();
return fallBackCollector;
}
}
int groupedDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
groupedDocsToCollect = Math.max(groupedDocsToCollect, 1);
secondPass = new TermSecondPassGroupingCollector(
groupBy, topGroups, sort, groupSort, groupedDocsToCollect, needScores, needScores, false
);
if (totalCount == TotalCount.grouped) {
allGroupsCollector = new TermAllGroupsCollector(groupBy);
return MultiCollector.wrap(secondPass, allGroupsCollector);
} else {
return secondPass;
}
}
// in core/src/java/org/apache/solr/search/Grouping.java
@Override
public AbstractAllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
Sort sortWithinGroup = groupSort != null ? groupSort : new Sort();
return TermAllGroupHeadsCollector.create(groupBy, sortWithinGroup);
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected void finish() throws IOException {
result = secondPass != null ? secondPass.getTopGroups(0) : null;
if (main) {
mainResult = createSimpleResponse();
return;
}
NamedList groupResult = commonResponse();
if (format == Format.simple) {
groupResult.add("doclist", createSimpleResponse());
return;
}
List groupList = new ArrayList();
groupResult.add("groups", groupList); // grouped={ key={ groups=[
if (result == null) {
return;
}
// handle case of rows=0
if (numGroups == 0) return;
for (GroupDocs<BytesRef> group : result.groups) {
NamedList nl = new SimpleOrderedMap();
groupList.add(nl); // grouped={ key={ groups=[ {
// To keep the response format compatible with trunk.
// In trunk MutableValue can convert an indexed value to its native type, e.g. string to int.
// The only option I currently see is to use the FieldType for this.
if (group.groupValue != null) {
SchemaField schemaField = searcher.getSchema().getField(groupBy);
FieldType fieldType = schemaField.getType();
String readableValue = fieldType.indexedToReadable(group.groupValue.utf8ToString());
IndexableField field = schemaField.createField(readableValue, 0.0f);
nl.add("groupValue", fieldType.toObject(field));
} else {
nl.add("groupValue", null);
}
addDocList(nl, group);
}
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected void prepare() throws IOException {
actualGroupsToFind = getMax(offset, numGroups, maxDoc);
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected Collector createFirstPassCollector() throws IOException {
DocSet groupFilt = searcher.getDocSet(query);
topCollector = newCollector(groupSort, needScores);
collector = new FilterCollector(groupFilt, topCollector);
return collector;
}
// in core/src/java/org/apache/solr/search/Grouping.java
TopDocsCollector newCollector(Sort sort, boolean needScores) throws IOException {
int groupDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
if (sort == null || sort == Sort.RELEVANCE) {
return TopScoreDocCollector.create(groupDocsToCollect, true);
} else {
return TopFieldCollector.create(searcher.weightSort(sort), groupDocsToCollect, false, needScores, needScores, true);
}
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected void finish() throws IOException {
TopDocsCollector topDocsCollector = (TopDocsCollector) collector.getDelegate();
TopDocs topDocs = topDocsCollector.topDocs();
GroupDocs<String> groupDocs = new GroupDocs<String>(topDocs.getMaxScore(), topDocs.totalHits, topDocs.scoreDocs, query.toString(), null);
if (main) {
mainResult = getDocList(groupDocs);
} else {
NamedList rsp = commonResponse();
addDocList(rsp, groupDocs);
}
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected void prepare() throws IOException {
Map context = ValueSource.newContext(searcher);
groupBy.createWeight(context, searcher);
actualGroupsToFind = getMax(offset, numGroups, maxDoc);
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected Collector createFirstPassCollector() throws IOException {
// OK, we don't want groups, but we do want a total count
if (actualGroupsToFind <= 0) {
fallBackCollector = new TotalHitCountCollector();
return fallBackCollector;
}
sort = sort == null ? Sort.RELEVANCE : sort;
firstPass = new FunctionFirstPassGroupingCollector(groupBy, context, searcher.weightSort(sort), actualGroupsToFind);
return firstPass;
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected Collector createSecondPassCollector() throws IOException {
if (actualGroupsToFind <= 0) {
allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context);
return totalCount == TotalCount.grouped ? allGroupsCollector : null;
}
topGroups = format == Format.grouped ? firstPass.getTopGroups(offset, false) : firstPass.getTopGroups(0, false);
if (topGroups == null) {
if (totalCount == TotalCount.grouped) {
allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context);
fallBackCollector = new TotalHitCountCollector();
return MultiCollector.wrap(allGroupsCollector, fallBackCollector);
} else {
fallBackCollector = new TotalHitCountCollector();
return fallBackCollector;
}
}
int groupedDocsToCollect = getMax(groupOffset, docsPerGroup, maxDoc);
groupedDocsToCollect = Math.max(groupedDocsToCollect, 1);
secondPass = new FunctionSecondPassGroupingCollector(
topGroups, sort, groupSort, groupedDocsToCollect, needScores, needScores, false, groupBy, context
);
if (totalCount == TotalCount.grouped) {
allGroupsCollector = new FunctionAllGroupsCollector(groupBy, context);
return MultiCollector.wrap(secondPass, allGroupsCollector);
} else {
return secondPass;
}
}
// in core/src/java/org/apache/solr/search/Grouping.java
@Override
public AbstractAllGroupHeadsCollector<?> createAllGroupCollector() throws IOException {
Sort sortWithinGroup = groupSort != null ? groupSort : new Sort();
return new FunctionAllGroupHeadsCollector(groupBy, context, sortWithinGroup);
}
// in core/src/java/org/apache/solr/search/Grouping.java
protected void finish() throws IOException {
result = secondPass != null ? secondPass.getTopGroups(0) : null;
if (main) {
mainResult = createSimpleResponse();
return;
}
NamedList groupResult = commonResponse();
if (format == Format.simple) {
groupResult.add("doclist", createSimpleResponse());
return;
}
List groupList = new ArrayList();
groupResult.add("groups", groupList); // grouped={ key={ groups=[
if (result == null) {
return;
}
// handle case of rows=0
if (numGroups == 0) return;
for (GroupDocs<MutableValue> group : result.groups) {
NamedList nl = new SimpleOrderedMap();
groupList.add(nl); // grouped={ key={ groups=[ {
nl.add("groupValue", group.groupValue.toObject());
addDocList(nl, group);
}
}
// in core/src/java/org/apache/solr/search/FunctionRangeQuery.java
@Override
public void collect(int doc) throws IOException {
if (doc<maxdoc && scorer.matches(doc)) {
delegate.collect(doc);
}
}
// in core/src/java/org/apache/solr/search/FunctionRangeQuery.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
maxdoc = context.reader().maxDoc();
FunctionValues dv = rangeFilt.getValueSource().getValues(fcontext, context);
scorer = dv.getRangeScorer(context.reader(), rangeFilt.getLowerVal(), rangeFilt.getUpperVal(), rangeFilt.isIncludeLower(), rangeFilt.isIncludeUpper());
super.setNextReader(context);
}
// in core/src/java/org/apache/solr/search/BitDocSet.java
@Override
public Filter getTopFilter() {
final OpenBitSet bs = bits;
// TODO: if cardinality isn't cached, do a quick measure of sparseness
// and return null from bits() if too sparse.
return new Filter() {
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
AtomicReader reader = context.reader();
// all Solr DocSets that are used as filters only include live docs
final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
if (context.isTopLevel) {
return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
}
final int base = context.docBase;
final int maxDoc = reader.maxDoc();
final int max = base + maxDoc; // one past the max doc in this segment.
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int pos=base-1;
int adjustedDoc=-1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
};
}
@Override
public boolean isCacheable() {
return true;
}
@Override
public Bits bits() throws IOException {
return new Bits() {
@Override
public boolean get(int index) {
return bs.fastGet(index + base);
}
@Override
public int length() {
return maxDoc;
}
};
}
}, acceptDocs2);
}
};
}
// in core/src/java/org/apache/solr/search/BitDocSet.java
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
AtomicReader reader = context.reader();
// all Solr DocSets that are used as filters only include live docs
final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
if (context.isTopLevel) {
return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
}
final int base = context.docBase;
final int maxDoc = reader.maxDoc();
final int max = base + maxDoc; // one past the max doc in this segment.
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int pos=base-1;
int adjustedDoc=-1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
};
}
@Override
public boolean isCacheable() {
return true;
}
@Override
public Bits bits() throws IOException {
return new Bits() {
@Override
public boolean get(int index) {
return bs.fastGet(index + base);
}
@Override
public int length() {
return maxDoc;
}
};
}
}, acceptDocs2);
}
// in core/src/java/org/apache/solr/search/BitDocSet.java
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int pos=base-1;
int adjustedDoc=-1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
};
}
// in core/src/java/org/apache/solr/search/BitDocSet.java
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
// in core/src/java/org/apache/solr/search/BitDocSet.java
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
// in core/src/java/org/apache/solr/search/BitDocSet.java
@Override
public Bits bits() throws IOException {
return new Bits() {
@Override
public boolean get(int index) {
return bs.fastGet(index + base);
}
@Override
public int length() {
return maxDoc;
}
};
}
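All of the BitDocSet fragments above perform the same rebasing: the backing OpenBitSet is indexed by top-level (global) doc IDs, while each per-segment iterator and Bits view must expose segment-local IDs, so reads add context.docBase and hits subtract it. A standalone illustration of that mapping, with java.util.BitSet standing in for OpenBitSet (illustrative only):
import java.util.BitSet;

public class DocBaseSketch {
  public static void main(String[] args) {
    BitSet global = new BitSet();
    global.set(3); global.set(105); global.set(137); global.set(260);
    int base = 100, maxDoc = 150, max = base + maxDoc; // segment covers [100, 250)
    for (int pos = global.nextSetBit(base); pos >= 0 && pos < max;
         pos = global.nextSetBit(pos + 1)) {
      System.out.println(pos - base); // segment-local IDs: 5 and 37
    }
  }
}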
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
return this;
}
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public float getValueForNormalization() throws IOException {
queryWeight = getBoost();
return queryWeight * queryWeight;
}
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
return new ConstantScorer(context, this, queryWeight, acceptDocs);
}
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
ConstantScorer cs = new ConstantScorer(context, this, queryWeight, context.reader().getLiveDocs());
boolean exists = cs.docIdSetIterator.advance(doc) == doc;
ComplexExplanation result = new ComplexExplanation();
if (exists) {
result.setDescription("ConstantScoreQuery(" + filter
+ "), product of:");
result.setValue(queryWeight);
result.setMatch(Boolean.TRUE);
result.addDetail(new Explanation(getBoost(), "boost"));
result.addDetail(new Explanation(queryNorm,"queryNorm"));
} else {
result.setDescription("ConstantScoreQuery(" + filter
+ ") doesn't match id " + doc);
result.setValue(0);
result.setMatch(Boolean.FALSE);
}
return result;
}
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public int nextDoc() throws IOException {
return docIdSetIterator.nextDoc();
}
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public float score() throws IOException {
return theScore;
}
// in core/src/java/org/apache/solr/search/SolrConstantScoreQuery.java
@Override
public int advance(int target) throws IOException {
return docIdSetIterator.advance(target);
}
// in core/src/java/org/apache/solr/search/LRUCache.java
public void warm(SolrIndexSearcher searcher, SolrCache<K,V> old) throws IOException {
if (regenerator==null) return;
long warmingStartTime = System.currentTimeMillis();
LRUCache<K,V> other = (LRUCache<K,V>)old;
// warm entries
if (isAutowarmingOn()) {
Object[] keys = null, vals = null;
// Don't do the autowarming in the synchronized block, just pull out the keys and values.
synchronized (other.map) {
int sz = autowarm.getWarmCount(other.map.size());
keys = new Object[sz];
vals = new Object[sz];
Iterator<Map.Entry<K, V>> iter = other.map.entrySet().iterator();
// iteration goes from oldest (least recently used) to most recently used,
// so we need to skip over the oldest entries.
int skip = other.map.size() - sz;
for (int i=0; i<skip; i++) iter.next();
for (int i=0; i<sz; i++) {
Map.Entry<K,V> entry = iter.next();
keys[i]=entry.getKey();
vals[i]=entry.getValue();
}
}
// autowarm from the oldest to the newest entries so that the ordering will be
// correct in the new cache.
for (int i=0; i<keys.length; i++) {
try {
boolean continueRegen = regenerator.regenerateItem(searcher, this, old, keys[i], vals[i]);
if (!continueRegen) break;
}
catch (Throwable e) {
SolrException.log(log,"Error during auto-warming of key:" + keys[i], e);
}
}
}
warmupTime = System.currentTimeMillis() - warmingStartTime;
}
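warm() snapshots the newest autowarm entries while holding the lock, then regenerates outside it so a slow regenerator cannot block readers of the old cache. A hedged sketch of a regenerator; the method shape mirrors the regenerateItem call above, but the class and its recompute helper are hypothetical:
// Illustrative sketch, not Solr source.
public class RecomputingRegenerator implements CacheRegenerator {
  @Override
  public boolean regenerateItem(SolrIndexSearcher newSearcher, SolrCache newCache,
                                SolrCache oldCache, Object oldKey, Object oldVal)
      throws IOException {
    newCache.put(oldKey, recompute(newSearcher, oldKey));
    return true; // returning false stops autowarming early, as in the loop above
  }
  // hypothetical helper: a real regenerator would re-run the lookup or query
  private Object recompute(SolrIndexSearcher searcher, Object key) throws IOException {
    return key;
  }
}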
// in core/src/java/org/apache/solr/search/QueryParsing.java
static FieldType writeFieldName(String name, IndexSchema schema, Appendable out, int flags) throws IOException {
FieldType ft = null;
ft = schema.getFieldTypeNoEx(name);
out.append(name);
if (ft == null) {
out.append("(UNKNOWN FIELD " + name + ')');
}
out.append(':');
return ft;
}
// in core/src/java/org/apache/solr/search/QueryParsing.java
static void writeFieldVal(String val, FieldType ft, Appendable out, int flags) throws IOException {
if (ft != null) {
try {
out.append(ft.indexedToReadable(val));
} catch (Exception e) {
out.append("EXCEPTION(val=");
out.append(val);
out.append(")");
}
} else {
out.append(val);
}
}
// in core/src/java/org/apache/solr/search/QueryParsing.java
static void writeFieldVal(BytesRef val, FieldType ft, Appendable out, int flags) throws IOException {
if (ft != null) {
try {
CharsRef readable = new CharsRef();
ft.indexedToReadable(val, readable);
out.append(readable);
} catch (Exception e) {
out.append("EXCEPTION(val=");
out.append(val.utf8ToString());
out.append(")");
}
} else {
out.append(val.utf8ToString());
}
}
// in core/src/java/org/apache/solr/search/QueryParsing.java
public static void toString(Query query, IndexSchema schema, Appendable out, int flags) throws IOException {
boolean writeBoost = true;
if (query instanceof TermQuery) {
TermQuery q = (TermQuery) query;
Term t = q.getTerm();
FieldType ft = writeFieldName(t.field(), schema, out, flags);
writeFieldVal(t.bytes(), ft, out, flags);
} else if (query instanceof TermRangeQuery) {
TermRangeQuery q = (TermRangeQuery) query;
String fname = q.getField();
FieldType ft = writeFieldName(fname, schema, out, flags);
out.append(q.includesLower() ? '[' : '{');
BytesRef lt = q.getLowerTerm();
BytesRef ut = q.getUpperTerm();
if (lt == null) {
out.append('*');
} else {
writeFieldVal(lt, ft, out, flags);
}
out.append(" TO ");
if (ut == null) {
out.append('*');
} else {
writeFieldVal(ut, ft, out, flags);
}
out.append(q.includesUpper() ? ']' : '}');
} else if (query instanceof NumericRangeQuery) {
NumericRangeQuery q = (NumericRangeQuery) query;
String fname = q.getField();
FieldType ft = writeFieldName(fname, schema, out, flags);
out.append(q.includesMin() ? '[' : '{');
Number lt = q.getMin();
Number ut = q.getMax();
if (lt == null) {
out.append('*');
} else {
out.append(lt.toString());
}
out.append(" TO ");
if (ut == null) {
out.append('*');
} else {
out.append(ut.toString());
}
out.append(q.includesMax() ? ']' : '}');
} else if (query instanceof BooleanQuery) {
BooleanQuery q = (BooleanQuery) query;
boolean needParens = false;
if (q.getBoost() != 1.0 || q.getMinimumNumberShouldMatch() != 0 || q.isCoordDisabled()) {
needParens = true;
}
if (needParens) {
out.append('(');
}
boolean first = true;
for (BooleanClause c : q.clauses()) {
if (!first) {
out.append(' ');
} else {
first = false;
}
if (c.isProhibited()) {
out.append('-');
} else if (c.isRequired()) {
out.append('+');
}
Query subQuery = c.getQuery();
boolean wrapQuery = false;
// TODO: may need to put parens around other types
// of queries too, depending on future syntax.
if (subQuery instanceof BooleanQuery) {
wrapQuery = true;
}
if (wrapQuery) {
out.append('(');
}
toString(subQuery, schema, out, flags);
if (wrapQuery) {
out.append(')');
}
}
if (needParens) {
out.append(')');
}
if (q.getMinimumNumberShouldMatch() > 0) {
out.append('~');
out.append(Integer.toString(q.getMinimumNumberShouldMatch()));
}
if (q.isCoordDisabled()) {
out.append("/no_coord");
}
} else if (query instanceof PrefixQuery) {
PrefixQuery q = (PrefixQuery) query;
Term prefix = q.getPrefix();
FieldType ft = writeFieldName(prefix.field(), schema, out, flags);
out.append(prefix.text());
out.append('*');
} else if (query instanceof WildcardQuery) {
out.append(query.toString());
writeBoost = false;
} else if (query instanceof FuzzyQuery) {
out.append(query.toString());
writeBoost = false;
} else if (query instanceof ConstantScoreQuery) {
out.append(query.toString());
writeBoost = false;
} else {
out.append(query.getClass().getSimpleName()
+ '(' + query.toString() + ')');
writeBoost = false;
}
if (writeBoost && query.getBoost() != 1.0f) {
out.append("^");
out.append(Float.toString(query.getBoost()));
}
}
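Because toString() writes to any Appendable, a caller can render a parsed query into a plain StringBuilder. A minimal usage sketch under that assumption (the flags value and helper name are illustrative):
// Illustrative sketch, not Solr source.
static String toReadable(Query query, IndexSchema schema) throws IOException {
  StringBuilder sb = new StringBuilder();
  QueryParsing.toString(query, schema, sb, 0); // flags = 0 assumed here
  return sb.toString(); // e.g. "price:[10 TO 100]" for a numeric range query
}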
// in core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java
public List<Collector> create() throws IOException {
if (firstPhaseGroups.isEmpty()) {
return Collections.emptyList();
}
List<Collector> collectors = new ArrayList<Collector>();
secondPassCollector = new TermSecondPassGroupingCollector(
field.getName(), firstPhaseGroups, groupSort, sortWithinGroup, maxDocPerGroup, needScores, needMaxScore, true
);
collectors.add(secondPassCollector);
return collectors;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/command/SearchGroupsFieldCommand.java
public List<Collector> create() throws IOException {
List<Collector> collectors = new ArrayList<Collector>();
if (topNGroups > 0) {
firstPassGroupingCollector = new TermFirstPassGroupingCollector(field.getName(), groupSort, topNGroups);
collectors.add(firstPassGroupingCollector);
}
if (includeGroupCount) {
allGroupsCollector = new TermAllGroupsCollector(field.getName());
collectors.add(allGroupsCollector);
}
return collectors;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/command/QueryCommand.java
public Builder setDocSet(SolrIndexSearcher searcher) throws IOException {
return setDocSet(searcher.getDocSet(query));
}
// in core/src/java/org/apache/solr/search/grouping/distributed/command/QueryCommand.java
public List<Collector> create() throws IOException {
if (sort == null || sort == Sort.RELEVANCE) {
collector = TopScoreDocCollector.create(docsToCollect, true);
} else {
collector = TopFieldCollector.create(sort, docsToCollect, true, needScores, needScores, true);
}
filterCollector = new FilterCollector(docSet, collector);
return Arrays.asList((Collector) filterCollector);
}
// in core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java
public NamedList transform(List<Command> data) throws IOException {
NamedList<NamedList> result = new NamedList<NamedList>();
for (Command command : data) {
final NamedList<Object> commandResult = new NamedList<Object>();
if (SearchGroupsFieldCommand.class.isInstance(command)) {
SearchGroupsFieldCommand fieldCommand = (SearchGroupsFieldCommand) command;
Pair<Integer, Collection<SearchGroup<BytesRef>>> pair = fieldCommand.result();
Integer groupedCount = pair.getA();
Collection<SearchGroup<BytesRef>> searchGroups = pair.getB();
if (searchGroups != null) {
commandResult.add("topGroups", serializeSearchGroup(searchGroups, fieldCommand.getGroupSort()));
}
if (groupedCount != null) {
commandResult.add("groupCount", groupedCount);
}
} else {
continue;
}
result.add(command.getKey(), commandResult);
}
return result;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/SearchGroupsResultTransformer.java
public Map<String, Pair<Integer, Collection<SearchGroup<BytesRef>>>> transformToNative(NamedList<NamedList> shardResponse, Sort groupSort, Sort sortWithinGroup, String shard) throws IOException {
Map<String, Pair<Integer, Collection<SearchGroup<BytesRef>>>> result = new HashMap<String, Pair<Integer, Collection<SearchGroup<BytesRef>>>>();
for (Map.Entry<String, NamedList> command : shardResponse) {
List<SearchGroup<BytesRef>> searchGroups = new ArrayList<SearchGroup<BytesRef>>();
NamedList topGroupsAndGroupCount = command.getValue();
@SuppressWarnings("unchecked")
NamedList<List<Comparable>> rawSearchGroups = (NamedList<List<Comparable>>) topGroupsAndGroupCount.get("topGroups");
if (rawSearchGroups != null) {
for (Map.Entry<String, List<Comparable>> rawSearchGroup : rawSearchGroups){
SearchGroup<BytesRef> searchGroup = new SearchGroup<BytesRef>();
searchGroup.groupValue = rawSearchGroup.getKey() != null ? new BytesRef(rawSearchGroup.getKey()) : null;
searchGroup.sortValues = rawSearchGroup.getValue().toArray(new Comparable[rawSearchGroup.getValue().size()]);
searchGroups.add(searchGroup);
}
}
Integer groupCount = (Integer) topGroupsAndGroupCount.get("groupCount");
result.put(command.getKey(), new Pair<Integer, Collection<SearchGroup<BytesRef>>>(groupCount, searchGroups));
}
return result;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java
public NamedList transform(List<Command> data) throws IOException {
NamedList<NamedList> result = new NamedList<NamedList>();
for (Command command : data) {
NamedList commandResult;
if (TopGroupsFieldCommand.class.isInstance(command)) {
TopGroupsFieldCommand fieldCommand = (TopGroupsFieldCommand) command;
SchemaField groupField = rb.req.getSearcher().getSchema().getField(fieldCommand.getKey());
commandResult = serializeTopGroups(fieldCommand.result(), groupField);
} else if (QueryCommand.class.isInstance(command)) {
QueryCommand queryCommand = (QueryCommand) command;
commandResult = serializeTopDocs(queryCommand.result());
} else {
commandResult = null;
}
result.add(command.getKey(), commandResult);
}
return result;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java
protected NamedList serializeTopGroups(TopGroups<BytesRef> data, SchemaField groupField) throws IOException {
NamedList<Object> result = new NamedList<Object>();
result.add("totalGroupedHitCount", data.totalGroupedHitCount);
result.add("totalHitCount", data.totalHitCount);
if (data.totalGroupCount != null) {
result.add("totalGroupCount", data.totalGroupCount);
}
CharsRef spare = new CharsRef();
SchemaField uniqueField = rb.req.getSearcher().getSchema().getUniqueKeyField();
for (GroupDocs<BytesRef> searchGroup : data.groups) {
NamedList<Object> groupResult = new NamedList<Object>();
groupResult.add("totalHits", searchGroup.totalHits);
if (!Float.isNaN(searchGroup.maxScore)) {
groupResult.add("maxScore", searchGroup.maxScore);
}
List<NamedList<Object>> documents = new ArrayList<NamedList<Object>>();
for (int i = 0; i < searchGroup.scoreDocs.length; i++) {
NamedList<Object> document = new NamedList<Object>();
documents.add(document);
Document doc = retrieveDocument(uniqueField, searchGroup.scoreDocs[i].doc);
document.add("id", uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
if (!Float.isNaN(searchGroup.scoreDocs[i].score)) {
document.add("score", searchGroup.scoreDocs[i].score);
}
if (!(searchGroup.scoreDocs[i] instanceof FieldDoc)) {
continue;
}
FieldDoc fieldDoc = (FieldDoc) searchGroup.scoreDocs[i];
Object[] convertedSortValues = new Object[fieldDoc.fields.length];
for (int j = 0; j < fieldDoc.fields.length; j++) {
Object sortValue = fieldDoc.fields[j];
Sort sortWithinGroup = rb.getGroupingSpec().getSortWithinGroup();
SchemaField field = sortWithinGroup.getSort()[j].getField() != null ? rb.req.getSearcher().getSchema().getFieldOrNull(sortWithinGroup.getSort()[j].getField()) : null;
if (field != null) {
FieldType fieldType = field.getType();
if (sortValue instanceof BytesRef) {
UnicodeUtil.UTF8toUTF16((BytesRef)sortValue, spare);
String indexedValue = spare.toString();
sortValue = fieldType.toObject(field.createField(fieldType.indexedToReadable(indexedValue), 0.0f));
} else if (sortValue instanceof String) {
sortValue = fieldType.toObject(field.createField(fieldType.indexedToReadable((String) sortValue), 0.0f));
}
}
convertedSortValues[j] = sortValue;
}
document.add("sortValues", convertedSortValues);
}
groupResult.add("documents", documents);
String groupValue = searchGroup.groupValue != null ? groupField.getType().indexedToReadable(searchGroup.groupValue.utf8ToString()): null;
result.add(groupValue, groupResult);
}
return result;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java
protected NamedList serializeTopDocs(QueryCommandResult result) throws IOException {
NamedList<Object> queryResult = new NamedList<Object>();
queryResult.add("matches", result.getMatches());
queryResult.add("totalHits", result.getTopDocs().totalHits);
if (rb.getGroupingSpec().isNeedScore()) {
queryResult.add("maxScore", result.getTopDocs().getMaxScore());
}
List<NamedList> documents = new ArrayList<NamedList>();
queryResult.add("documents", documents);
SchemaField uniqueField = rb.req.getSearcher().getSchema().getUniqueKeyField();
CharsRef spare = new CharsRef();
for (ScoreDoc scoreDoc : result.getTopDocs().scoreDocs) {
NamedList<Object> document = new NamedList<Object>();
documents.add(document);
Document doc = retrieveDocument(uniqueField, scoreDoc.doc);
document.add("id", uniqueField.getType().toExternal(doc.getField(uniqueField.getName())));
if (rb.getGroupingSpec().isNeedScore()) {
document.add("score", scoreDoc.score);
}
if (!FieldDoc.class.isInstance(scoreDoc)) {
continue;
}
FieldDoc fieldDoc = (FieldDoc) scoreDoc;
Object[] convertedSortValues = new Object[fieldDoc.fields.length];
for (int j = 0; j < fieldDoc.fields.length; j++) {
Object sortValue = fieldDoc.fields[j];
Sort groupSort = rb.getGroupingSpec().getGroupSort();
SchemaField field = groupSort.getSort()[j].getField() != null ? rb.req.getSearcher().getSchema().getFieldOrNull(groupSort.getSort()[j].getField()) : null;
if (field != null) {
FieldType fieldType = field.getType();
if (sortValue instanceof BytesRef) {
UnicodeUtil.UTF8toUTF16((BytesRef)sortValue, spare);
String indexedValue = spare.toString();
sortValue = fieldType.toObject(field.createField(fieldType.indexedToReadable(indexedValue), 0.0f));
} else if (sortValue instanceof String) {
sortValue = fieldType.toObject(field.createField(fieldType.indexedToReadable((String) sortValue), 0.0f));
}
}
convertedSortValues[j] = sortValue;
}
document.add("sortValues", convertedSortValues);
}
return queryResult;
}
// in core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java
private Document retrieveDocument(final SchemaField uniqueField, int doc) throws IOException {
DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(uniqueField.getName());
rb.req.getSearcher().doc(doc, visitor);
return visitor.getDocument();
}
// in core/src/java/org/apache/solr/search/grouping/CommandHandler.java
private DocSet computeGroupedDocSet(Query query, Filter luceneFilter, List<Collector> collectors) throws IOException {
Command firstCommand = commands.get(0);
AbstractAllGroupHeadsCollector termAllGroupHeadsCollector =
TermAllGroupHeadsCollector.create(firstCommand.getKey(), firstCommand.getSortWithinGroup());
if (collectors.isEmpty()) {
searchWithTimeLimiter(query, luceneFilter, termAllGroupHeadsCollector);
} else {
collectors.add(termAllGroupHeadsCollector);
searchWithTimeLimiter(query, luceneFilter, MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()])));
}
int maxDoc = searcher.maxDoc();
long[] bits = termAllGroupHeadsCollector.retrieveGroupHeads(maxDoc).getBits();
return new BitDocSet(new OpenBitSet(bits, bits.length));
}
// in core/src/java/org/apache/solr/search/grouping/CommandHandler.java
private DocSet computeDocSet(Query query, Filter luceneFilter, List<Collector> collectors) throws IOException {
int maxDoc = searcher.maxDoc();
DocSetCollector docSetCollector;
if (collectors.isEmpty()) {
docSetCollector = new DocSetCollector(maxDoc >> 6, maxDoc);
} else {
Collector wrappedCollectors = MultiCollector.wrap(collectors.toArray(new Collector[collectors.size()]));
docSetCollector = new DocSetDelegateCollector(maxDoc >> 6, maxDoc, wrappedCollectors);
}
searchWithTimeLimiter(query, luceneFilter, docSetCollector);
return docSetCollector.getDocSet();
}
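Both CommandHandler helpers size their collector with maxDoc >> 6, i.e. maxDoc / 64, which is the number of 64-bit words a full bitset over the index would occupy. The likely rationale: collecting up to that many doc IDs in an int array costs at most half the memory of the bitset, so it is a safe "small set" threshold. The arithmetic, as a runnable sketch:
public class SmallSetSizeSketch {
  public static void main(String[] args) {
    int maxDoc = 1_000_000;
    int smallSetSize = maxDoc >> 6;         // 15625 == maxDoc / 64
    long bitsetWords = (maxDoc + 63) / 64;  // longs needed by a full bitset
    // 15625 ints (4 bytes each) vs. ~15625 longs (8 bytes each)
    System.out.println(smallSetSize + " array slots, " + bitsetWords + " bitset words");
  }
}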
// in core/src/java/org/apache/solr/search/grouping/CommandHandler.java
private void searchWithTimeLimiter(final Query query, final Filter luceneFilter, Collector collector) throws IOException {
if (queryCommand.getTimeAllowed() > 0 ) {
collector = new TimeLimitingCollector(collector, TimeLimitingCollector.getGlobalCounter(), queryCommand.getTimeAllowed());
}
TotalHitCountCollector hitCountCollector = new TotalHitCountCollector();
if (includeHitCount) {
collector = MultiCollector.wrap(collector, hitCountCollector);
}
try {
searcher.search(query, luceneFilter, collector);
} catch (TimeLimitingCollector.TimeExceededException x) {
partialResults = true;
logger.warn( "Query: " + query + "; " + x.getMessage() );
}
if (includeHitCount) {
totalHitCount = hitCountCollector.getTotalHits();
}
}
// in core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java
public void setScorer(Scorer scorer) throws IOException {
delegate.setScorer(scorer);
}
// in core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java
public void collect(int doc) throws IOException {
matches++;
if (filter.exists(doc + docBase)) {
delegate.collect(doc);
}
}
// in core/src/java/org/apache/solr/search/grouping/collector/FilterCollector.java
public void setNextReader(AtomicReaderContext context) throws IOException {
this.docBase = context.docBase;
delegate.setNextReader(context);
}
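Taken together, the three FilterCollector methods above make it a pass-through that counts every collected hit but only forwards documents whose global ID (doc + docBase) is present in the DocSet. A hedged usage sketch, combining only constructors that already appear in this section:
// Illustrative sketch, not Solr source.
static TopDocs searchWithin(SolrIndexSearcher searcher, Query query, DocSet docSet)
    throws IOException {
  TopDocsCollector topCollector = TopScoreDocCollector.create(10, true);
  FilterCollector filtered = new FilterCollector(docSet, topCollector);
  searcher.search(query, filtered); // only members of docSet reach topCollector
  return topCollector.topDocs();
}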
// in core/src/java/org/apache/solr/search/DocSetDelegateCollector.java
@Override
public void collect(int doc) throws IOException {
collector.collect(doc);
doc += base;
// optimistically collect the first docs in an array
// in case the total number will be small enough to represent
// as a small set like SortedIntDocSet instead...
// Storing in this array will be quicker to convert
// than scanning through a potentially huge bit vector.
// FUTURE: when search methods all start returning docs in order, maybe
// we could have a ListDocSet() and use the collected array directly.
if (pos < scratch.length) {
scratch[pos]=doc;
} else {
// this conditional could be removed if BitSet was preallocated, but that
// would take up more memory, and add more GC time...
if (bits==null) bits = new OpenBitSet(maxDoc);
bits.fastSet(doc);
}
pos++;
}
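The collect() above is the optimistic small-set pattern: the first scratch.length hits land in a plain int array, and the bitset is only allocated once that overflows. A stripped-down, standalone version of the same pattern (java.util.BitSet stands in for OpenBitSet):
import java.util.BitSet;

// Illustrative sketch, not Solr source.
public class OptimisticCollector {
  final int[] scratch;   // cheap path for small result sets
  BitSet bits;           // lazily allocated only on overflow
  int pos;
  final int maxDoc;
  OptimisticCollector(int smallSetSize, int maxDoc) {
    this.scratch = new int[smallSetSize];
    this.maxDoc = maxDoc;
  }
  void collect(int globalDoc) {
    if (pos < scratch.length) {
      scratch[pos] = globalDoc;
    } else {
      if (bits == null) bits = new BitSet(maxDoc); // first overflow: allocate
      bits.set(globalDoc);
    }
    pos++;
  }
}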
// in core/src/java/org/apache/solr/search/DocSetDelegateCollector.java
@Override
public void setScorer(Scorer scorer) throws IOException {
collector.setScorer(scorer);
}
// in core/src/java/org/apache/solr/search/DocSetDelegateCollector.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
collector.setNextReader(context);
this.base = context.docBase;
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
// don't rewrite the subQuery
return this;
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
public Weight createWeight(IndexSearcher searcher) throws IOException {
return new JoinQueryWeight((SolrIndexSearcher)searcher);
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public void close() throws IOException {
ref.decref();
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public void close() throws IOException {
fromCore.close();
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public float getValueForNormalization() throws IOException {
queryWeight = getBoost();
return queryWeight * queryWeight;
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public Scorer scorer(AtomicReaderContext context, boolean scoreDocsInOrder,
boolean topScorer, Bits acceptDocs) throws IOException {
if (filter == null) {
boolean debug = rb != null && rb.isDebug();
long start = debug ? System.currentTimeMillis() : 0;
resultSet = getDocSet();
long end = debug ? System.currentTimeMillis() : 0;
if (debug) {
SimpleOrderedMap<Object> dbg = new SimpleOrderedMap<Object>();
dbg.add("time", (end-start));
dbg.add("fromSetSize", fromSetSize); // the input
dbg.add("toSetSize", resultSet.size()); // the output
dbg.add("fromTermCount", fromTermCount);
dbg.add("fromTermTotalDf", fromTermTotalDf);
dbg.add("fromTermDirectCount", fromTermDirectCount);
dbg.add("fromTermHits", fromTermHits);
dbg.add("fromTermHitsTotalDf", fromTermHitsTotalDf);
dbg.add("toTermHits", toTermHits);
dbg.add("toTermHitsTotalDf", toTermHitsTotalDf);
dbg.add("toTermDirectCount", toTermDirectCount);
dbg.add("smallSetsDeferred", smallSetsDeferred);
dbg.add("toSetDocsAdded", resultListDocs);
// TODO: perhaps synchronize addDebug in the future...
rb.addDebug(dbg, "join", JoinQuery.this.toString());
}
filter = resultSet.getTopFilter();
}
// Although this set only includes live docs, other filters can be pushed down to queries.
DocIdSet readerSet = filter.getDocIdSet(context, acceptDocs);
if (readerSet == null) readerSet=DocIdSet.EMPTY_DOCIDSET;
return new JoinScorer(this, readerSet.iterator(), getBoost());
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
public DocSet getDocSet() throws IOException {
OpenBitSet resultBits = null;
// minimum docFreq to use the cache
int minDocFreqFrom = Math.max(5, fromSearcher.maxDoc() >> 13);
int minDocFreqTo = Math.max(5, toSearcher.maxDoc() >> 13);
// use a smaller size than normal since we will need to sort and dedup the results
int maxSortedIntSize = Math.max(10, toSearcher.maxDoc() >> 10);
DocSet fromSet = fromSearcher.getDocSet(q);
fromSetSize = fromSet.size();
List<DocSet> resultList = new ArrayList<DocSet>(10);
// make sure we have a set that is fast for random access, if we will use it for that
DocSet fastForRandomSet = fromSet;
if (minDocFreqFrom>0 && fromSet instanceof SortedIntDocSet) {
SortedIntDocSet sset = (SortedIntDocSet)fromSet;
fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
}
Fields fromFields = fromSearcher.getAtomicReader().fields();
Fields toFields = fromSearcher==toSearcher ? fromFields : toSearcher.getAtomicReader().fields();
if (fromFields == null) return DocSet.EMPTY;
Terms terms = fromFields.terms(fromField);
Terms toTerms = toFields.terms(toField);
if (terms == null || toTerms==null) return DocSet.EMPTY;
String prefixStr = TrieField.getMainValuePrefix(fromSearcher.getSchema().getFieldType(fromField));
BytesRef prefix = prefixStr == null ? null : new BytesRef(prefixStr);
BytesRef term = null;
TermsEnum termsEnum = terms.iterator(null);
TermsEnum toTermsEnum = toTerms.iterator(null);
SolrIndexSearcher.DocsEnumState fromDeState = null;
SolrIndexSearcher.DocsEnumState toDeState = null;
if (prefix == null) {
term = termsEnum.next();
} else {
if (termsEnum.seekCeil(prefix, true) != TermsEnum.SeekStatus.END) {
term = termsEnum.term();
}
}
Bits fromLiveDocs = fromSearcher.getAtomicReader().getLiveDocs();
Bits toLiveDocs = fromSearcher == toSearcher ? fromLiveDocs : toSearcher.getAtomicReader().getLiveDocs();
fromDeState = new SolrIndexSearcher.DocsEnumState();
fromDeState.fieldName = fromField;
fromDeState.liveDocs = fromLiveDocs;
fromDeState.termsEnum = termsEnum;
fromDeState.docsEnum = null;
fromDeState.minSetSizeCached = minDocFreqFrom;
toDeState = new SolrIndexSearcher.DocsEnumState();
toDeState.fieldName = toField;
toDeState.liveDocs = toLiveDocs;
toDeState.termsEnum = toTermsEnum;
toDeState.docsEnum = null;
toDeState.minSetSizeCached = minDocFreqTo;
while (term != null) {
if (prefix != null && !StringHelper.startsWith(term, prefix))
break;
fromTermCount++;
boolean intersects = false;
int freq = termsEnum.docFreq();
fromTermTotalDf++;
if (freq < minDocFreqFrom) {
fromTermDirectCount++;
// OK to skip liveDocs, since we check for intersection with docs matching query
fromDeState.docsEnum = fromDeState.termsEnum.docs(null, fromDeState.docsEnum, false);
DocsEnum docsEnum = fromDeState.docsEnum;
if (docsEnum instanceof MultiDocsEnum) {
MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs();
int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs();
outer: for (int subindex = 0; subindex<numSubs; subindex++) {
MultiDocsEnum.EnumWithSlice sub = subs[subindex];
if (sub.docsEnum == null) continue;
int base = sub.slice.start;
int docid;
while ((docid = sub.docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (fastForRandomSet.exists(docid+base)) {
intersects = true;
break outer;
}
}
}
} else {
int docid;
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
if (fastForRandomSet.exists(docid)) {
intersects = true;
break;
}
}
}
} else {
// use the filter cache
DocSet fromTermSet = fromSearcher.getDocSet(fromDeState);
intersects = fromSet.intersects(fromTermSet);
}
if (intersects) {
fromTermHits++;
fromTermHitsTotalDf++;
TermsEnum.SeekStatus status = toTermsEnum.seekCeil(term);
if (status == TermsEnum.SeekStatus.END) break;
if (status == TermsEnum.SeekStatus.FOUND) {
toTermHits++;
int df = toTermsEnum.docFreq();
toTermHitsTotalDf += df;
if (resultBits==null && df + resultListDocs > maxSortedIntSize && resultList.size() > 0) {
resultBits = new OpenBitSet(toSearcher.maxDoc());
}
// if we don't have a bitset yet, or if the resulting set will be too large
// use the filterCache to get a DocSet
if (toTermsEnum.docFreq() >= minDocFreqTo || resultBits == null) {
// use filter cache
DocSet toTermSet = toSearcher.getDocSet(toDeState);
resultListDocs += toTermSet.size();
if (resultBits != null) {
toTermSet.setBitsOn(resultBits);
} else {
if (toTermSet instanceof BitDocSet) {
resultBits = (OpenBitSet)((BitDocSet)toTermSet).bits.clone();
} else {
resultList.add(toTermSet);
}
}
} else {
toTermDirectCount++;
// need to use liveDocs here so we don't map to any deleted ones
toDeState.docsEnum = toDeState.termsEnum.docs(toDeState.liveDocs, toDeState.docsEnum, false);
DocsEnum docsEnum = toDeState.docsEnum;
if (docsEnum instanceof MultiDocsEnum) {
MultiDocsEnum.EnumWithSlice[] subs = ((MultiDocsEnum)docsEnum).getSubs();
int numSubs = ((MultiDocsEnum)docsEnum).getNumSubs();
for (int subindex = 0; subindex<numSubs; subindex++) {
MultiDocsEnum.EnumWithSlice sub = subs[subindex];
if (sub.docsEnum == null) continue;
int base = sub.slice.start;
int docid;
while ((docid = sub.docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
resultListDocs++;
resultBits.fastSet(docid + base);
}
}
} else {
int docid;
while ((docid = docsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
resultListDocs++;
resultBits.fastSet(docid);
}
}
}
}
}
term = termsEnum.next();
}
smallSetsDeferred = resultList.size();
if (resultBits != null) {
for (DocSet set : resultList) {
set.setBitsOn(resultBits);
}
return new BitDocSet(resultBits);
}
if (resultList.size()==0) {
return DocSet.EMPTY;
}
if (resultList.size() == 1) {
return resultList.get(0);
}
int sz = 0;
for (DocSet set : resultList)
sz += set.size();
int[] docs = new int[sz];
int pos = 0;
for (DocSet set : resultList) {
System.arraycopy(((SortedIntDocSet)set).getDocs(), 0, docs, pos, set.size());
pos += set.size();
}
Arrays.sort(docs);
int[] dedup = new int[sz];
pos = 0;
int last = -1;
for (int doc : docs) {
if (doc != last)
dedup[pos++] = doc;
last = doc;
}
if (pos != dedup.length) {
dedup = Arrays.copyOf(dedup, pos);
}
return new SortedIntDocSet(dedup, dedup.length);
}
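The tail of getDocSet() merges the deferred small sets by concatenating their sorted arrays, sorting the result, and deduplicating in one pass; duplicates arise because several "from" terms can map to the same destination document. Just that merge step, as a standalone sketch:
import java.util.Arrays;
import java.util.List;

// Illustrative sketch, not Solr source.
public class SortDedupSketch {
  static int[] mergeAndDedup(List<int[]> sets) {
    int sz = 0;
    for (int[] s : sets) sz += s.length;
    int[] docs = new int[sz];
    int pos = 0;
    for (int[] s : sets) {
      System.arraycopy(s, 0, docs, pos, s.length);
      pos += s.length;
    }
    Arrays.sort(docs);
    int[] dedup = new int[sz];
    pos = 0;
    int last = -1; // doc IDs are non-negative, so -1 never matches
    for (int doc : docs) {
      if (doc != last) dedup[pos++] = doc;
      last = doc;
    }
    return pos == dedup.length ? dedup : Arrays.copyOf(dedup, pos);
  }
  public static void main(String[] args) {
    int[] merged = mergeAndDedup(Arrays.asList(new int[]{1, 5, 9}, new int[]{5, 7}));
    System.out.println(Arrays.toString(merged)); // [1, 5, 7, 9]
  }
}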
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public Explanation explain(AtomicReaderContext context, int doc) throws IOException {
Scorer scorer = scorer(context, true, false, context.reader().getLiveDocs());
boolean exists = scorer.advance(doc) == doc;
ComplexExplanation result = new ComplexExplanation();
if (exists) {
result.setDescription(this.toString()
+ " , product of:");
result.setValue(queryWeight);
result.setMatch(Boolean.TRUE);
result.addDetail(new Explanation(getBoost(), "boost"));
result.addDetail(new Explanation(queryNorm,"queryNorm"));
} else {
result.setDescription(this.toString()
+ " doesn't match id " + doc);
result.setValue(0);
result.setMatch(Boolean.FALSE);
}
return result;
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public int nextDoc() throws IOException {
return iter.nextDoc();
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public float score() throws IOException {
return score;
}
// in core/src/java/org/apache/solr/search/JoinQParserPlugin.java
@Override
public int advance(int target) throws IOException {
return iter.advance(target);
}
// in core/src/java/org/apache/solr/search/function/distance/StringDistanceFunction.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues str1DV = str1.getValues(context, readerContext);
final FunctionValues str2DV = str2.getValues(context, readerContext);
return new FloatDocValues(this) {
@Override
public float floatVal(int doc) {
return dist.getDistance(str1DV.strVal(doc), str2DV.strVal(doc));
}
@Override
public String toString(int doc) {
StringBuilder sb = new StringBuilder();
sb.append("strdist").append('(');
sb.append(str1DV.toString(doc)).append(',').append(str2DV.toString(doc))
.append(", dist=").append(dist.getClass().getName());
sb.append(')');
return sb.toString();
}
};
}
// in core/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues gh1DV = geoHash1.getValues(context, readerContext);
final FunctionValues gh2DV = geoHash2.getValues(context, readerContext);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return distance(doc, gh1DV, gh2DV);
}
@Override
public String toString(int doc) {
StringBuilder sb = new StringBuilder();
sb.append(name()).append('(');
sb.append(gh1DV.toString(doc)).append(',').append(gh2DV.toString(doc));
sb.append(')');
return sb.toString();
}
};
}
// in core/src/java/org/apache/solr/search/function/distance/GeohashHaversineFunction.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
geoHash1.createWeight(context, searcher);
geoHash2.createWeight(context, searcher);
}
// in core/src/java/org/apache/solr/search/function/distance/GeohashFunction.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues latDV = lat.getValues(context, readerContext);
final FunctionValues lonDV = lon.getValues(context, readerContext);
return new FunctionValues() {
@Override
public String strVal(int doc) {
return GeohashUtils.encodeLatLon(latDV.doubleVal(doc), lonDV.doubleVal(doc));
}
@Override
public String toString(int doc) {
StringBuilder sb = new StringBuilder();
sb.append(name()).append('(');
sb.append(latDV.toString(doc)).append(',').append(lonDV.toString(doc));
sb.append(')');
return sb.toString();
}
};
}
// in core/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues vals1 = source1.getValues(context, readerContext);
final FunctionValues vals2 = source2.getValues(context, readerContext);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return distance(doc, vals1, vals2);
}
@Override
public String toString(int doc) {
StringBuilder sb = new StringBuilder();
sb.append(name()).append('(').append(power).append(',');
sb.append(vals1.toString(doc)).append(',');
sb.append(vals2.toString(doc));
sb.append(')');
return sb.toString();
}
};
}
// in core/src/java/org/apache/solr/search/function/distance/VectorDistanceFunction.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
source1.createWeight(context, searcher);
source2.createWeight(context, searcher);
}
// in core/src/java/org/apache/solr/search/function/distance/HaversineFunction.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues vals1 = p1.getValues(context, readerContext);
final FunctionValues vals2 = p2.getValues(context, readerContext);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return distance(doc, vals1, vals2);
}
@Override
public String toString(int doc) {
StringBuilder sb = new StringBuilder();
sb.append(name()).append('(');
sb.append(vals1.toString(doc)).append(',').append(vals2.toString(doc));
sb.append(')');
return sb.toString();
}
};
}
// in core/src/java/org/apache/solr/search/function/distance/HaversineFunction.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
p1.createWeight(context, searcher);
p2.createWeight(context, searcher);
}
// in core/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues latVals = latSource.getValues(context, readerContext);
final FunctionValues lonVals = lonSource.getValues(context, readerContext);
final double latCenterRad = this.latCenter * DEGREES_TO_RADIANS;
final double lonCenterRad = this.lonCenter * DEGREES_TO_RADIANS;
final double latCenterRad_cos = this.latCenterRad_cos;
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
double latRad = latVals.doubleVal(doc) * DEGREES_TO_RADIANS;
double lonRad = lonVals.doubleVal(doc) * DEGREES_TO_RADIANS;
double diffX = latCenterRad - latRad;
double diffY = lonCenterRad - lonRad;
double hsinX = Math.sin(diffX * 0.5);
double hsinY = Math.sin(diffY * 0.5);
double h = hsinX * hsinX +
(latCenterRad_cos * Math.cos(latRad) * hsinY * hsinY);
return (EARTH_MEAN_DIAMETER * Math.atan2(Math.sqrt(h), Math.sqrt(1 - h)));
}
@Override
public String toString(int doc) {
return name() + '(' + latVals.toString(doc) + ',' + lonVals.toString(doc) + ',' + latCenter + ',' + lonCenter + ')';
}
};
}
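doubleVal() above is the haversine great-circle formula with the query point's latitude cosine precomputed; multiplying by EARTH_MEAN_DIAMETER turns the angle into a distance. A self-contained version of the same computation (the diameter value below is an assumed kilometer approximation, not Solr's constant):
// Illustrative sketch, not Solr source.
public class HaversineSketch {
  static final double EARTH_MEAN_DIAMETER_KM = 2 * 6371.0; // assumed value
  static double distanceKm(double lat1Deg, double lon1Deg,
                           double lat2Deg, double lon2Deg) {
    double lat1 = Math.toRadians(lat1Deg), lat2 = Math.toRadians(lat2Deg);
    double hsinX = Math.sin((lat1 - lat2) * 0.5);
    double hsinY = Math.sin(Math.toRadians(lon1Deg - lon2Deg) * 0.5);
    double h = hsinX * hsinX + Math.cos(lat1) * Math.cos(lat2) * hsinY * hsinY;
    return EARTH_MEAN_DIAMETER_KM * Math.atan2(Math.sqrt(h), Math.sqrt(1 - h));
  }
  public static void main(String[] args) {
    System.out.println(distanceKm(52.52, 13.405, 48.8566, 2.3522)); // Berlin-Paris, ~880
  }
}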
// in core/src/java/org/apache/solr/search/function/distance/HaversineConstFunction.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
latSource.createWeight(context, searcher);
lonSource.createWeight(context, searcher);
}
// in core/src/java/org/apache/solr/search/function/FileFloatSource.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final int off = readerContext.docBase;
IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(readerContext);
final float[] arr = getCachedFloats(topLevelContext.reader());
return new FloatDocValues(this) {
@Override
public float floatVal(int doc) {
return arr[doc + off];
}
@Override
public Object objectVal(int doc) {
return floatVal(doc); // TODO: keep track of missing values
}
};
}
// in core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
@Override
public DocIdSet getDocIdSet(final Map context, final AtomicReaderContext readerContext, Bits acceptDocs) throws IOException {
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return valueSource.getValues(context, readerContext).getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
}
@Override
public Bits bits() throws IOException {
return null; // don't use random access
}
}, acceptDocs);
}
// in core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
@Override
public DocIdSetIterator iterator() throws IOException {
return valueSource.getValues(context, readerContext).getRangeScorer(readerContext.reader(), lowerVal, upperVal, includeLower, includeUpper);
}
// in core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
@Override
public Bits bits() throws IOException {
return null; // don't use random access
}
// in core/src/java/org/apache/solr/search/function/ValueSourceRangeFilter.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
valueSource.createWeight(context, searcher);
}
// in core/src/java/org/apache/solr/search/SortedIntDocSet.java
@Override
public Filter getTopFilter() {
return new Filter() {
int lastEndIdx = 0;
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
AtomicReader reader = context.reader();
// all Solr DocSets that are used as filters only include live docs
final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
final int base = context.docBase;
final int maxDoc = reader.maxDoc();
final int max = base + maxDoc; // one past the max doc in this segment.
int sidx = Math.max(0,lastEndIdx);
if (sidx > 0 && docs[sidx-1] >= base) {
// oops, the lastEndIdx isn't correct... we must have been used
// in a multi-threaded context, or the indexreaders are being
// used out-of-order. start at 0.
sidx = 0;
}
if (sidx < docs.length && docs[sidx] < base) {
// if docs[sidx] is < base, we need to seek to find the real start.
sidx = findIndex(docs, base, sidx, docs.length-1);
}
final int startIdx = sidx;
// Largest possible end index is limited to the start index
// plus the number of docs contained in the segment. Subtract 1 since
// the end index is inclusive.
int eidx = Math.min(docs.length, startIdx + maxDoc) - 1;
// find the real end
eidx = findIndex(docs, max, startIdx, eidx) - 1;
final int endIdx = eidx;
lastEndIdx = endIdx;
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int idx = startIdx;
int adjustedDoc = -1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
return adjustedDoc = (idx > endIdx) ? NO_MORE_DOCS : (docs[idx++] - base);
}
@Override
public int advance(int target) throws IOException {
if (idx > endIdx || target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
target += base;
// probe next
int rawDoc = docs[idx++];
if (rawDoc >= target) return adjustedDoc=rawDoc-base;
int high = endIdx;
// TODO: probe more before resorting to binary search?
// binary search
while (idx <= high) {
int mid = (idx+high) >>> 1;
rawDoc = docs[mid];
if (rawDoc < target) {
idx = mid+1;
}
else if (rawDoc > target) {
high = mid-1;
}
else {
idx=mid+1;
return adjustedDoc=rawDoc - base;
}
}
// low is on the insertion point...
if (idx <= endIdx) {
return adjustedDoc = docs[idx++] - base;
} else {
return adjustedDoc=NO_MORE_DOCS;
}
}
};
}
@Override
public boolean isCacheable() {
return true;
}
@Override
public Bits bits() throws IOException {
// random access is expensive for this set
return null;
}
}, acceptDocs2);
}
};
}
// in core/src/java/org/apache/solr/search/SortedIntDocSet.java
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, final Bits acceptDocs) throws IOException {
AtomicReader reader = context.reader();
// all Solr DocSets that are used as filters only include live docs
final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
final int base = context.docBase;
final int maxDoc = reader.maxDoc();
final int max = base + maxDoc; // one past the max doc in this segment.
int sidx = Math.max(0,lastEndIdx);
if (sidx > 0 && docs[sidx-1] >= base) {
// oops, the lastEndIdx isn't correct... we must have been used
// in a multi-threaded context, or the indexreaders are being
// used out-of-order. start at 0.
sidx = 0;
}
if (sidx < docs.length && docs[sidx] < base) {
// if docs[sidx] is < base, we need to seek to find the real start.
sidx = findIndex(docs, base, sidx, docs.length-1);
}
final int startIdx = sidx;
// Largest possible end index is limited to the start index
// plus the number of docs contained in the segment. Subtract 1 since
// the end index is inclusive.
int eidx = Math.min(docs.length, startIdx + maxDoc) - 1;
// find the real end
eidx = findIndex(docs, max, startIdx, eidx) - 1;
final int endIdx = eidx;
lastEndIdx = endIdx;
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int idx = startIdx;
int adjustedDoc = -1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
return adjustedDoc = (idx > endIdx) ? NO_MORE_DOCS : (docs[idx++] - base);
}
@Override
public int advance(int target) throws IOException {
if (idx > endIdx || target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
target += base;
// probe next
int rawDoc = docs[idx++];
if (rawDoc >= target) return adjustedDoc=rawDoc-base;
int high = endIdx;
// TODO: probe more before resorting to binary search?
// binary search
while (idx <= high) {
int mid = (idx+high) >>> 1;
rawDoc = docs[mid];
if (rawDoc < target) {
idx = mid+1;
}
else if (rawDoc > target) {
high = mid-1;
}
else {
idx=mid+1;
return adjustedDoc=rawDoc - base;
}
}
// low is on the insertion point...
if (idx <= endIdx) {
return adjustedDoc = docs[idx++] - base;
} else {
return adjustedDoc=NO_MORE_DOCS;
}
}
};
}
@Override
public boolean isCacheable() {
return true;
}
@Override
public Bits bits() throws IOException {
// random access is expensive for this set
return null;
}
}, acceptDocs2);
}
// in core/src/java/org/apache/solr/search/SortedIntDocSet.java
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int idx = startIdx;
int adjustedDoc = -1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
return adjustedDoc = (idx > endIdx) ? NO_MORE_DOCS : (docs[idx++] - base);
}
@Override
public int advance(int target) throws IOException {
if (idx > endIdx || target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
target += base;
// probe next
int rawDoc = docs[idx++];
if (rawDoc >= target) return adjustedDoc=rawDoc-base;
int high = endIdx;
// TODO: probe more before resorting to binary search?
// binary search
while (idx <= high) {
int mid = (idx+high) >>> 1;
rawDoc = docs[mid];
if (rawDoc < target) {
idx = mid+1;
}
else if (rawDoc > target) {
high = mid-1;
}
else {
idx=mid+1;
return adjustedDoc=rawDoc - base;
}
}
// low is on the insertion point...
if (idx <= endIdx) {
return adjustedDoc = docs[idx++] - base;
} else {
return adjustedDoc=NO_MORE_DOCS;
}
}
};
}
// in core/src/java/org/apache/solr/search/SortedIntDocSet.java
@Override
public int nextDoc() throws IOException {
return adjustedDoc = (idx > endIdx) ? NO_MORE_DOCS : (docs[idx++] - base);
}
// in core/src/java/org/apache/solr/search/SortedIntDocSet.java
@Override
public int advance(int target) throws IOException {
if (idx > endIdx || target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
target += base;
// probe next
int rawDoc = docs[idx++];
if (rawDoc >= target) return adjustedDoc=rawDoc-base;
int high = endIdx;
// TODO: probe more before resorting to binary search?
// binary search
while (idx <= high) {
int mid = (idx+high) >>> 1;
rawDoc = docs[mid];
if (rawDoc < target) {
idx = mid+1;
}
else if (rawDoc > target) {
high = mid-1;
}
else {
idx=mid+1;
return adjustedDoc=rawDoc - base;
}
}
// low is on the insertion point...
if (idx <= endIdx) {
return adjustedDoc = docs[idx++] - base;
} else {
return adjustedDoc=NO_MORE_DOCS;
}
}
// in core/src/java/org/apache/solr/search/SortedIntDocSet.java
@Override
public Bits bits() throws IOException {
// random access is expensive for this set
return null;
}
// in core/src/java/org/apache/solr/search/LuceneQueryOptimizer.java
public TopDocs optimize(BooleanQuery original,
SolrIndexSearcher searcher,
int numHits,
Query[] queryOut,
Filter[] filterOut
)
throws IOException {
BooleanQuery query = new BooleanQuery();
BooleanQuery filterQuery = null;
for (BooleanClause c : original.clauses()) {
/***
System.out.println("required="+c.required);
System.out.println("boost="+c.query.getBoost());
System.out.println("isTermQuery="+(c.query instanceof TermQuery));
if (c.query instanceof TermQuery) {
System.out.println("term="+((TermQuery)c.query).getTerm());
System.out.println("docFreq="+searcher.docFreq(((TermQuery)c.query).getTerm()));
}
***/
Query q = c.getQuery();
if (c.isRequired() // required
&& q.getBoost() == 0.0f // boost is zero
&& q instanceof TermQuery // TermQuery
&& (searcher.docFreq(((TermQuery)q).getTerm())
/ (float)searcher.maxDoc()) >= threshold) { // check threshold
if (filterQuery == null)
filterQuery = new BooleanQuery();
filterQuery.add(q, BooleanClause.Occur.MUST); // filter it
//System.out.println("WooHoo... qualified to be hoisted to a filter!");
} else {
query.add(c); // query it
}
}
Filter filter = null;
if (filterQuery != null) {
synchronized (cache) { // check cache
filter = (Filter)cache.get(filterQuery);
}
if (filter == null) { // miss
filter = new CachingWrapperFilter(new QueryWrapperFilter(filterQuery)); // construct new entry
synchronized (cache) {
cache.put(filterQuery, filter); // cache it
}
}
}
// YCS: added code to pass out optimized query and filter
// so they can be used with Hits
if (queryOut != null && filterOut != null) {
queryOut[0] = query; filterOut[0] = filter;
return null;
} else {
return searcher.search(query, filter, numHits);
}
}
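optimize() above hoists a required, zero-boost TermQuery into a shared cached filter once its docFreq/maxDoc ratio reaches the threshold, since such high-frequency terms contribute little to scoring but much to query cost. A sketch of just the ratio test, with illustrative numbers:
// Sketch of the hoisting decision above: a required TermQuery matching a
// large enough fraction of the index is cheaper as a cached filter than as
// a scored clause. The threshold and counts here are illustrative.
public class FilterHoistSketch {
  public static void main(String[] args) {
    float threshold = 0.05f;   // e.g. hoist terms matching >= 5% of docs
    int maxDoc = 1_000_000;
    int docFreq = 80_000;      // a "type:product"-style common term
    boolean hoist = (docFreq / (float) maxDoc) >= threshold;
    System.out.println(hoist ? "hoist to filter" : "keep in query"); // hoist
  }
}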
// in core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
@Override
public FieldComparator newComparator(String fieldname, int numHits, int sortPos, boolean reversed) throws IOException {
return new TermOrdValComparator_SML(numHits, fieldname, sortPos, reversed, missingValueProxy);
}
// in core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
return TermOrdValComparator_SML.createComparator(context.reader(), this);
}
// in core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
@Override
public FieldComparator setNextReader(AtomicReaderContext context) throws IOException {
return TermOrdValComparator_SML.createComparator(context.reader(), parent);
}
// in core/src/java/org/apache/solr/search/MissingStringLastComparatorSource.java
public static FieldComparator createComparator(AtomicReader reader, TermOrdValComparator_SML parent) throws IOException {
parent.termsIndex = FieldCache.DEFAULT.getTermsIndex(reader, parent.field);
final PackedInts.Reader docToOrd = parent.termsIndex.getDocToOrd();
PerSegmentComparator perSegComp = null;
if (docToOrd.hasArray()) {
final Object arr = docToOrd.getArray();
if (arr instanceof byte[]) {
perSegComp = new ByteOrdComparator((byte[]) arr, parent);
} else if (arr instanceof short[]) {
perSegComp = new ShortOrdComparator((short[]) arr, parent);
} else if (arr instanceof int[]) {
perSegComp = new IntOrdComparator((int[]) arr, parent);
}
}
if (perSegComp == null) {
perSegComp = new AnyOrdComparator(docToOrd, parent);
}
if (perSegComp.bottomSlot != -1) {
perSegComp.setBottom(perSegComp.bottomSlot);
}
parent.current = perSegComp;
return perSegComp;
}
// in core/src/java/org/apache/solr/search/WrappedQuery.java
@Override
public Weight createWeight(IndexSearcher searcher) throws IOException {
return q.createWeight(searcher);
}
// in core/src/java/org/apache/solr/search/WrappedQuery.java
@Override
public Query rewrite(IndexReader reader) throws IOException {
// currently no need to continue wrapping at this point.
return q.rewrite(reader);
}
// in core/src/java/org/apache/solr/search/DocSetBase.java
public Filter getTopFilter() {
final OpenBitSet bs = getBits();
return new Filter() {
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) throws IOException {
AtomicReader reader = context.reader();
// all Solr DocSets that are used as filters only include live docs
final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
if (context.isTopLevel) {
return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
}
final int base = context.docBase;
final int maxDoc = reader.maxDoc();
final int max = base + maxDoc; // one past the max doc in this segment.
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int pos=base-1;
int adjustedDoc=-1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
};
}
@Override
public boolean isCacheable() {
return true;
}
@Override
public Bits bits() throws IOException {
// sparse filters should not use random access
return null;
}
}, acceptDocs2);
}
};
}
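getTopFilter() above keeps one top-level bit set for the whole index and, for non-top-level contexts, re-bases it into segment space by walking nextSetBit between base and max. A sketch of that projection, with java.util.BitSet standing in for Lucene's OpenBitSet:
import java.util.BitSet;

// Sketch of the per-segment projection above: walk a top-level bit set with
// nextSetBit and report hits relative to the segment's docBase. The bit
// positions and segment bounds are illustrative.
public class TopFilterSketch {
  public static void main(String[] args) {
    BitSet bs = new BitSet();
    bs.set(5); bs.set(12); bs.set(27);

    int base = 10, maxDoc = 10;      // segment covers global ids [10, 20)
    int max = base + maxDoc;

    int pos = base - 1;
    while (true) {
      pos = bs.nextSetBit(pos + 1);  // returns -1 when exhausted
      if (pos < 0 || pos >= max) break;
      System.out.println("segment-relative doc: " + (pos - base)); // prints 2
    }
  }
}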
// in core/src/java/org/apache/solr/search/DocSetBase.java
@Override
public DocIdSet getDocIdSet(final AtomicReaderContext context, Bits acceptDocs) throws IOException {
AtomicReader reader = context.reader();
// all Solr DocSets that are used as filters only include live docs
final Bits acceptDocs2 = acceptDocs == null ? null : (reader.getLiveDocs() == acceptDocs ? null : acceptDocs);
if (context.isTopLevel) {
return BitsFilteredDocIdSet.wrap(bs, acceptDocs);
}
final int base = context.docBase;
final int maxDoc = reader.maxDoc();
final int max = base + maxDoc; // one past the max doc in this segment.
return BitsFilteredDocIdSet.wrap(new DocIdSet() {
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int pos=base-1;
int adjustedDoc=-1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
};
}
@Override
public boolean isCacheable() {
return true;
}
@Override
public Bits bits() throws IOException {
// sparse filters should not use random access
return null;
}
}, acceptDocs2);
}
// in core/src/java/org/apache/solr/search/DocSetBase.java
@Override
public DocIdSetIterator iterator() throws IOException {
return new DocIdSetIterator() {
int pos=base-1;
int adjustedDoc=-1;
@Override
public int docID() {
return adjustedDoc;
}
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
};
}
// in core/src/java/org/apache/solr/search/DocSetBase.java
@Override
public int nextDoc() throws IOException {
pos = bs.nextSetBit(pos+1);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
// in core/src/java/org/apache/solr/search/DocSetBase.java
@Override
public int advance(int target) throws IOException {
if (target==NO_MORE_DOCS) return adjustedDoc=NO_MORE_DOCS;
pos = bs.nextSetBit(target+base);
return adjustedDoc = (pos>=0 && pos<max) ? pos-base : NO_MORE_DOCS;
}
// in core/src/java/org/apache/solr/search/DocSetBase.java
@Override
public Bits bits() throws IOException {
// sparse filters should not use random access
return null;
}
// in core/src/java/org/apache/solr/search/DocSetCollector.java
@Override
public void collect(int doc) throws IOException {
doc += base;
// optimistically collect the first docs in an array
// in case the total number will be small enough to represent
// as a small set like SortedIntDocSet instead...
// Storing in this array will be quicker to convert
// than scanning through a potentially huge bit vector.
// FUTURE: when search methods all start returning docs in order, maybe
// we could have a ListDocSet() and use the collected array directly.
if (pos < scratch.length) {
scratch[pos]=doc;
} else {
// this conditional could be removed if BitSet was preallocated, but that
// would take up more memory, and add more GC time...
if (bits==null) bits = new OpenBitSet(maxDoc);
bits.fastSet(doc);
}
pos++;
}
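collect() above buffers the first doc ids in a plain array so that small result sets can stay as a SortedIntDocSet, and only allocates a bit set once that buffer overflows. A dependency-free sketch of the same spill strategy, with the scratch size shrunk to force the spill:
import java.util.BitSet;

// Sketch of the collect() strategy above: buffer the first few ids in a
// small array, spill everything after that into a lazily-allocated bit set.
// The scratch size (4) is tiny here only to demonstrate the spill.
public class DocSetCollectorSketch {
  public static void main(String[] args) {
    int[] scratch = new int[4];
    BitSet bits = null;
    int pos = 0;

    for (int doc : new int[]{1, 5, 9, 23, 57, 58}) {
      if (pos < scratch.length) {
        scratch[pos] = doc;                     // small-set fast path
      } else {
        if (bits == null) bits = new BitSet();  // allocate on first spill
        bits.set(doc);
      }
      pos++;
    }
    // prints "total=6 spilled=2"
    System.out.println("total=" + pos + " spilled=" + (bits == null ? 0 : bits.cardinality()));
  }
}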
// in core/src/java/org/apache/solr/search/DocSetCollector.java
@Override
public void setScorer(Scorer scorer) throws IOException {
}
// in core/src/java/org/apache/solr/search/DocSetCollector.java
@Override
public void setNextReader(AtomicReaderContext context) throws IOException {
this.base = context.docBase;
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
return new LongDocValues(this) {
@Override
public float floatVal(int doc) {
return fv;
}
@Override
public int intVal(int doc) {
return (int) constant;
}
@Override
public long longVal(int doc) {
return constant;
}
@Override
public double doubleVal(int doc) {
return dv;
}
@Override
public String toString(int doc) {
return description();
}
};
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues vals = source.getValues(context, readerContext);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return func(doc, vals);
}
@Override
public String toString(int doc) {
return name() + '(' + vals.toString(doc) + ')';
}
};
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
final FunctionValues aVals = a.getValues(context, readerContext);
final FunctionValues bVals = b.getValues(context, readerContext);
return new DoubleDocValues(this) {
@Override
public double doubleVal(int doc) {
return func(doc, aVals, bVals);
}
@Override
public String toString(int doc) {
return name() + '(' + aVals.toString(doc) + ',' + bVals.toString(doc) + ')';
}
};
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
return new BoolDocValues(this) {
@Override
public boolean boolVal(int doc) {
return constant;
}
};
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public FunctionValues getValues(Map context, AtomicReaderContext readerContext) throws IOException {
if (context.get(this) == null) {
SolrRequestInfo requestInfo = SolrRequestInfo.getRequestInfo();
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "testfunc: unweighted value source detected. delegate="+source + " request=" + (requestInfo==null ? "null" : requestInfo.getReq()));
}
return source.getValues(context, readerContext);
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public void createWeight(Map context, IndexSearcher searcher) throws IOException {
context.put(this, this);
}
// in core/src/java/org/apache/solr/search/ValueSourceParser.java
@Override
public SortField getSortField(boolean reverse) throws IOException {
return super.getSortField(reverse);
}
// in core/src/java/org/apache/solr/search/SolrFilter.java
@Override
public DocIdSet getDocIdSet(AtomicReaderContext context, Bits acceptDocs) throws IOException {
return getDocIdSet(null, context, acceptDocs);
}
// in core/src/java/org/apache/solr/search/FastLRUCache.java
public void warm(SolrIndexSearcher searcher, SolrCache old) throws IOException {
if (regenerator == null) return;
long warmingStartTime = System.currentTimeMillis();
FastLRUCache other = (FastLRUCache) old;
// warm entries
if (isAutowarmingOn()) {
int sz = autowarm.getWarmCount(other.size());
Map items = other.cache.getLatestAccessedItems(sz);
Map.Entry[] itemsArr = new Map.Entry[items.size()];
int counter = 0;
for (Object mapEntry : items.entrySet()) {
itemsArr[counter++] = (Map.Entry) mapEntry;
}
for (int i = itemsArr.length - 1; i >= 0; i--) {
try {
boolean continueRegen = regenerator.regenerateItem(searcher,
this, old, itemsArr[i].getKey(), itemsArr[i].getValue());
if (!continueRegen) break;
}
catch (Throwable e) {
SolrException.log(log, "Error during auto-warming of key:" + itemsArr[i].getKey(), e);
}
}
}
warmupTime = System.currentTimeMillis() - warmingStartTime;
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
protected Highlighter getPhraseHighlighter(Query query, String fieldName, SolrQueryRequest request, CachingTokenFilter tokenStream) throws IOException {
SolrParams params = request.getParams();
Highlighter highlighter = null;
highlighter = new Highlighter(
getFormatter(fieldName, params),
getEncoder(fieldName, params),
getSpanQueryScorer(query, fieldName, tokenStream, request));
highlighter.setTextFragmenter(getFragmenter(fieldName, params));
return highlighter;
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
private QueryScorer getSpanQueryScorer(Query query, String fieldName, TokenStream tokenStream, SolrQueryRequest request) throws IOException {
boolean reqFieldMatch = request.getParams().getFieldBool(fieldName, HighlightParams.FIELD_MATCH, false);
Boolean highlightMultiTerm = request.getParams().getBool(HighlightParams.HIGHLIGHT_MULTI_TERM, true);
if(highlightMultiTerm == null) {
highlightMultiTerm = false;
}
QueryScorer scorer;
if (reqFieldMatch) {
scorer = new QueryScorer(query, fieldName);
}
else {
scorer = new QueryScorer(query, null);
}
scorer.setExpandMultiTermQuery(highlightMultiTerm);
return scorer;
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
private void doHighlightingByHighlighter( Query query, SolrQueryRequest req, NamedList docSummaries,
int docId, Document doc, String fieldName ) throws IOException {
final SolrIndexSearcher searcher = req.getSearcher();
final IndexSchema schema = searcher.getSchema();
// TODO: Currently in trunk highlighting numeric fields is broken (Lucene) -
// so we disable them until fixed (see LUCENE-3080)!
// BEGIN: Hack
final SchemaField schemaField = schema.getFieldOrNull(fieldName);
if (schemaField != null && (
(schemaField.getType() instanceof org.apache.solr.schema.TrieField) ||
(schemaField.getType() instanceof org.apache.solr.schema.TrieDateField)
)) return;
// END: Hack
SolrParams params = req.getParams();
IndexableField[] docFields = doc.getFields(fieldName);
List<String> listFields = new ArrayList<String>();
for (IndexableField field : docFields) {
listFields.add(field.stringValue());
}
String[] docTexts = (String[]) listFields.toArray(new String[listFields.size()]);
// according to Document javadoc, doc.getValues() never returns null. check empty instead of null
if (docTexts.length == 0) return;
TokenStream tstream = null;
int numFragments = getMaxSnippets(fieldName, params);
boolean mergeContiguousFragments = isMergeContiguousFragments(fieldName, params);
String[] summaries = null;
List<TextFragment> frags = new ArrayList<TextFragment>();
TermOffsetsTokenStream tots = null; // to be non-null iff we're using TermOffsets optimization
try {
TokenStream tvStream = TokenSources.getTokenStream(searcher.getIndexReader(), docId, fieldName);
if (tvStream != null) {
tots = new TermOffsetsTokenStream(tvStream);
}
}
catch (IllegalArgumentException e) {
// No problem. But we can't use TermOffsets optimization.
}
for (int j = 0; j < docTexts.length; j++) {
if( tots != null ) {
// if we're using TermOffsets optimization, then get the next
// field value's TokenStream (i.e. get field j's TokenStream) from tots:
tstream = tots.getMultiValuedTokenStream( docTexts[j].length() );
} else {
// fall back to analyzer
tstream = createAnalyzerTStream(schema, fieldName, docTexts[j]);
}
int maxCharsToAnalyze = params.getFieldInt(fieldName,
HighlightParams.MAX_CHARS,
Highlighter.DEFAULT_MAX_CHARS_TO_ANALYZE);
Highlighter highlighter;
if (Boolean.valueOf(req.getParams().get(HighlightParams.USE_PHRASE_HIGHLIGHTER, "true"))) {
if (maxCharsToAnalyze < 0) {
tstream = new CachingTokenFilter(tstream);
} else {
tstream = new CachingTokenFilter(new OffsetLimitTokenFilter(tstream, maxCharsToAnalyze));
}
// get highlighter
highlighter = getPhraseHighlighter(query, fieldName, req, (CachingTokenFilter) tstream);
// after highlighter initialization, reset tstream since construction of highlighter already used it
tstream.reset();
}
else {
// use "the old way"
highlighter = getHighlighter(query, fieldName, req);
}
if (maxCharsToAnalyze < 0) {
highlighter.setMaxDocCharsToAnalyze(docTexts[j].length());
} else {
highlighter.setMaxDocCharsToAnalyze(maxCharsToAnalyze);
}
try {
TextFragment[] bestTextFragments = highlighter.getBestTextFragments(tstream, docTexts[j], mergeContiguousFragments, numFragments);
for (int k = 0; k < bestTextFragments.length; k++) {
if ((bestTextFragments[k] != null) && (bestTextFragments[k].getScore() > 0)) {
frags.add(bestTextFragments[k]);
}
}
} catch (InvalidTokenOffsetsException e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
}
}
// sort such that the fragments with the highest score come first
Collections.sort(frags, new Comparator<TextFragment>() {
public int compare(TextFragment arg0, TextFragment arg1) {
return Math.round(arg1.getScore() - arg0.getScore());
}
});
// convert fragments back into text
// TODO: we can include score and position information in output as snippet attributes
if (frags.size() > 0) {
ArrayList<String> fragTexts = new ArrayList<String>();
for (TextFragment fragment: frags) {
if ((fragment != null) && (fragment.getScore() > 0)) {
fragTexts.add(fragment.toString());
}
if (fragTexts.size() >= numFragments) break;
}
summaries = fragTexts.toArray(new String[0]);
if (summaries.length > 0)
docSummaries.add(fieldName, summaries);
}
// no summaries were made, copy text from the alternate field
if (summaries == null || summaries.length == 0) {
alternateField( docSummaries, params, doc, fieldName );
}
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
private void doHighlightingByFastVectorHighlighter( FastVectorHighlighter highlighter, FieldQuery fieldQuery,
SolrQueryRequest req, NamedList docSummaries, int docId, Document doc,
String fieldName ) throws IOException {
SolrParams params = req.getParams();
SolrFragmentsBuilder solrFb = getSolrFragmentsBuilder( fieldName, params );
String[] snippets = highlighter.getBestFragments( fieldQuery, req.getSearcher().getIndexReader(), docId, fieldName,
params.getFieldInt( fieldName, HighlightParams.FRAGSIZE, 100 ),
params.getFieldInt( fieldName, HighlightParams.SNIPPETS, 1 ),
getFragListBuilder( fieldName, params ),
getFragmentsBuilder( fieldName, params ),
solrFb.getPreTags( params, fieldName ),
solrFb.getPostTags( params, fieldName ),
getEncoder( fieldName, params ) );
if( snippets != null && snippets.length > 0 )
docSummaries.add( fieldName, snippets );
else
alternateField( docSummaries, params, doc, fieldName );
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
private TokenStream createAnalyzerTStream(IndexSchema schema, String fieldName, String docText) throws IOException {
TokenStream tstream;
TokenStream ts = schema.getAnalyzer().tokenStream(fieldName, new StringReader(docText));
ts.reset();
tstream = new TokenOrderingFilter(ts, 10);
return tstream;
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
@Override
public boolean incrementToken() throws IOException {
while (!done && queue.size() < windowSize) {
if (!input.incrementToken()) {
done = true;
break;
}
// reverse iterating for better efficiency since we know the
// list is already sorted, and most token start offsets will be too.
ListIterator<OrderedToken> iter = queue.listIterator(queue.size());
while(iter.hasPrevious()) {
if (offsetAtt.startOffset() >= iter.previous().startOffset) {
// insertion will be before what next() would return (what
// we just compared against), so move back one so the insertion
// will be after.
iter.next();
break;
}
}
OrderedToken ot = new OrderedToken();
ot.state = captureState();
ot.startOffset = offsetAtt.startOffset();
iter.add(ot);
}
if (queue.isEmpty()) {
return false;
} else {
restoreState(queue.removeFirst().state);
return true;
}
}
// in core/src/java/org/apache/solr/highlight/DefaultSolrHighlighter.java
@Override
public boolean incrementToken() throws IOException {
while( true ){
if( bufferedToken == null ) {
if (!bufferedTokenStream.incrementToken())
return false;
bufferedToken = bufferedTokenStream.captureState();
bufferedStartOffset = bufferedOffsetAtt.startOffset();
bufferedEndOffset = bufferedOffsetAtt.endOffset();
}
if( startOffset <= bufferedStartOffset &&
bufferedEndOffset <= endOffset ){
restoreState(bufferedToken);
bufferedToken = null;
offsetAtt.setOffset( offsetAtt.startOffset() - startOffset, offsetAtt.endOffset() - startOffset );
return true;
}
else if( bufferedEndOffset > endOffset ){
startOffset += length + 1;
return false;
}
bufferedToken = null;
}
}
// in core/src/java/org/apache/solr/spelling/SpellingQueryConverter.java
protected void analyze(Collection<Token> result, Reader text, int offset) throws IOException {
TokenStream stream = analyzer.tokenStream("", text);
// TODO: support custom attributes
CharTermAttribute termAtt = stream.addAttribute(CharTermAttribute.class);
FlagsAttribute flagsAtt = stream.addAttribute(FlagsAttribute.class);
TypeAttribute typeAtt = stream.addAttribute(TypeAttribute.class);
PayloadAttribute payloadAtt = stream.addAttribute(PayloadAttribute.class);
PositionIncrementAttribute posIncAtt = stream.addAttribute(PositionIncrementAttribute.class);
OffsetAttribute offsetAtt = stream.addAttribute(OffsetAttribute.class);
stream.reset();
while (stream.incrementToken()) {
Token token = new Token();
token.copyBuffer(termAtt.buffer(), 0, termAtt.length());
token.setStartOffset(offset + offsetAtt.startOffset());
token.setEndOffset(offset + offsetAtt.endOffset());
token.setFlags(flagsAtt.getFlags());
token.setType(typeAtt.type());
token.setPayload(payloadAtt.getPayload());
token.setPositionIncrement(posIncAtt.getPositionIncrement());
result.add(token);
}
stream.end();
stream.close();
}
// in core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java
@Override
public SpellingResult getSuggestions(SpellingOptions options) throws IOException {
SpellingResult result = new SpellingResult(options.tokens);
IndexReader reader = determineReader(options.reader);
Term term = field != null ? new Term(field, "") : null;
float theAccuracy = (options.accuracy == Float.MIN_VALUE) ? spellChecker.getAccuracy() : options.accuracy;
int count = Math.max(options.count, AbstractLuceneSpellChecker.DEFAULT_SUGGESTION_COUNT);
for (Token token : options.tokens) {
String tokenText = new String(token.buffer(), 0, token.length());
term = new Term(field, tokenText);
int docFreq = 0;
if (reader != null) {
docFreq = reader.docFreq(term);
}
String[] suggestions = spellChecker.suggestSimilar(tokenText,
((options.alternativeTermCount == null || docFreq == 0) ? count
: options.alternativeTermCount), field != null ? reader : null, // workaround LUCENE-1295
field, options.suggestMode, theAccuracy);
if (suggestions.length == 1 && suggestions[0].equals(tokenText)
&& options.alternativeTermCount == null) {
// These are spelled the same, continue on
continue;
}
// If considering alternatives to "correctly-spelled" terms, then add the
// original as a viable suggestion.
if (options.alternativeTermCount != null && docFreq > 0) {
boolean foundOriginal = false;
String[] suggestionsWithOrig = new String[suggestions.length + 1];
for (int i = 0; i < suggestions.length; i++) {
if (suggestions[i].equals(tokenText)) {
foundOriginal = true;
break;
}
suggestionsWithOrig[i + 1] = suggestions[i];
}
if (!foundOriginal) {
suggestionsWithOrig[0] = tokenText;
suggestions = suggestionsWithOrig;
}
}
if (options.extendedResults == true && reader != null && field != null) {
result.addFrequency(token, docFreq);
int countLimit = Math.min(options.count, suggestions.length);
if(countLimit>0)
{
for (int i = 0; i < countLimit; i++) {
term = new Term(field, suggestions[i]);
result.add(token, suggestions[i], reader.docFreq(term));
}
} else {
List<String> suggList = Collections.emptyList();
result.add(token, suggList);
}
} else {
if (suggestions.length > 0) {
List<String> suggList = Arrays.asList(suggestions);
if (suggestions.length > options.count) {
suggList = suggList.subList(0, options.count);
}
result.add(token, suggList);
} else {
List<String> suggList = Collections.emptyList();
result.add(token, suggList);
}
}
}
return result;
}
// in core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java
@Override
public void reload(SolrCore core, SolrIndexSearcher searcher) throws IOException {
spellChecker.setSpellIndex(index);
}
// in core/src/java/org/apache/solr/spelling/AbstractLuceneSpellChecker.java
protected void initIndex() throws IOException {
if (indexDir != null) {
index = FSDirectory.open(new File(indexDir));
} else {
index = new RAMDirectory();
}
}
// in core/src/java/org/apache/solr/spelling/suggest/Suggester.java
@Override
public void reload(SolrCore core, SolrIndexSearcher searcher) throws IOException {
LOG.info("reload()");
if (dictionary == null && storeDir != null) {
// this may be a firstSearcher event, try loading it
if (lookup.load(new FileInputStream(new File(storeDir, factory.storeFileName())))) {
return; // loaded ok
}
LOG.debug("load failed, need to build Lookup again");
}
// loading was unsuccessful - build it again
build(core, searcher);
}
// in core/src/java/org/apache/solr/spelling/suggest/Suggester.java
@Override
public SpellingResult getSuggestions(SpellingOptions options) throws IOException {
LOG.debug("getSuggestions: " + options.tokens);
if (lookup == null) {
LOG.info("Lookup is null - invoke spellchecker.build first");
return EMPTY_RESULT;
}
SpellingResult res = new SpellingResult();
CharsRef scratch = new CharsRef();
for (Token t : options.tokens) {
scratch.chars = t.buffer();
scratch.offset = 0;
scratch.length = t.length();
List<LookupResult> suggestions = lookup.lookup(scratch,
(options.suggestMode == SuggestMode.SUGGEST_MORE_POPULAR), options.count);
if (suggestions == null) {
continue;
}
if (options.suggestMode != SuggestMode.SUGGEST_MORE_POPULAR) {
Collections.sort(suggestions);
}
for (LookupResult lr : suggestions) {
res.add(t, lr.key.toString(), (int)lr.value);
}
}
return res;
}
// in core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java
@Override
public void reload(SolrCore core, SolrIndexSearcher searcher)
throws IOException {}
// in core/src/java/org/apache/solr/spelling/DirectSolrSpellChecker.java
@Override
public SpellingResult getSuggestions(SpellingOptions options)
throws IOException {
LOG.debug("getSuggestions: " + options.tokens);
SpellingResult result = new SpellingResult();
float accuracy = (options.accuracy == Float.MIN_VALUE) ? checker.getAccuracy() : options.accuracy;
for (Token token : options.tokens) {
String tokenText = token.toString();
Term term = new Term(field, tokenText);
int freq = options.reader.docFreq(term);
int count = (options.alternativeTermCount != null && freq > 0) ? options.alternativeTermCount: options.count;
SuggestWord[] suggestions = checker.suggestSimilar(term, count,options.reader, options.suggestMode, accuracy);
result.addFrequency(token, freq);
// If considering alternatives to "correctly-spelled" terms, then add the
// original as a viable suggestion.
if (options.alternativeTermCount != null && freq > 0) {
boolean foundOriginal = false;
SuggestWord[] suggestionsWithOrig = new SuggestWord[suggestions.length + 1];
for (int i = 0; i < suggestions.length; i++) {
if (suggestions[i].string.equals(tokenText)) {
foundOriginal = true;
break;
}
suggestionsWithOrig[i + 1] = suggestions[i];
}
if (!foundOriginal) {
SuggestWord orig = new SuggestWord();
orig.freq = freq;
orig.string = tokenText;
suggestionsWithOrig[0] = orig;
suggestions = suggestionsWithOrig;
}
}
if(suggestions.length==0 && freq==0) {
List<String> empty = Collections.emptyList();
result.add(token, empty);
} else {
for (SuggestWord suggestion : suggestions) {
result.add(token, suggestion.string, suggestion.freq);
}
}
}
return result;
}
// in core/src/java/org/apache/solr/spelling/IndexBasedSpellChecker.java
@Override
public void reload(SolrCore core, SolrIndexSearcher searcher) throws IOException {
super.reload(core, searcher);
//reload the source
initSourceReader();
}
// in core/src/java/org/apache/solr/cloud/SyncStrategy.java
private boolean syncWithReplicas(ZkController zkController, SolrCore core,
ZkNodeProps props, String collection, String shardId)
throws MalformedURLException, SolrServerException, IOException {
List<ZkCoreNodeProps> nodes = zkController.getZkStateReader()
.getReplicaProps(collection, shardId,
props.get(ZkStateReader.NODE_NAME_PROP),
props.get(ZkStateReader.CORE_NAME_PROP), ZkStateReader.ACTIVE); // TODO: should there be a state filter?
if (nodes == null) {
// I have no replicas
return true;
}
List<String> syncWith = new ArrayList<String>();
for (ZkCoreNodeProps node : nodes) {
// if we see a leader, must be stale state, and this is the guy that went down
if (!node.getNodeProps().keySet().contains(ZkStateReader.LEADER_PROP)) {
syncWith.add(node.getCoreUrl());
}
}
PeerSync peerSync = new PeerSync(core, syncWith, core.getUpdateHandler().getUpdateLog().numRecordsToKeep);
return peerSync.sync();
}
// in core/src/java/org/apache/solr/cloud/SyncStrategy.java
private void syncToMe(ZkController zkController, String collection,
String shardId, ZkNodeProps leaderProps) throws MalformedURLException,
SolrServerException, IOException {
// sync everyone else
// TODO: we should do this in parallel at least
List<ZkCoreNodeProps> nodes = zkController
.getZkStateReader()
.getReplicaProps(collection, shardId,
leaderProps.get(ZkStateReader.NODE_NAME_PROP),
leaderProps.get(ZkStateReader.CORE_NAME_PROP), ZkStateReader.ACTIVE);
if (nodes == null) {
// System.out.println("I have no replicas");
// I have no replicas
return;
}
//System.out.println("tell my replicas to sync");
ZkCoreNodeProps zkLeader = new ZkCoreNodeProps(leaderProps);
for (ZkCoreNodeProps node : nodes) {
try {
// System.out
// .println("try and ask " + node.getCoreUrl() + " to sync");
log.info("try and ask " + node.getCoreUrl() + " to sync");
requestSync(zkLeader.getCoreUrl(), node.getCoreName());
} catch (Exception e) {
SolrException.log(log, "Error syncing replica to leader", e);
}
}
for(;;) {
ShardResponse srsp = shardHandler.takeCompletedOrError();
if (srsp == null) break;
boolean success = handleResponse(srsp);
//System.out.println("got response:" + success);
if (!success) {
try {
log.info("Sync failed - asking replica to recover.");
//System.out.println("Sync failed - asking replica to recover.");
RequestRecovery recoverRequestCmd = new RequestRecovery();
recoverRequestCmd.setAction(CoreAdminAction.REQUESTRECOVERY);
recoverRequestCmd.setCoreName(((SyncShardRequest)srsp.getShardRequest()).coreName);
HttpSolrServer server = new HttpSolrServer(zkLeader.getBaseUrl());
server.request(recoverRequestCmd);
} catch (Exception e) {
log.info("Could not tell a replica to recover", e);
}
shardHandler.cancelAll();
break;
}
}
}
// in core/src/java/org/apache/solr/cloud/ElectionContext.java
@Override
void runLeaderProcess(boolean weAreReplacement)
throws KeeperException, InterruptedException, IOException {
try {
zkClient.makePath(leaderPath,
leaderProps == null ? null : ZkStateReader.toJSON(leaderProps),
CreateMode.EPHEMERAL, true);
} catch (NodeExistsException e) {
// if a previous leader ephemeral still exists for some reason, try and
// remove it
zkClient.delete(leaderPath, -1, true);
zkClient.makePath(leaderPath,
leaderProps == null ? null : ZkStateReader.toJSON(leaderProps),
CreateMode.EPHEMERAL, true);
}
}
// in core/src/java/org/apache/solr/cloud/ElectionContext.java
@Override
void runLeaderProcess(boolean weAreReplacement)
throws KeeperException, InterruptedException, IOException {
if (cc != null) {
String coreName = leaderProps.get(ZkStateReader.CORE_NAME_PROP);
SolrCore core = null;
try {
// the first time we are run, we will get a startupCore - after that
// we will get null and must use cc.getCore
core = cc.getCore(coreName);
if (core == null) {
cancelElection();
throw new SolrException(ErrorCode.SERVER_ERROR, "Fatal Error, SolrCore not found:" + coreName + " in " + cc.getCoreNames());
}
// should I be leader?
if (weAreReplacement && !shouldIBeLeader(leaderProps)) {
// System.out.println("there is a better leader candidate it appears");
rejoinLeaderElection(leaderSeqPath, core);
return;
}
if (weAreReplacement) {
if (zkClient.exists(leaderPath, true)) {
zkClient.delete(leaderPath, -1, true);
}
// System.out.println("I may be the new Leader:" + leaderPath
// + " - I need to try and sync");
boolean success = syncStrategy.sync(zkController, core, leaderProps);
if (!success && anyoneElseActive()) {
rejoinLeaderElection(leaderSeqPath, core);
return;
}
}
// If I am going to be the leader I have to be active
// System.out.println("I am leader go active");
core.getUpdateHandler().getSolrCoreState().cancelRecovery();
zkController.publish(core.getCoreDescriptor(), ZkStateReader.ACTIVE);
} finally {
if (core != null ) {
core.close();
}
}
}
super.runLeaderProcess(weAreReplacement);
}
// in core/src/java/org/apache/solr/cloud/ElectionContext.java
private void rejoinLeaderElection(String leaderSeqPath, SolrCore core)
throws InterruptedException, KeeperException, IOException {
// remove our ephemeral and re join the election
// System.out.println("sync failed, delete our election node:"
// + leaderSeqPath);
zkController.publish(core.getCoreDescriptor(), ZkStateReader.DOWN);
cancelElection();
core.getUpdateHandler().getSolrCoreState().doRecovery(cc, core.getName());
leaderElector.joinElection(this);
}
// in core/src/java/org/apache/solr/cloud/SolrZkServer.java
@Override
public void parseProperties(Properties zkProp)
throws IOException, ConfigException {
for (Entry<Object, Object> entry : zkProp.entrySet()) {
String key = entry.getKey().toString().trim();
String value = entry.getValue().toString().trim();
if (key.equals("dataDir")) {
dataDir = value;
} else if (key.equals("dataLogDir")) {
dataLogDir = value;
} else if (key.equals("clientPort")) {
setClientPort(Integer.parseInt(value));
} else if (key.equals("tickTime")) {
tickTime = Integer.parseInt(value);
} else if (key.equals("initLimit")) {
initLimit = Integer.parseInt(value);
} else if (key.equals("syncLimit")) {
syncLimit = Integer.parseInt(value);
} else if (key.equals("electionAlg")) {
electionAlg = Integer.parseInt(value);
} else if (key.equals("maxClientCnxns")) {
maxClientCnxns = Integer.parseInt(value);
} else if (key.startsWith("server.")) {
int dot = key.indexOf('.');
long sid = Long.parseLong(key.substring(dot + 1));
String parts[] = value.split(":");
if ((parts.length != 2) && (parts.length != 3)) {
LOG.error(value
+ " does not have the form host:port or host:port:port");
}
InetSocketAddress addr = new InetSocketAddress(parts[0],
Integer.parseInt(parts[1]));
if (parts.length == 2) {
servers.put(Long.valueOf(sid), new QuorumPeer.QuorumServer(sid, addr));
} else if (parts.length == 3) {
InetSocketAddress electionAddr = new InetSocketAddress(
parts[0], Integer.parseInt(parts[2]));
servers.put(Long.valueOf(sid), new QuorumPeer.QuorumServer(sid, addr,
electionAddr));
}
} else if (key.startsWith("group")) {
int dot = key.indexOf('.');
long gid = Long.parseLong(key.substring(dot + 1));
numGroups++;
String parts[] = value.split(":");
for(String s : parts){
long sid = Long.parseLong(s);
if(serverGroup.containsKey(sid))
throw new ConfigException("Server " + sid + " is in multiple groups");
else
serverGroup.put(sid, gid);
}
} else if(key.startsWith("weight")) {
int dot = key.indexOf('.');
long sid = Long.parseLong(key.substring(dot + 1));
serverWeight.put(sid, Long.parseLong(value));
} else {
System.setProperty("zookeeper." + key, value);
}
}
if (dataDir == null) {
throw new IllegalArgumentException("dataDir is not set");
}
if (dataLogDir == null) {
dataLogDir = dataDir;
} else {
if (!new File(dataLogDir).isDirectory()) {
throw new IllegalArgumentException("dataLogDir " + dataLogDir
+ " is missing.");
}
}
if (tickTime == 0) {
throw new IllegalArgumentException("tickTime is not set");
}
if (servers.size() > 1) {
if (initLimit == 0) {
throw new IllegalArgumentException("initLimit is not set");
}
if (syncLimit == 0) {
throw new IllegalArgumentException("syncLimit is not set");
}
/*
* If using FLE, then every server requires a separate election
* port.
*/
if (electionAlg != 0) {
for (QuorumPeer.QuorumServer s : servers.values()) {
if (s.electionAddr == null)
throw new IllegalArgumentException(
"Missing election port for server: " + s.id);
}
}
/*
* Default of quorum config is majority
*/
if(serverGroup.size() > 0){
if(servers.size() != serverGroup.size())
throw new ConfigException("Every server must be in exactly one group");
/*
* The default weight of a server is 1
*/
for(QuorumPeer.QuorumServer s : servers.values()){
if(!serverWeight.containsKey(s.id))
serverWeight.put(s.id, (long) 1);
}
/*
* Set the quorumVerifier to be QuorumHierarchical
*/
quorumVerifier = new QuorumHierarchical(numGroups,
serverWeight, serverGroup);
} else {
/*
* The default QuorumVerifier is QuorumMaj
*/
LOG.info("Defaulting to majority quorums");
quorumVerifier = new QuorumMaj(servers.size());
}
File myIdFile = new File(dataDir, "myid");
if (!myIdFile.exists()) {
///////////////// ADDED FOR SOLR //////
Long myid = getMySeverId();
if (myid != null) {
serverId = myid;
return;
}
if (zkRun == null) return;
//////////////// END ADDED FOR SOLR //////
throw new IllegalArgumentException(myIdFile.toString()
+ " file is missing");
}
BufferedReader br = new BufferedReader(new FileReader(myIdFile));
String myIdString;
try {
myIdString = br.readLine();
} finally {
br.close();
}
try {
serverId = Long.parseLong(myIdString);
} catch (NumberFormatException e) {
throw new IllegalArgumentException("serverid " + myIdString
+ " is not a number");
}
}
}
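parseProperties() above accepts the standard ZooKeeper configuration keys (dataDir, dataLogDir, clientPort, tickTime, initLimit, syncLimit, electionAlg, maxClientCnxns, server.N, group.N, weight.N) and routes anything unrecognized into a zookeeper.* system property. A sketch of a three-node quorum config it would accept follows; host names and ports are illustrative. Note the third port in each server.N value, which the validation at the end of the method requires whenever electionAlg != 0.
import java.util.Properties;

// Sketch of input the parseProperties() above accepts; hosts and ports are
// illustrative, not taken from any real deployment.
public class ZkPropsSketch {
  public static void main(String[] args) {
    Properties zkProp = new Properties();
    zkProp.setProperty("dataDir", "/var/zookeeper/data"); // required
    zkProp.setProperty("clientPort", "2181");
    zkProp.setProperty("tickTime", "2000");               // required
    zkProp.setProperty("initLimit", "10");                // required for quorums
    zkProp.setProperty("syncLimit", "5");                 // required for quorums
    // host:peerPort:electionPort - the election port is mandatory when
    // electionAlg != 0, per the validation at the end of parseProperties()
    zkProp.setProperty("server.1", "zk1.example.com:2888:3888");
    zkProp.setProperty("server.2", "zk2.example.com:2888:3888");
    zkProp.setProperty("server.3", "zk3.example.com:2888:3888");
    zkProp.list(System.out);
  }
}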
// in core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
private void replicate(String nodeName, SolrCore core, ZkNodeProps leaderprops, String baseUrl)
throws SolrServerException, IOException {
String leaderBaseUrl = leaderprops.get(ZkStateReader.BASE_URL_PROP);
ZkCoreNodeProps leaderCNodeProps = new ZkCoreNodeProps(leaderprops);
String leaderUrl = leaderCNodeProps.getCoreUrl();
log.info("Attempting to replicate from " + leaderUrl);
// if we are the leader, either we are trying to recover faster
// than our ephemeral timed out or we are the only node
if (!leaderBaseUrl.equals(baseUrl)) {
// send commit
commitOnLeader(leaderUrl);
// use rep handler directly, so we can do this sync rather than async
SolrRequestHandler handler = core.getRequestHandler(REPLICATION_HANDLER);
if (handler instanceof LazyRequestHandlerWrapper) {
handler = ((LazyRequestHandlerWrapper)handler).getWrappedHandler();
}
ReplicationHandler replicationHandler = (ReplicationHandler) handler;
if (replicationHandler == null) {
throw new SolrException(ErrorCode.SERVICE_UNAVAILABLE,
"Skipping recovery, no " + REPLICATION_HANDLER + " handler found");
}
ModifiableSolrParams solrParams = new ModifiableSolrParams();
solrParams.set(ReplicationHandler.MASTER_URL, leaderUrl + "replication");
if (isClosed()) retries = INTERRUPTED;
boolean success = replicationHandler.doFetch(solrParams, true); // TODO: look into making sure force=true does not download files we already have
if (!success) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Replication for recovery failed.");
}
// solrcloud_debug
// try {
// RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
// SolrIndexSearcher searcher = searchHolder.get();
// try {
// System.out.println(core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName() + " replicated "
// + searcher.search(new MatchAllDocsQuery(), 1).totalHits + " from " + leaderUrl + " gen:" + core.getDeletionPolicy().getLatestCommit().getGeneration() + " data:" + core.getDataDir());
// } finally {
// searchHolder.decref();
// }
// } catch (Exception e) {
//
// }
}
}
// in core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
private void commitOnLeader(String leaderUrl) throws MalformedURLException,
SolrServerException, IOException {
HttpSolrServer server = new HttpSolrServer(leaderUrl);
server.setConnectionTimeout(30000);
server.setSoTimeout(30000);
UpdateRequest ureq = new UpdateRequest();
ureq.setParams(new ModifiableSolrParams());
ureq.getParams().set(DistributedUpdateProcessor.COMMIT_END_POINT, true);
ureq.setAction(AbstractUpdateRequest.ACTION.COMMIT, false, true).process(
server);
server.shutdown();
}
// in core/src/java/org/apache/solr/cloud/RecoveryStrategy.java
private void sendPrepRecoveryCmd(String leaderBaseUrl,
String leaderCoreName) throws MalformedURLException, SolrServerException,
IOException {
HttpSolrServer server = new HttpSolrServer(leaderBaseUrl);
server.setConnectionTimeout(45000);
server.setSoTimeout(45000);
WaitForState prepCmd = new WaitForState();
prepCmd.setCoreName(leaderCoreName);
prepCmd.setNodeName(zkController.getNodeName());
prepCmd.setCoreNodeName(coreZkNodeName);
prepCmd.setState(ZkStateReader.RECOVERING);
prepCmd.setCheckLive(true);
prepCmd.setPauseFor(6000);
server.request(prepCmd);
server.shutdown();
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
private String getHostAddress(String host) throws IOException {
if (host == null) {
host = "http://" + InetAddress.getLocalHost().getHostName();
} else {
Matcher m = URL_PREFIX.matcher(host);
if (m.matches()) {
String prefix = m.group(1);
host = prefix + host;
} else {
host = "http://" + host;
}
}
return host;
}
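getHostAddress() ensures every host string carries a URL scheme. The URL_PREFIX pattern is not shown in the source, so the sketch below assumes a scheme-matching pattern and captures the apparent intent: prefix "http://" only when no scheme is already present.
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of the normalization above. URL_PREFIX is not shown in the source;
// the scheme-matching pattern here is an assumption for illustration.
public class HostAddressSketch {
  static final Pattern URL_PREFIX = Pattern.compile("([a-zA-Z]+://).*");

  static String normalize(String host) {
    Matcher m = URL_PREFIX.matcher(host);
    return m.matches() ? host : "http://" + host;
  }

  public static void main(String[] args) {
    System.out.println(normalize("solr1:8983"));          // http://solr1:8983
    System.out.println(normalize("https://solr1:8983"));  // unchanged
  }
}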
// in core/src/java/org/apache/solr/cloud/ZkController.java
public String readConfigName(String collection) throws KeeperException,
InterruptedException, IOException {
String configName = null;
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
if (log.isInfoEnabled()) {
log.info("Load collection config from:" + path);
}
byte[] data = zkClient.getData(path, null, null, true);
if(data != null) {
ZkNodeProps props = ZkNodeProps.load(data);
configName = props.get(CONFIGNAME_PROP);
}
if (configName != null && !zkClient.exists(CONFIGS_ZKNODE + "/" + configName, true)) {
log.error("Specified config does not exist in ZooKeeper:" + configName);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"Specified config does not exist in ZooKeeper:" + configName);
}
return configName;
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
private void joinElection(CoreDescriptor cd) throws InterruptedException, KeeperException, IOException {
String shardId = cd.getCloudDescriptor().getShardId();
Map<String,String> props = new HashMap<String,String>();
// we only put a subset of props into the leader node
props.put(ZkStateReader.BASE_URL_PROP, getBaseUrl());
props.put(ZkStateReader.CORE_NAME_PROP, cd.getName());
props.put(ZkStateReader.NODE_NAME_PROP, getNodeName());
final String coreZkNodeName = getNodeName() + "_" + cd.getName();
ZkNodeProps ourProps = new ZkNodeProps(props);
String collection = cd.getCloudDescriptor()
.getCollectionName();
ElectionContext context = new ShardLeaderElectionContext(leaderElector, shardId,
collection, coreZkNodeName, ourProps, this, cc);
leaderElector.setup(context);
electionContexts.put(coreZkNodeName, context);
leaderElector.joinElection(context);
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
private boolean checkRecovery(String coreName, final CoreDescriptor desc,
boolean recoverReloadedCores, final boolean isLeader,
final CloudDescriptor cloudDesc, final String collection,
final String shardZkNodeName, String shardId, ZkNodeProps leaderProps,
SolrCore core, CoreContainer cc) throws InterruptedException,
KeeperException, IOException, ExecutionException {
if (SKIP_AUTO_RECOVERY) {
log.warn("Skipping recovery according to sys prop solrcloud.skip.autorecovery");
return false;
}
boolean doRecovery = true;
if (!isLeader) {
if (core.isReloaded() && !recoverReloadedCores) {
doRecovery = false;
}
if (doRecovery) {
log.info("Core needs to recover:" + core.getName());
core.getUpdateHandler().getSolrCoreState().doRecovery(cc, coreName);
return true;
}
} else {
log.info("I am the leader, no recovery necessary");
}
return false;
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
public void uploadToZK(File dir, String zkPath) throws IOException, KeeperException, InterruptedException {
uploadToZK(zkClient, dir, zkPath);
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
public void uploadConfigDir(File dir, String configName) throws IOException, KeeperException, InterruptedException {
uploadToZK(zkClient, dir, ZkController.CONFIGS_ZKNODE + "/" + configName);
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
public void createCollectionZkNode(CloudDescriptor cd) throws KeeperException, InterruptedException, IOException {
String collection = cd.getCollectionName();
log.info("Check for collection zkNode:" + collection);
String collectionPath = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
try {
if(!zkClient.exists(collectionPath, true)) {
log.info("Creating collection in ZooKeeper:" + collection);
SolrParams params = cd.getParams();
try {
Map<String,String> collectionProps = new HashMap<String,String>();
// TODO: if collection.configName isn't set, and there isn't already a conf in zk, just use that?
String defaultConfigName = System.getProperty(COLLECTION_PARAM_PREFIX+CONFIGNAME_PROP, collection);
// params passed in - currently only done via core admin (create core command).
if (params != null) {
Iterator<String> iter = params.getParameterNamesIterator();
while (iter.hasNext()) {
String paramName = iter.next();
if (paramName.startsWith(COLLECTION_PARAM_PREFIX)) {
collectionProps.put(paramName.substring(COLLECTION_PARAM_PREFIX.length()), params.get(paramName));
}
}
// if the config name wasn't passed in, use the default
if (!collectionProps.containsKey(CONFIGNAME_PROP))
getConfName(collection, collectionPath, collectionProps);
} else if(System.getProperty("bootstrap_confdir") != null) {
// if we are bootstrapping a collection, default the config for
// a new collection to the collection we are bootstrapping
log.info("Setting config for collection:" + collection + " to " + defaultConfigName);
Properties sysProps = System.getProperties();
for (String sprop : System.getProperties().stringPropertyNames()) {
if (sprop.startsWith(COLLECTION_PARAM_PREFIX)) {
collectionProps.put(sprop.substring(COLLECTION_PARAM_PREFIX.length()), sysProps.getProperty(sprop));
}
}
// if the config name wasn't passed in, use the default
if (!collectionProps.containsKey(CONFIGNAME_PROP))
collectionProps.put(CONFIGNAME_PROP, defaultConfigName);
} else if (Boolean.getBoolean("bootstrap_conf")) {
// the conf name should be the collection name of this core
collectionProps.put(CONFIGNAME_PROP, cd.getCollectionName());
} else {
getConfName(collection, collectionPath, collectionProps);
}
ZkNodeProps zkProps = new ZkNodeProps(collectionProps);
zkClient.makePath(collectionPath, ZkStateReader.toJSON(zkProps), CreateMode.PERSISTENT, null, true);
// ping that there is a new collection
zkClient.setData(ZkStateReader.COLLECTIONS_ZKNODE, (byte[])null, true);
} catch (KeeperException e) {
// its okay if the node already exists
if (e.code() != KeeperException.Code.NODEEXISTS) {
throw e;
}
}
} else {
log.info("Collection zkNode exists");
}
} catch (KeeperException e) {
// its okay if another beats us creating the node
if (e.code() != KeeperException.Code.NODEEXISTS) {
throw e;
}
}
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
public static void uploadToZK(SolrZkClient zkClient, File dir, String zkPath) throws IOException, KeeperException, InterruptedException {
File[] files = dir.listFiles();
if (files == null) {
throw new IllegalArgumentException("Illegal directory: " + dir);
}
for(File file : files) {
if (!file.getName().startsWith(".")) {
if (!file.isDirectory()) {
zkClient.makePath(zkPath + "/" + file.getName(), file, false, true);
} else {
uploadToZK(zkClient, file, zkPath + "/" + file.getName());
}
}
}
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
public static void uploadConfigDir(SolrZkClient zkClient, File dir, String configName) throws IOException, KeeperException, InterruptedException {
uploadToZK(zkClient, dir, ZkController.CONFIGS_ZKNODE + "/" + configName);
}
// in core/src/java/org/apache/solr/cloud/ZkController.java
public static void bootstrapConf(SolrZkClient zkClient, Config cfg, String solrHome) throws IOException,
KeeperException, InterruptedException {
NodeList nodes = (NodeList)cfg.evaluate("solr/cores/core", XPathConstants.NODESET);
for (int i=0; i<nodes.getLength(); i++) {
Node node = nodes.item(i);
String rawName = DOMUtil.getAttr(node, "name", null);
String instanceDir = DOMUtil.getAttr(node, "instanceDir", null);
File idir = new File(instanceDir);
if (!idir.isAbsolute()) {
idir = new File(solrHome, instanceDir);
}
String confName = DOMUtil.getAttr(node, "collection", null);
if (confName == null) {
confName = rawName;
}
ZkController.uploadConfigDir(zkClient, new File(idir, "conf"), confName);
}
}
// in core/src/java/org/apache/solr/cloud/LeaderElector.java
private void checkIfIamLeader(final int seq, final ElectionContext context, boolean replacement) throws KeeperException,
InterruptedException, IOException {
// get all other numbers...
final String holdElectionPath = context.electionPath + ELECTION_NODE;
List<String> seqs = zkClient.getChildren(holdElectionPath, null, true);
sortSeqs(seqs);
List<Integer> intSeqs = getSeqs(seqs);
if (seq <= intSeqs.get(0)) {
runIamLeaderProcess(context, replacement);
} else {
// I am not the leader - watch the node below me
int i = 1;
for (; i < intSeqs.size(); i++) {
int s = intSeqs.get(i);
if (seq < s) {
// we found who we come before - watch the guy in front
break;
}
}
int index = i - 2;
if (index < 0) {
log.warn("Our node is no longer in line to be leader");
return;
}
try {
zkClient.getData(holdElectionPath + "/" + seqs.get(index),
new Watcher() {
@Override
public void process(WatchedEvent event) {
// am I the next leader?
try {
checkIfIamLeader(seq, context, true);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.warn("", e);
} catch (IOException e) {
log.warn("", e);
} catch (Exception e) {
log.warn("", e);
}
}
}, null, true);
} catch (KeeperException.SessionExpiredException e) {
throw e;
} catch (KeeperException e) {
// we couldn't set our watch - the node before us may already be down?
// we need to check if we are the leader again
checkIfIamLeader(seq, context, true);
}
}
// in core/src/java/org/apache/solr/cloud/LeaderElector.java
protected void runIamLeaderProcess(final ElectionContext context, boolean weAreReplacement) throws KeeperException,
InterruptedException, IOException {
context.runLeaderProcess(weAreReplacement);
}
// in core/src/java/org/apache/solr/cloud/LeaderElector.java
public int joinElection(ElectionContext context) throws KeeperException, InterruptedException, IOException {
final String shardsElectZkPath = context.electionPath + LeaderElector.ELECTION_NODE;
long sessionId = zkClient.getSolrZooKeeper().getSessionId();
String id = sessionId + "-" + context.id;
String leaderSeqPath = null;
boolean cont = true;
int tries = 0;
while (cont) {
try {
leaderSeqPath = zkClient.create(shardsElectZkPath + "/" + id + "-n_", null,
CreateMode.EPHEMERAL_SEQUENTIAL, false);
context.leaderSeqPath = leaderSeqPath;
cont = false;
} catch (ConnectionLossException e) {
// we don't know if we made our node or not...
List<String> entries = zkClient.getChildren(shardsElectZkPath, null, true);
boolean foundId = false;
for (String entry : entries) {
String nodeId = getNodeId(entry);
if (id.equals(nodeId)) {
// we did create our node...
foundId = true;
break;
}
}
if (!foundId) {
throw e;
}
} catch (KeeperException.NoNodeException e) {
// we must have failed in creating the election node - someone else must
// be working on it, let's try again
if (tries++ > 9) {
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
}
cont = true;
Thread.sleep(50);
}
}
int seq = getSeq(leaderSeqPath);
checkIfIamLeader(seq, context, false);
return seq;
}
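joinElection() above creates an EPHEMERAL_SEQUENTIAL node under a name ending in "-n_", and ZooKeeper appends a zero-padded ten-digit counter to such nodes. getSeq() and getNodeId() are not shown in the source, so the parsing below is an assumption about that naming, not the actual helpers:
// Sketch: recover the election sequence number from a node name such as
// "8876023-core1-n_0000000003". ZooKeeper appends a zero-padded ten-digit
// counter to EPHEMERAL_SEQUENTIAL nodes; the real getSeq()/getNodeId()
// helpers are not shown above, so this parsing is illustrative.
public class ElectionSeqSketch {
  static int seqOf(String node) {
    int at = node.lastIndexOf("-n_");
    return Integer.parseInt(node.substring(at + "-n_".length()));
  }

  public static void main(String[] args) {
    System.out.println(seqOf("8876023-core1-n_0000000003")); // 3
  }
}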
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
private void deleteAll() throws IOException {
SolrCore.log.info(core.getLogId()+"REMOVING ALL DOCUMENTS FROM INDEX");
solrCoreState.getIndexWriter(core).deleteAll();
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
protected void rollbackWriter() throws IOException {
numDocsPending.set(0);
solrCoreState.rollbackIndexWriter(core);
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public int addDoc(AddUpdateCommand cmd) throws IOException {
IndexWriter writer = solrCoreState.getIndexWriter(core);
addCommands.incrementAndGet();
addCommandsCumulative.incrementAndGet();
int rc=-1;
// if there is no ID field, don't overwrite
if( idField == null ) {
cmd.overwrite = false;
}
try {
if (cmd.overwrite) {
Term updateTerm;
Term idTerm = new Term(idField.getName(), cmd.getIndexedId());
boolean del = false;
if (cmd.updateTerm == null) {
updateTerm = idTerm;
} else {
del = true;
updateTerm = cmd.updateTerm;
}
Document luceneDocument = cmd.getLuceneDocument();
// SolrCore.verbose("updateDocument",updateTerm,luceneDocument,writer);
writer.updateDocument(updateTerm, luceneDocument);
// SolrCore.verbose("updateDocument",updateTerm,"DONE");
if(del) { // ensure id remains unique
BooleanQuery bq = new BooleanQuery();
bq.add(new BooleanClause(new TermQuery(updateTerm), Occur.MUST_NOT));
bq.add(new BooleanClause(new TermQuery(idTerm), Occur.MUST));
writer.deleteDocuments(bq);
}
} else {
// allow duplicates
writer.addDocument(cmd.getLuceneDocument());
}
// Add to the transaction log *after* successfully adding to the index, if there was no error.
// This ordering ensures that if we log it, it's definitely been added to the index.
// This also ensures that if a commit sneaks in-between, that we know everything in a particular
// log version was definitely committed.
if (ulog != null) ulog.add(cmd);
if ((cmd.getFlags() & UpdateCommand.IGNORE_AUTOCOMMIT) == 0) {
commitTracker.addedDocument( -1 );
softCommitTracker.addedDocument( cmd.commitWithin );
}
rc = 1;
} finally {
if (rc!=1) {
numErrors.incrementAndGet();
numErrorsCumulative.incrementAndGet();
} else {
numDocsPending.incrementAndGet();
}
}
return rc;
}
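// Editor's sketch (assumes the same Lucene API as the code above): the
// "ensure id remains unique" delete built in addDoc, isolated. The fresh document
// was written under updateTerm (e.g. a signature field), so deleting everything
// that matches the idTerm but NOT the updateTerm removes stale copies of the same
// unique key while sparing the document just added.
import org.apache.lucene.index.Term;
import org.apache.lucene.search.BooleanClause;
import org.apache.lucene.search.BooleanClause.Occur;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.TermQuery;

class DedupDeleteSketch {
  static BooleanQuery staleCopiesOf(Term updateTerm, Term idTerm) {
    BooleanQuery bq = new BooleanQuery();
    bq.add(new BooleanClause(new TermQuery(updateTerm), Occur.MUST_NOT)); // spare the new doc
    bq.add(new BooleanClause(new TermQuery(idTerm), Occur.MUST));         // same unique key
    return bq;
  }
}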
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void delete(DeleteUpdateCommand cmd) throws IOException {
deleteByIdCommands.incrementAndGet();
deleteByIdCommandsCumulative.incrementAndGet();
IndexWriter writer = solrCoreState.getIndexWriter(core);
Term deleteTerm = new Term(idField.getName(), cmd.getIndexedId());
// SolrCore.verbose("deleteDocuments",deleteTerm,writer);
writer.deleteDocuments(deleteTerm);
// SolrCore.verbose("deleteDocuments",deleteTerm,"DONE");
if (ulog != null) ulog.delete(cmd);
updateDeleteTrackers(cmd);
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void deleteByQuery(DeleteUpdateCommand cmd) throws IOException {
deleteByQueryCommands.incrementAndGet();
deleteByQueryCommandsCumulative.incrementAndGet();
boolean madeIt=false;
try {
Query q;
try {
// TODO: move this higher in the stack?
QParser parser = QParser.getParser(cmd.query, "lucene", cmd.req);
q = parser.getQuery();
q = QueryUtils.makeQueryable(q);
// peer-sync can cause older deleteByQueries to be executed and could
// delete newer documents. We prevent this by adding a clause restricting
// version.
if ((cmd.getFlags() & UpdateCommand.PEER_SYNC) != 0) {
BooleanQuery bq = new BooleanQuery();
bq.add(q, Occur.MUST);
SchemaField sf = core.getSchema().getField(VersionInfo.VERSION_FIELD);
ValueSource vs = sf.getType().getValueSource(sf, null);
ValueSourceRangeFilter filt = new ValueSourceRangeFilter(vs, null, Long.toString(Math.abs(cmd.version)), true, true);
FunctionRangeQuery range = new FunctionRangeQuery(filt);
bq.add(range, Occur.MUST);
q = bq;
}
} catch (ParseException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
}
boolean delAll = MatchAllDocsQuery.class == q.getClass();
//
// synchronized to prevent deleteByQuery from running during the "open new searcher"
// part of a commit. DBQ needs to signal that a fresh reader will be needed for
// a realtime view of the index. When a new searcher is opened after a DBQ, that
// flag can be cleared. If those things happen concurrently, it's not thread safe.
//
synchronized (this) {
if (delAll) {
deleteAll();
} else {
solrCoreState.getIndexWriter(core).deleteDocuments(q);
}
if (ulog != null) ulog.deleteByQuery(cmd);
}
madeIt = true;
updateDeleteTrackers(cmd);
} finally {
if (!madeIt) {
numErrors.incrementAndGet();
numErrorsCumulative.incrementAndGet();
}
}
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public int mergeIndexes(MergeIndexesCommand cmd) throws IOException {
mergeIndexesCommands.incrementAndGet();
int rc;
log.info("start " + cmd);
IndexReader[] readers = cmd.readers;
if (readers != null && readers.length > 0) {
solrCoreState.getIndexWriter(core).addIndexes(readers);
rc = 1;
} else {
rc = 0;
}
log.info("end_mergeIndexes");
// TODO: consider soft commit issues
if (rc == 1 && commitTracker.getTimeUpperBound() > 0) {
commitTracker.scheduleCommitWithin(commitTracker.getTimeUpperBound());
} else if (rc == 1 && softCommitTracker.getTimeUpperBound() > 0) {
softCommitTracker.scheduleCommitWithin(softCommitTracker.getTimeUpperBound());
}
return rc;
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
public void prepareCommit(CommitUpdateCommand cmd) throws IOException {
boolean error=true;
try {
log.info("start "+cmd);
IndexWriter writer = solrCoreState.getIndexWriter(core);
writer.prepareCommit();
log.info("end_prepareCommit");
error=false;
}
finally {
if (error) numErrors.incrementAndGet();
}
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void commit(CommitUpdateCommand cmd) throws IOException {
if (cmd.prepareCommit) {
prepareCommit(cmd);
return;
}
IndexWriter writer = solrCoreState.getIndexWriter(core);
if (cmd.optimize) {
optimizeCommands.incrementAndGet();
} else {
commitCommands.incrementAndGet();
if (cmd.expungeDeletes) expungeDeleteCommands.incrementAndGet();
}
Future[] waitSearcher = null;
if (cmd.waitSearcher) {
waitSearcher = new Future[1];
}
boolean error=true;
try {
// only allow one hard commit to proceed at once
if (!cmd.softCommit) {
commitLock.lock();
}
log.info("start "+cmd);
// We must cancel pending commits *before* we actually execute the commit.
if (cmd.openSearcher) {
// we can cancel any pending soft commits if this commit will open a new searcher
softCommitTracker.cancelPendingCommit();
}
if (!cmd.softCommit && (cmd.openSearcher || !commitTracker.getOpenSearcher())) {
// cancel a pending hard commit if this commit is of equal or greater "strength"...
// If the autoCommit has openSearcher=true, then this commit must have openSearcher=true
// to cancel.
commitTracker.cancelPendingCommit();
}
if (cmd.optimize) {
writer.forceMerge(cmd.maxOptimizeSegments);
} else if (cmd.expungeDeletes) {
writer.forceMergeDeletes();
}
if (!cmd.softCommit) {
synchronized (this) { // sync is currently needed to prevent preCommit from being called between preSoft and postSoft... see postSoft comments.
if (ulog != null) ulog.preCommit(cmd);
}
// SolrCore.verbose("writer.commit() start writer=",writer);
final Map<String,String> commitData = new HashMap<String,String>();
commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis()));
writer.commit(commitData);
// SolrCore.verbose("writer.commit() end");
numDocsPending.set(0);
callPostCommitCallbacks();
} else {
callPostSoftCommitCallbacks();
}
if (cmd.optimize) {
callPostOptimizeCallbacks();
}
if (cmd.softCommit) {
// ulog.preSoftCommit();
synchronized (this) {
if (ulog != null) ulog.preSoftCommit(cmd);
core.getSearcher(true, false, waitSearcher, true);
if (ulog != null) ulog.postSoftCommit(cmd);
}
// ulog.postSoftCommit();
} else {
synchronized (this) {
if (ulog != null) ulog.preSoftCommit(cmd);
if (cmd.openSearcher) {
core.getSearcher(true, false, waitSearcher);
} else {
// force open a new realtime searcher so realtime-get and versioning code can see the latest
RefCounted<SolrIndexSearcher> searchHolder = core.openNewSearcher(true, true);
searchHolder.decref();
}
if (ulog != null) ulog.postSoftCommit(cmd);
}
if (ulog != null) ulog.postCommit(cmd); // postCommit currently means new searcher has
// also been opened
}
// reset commit tracking
if (cmd.softCommit) {
softCommitTracker.didCommit();
} else {
commitTracker.didCommit();
}
log.info("end_commit_flush");
error=false;
}
finally {
if (!cmd.softCommit) {
commitLock.unlock();
}
addCommands.set(0);
deleteByIdCommands.set(0);
deleteByQueryCommands.set(0);
if (error) numErrors.incrementAndGet();
}
// if we are supposed to wait for the searcher to be registered, then we should do it
// outside any synchronized block so that other update operations can proceed.
if (waitSearcher!=null && waitSearcher[0] != null) {
try {
waitSearcher[0].get();
} catch (InterruptedException e) {
SolrException.log(log,e);
} catch (ExecutionException e) {
SolrException.log(log,e);
}
}
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void newIndexWriter() throws IOException {
solrCoreState.newIndexWriter(core);
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void rollback(RollbackUpdateCommand cmd) throws IOException {
rollbackCommands.incrementAndGet();
boolean error=true;
try {
log.info("start "+cmd);
rollbackWriter();
//callPostRollbackCallbacks();
// reset commit tracking
commitTracker.didRollback();
softCommitTracker.didRollback();
log.info("end_rollback");
error=false;
}
finally {
addCommandsCumulative.set(
addCommandsCumulative.get() - addCommands.getAndSet( 0 ) );
deleteByIdCommandsCumulative.set(
deleteByIdCommandsCumulative.get() - deleteByIdCommands.getAndSet( 0 ) );
deleteByQueryCommandsCumulative.set(
deleteByQueryCommandsCumulative.get() - deleteByQueryCommands.getAndSet( 0 ) );
if (error) numErrors.incrementAndGet();
}
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void close() throws IOException {
log.info("closing " + this);
commitTracker.close();
softCommitTracker.close();
numDocsPending.set(0);
solrCoreState.decref(this);
}
// in core/src/java/org/apache/solr/update/DirectUpdateHandler2.java
@Override
public void closeWriter(IndexWriter writer) throws IOException {
boolean clearRequestInfo = false;
commitLock.lock();
try {
SolrQueryRequest req = new LocalSolrQueryRequest(core, new ModifiableSolrParams());
SolrQueryResponse rsp = new SolrQueryResponse();
if (SolrRequestInfo.getRequestInfo() == null) {
clearRequestInfo = true;
SolrRequestInfo.setRequestInfo(new SolrRequestInfo(req, rsp)); // important for debugging
}
if (!commitOnClose) {
if (writer != null) {
writer.rollback();
}
// we shouldn't close the transaction logs either, but leaving them open
// means we can't delete them on windows (needed for tests)
if (ulog != null) ulog.close(false);
return;
}
// do a commit before we quit?
boolean tryToCommit = writer != null && ulog != null && ulog.hasUncommittedChanges() && ulog.getState() == UpdateLog.State.ACTIVE;
try {
if (tryToCommit) {
CommitUpdateCommand cmd = new CommitUpdateCommand(req, false);
cmd.openSearcher = false;
cmd.waitSearcher = false;
cmd.softCommit = false;
// TODO: keep other commit callbacks from being called?
// this.commit(cmd); // too many test failures using this method... is it because of callbacks?
synchronized (this) {
ulog.preCommit(cmd);
}
// todo: refactor this shared code (or figure out why a real CommitUpdateCommand can't be used)
final Map<String,String> commitData = new HashMap<String,String>();
commitData.put(SolrIndexWriter.COMMIT_TIME_MSEC_KEY, String.valueOf(System.currentTimeMillis()));
writer.commit(commitData);
synchronized (this) {
ulog.postCommit(cmd);
}
}
} catch (Throwable th) {
log.error("Error in final commit", th);
}
// we went through the normal process to commit, so we don't have to artificially
// cap any ulog files.
try {
if (ulog != null) ulog.close(false);
} catch (Throwable th) {
log.error("Error closing log files", th);
}
if (writer != null) writer.close();
} finally {
commitLock.unlock();
if (clearRequestInfo) SolrRequestInfo.clearRequestInfo();
}
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
// TODO: check for id field?
int hash = 0;
if (zkEnabled) {
zkCheck();
hash = hash(cmd);
nodes = setupRequest(hash);
} else {
isLeader = getNonZkLeaderAssumption(req);
}
boolean dropCmd = false;
if (!forwardToLeader) {
dropCmd = versionAdd(cmd);
}
if (dropCmd) {
// TODO: do we need to add anything to the response?
return;
}
ModifiableSolrParams params = null;
if (nodes != null) {
params = new ModifiableSolrParams(req.getParams());
params.set(DISTRIB_UPDATE_PARAM,
(isLeader ?
DistribPhase.FROMLEADER.toString() :
DistribPhase.TOLEADER.toString()));
params.remove("commit"); // this will be distributed from the local commit
cmdDistrib.distribAdd(cmd, nodes, params);
}
// TODO: what to do when no idField?
if (returnVersions && rsp != null && idField != null) {
if (addsResponse == null) {
addsResponse = new NamedList<String>();
rsp.add("adds",addsResponse);
}
if (scratch == null) scratch = new CharsRef();
idField.getType().indexedToReadable(cmd.getIndexedId(), scratch);
addsResponse.add(scratch.toString(), cmd.getVersion());
}
// TODO: keep track of errors? needs to be done at a higher level though since
// an id may fail before it gets to this processor.
// Given that, it may also make sense to move the version reporting out of this
// processor too.
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
private void doLocalAdd(AddUpdateCommand cmd) throws IOException {
super.processAdd(cmd);
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
private void doLocalDelete(DeleteUpdateCommand cmd) throws IOException {
super.processDelete(cmd);
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
private boolean versionAdd(AddUpdateCommand cmd) throws IOException {
BytesRef idBytes = cmd.getIndexedId();
if (vinfo == null || idBytes == null) {
super.processAdd(cmd);
return false;
}
// This is only the hash for the bucket, and must be based only on the uniqueKey (i.e. do not use a pluggable hash here)
int bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);
// at this point, there is an update we need to try and apply.
// we may or may not be the leader.
// Find any existing version in the document
// TODO: don't reuse update commands any more!
long versionOnUpdate = cmd.getVersion();
if (versionOnUpdate == 0) {
SolrInputField versionField = cmd.getSolrInputDocument().getField(VersionInfo.VERSION_FIELD);
if (versionField != null) {
Object o = versionField.getValue();
versionOnUpdate = o instanceof Number ? ((Number) o).longValue() : Long.parseLong(o.toString());
} else {
// Find the version
String versionOnUpdateS = req.getParams().get(VERSION_FIELD);
versionOnUpdate = versionOnUpdateS == null ? 0 : Long.parseLong(versionOnUpdateS);
}
}
boolean isReplay = (cmd.getFlags() & UpdateCommand.REPLAY) != 0;
boolean leaderLogic = isLeader && !isReplay;
VersionBucket bucket = vinfo.bucket(bucketHash);
vinfo.lockForUpdate();
try {
synchronized (bucket) {
// we obtain the version when synchronized and then do the add so we can ensure that
// if version1 < version2 then version1 is actually added before version2.
// even if we don't store the version field, synchronizing on the bucket
// will enable us to know what version happened first, and thus enable
// realtime-get to work reliably.
// TODO: if versions aren't stored, do we need to set on the cmd anyway for some reason?
// there may be other reasons in the future for a version on the commands
if (versionsStored) {
long bucketVersion = bucket.highest;
if (leaderLogic) {
boolean updated = getUpdatedDocument(cmd);
if (updated && versionOnUpdate == -1) {
versionOnUpdate = 1; // implied "doc must exist" for now...
}
if (versionOnUpdate != 0) {
Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
long foundVersion = lastVersion == null ? -1 : lastVersion;
if ( versionOnUpdate == foundVersion || (versionOnUpdate < 0 && foundVersion < 0) || (versionOnUpdate==1 && foundVersion > 0) ) {
// we're ok if versions match, or if both are negative (all missing docs are equal), or if cmd
// specified it must exist (versionOnUpdate==1) and it does.
} else {
throw new SolrException(ErrorCode.CONFLICT, "version conflict for " + cmd.getPrintableId() + " expected=" + versionOnUpdate + " actual=" + foundVersion);
}
}
long version = vinfo.getNewClock();
cmd.setVersion(version);
cmd.getSolrInputDocument().setField(VersionInfo.VERSION_FIELD, version);
bucket.updateHighest(version);
} else {
// The leader forwarded us this update.
cmd.setVersion(versionOnUpdate);
if (ulog.getState() != UpdateLog.State.ACTIVE && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
// we're not in an active state, and this update isn't from a replay, so buffer it.
cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
ulog.add(cmd);
return true;
}
// if we aren't the leader, then we need to check that updates were not re-ordered
if (bucketVersion != 0 && bucketVersion < versionOnUpdate) {
// we're OK... this update has a version higher than anything we've seen
// in this bucket so far, so we know that no reordering has yet occurred.
bucket.updateHighest(versionOnUpdate);
} else {
// there have been updates higher than the current update. we need to check
// the specific version for this id.
Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
if (lastVersion != null && Math.abs(lastVersion) >= versionOnUpdate) {
// This update is a repeat, or was reordered. We need to drop this update.
return true;
}
}
}
}
doLocalAdd(cmd);
} // end synchronized (bucket)
} finally {
vinfo.unlockForUpdate();
}
return false;
}
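// Editor's sketch (plain Java, Solr types replaced by primitives): the replica-side
// reorder check in versionAdd/versionDelete, isolated. An update is applied when it
// is newer than the highest version seen in its bucket; otherwise the per-id version
// is consulted and repeats/reorders are dropped.
class ReorderCheckSketch {
  long bucketHighest; // plays the role of bucket.highest
  final java.util.Map<String, Long> lastVersionById = new java.util.HashMap<String, Long>();

  /** @return true if the update should be dropped as a repeat or reorder */
  boolean shouldDrop(String id, long versionOnUpdate) {
    if (bucketHighest != 0 && bucketHighest < versionOnUpdate) {
      bucketHighest = versionOnUpdate; // newest seen in this bucket: safe to apply
      return false;
    }
    Long last = lastVersionById.get(id); // stand-in for vinfo.lookupVersion(...)
    return last != null && Math.abs(last) >= versionOnUpdate;
  }
}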
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
boolean getUpdatedDocument(AddUpdateCommand cmd) throws IOException {
SolrInputDocument sdoc = cmd.getSolrInputDocument();
boolean update = false;
for (SolrInputField sif : sdoc.values()) {
if (sif.getValue() instanceof Map) {
update = true;
break;
}
}
if (!update) return false;
BytesRef id = cmd.getIndexedId();
SolrInputDocument oldDoc = RealTimeGetComponent.getInputDocument(cmd.getReq().getCore(), id);
if (oldDoc == null) {
// not found... allow this in the future (depending on the details of the update, or if the user explicitly sets it).
// could also just not change anything here and let the optimistic locking throw the error
throw new SolrException(ErrorCode.CONFLICT, "Document not found for update. id=" + cmd.getPrintableId());
}
oldDoc.remove(VERSION_FIELD);
for (SolrInputField sif : sdoc.values()) {
Object val = sif.getValue();
if (val instanceof Map) {
for (Entry<String,Object> entry : ((Map<String,Object>) val).entrySet()) {
String key = entry.getKey();
Object fieldVal = entry.getValue();
if ("add".equals(key)) {
oldDoc.addField( sif.getName(), fieldVal, sif.getBoost());
} else if ("set".equals(key)) {
oldDoc.setField(sif.getName(), fieldVal, sif.getBoost());
} else if ("inc".equals(key)) {
SolrInputField numericField = oldDoc.get(sif.getName());
if (numericField == null) {
oldDoc.setField(sif.getName(), fieldVal, sif.getBoost());
} else {
// TODO: fieldtype needs externalToObject?
String oldValS = numericField.getFirstValue().toString();
SchemaField sf = cmd.getReq().getSchema().getField(sif.getName());
BytesRef term = new BytesRef();
sf.getType().readableToIndexed(oldValS, term);
Object oldVal = sf.getType().toObject(sf, term);
String fieldValS = fieldVal.toString();
Number result;
if (oldVal instanceof Long) {
result = ((Long) oldVal).longValue() + Long.parseLong(fieldValS);
} else if (oldVal instanceof Float) {
result = ((Float) oldVal).floatValue() + Float.parseFloat(fieldValS);
} else if (oldVal instanceof Double) {
result = ((Double) oldVal).doubleValue() + Double.parseDouble(fieldValS);
} else {
// int, short, byte
result = ((Integer) oldVal).intValue() + Integer.parseInt(fieldValS);
}
oldDoc.setField(sif.getName(), result, sif.getBoost());
}
}
}
} else {
// normal fields are treated as a "set"
oldDoc.put(sif.getName(), sif);
}
}
cmd.solrDoc = oldDoc;
return true;
}
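// Editor's sketch: the shape of an "atomic update" document that makes
// getUpdatedDocument() take the merge path above. Any field whose value is a Map
// flags the command as an update of the stored document; the map key selects the
// behavior ("add", "set" or "inc"). Field names here are hypothetical.
import java.util.Collections;
import org.apache.solr.common.SolrInputDocument;

class AtomicUpdateSketch {
  static SolrInputDocument incPopularity(String id, long delta) {
    SolrInputDocument doc = new SolrInputDocument();
    doc.addField("id", id); // uniqueKey locates the old document via realtime-get
    doc.addField("popularity", Collections.singletonMap("inc", delta)); // "inc" branch above
    return doc;
  }
}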
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@Override
public void processDelete(DeleteUpdateCommand cmd) throws IOException {
if (!cmd.isDeleteById()) {
doDeleteByQuery(cmd);
return;
}
int hash = 0;
if (zkEnabled) {
zkCheck();
hash = hash(cmd);
nodes = setupRequest(hash);
} else {
isLeader = getNonZkLeaderAssumption(req);
}
boolean dropCmd = false;
if (!forwardToLeader) {
dropCmd = versionDelete(cmd);
}
if (dropCmd) {
// TODO: do we need to add anything to the response?
return;
}
ModifiableSolrParams params = null;
if (nodes != null) {
params = new ModifiableSolrParams(req.getParams());
params.set(DISTRIB_UPDATE_PARAM,
(isLeader ?
DistribPhase.FROMLEADER.toString() :
DistribPhase.TOLEADER.toString()));
params.remove("commit"); // we will already have forwarded this from our local commit
cmdDistrib.distribDelete(cmd, nodes, params);
}
// cmd.getIndexedId() == null when this is a delete by query
// TODO: what to do when no idField?
if (returnVersions && rsp != null && cmd.getIndexedId() != null && idField != null) {
if (deleteResponse == null) {
deleteResponse = new NamedList<String>();
rsp.add("deletes",deleteResponse);
}
if (scratch == null) scratch = new CharsRef();
idField.getType().indexedToReadable(cmd.getIndexedId(), scratch);
deleteResponse.add(scratch.toString(), cmd.getVersion()); // we're returning the version of the delete.. not the version of the doc we deleted.
}
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
public void doDeleteByQuery(DeleteUpdateCommand cmd) throws IOException {
// even in non zk mode, tests simulate updates from a leader
if(!zkEnabled) {
isLeader = getNonZkLeaderAssumption(req);
} else {
zkCheck();
}
// NONE: we are the first to receive this deleteByQuery
// - it must be forwarded to the leader of every shard
// TO: we are a leader receiving a forwarded deleteByQuery... we must:
// - block all updates (use VersionInfo)
// - flush *all* updates going to our replicas
// - forward the DBQ to our replicas and wait for the response
// - log + execute the local DBQ
// FROM: we are a replica receiving a DBQ from our leader
// - log + execute the local DBQ
DistribPhase phase =
DistribPhase.parseParam(req.getParams().get(DISTRIB_UPDATE_PARAM));
if (zkEnabled && DistribPhase.NONE == phase) {
boolean leaderForAnyShard = false; // start off by assuming we are not a leader for any shard
Map<String,Slice> slices = zkController.getCloudState().getSlices(collection);
if (slices == null) {
throw new SolrException(ErrorCode.BAD_REQUEST,
"Cannot find collection:" + collection + " in "
+ zkController.getCloudState().getCollections());
}
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.TOLEADER.toString());
List<Node> leaders = new ArrayList<Node>(slices.size());
for (Map.Entry<String,Slice> sliceEntry : slices.entrySet()) {
String sliceName = sliceEntry.getKey();
ZkNodeProps leaderProps;
try {
leaderProps = zkController.getZkStateReader().getLeaderProps(collection, sliceName);
} catch (InterruptedException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "Exception finding leader for shard " + sliceName, e);
}
// TODO: What if leaders changed in the meantime?
// should we send out slice-at-a-time and if a node returns "hey, I'm not a leader" (or we get an error because it went down) then look up the new leader?
// Am I the leader for this slice?
ZkCoreNodeProps coreLeaderProps = new ZkCoreNodeProps(leaderProps);
String leaderNodeName = coreLeaderProps.getCoreNodeName();
String coreName = req.getCore().getName();
String coreNodeName = zkController.getNodeName() + "_" + coreName;
isLeader = coreNodeName.equals(leaderNodeName);
if (isLeader) {
// don't forward to ourself
leaderForAnyShard = true;
} else {
leaders.add(new StdNode(coreLeaderProps));
}
}
params.remove("commit"); // this will be distributed from the local commit
cmdDistrib.distribDelete(cmd, leaders, params);
if (!leaderForAnyShard) {
return;
}
// change the phase to TOLEADER so we look up and forward to our own replicas (if any)
phase = DistribPhase.TOLEADER;
}
List<Node> replicas = null;
if (zkEnabled && DistribPhase.TOLEADER == phase) {
// This core should be a leader
replicas = setupRequest();
}
if (vinfo == null) {
super.processDelete(cmd);
return;
}
// at this point, there is an update we need to try and apply.
// we may or may not be the leader.
// Find the version
long versionOnUpdate = cmd.getVersion();
if (versionOnUpdate == 0) {
String versionOnUpdateS = req.getParams().get(VERSION_FIELD);
versionOnUpdate = versionOnUpdateS == null ? 0 : Long.parseLong(versionOnUpdateS);
}
versionOnUpdate = Math.abs(versionOnUpdate); // normalize to positive version
boolean isReplay = (cmd.getFlags() & UpdateCommand.REPLAY) != 0;
boolean leaderLogic = isLeader && !isReplay;
if (!leaderLogic && versionOnUpdate==0) {
throw new SolrException(ErrorCode.BAD_REQUEST, "missing _version_ on update from leader");
}
vinfo.blockUpdates();
try {
if (versionsStored) {
if (leaderLogic) {
long version = vinfo.getNewClock();
cmd.setVersion(-version);
// TODO update versions in all buckets
doLocalDelete(cmd);
} else {
cmd.setVersion(-versionOnUpdate);
if (ulog.getState() != UpdateLog.State.ACTIVE && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
// we're not in an active state, and this update isn't from a replay, so buffer it.
cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
ulog.deleteByQuery(cmd);
return;
}
doLocalDelete(cmd);
}
}
// since we don't know which documents were deleted, the easiest thing to do is to invalidate
// all real-time caches (i.e. UpdateLog) which involves also getting a new version of the IndexReader
// (so cache misses will see up-to-date data)
} finally {
vinfo.unblockUpdates();
}
// TODO: need to handle reorders to replicas somehow
// forward to all replicas
if (leaderLogic && replicas != null) {
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
params.set(VERSION_FIELD, Long.toString(cmd.getVersion()));
params.set(DISTRIB_UPDATE_PARAM, DistribPhase.FROMLEADER.toString());
cmdDistrib.distribDelete(cmd, replicas, params);
cmdDistrib.finish();
}
if (returnVersions && rsp != null) {
if (deleteByQueryResponse == null) {
deleteByQueryResponse = new NamedList<String>();
rsp.add("deleteByQuery",deleteByQueryResponse);
}
deleteByQueryResponse.add(cmd.getQuery(), cmd.getVersion());
}
}
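// Editor's sketch: the three DISTRIB_UPDATE_PARAM phases walked through above, as a
// plain state machine. NONE (straight from a client) fans out to every shard leader
// as TOLEADER; a leader re-sends to its own replicas as FROMLEADER; a replica
// executes locally and forwards nothing. Stand-in enum, not Solr's DistribPhase.
class DbqPhaseSketch {
  enum Phase { NONE, TOLEADER, FROMLEADER }

  static Phase nextHop(Phase received) {
    switch (received) {
      case NONE:     return Phase.TOLEADER;   // forward to all shard leaders
      case TOLEADER: return Phase.FROMLEADER; // leader forwards to its replicas
      default:       return null;             // replica: terminal hop
    }
  }
}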
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
private boolean versionDelete(DeleteUpdateCommand cmd) throws IOException {
BytesRef idBytes = cmd.getIndexedId();
if (vinfo == null || idBytes == null) {
super.processDelete(cmd);
return false;
}
// This is only the hash for the bucket, and must be based only on the uniqueKey (i.e. do not use a pluggable hash here)
int bucketHash = Hash.murmurhash3_x86_32(idBytes.bytes, idBytes.offset, idBytes.length, 0);
// at this point, there is an update we need to try and apply.
// we may or may not be the leader.
// Find the version
long versionOnUpdate = cmd.getVersion();
if (versionOnUpdate == 0) {
String versionOnUpdateS = req.getParams().get(VERSION_FIELD);
versionOnUpdate = versionOnUpdateS == null ? 0 : Long.parseLong(versionOnUpdateS);
}
long signedVersionOnUpdate = versionOnUpdate;
versionOnUpdate = Math.abs(versionOnUpdate); // normalize to positive version
boolean isReplay = (cmd.getFlags() & UpdateCommand.REPLAY) != 0;
boolean leaderLogic = isLeader && !isReplay;
if (!leaderLogic && versionOnUpdate==0) {
throw new SolrException(ErrorCode.BAD_REQUEST, "missing _version_ on update from leader");
}
VersionBucket bucket = vinfo.bucket(bucketHash);
vinfo.lockForUpdate();
try {
synchronized (bucket) {
if (versionsStored) {
long bucketVersion = bucket.highest;
if (leaderLogic) {
if (signedVersionOnUpdate != 0) {
Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
long foundVersion = lastVersion == null ? -1 : lastVersion;
if ( (signedVersionOnUpdate == foundVersion) || (signedVersionOnUpdate < 0 && foundVersion < 0) || (signedVersionOnUpdate == 1 && foundVersion > 0) ) {
// we're ok if versions match, or if both are negative (all missing docs are equal), or if cmd
// specified it must exist (versionOnUpdate==1) and it does.
} else {
throw new SolrException(ErrorCode.CONFLICT, "version conflict for " + cmd.getId() + " expected=" + signedVersionOnUpdate + " actual=" + foundVersion);
}
}
long version = vinfo.getNewClock();
cmd.setVersion(-version);
bucket.updateHighest(version);
} else {
cmd.setVersion(-versionOnUpdate);
if (ulog.getState() != UpdateLog.State.ACTIVE && (cmd.getFlags() & UpdateCommand.REPLAY) == 0) {
// we're not in an active state, and this update isn't from a replay, so buffer it.
cmd.setFlags(cmd.getFlags() | UpdateCommand.BUFFERING);
ulog.delete(cmd);
return true;
}
// if we aren't the leader, then we need to check that updates were not re-ordered
if (bucketVersion != 0 && bucketVersion < versionOnUpdate) {
// we're OK... this update has a version higher than anything we've seen
// in this bucket so far, so we know that no reordering has yet occurred.
bucket.updateHighest(versionOnUpdate);
} else {
// there have been updates higher than the current update. we need to check
// the specific version for this id.
Long lastVersion = vinfo.lookupVersion(cmd.getIndexedId());
if (lastVersion != null && Math.abs(lastVersion) >= versionOnUpdate) {
// This update is a repeat, or was reordered. We need to drop this update.
return true;
}
}
}
}
doLocalDelete(cmd);
return false;
} // end synchronized (bucket)
} finally {
vinfo.unlockForUpdate();
}
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException {
if (zkEnabled) {
zkCheck();
}
if (vinfo != null) {
vinfo.lockForUpdate();
}
try {
if (ulog == null || ulog.getState() == UpdateLog.State.ACTIVE || (cmd.getFlags() & UpdateCommand.REPLAY) != 0) {
super.processCommit(cmd);
} else {
log.info("Ignoring commit while not ACTIVE - state: " + ulog.getState() + " replay:" + (cmd.getFlags() & UpdateCommand.REPLAY));
}
} finally {
if (vinfo != null) {
vinfo.unlockForUpdate();
}
}
// TODO: we should consider this? commit everyone in the current collection
if (zkEnabled) {
ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
if (!params.getBool(COMMIT_END_POINT, false)) {
params.set(COMMIT_END_POINT, true);
String nodeName = req.getCore().getCoreDescriptor().getCoreContainer()
.getZkController().getNodeName();
String shardZkNodeName = nodeName + "_" + req.getCore().getName();
List<Node> nodes = getCollectionUrls(req, req.getCore().getCoreDescriptor()
.getCloudDescriptor().getCollectionName(), shardZkNodeName);
if (nodes != null) {
cmdDistrib.distribCommit(cmd, nodes, params);
finish();
}
}
}
}
// in core/src/java/org/apache/solr/update/processor/DistributedUpdateProcessor.java
@Override
public void finish() throws IOException {
doFinish();
if (next != null && nodes == null) next.finish();
}
// in core/src/java/org/apache/solr/update/processor/RunUpdateProcessorFactory.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
updateHandler.addDoc(cmd);
super.processAdd(cmd);
changesSinceCommit = true;
}
// in core/src/java/org/apache/solr/update/processor/RunUpdateProcessorFactory.java
@Override
public void processDelete(DeleteUpdateCommand cmd) throws IOException {
if( cmd.isDeleteById()) {
updateHandler.delete(cmd);
}
else {
updateHandler.deleteByQuery(cmd);
}
super.processDelete(cmd);
changesSinceCommit = true;
}
// in core/src/java/org/apache/solr/update/processor/RunUpdateProcessorFactory.java
@Override
public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException {
updateHandler.mergeIndexes(cmd);
super.processMergeIndexes(cmd);
}
// in core/src/java/org/apache/solr/update/processor/RunUpdateProcessorFactory.java
@Override
public void processCommit(CommitUpdateCommand cmd) throws IOException
{
updateHandler.commit(cmd);
super.processCommit(cmd);
changesSinceCommit = false;
}
// in core/src/java/org/apache/solr/update/processor/RunUpdateProcessorFactory.java
@Override
public void processRollback(RollbackUpdateCommand cmd) throws IOException
{
updateHandler.rollback(cmd);
super.processRollback(cmd);
changesSinceCommit = false;
}
// in core/src/java/org/apache/solr/update/processor/RunUpdateProcessorFactory.java
@Override
public void finish() throws IOException {
if (changesSinceCommit && updateHandler.getUpdateLog() != null) {
updateHandler.getUpdateLog().finish(null);
}
super.finish();
}
// in core/src/java/org/apache/solr/update/processor/UpdateRequestProcessor.java
public void processAdd(AddUpdateCommand cmd) throws IOException {
if (next != null) next.processAdd(cmd);
}
// in core/src/java/org/apache/solr/update/processor/UpdateRequestProcessor.java
public void processDelete(DeleteUpdateCommand cmd) throws IOException {
if (next != null) next.processDelete(cmd);
}
// in core/src/java/org/apache/solr/update/processor/UpdateRequestProcessor.java
public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException {
if (next != null) next.processMergeIndexes(cmd);
}
// in core/src/java/org/apache/solr/update/processor/UpdateRequestProcessor.java
public void processCommit(CommitUpdateCommand cmd) throws IOException
{
if (next != null) next.processCommit(cmd);
}
// in core/src/java/org/apache/solr/update/processor/UpdateRequestProcessor.java
public void processRollback(RollbackUpdateCommand cmd) throws IOException
{
if (next != null) next.processRollback(cmd);
}
// in core/src/java/org/apache/solr/update/processor/UpdateRequestProcessor.java
public void finish() throws IOException {
if (next != null) next.finish();
}
// in core/src/java/org/apache/solr/update/processor/FieldMutatingUpdateProcessor.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
final SolrInputDocument doc = cmd.getSolrInputDocument();
// make a copy we can iterate over while mutating the doc
final Collection<String> fieldNames
= new ArrayList<String>(doc.getFieldNames());
for (final String fname : fieldNames) {
if (! selector.shouldMutate(fname)) continue;
final SolrInputField src = doc.get(fname);
SolrInputField dest = null;
try {
dest = mutate(src);
} catch (SolrException e) {
String msg = "Unable to mutate field '"+fname+"': "+e.getMessage();
SolrException.log(log, msg, e);
throw new SolrException(BAD_REQUEST, msg, e);
}
if (null == dest) {
doc.remove(fname);
} else {
// semantics of what happens if dest has a different name are hard
// we could treat it as a copy, or a rename
// for now, don't allow it.
if (! fname.equals(dest.getName()) ) {
throw new SolrException(SERVER_ERROR,
"mutute returned field with different name: "
+ fname + " => " + dest.getName());
}
doc.put(dest.getName(), dest);
}
}
super.processAdd(cmd);
}
// in core/src/java/org/apache/solr/update/processor/URLClassifyProcessor.java
@Override
public void processAdd(AddUpdateCommand command) throws IOException {
if (isEnabled()) {
SolrInputDocument document = command.getSolrInputDocument();
if (document.containsKey(urlFieldname)) {
String url = (String) document.getFieldValue(urlFieldname);
try {
URL normalizedURL = getNormalizedURL(url);
document.setField(lengthFieldname, length(normalizedURL));
document.setField(levelsFieldname, levels(normalizedURL));
document.setField(toplevelpageFieldname, isTopLevelPage(normalizedURL) ? 1 : 0);
document.setField(landingpageFieldname, isLandingPage(normalizedURL) ? 1 : 0);
if (domainFieldname != null) {
document.setField(domainFieldname, normalizedURL.getHost());
}
if (canonicalUrlFieldname != null) {
document.setField(canonicalUrlFieldname, getCanonicalUrl(normalizedURL));
}
log.debug(document.toString());
} catch (MalformedURLException e) {
log.warn("cannot get the normalized url for \"" + url + "\" due to " + e.getMessage());
} catch (URISyntaxException e) {
log.warn("cannot get the normalized url for \"" + url + "\" due to " + e.getMessage());
}
}
}
super.processAdd(command);
}
// in core/src/java/org/apache/solr/update/processor/SignatureUpdateProcessorFactory.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
if (enabled) {
SolrInputDocument doc = cmd.getSolrInputDocument();
List<String> currDocSigFields = null;
if (sigFields == null || sigFields.size() == 0) {
Collection<String> docFields = doc.getFieldNames();
currDocSigFields = new ArrayList<String>(docFields.size());
currDocSigFields.addAll(docFields);
Collections.sort(currDocSigFields);
} else {
currDocSigFields = sigFields;
}
Signature sig = req.getCore().getResourceLoader().newInstance(signatureClass, Signature.class);
sig.init(params);
for (String field : currDocSigFields) {
SolrInputField f = doc.getField(field);
if (f != null) {
sig.add(field);
Object o = f.getValue();
if (o instanceof Collection) {
for (Object oo : (Collection)o) {
sig.add(String.valueOf(oo));
}
} else {
sig.add(String.valueOf(o));
}
}
}
byte[] signature = sig.getSignature();
char[] arr = new char[signature.length<<1];
for (int i=0; i<signature.length; i++) {
int b = signature[i];
int idx = i<<1;
arr[idx]= StrUtils.HEX_DIGITS[(b >> 4) & 0xf];
arr[idx+1]= StrUtils.HEX_DIGITS[b & 0xf];
}
String sigString = new String(arr);
doc.addField(signatureField, sigString);
if (overwriteDupes) {
cmd.updateTerm = new Term(signatureField, sigString);
}
}
if (next != null)
next.processAdd(cmd);
}
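// Editor's sketch: the nibble-to-hex encoding used for the signature bytes above,
// isolated and runnable. HEX here stands in for StrUtils.HEX_DIGITS.
class SignatureHexSketch {
  static final char[] HEX = "0123456789abcdef".toCharArray();

  static String hex(byte[] sig) {
    char[] arr = new char[sig.length << 1];
    for (int i = 0; i < sig.length; i++) {
      int b = sig[i];
      int idx = i << 1;
      arr[idx] = HEX[(b >> 4) & 0xf]; // high nibble first
      arr[idx + 1] = HEX[b & 0xf];    // then low nibble
    }
    return new String(arr);
  }

  public static void main(String[] args) {
    System.out.println(hex(new byte[] {(byte) 0xde, (byte) 0xad})); // -> dead
  }
}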
// in core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
if (logDebug) { log.debug("PRE_UPDATE " + cmd.toString()); }
// call delegate first so we can log things like the version that get set later
if (next != null) next.processAdd(cmd);
// Add a list of added id's to the response
if (adds == null) {
adds = new ArrayList<String>();
toLog.add("add",adds);
}
if (adds.size() < maxNumToLog) {
long version = cmd.getVersion();
String msg = cmd.getPrintableId();
if (version != 0) msg = msg + " (" + version + ')';
adds.add(msg);
}
numAdds++;
}
// in core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java
@Override
public void processDelete( DeleteUpdateCommand cmd ) throws IOException {
if (logDebug) { log.debug("PRE_UPDATE " + cmd.toString()); }
if (next != null) next.processDelete(cmd);
if (cmd.isDeleteById()) {
if (deletes == null) {
deletes = new ArrayList<String>();
toLog.add("delete",deletes);
}
if (deletes.size() < maxNumToLog) {
long version = cmd.getVersion();
String msg = cmd.getId();
if (version != 0) msg = msg + " (" + version + ')';
deletes.add(msg);
}
} else {
if (toLog.size() < maxNumToLog) {
long version = cmd.getVersion();
String msg = cmd.query;
if (version != 0) msg = msg + " (" + version + ')';
toLog.add("deleteByQuery", msg);
}
}
numDeletes++;
}
// in core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java
@Override
public void processMergeIndexes(MergeIndexesCommand cmd) throws IOException {
if (logDebug) { log.debug("PRE_UPDATE " + cmd.toString()); }
if (next != null) next.processMergeIndexes(cmd);
toLog.add("mergeIndexes", cmd.toString());
}
// in core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java
@Override
public void processCommit( CommitUpdateCommand cmd ) throws IOException {
if (logDebug) { log.debug("PRE_UPDATE " + cmd.toString()); }
if (next != null) next.processCommit(cmd);
final String msg = cmd.optimize ? "optimize" : "commit";
toLog.add(msg, "");
}
// in core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java
@Override
public void processRollback( RollbackUpdateCommand cmd ) throws IOException {
if (logDebug) { log.debug("PRE_UPDATE " + cmd.toString()); }
if (next != null) next.processRollback(cmd);
toLog.add("rollback", "");
}
// in core/src/java/org/apache/solr/update/processor/LogUpdateProcessorFactory.java
@Override
public void finish() throws IOException {
if (logDebug) { log.debug("PRE_UPDATE finish()"); }
if (next != null) next.finish();
// LOG A SUMMARY WHEN ALL DONE (INFO LEVEL)
NamedList<Object> stdLog = rsp.getToLog();
StringBuilder sb = new StringBuilder(req.getCore().getLogId());
for (int i=0; i<stdLog.size(); i++) {
String name = stdLog.getName(i);
Object val = stdLog.getVal(i);
if (name != null) {
sb.append(name).append('=');
}
sb.append(val).append(' ');
}
stdLog.clear(); // make it so SolrCore.exec won't log this again
// if id lists were truncated, show how many more there were
if (adds != null && numAdds > maxNumToLog) {
adds.add("... (" + numAdds + " adds)");
}
if (deletes != null && numDeletes > maxNumToLog) {
deletes.add("... (" + numDeletes + " deletes)");
}
long elapsed = rsp.getEndTime() - req.getStartTime();
sb.append(toLog).append(" 0 ").append(elapsed);
log.info(sb.toString());
}
// in core/src/java/org/apache/solr/update/processor/UniqFieldsUpdateProcessorFactory.java
@Override
public void processAdd(AddUpdateCommand cmd) throws IOException {
if(fields != null){
SolrInputDocument solrInputDocument = cmd.getSolrInputDocument();
List<Object> uniqList = new ArrayList<Object>();
for (String field : fields) {
uniqList.clear();
Collection<Object> col = solrInputDocument.getFieldValues(field);
if (col != null) {
for (Object o : col) {
if(!uniqList.contains(o))
uniqList.add(o);
}
solrInputDocument.remove(field);
for (Object o : uniqList) {
solrInputDocument.addField(field, o);
}
}
}
}
super.processAdd(cmd);
}
// in core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@Override
public synchronized IndexWriter getIndexWriter(SolrCore core) throws IOException {
if (indexWriter == null) {
indexWriter = createMainIndexWriter(core, "DirectUpdateHandler2", false, false);
}
return indexWriter;
}
// in core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@Override
public synchronized void newIndexWriter(SolrCore core) throws IOException {
if (indexWriter != null) {
indexWriter.close();
}
indexWriter = createMainIndexWriter(core, "DirectUpdateHandler2",
false, true);
}
// in core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@Override
public void decref(IndexWriterCloser closer) throws IOException {
synchronized (this) {
refCnt--;
if (refCnt == 0) {
try {
if (closer != null) {
closer.closeWriter(indexWriter);
} else if (indexWriter != null) {
indexWriter.close();
}
} catch (Throwable t) {
log.error("Error during shutdown of writer.", t);
}
try {
directoryFactory.close();
} catch (Throwable t) {
log.error("Error during shutdown of directory factory.", t);
}
try {
cancelRecovery();
} catch (Throwable t) {
log.error("Error cancelling recovery", t);
}
closed = true;
}
}
}
// in core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@Override
public synchronized void rollbackIndexWriter(SolrCore core) throws IOException {
indexWriter.rollback();
newIndexWriter(core);
}
// in core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
protected SolrIndexWriter createMainIndexWriter(SolrCore core, String name,
boolean removeAllExisting, boolean forceNewDirectory) throws IOException {
return new SolrIndexWriter(name, core.getNewIndexDir(),
core.getDirectoryFactory(), removeAllExisting, core.getSchema(),
core.getSolrConfig().indexConfig, core.getDeletionPolicy(), core.getCodec(), forceNewDirectory);
}
// in core/src/java/org/apache/solr/update/SolrIndexWriter.java
private static InfoStream toInfoStream(SolrIndexConfig config) throws IOException {
String infoStreamFile = config.infoStreamFile;
if (infoStreamFile != null) {
File f = new File(infoStreamFile);
File parent = f.getParentFile();
if (parent != null) parent.mkdirs();
FileOutputStream fos = new FileOutputStream(f, true);
return new PrintStreamInfoStream(new PrintStream(fos, true));
} else {
return InfoStream.NO_OUTPUT;
}
}
// in core/src/java/org/apache/solr/update/SolrIndexWriter.java
@Override
public void close() throws IOException {
log.debug("Closing Writer " + name);
Directory directory = getDirectory();
final InfoStream infoStream = isClosed ? null : getConfig().getInfoStream();
try {
super.close();
if(infoStream != null) {
infoStream.close();
}
} finally {
isClosed = true;
directoryFactory.release(directory);
numCloses.incrementAndGet();
}
}
// in core/src/java/org/apache/solr/update/SolrIndexWriter.java
@Override
public void rollback() throws IOException {
try {
super.rollback();
} finally {
isClosed = true;
}
}
// in core/src/java/org/apache/solr/update/SolrCmdDistributor.java
public void distribDelete(DeleteUpdateCommand cmd, List<Node> urls, ModifiableSolrParams params) throws IOException {
checkResponses(false);
// deletes by id and deletes by query currently take the same buffered path
doDelete(cmd, urls, params);
}
// in core/src/java/org/apache/solr/update/SolrCmdDistributor.java
public void distribAdd(AddUpdateCommand cmd, List<Node> nodes, ModifiableSolrParams params) throws IOException {
checkResponses(false);
// make sure any pending deletes are flushed
flushDeletes(1);
// TODO: this is brittle
// need to make a clone since these commands may be reused
AddUpdateCommand clone = new AddUpdateCommand(null);
clone.solrDoc = cmd.solrDoc;
clone.commitWithin = cmd.commitWithin;
clone.overwrite = cmd.overwrite;
clone.setVersion(cmd.getVersion());
AddRequest addRequest = new AddRequest();
addRequest.cmd = clone;
addRequest.params = params;
for (Node node : nodes) {
List<AddRequest> alist = adds.get(node);
if (alist == null) {
alist = new ArrayList<AddRequest>(2);
adds.put(node, alist);
}
alist.add(addRequest);
}
flushAdds(maxBufferedAddsPerServer);
}
// in core/src/java/org/apache/solr/update/SolrCmdDistributor.java
public void distribCommit(CommitUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {
// Wait for all outstanding responses to make sure that a commit
// can't sneak in ahead of adds or deletes we already sent.
// We could do this on a per-server basis, but it's more complex
// and this solution will lead to commits happening closer together.
checkResponses(true);
// currently, we don't try to piggyback on outstanding adds or deletes
UpdateRequestExt ureq = new UpdateRequestExt();
ureq.setParams(params);
addCommit(ureq, cmd);
for (Node node : nodes) {
submit(ureq, node);
}
// if the command wanted to block until everything was committed,
// then do that here.
if (cmd.waitSearcher) {
checkResponses(true);
}
}
// in core/src/java/org/apache/solr/update/SolrCmdDistributor.java
private void doDelete(DeleteUpdateCommand cmd, List<Node> nodes,
ModifiableSolrParams params) throws IOException {
flushAdds(1);
DeleteUpdateCommand clonedCmd = clone(cmd);
DeleteRequest deleteRequest = new DeleteRequest();
deleteRequest.cmd = clonedCmd;
deleteRequest.params = params;
for (Node node : nodes) {
List<DeleteRequest> dlist = deletes.get(node);
if (dlist == null) {
dlist = new ArrayList<DeleteRequest>(2);
deletes.put(node, dlist);
}
dlist.add(deleteRequest);
}
flushDeletes(maxBufferedDeletesPerServer);
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
@Override
public Object resolve(Object o, JavaBinCodec codec) throws IOException {
if (o instanceof BytesRef) {
BytesRef br = (BytesRef)o;
codec.writeByteArray(br.bytes, br.offset, br.length);
return null;
}
return o;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
@Override
public void writeExternString(String s) throws IOException {
if (s == null) {
writeTag(NULL);
return;
}
// no need to synchronize globalStringMap - it's only updated before the first record is written to the log
Integer idx = globalStringMap.get(s);
if (idx == null) {
// write a normal string
writeStr(s);
} else {
// write the extern string
writeTag(EXTERN_STRING, idx);
}
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
@Override
public String readExternString(FastInputStream fis) throws IOException {
int idx = readSize(fis);
if (idx != 0) { // idx != 0 is the index of the extern string
// no need to synchronize globalStringList - it's only updated before the first record is written to the log
return globalStringList.get(idx - 1);
} else { // idx == 0 means it has a string value
// this shouldn't happen with this codec subclass.
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Corrupt transaction log");
}
}
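// Editor's sketch: the extern-string scheme the two overrides above rely on. The
// log header carries a global string list (typically field names); later records
// may replace a repeated string with its 1-based index into that list, and index 0
// is reserved to mean "an inline string follows".
class ExternStringSketch {
  final java.util.List<String> globalList = new java.util.ArrayList<String>();
  final java.util.Map<String, Integer> globalMap = new java.util.HashMap<String, Integer>();

  void register(String s) { // done before the first record is written
    globalList.add(s);
    globalMap.put(s, globalList.size()); // 1-based, as in readHeader()
  }

  int indexOrZero(String s) { // 0 => caller writes the string inline
    Integer idx = globalMap.get(s);
    return idx == null ? 0 : idx.intValue();
  }

  String resolve(int idx) { // mirrors readExternString() for idx != 0
    return globalList.get(idx - 1);
  }
}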
// in core/src/java/org/apache/solr/update/TransactionLog.java
public boolean endsWithCommit() throws IOException {
long size;
synchronized (this) {
fos.flush();
size = fos.size();
}
// the end of the file should have the end message (added during a commit) plus a 4 byte size
byte[] buf = new byte[ END_MESSAGE.length() ];
long pos = size - END_MESSAGE.length() - 4;
if (pos < 0) return false;
ChannelFastInputStream is = new ChannelFastInputStream(channel, pos);
is.read(buf);
for (int i=0; i<buf.length; i++) {
if (buf[i] != END_MESSAGE.charAt(i)) return false;
}
return true;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
public void rollback(long pos) throws IOException {
synchronized (this) {
assert snapshot_size == pos;
fos.flush();
raf.setLength(pos);
fos.setWritten(pos);
assert fos.size() == pos;
numRecords = snapshot_numRecords;
}
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
private void readHeader(FastInputStream fis) throws IOException {
// read existing header
fis = fis != null ? fis : new ChannelFastInputStream(channel, 0);
LogCodec codec = new LogCodec();
Map header = (Map)codec.unmarshal(fis);
fis.readInt(); // skip size
// needed to read other records
synchronized (this) {
globalStringList = (List<String>)header.get("strings");
globalStringMap = new HashMap<String, Integer>(globalStringList.size());
for (int i=0; i<globalStringList.size(); i++) {
globalStringMap.put( globalStringList.get(i), i+1);
}
}
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
private void writeLogHeader(LogCodec codec) throws IOException {
long pos = fos.size();
assert pos == 0;
Map header = new LinkedHashMap<String,Object>();
header.put("SOLR_TLOG",1); // a magic string + version number
header.put("strings",globalStringList);
codec.marshal(header, fos);
endRecord(pos);
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
private void endRecord(long startRecordPosition) throws IOException {
fos.writeInt((int)(fos.size() - startRecordPosition));
numRecords++;
}
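// Editor's sketch: the record framing implied by endRecord(). Each record is the
// marshaled payload followed by a 4-byte length of that payload, which is what lets
// endsWithCommit() probe the file tail and lets ReverseReader walk the log backwards.
// Plain java.io stand-ins for Solr's FastOutputStream; a single record at offset 0.
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class TlogFramingSketch {
  static byte[] frame(byte[] payload) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bos);
    out.write(payload);       // the record body, at startRecordPosition == 0 here
    out.writeInt(out.size()); // trailing length, as endRecord() computes it
    return bos.toByteArray();
  }
}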
// in core/src/java/org/apache/solr/update/TransactionLog.java
public ReverseReader getReverseReader() throws IOException {
return new ReverseReader();
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
public Object next() throws IOException, InterruptedException {
long pos = fis.position();
synchronized (TransactionLog.this) {
if (trace) {
log.trace("Reading log record. pos="+pos+" currentSize="+fos.size());
}
if (pos >= fos.size()) {
return null;
}
fos.flushBuffer();
}
if (pos == 0) {
readHeader(fis);
// shouldn't currently happen - header and first record are currently written at the same time
synchronized (TransactionLog.this) {
if (fis.position() >= fos.size()) {
return null;
}
pos = fis.position();
}
}
Object o = codec.readVal(fis);
// skip over record size
int size = fis.readInt();
assert size == fis.position() - pos - 4;
return o;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
@Override
public SolrInputDocument readSolrInputDocument(FastInputStream dis) throws IOException {
// Given that the SolrInputDocument is last in an add record, it's OK to just skip
// reading it completely.
return null;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
public Object next() throws IOException {
if (prevPos <= 0) return null;
long endOfThisRecord = prevPos;
int thisLength = nextLength;
long recordStart = prevPos - thisLength; // back up to the beginning of the next record
prevPos = recordStart - 4; // back up 4 more to read the length of the next record
if (prevPos <= 0) return null; // this record is the header
long bufferPos = fis.getBufferPos();
if (prevPos >= bufferPos) {
// nothing to do... we're within the current buffer
} else {
// Position buffer so that this record is at the end.
// For small records, this will cause subsequent calls to next() to be within the buffer.
long seekPos = endOfThisRecord - fis.getBufferSize();
seekPos = Math.min(seekPos, prevPos); // seek to the start of the record if it's larger than the block size.
seekPos = Math.max(seekPos, 0);
fis.seek(seekPos);
fis.peek(); // cause buffer to be filled
}
fis.seek(prevPos);
nextLength = fis.readInt(); // this is the length of the *next* record (i.e. closer to the beginning)
// TODO: optionally skip document data
Object o = codec.readVal(fis);
// assert fis.position() == prevPos + 4 + thisLength; // this is only true if we read all the data (and we currently skip reading SolrInputDocument
return o;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
@Override
public int readWrappedStream(byte[] target, int offset, int len) throws IOException {
ByteBuffer bb = ByteBuffer.wrap(target, offset, len);
int ret = ch.read(bb, readFromStream);
return ret;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
public void seek(long position) throws IOException {
if (position <= readFromStream && position >= getBufferPos()) {
// seek within buffer
pos = (int)(position - getBufferPos());
} else {
// long currSize = ch.size(); // not needed - underlying read should handle (unless read never done)
// if (position > currSize) throw new EOFException("Read past EOF: seeking to " + position + " on file of size " + currSize + " file=" + ch);
readFromStream = position;
end = pos = 0;
}
assert position() == position;
}
// in core/src/java/org/apache/solr/update/TransactionLog.java
@Override
public void close() throws IOException {
ch.close();
}
// in core/src/java/org/apache/solr/core/StandardDirectoryFactory.java
@Override
protected Directory create(String path) throws IOException {
return FSDirectory.open(new File(path));
}
// in core/src/java/org/apache/solr/core/RAMDirectoryFactory.java
@Override
protected Directory create(String path) throws IOException {
return new RAMDirectory();
}
// in core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@Override
public void close() throws IOException {
synchronized (this) {
for (CacheValue val : byDirectoryCache.values()) {
val.directory.close();
}
byDirectoryCache.clear();
byPathCache.clear();
}
}
// in core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
private void close(Directory directory) throws IOException {
synchronized (this) {
CacheValue cacheValue = byDirectoryCache.get(directory);
if (cacheValue == null) {
throw new IllegalArgumentException("Unknown directory: " + directory
+ " " + byDirectoryCache);
}
cacheValue.refCnt--;
if (cacheValue.refCnt == 0 && cacheValue.doneWithDir) {
directory.close();
byDirectoryCache.remove(directory);
byPathCache.remove(cacheValue.path);
}
}
}
// in core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@Override
public final Directory get(String path, String rawLockType)
throws IOException {
return get(path, rawLockType, false);
}
// in core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@Override
public final Directory get(String path, String rawLockType, boolean forceNew)
throws IOException {
String fullPath = new File(path).getAbsolutePath();
synchronized (this) {
CacheValue cacheValue = byPathCache.get(fullPath);
Directory directory = null;
if (cacheValue != null) {
directory = cacheValue.directory;
if (forceNew) {
cacheValue.doneWithDir = true;
if (cacheValue.refCnt == 0) {
close(cacheValue.directory);
}
}
}
if (directory == null || forceNew) {
directory = create(fullPath);
CacheValue newCacheValue = new CacheValue();
newCacheValue.directory = directory;
newCacheValue.path = fullPath;
injectLockFactory(directory, path, rawLockType);
byDirectoryCache.put(directory, newCacheValue);
byPathCache.put(fullPath, newCacheValue);
} else {
cacheValue.refCnt++;
}
return directory;
}
}
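// Editor's sketch: the get()/release() contract the cache above implies. Every
// successful get() bumps the refcount on the cached Directory, so callers must pair
// it with release(); the Directory is only closed once its refcount reaches zero and
// the factory is done with the path. Path and lock type here are hypothetical.
class DirectoryUseSketch {
  static void withDirectory(org.apache.solr.core.DirectoryFactory factory) throws java.io.IOException {
    org.apache.lucene.store.Directory dir = factory.get("/var/solr/data/index", "native");
    try {
      // ... read from or write to dir ...
    } finally {
      factory.release(dir); // refCnt--; closed when 0 and doneWithDir
    }
  }
}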
// in core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
@Override
public void release(Directory directory) throws IOException {
if (directory == null) {
throw new NullPointerException();
}
close(directory);
}
// in core/src/java/org/apache/solr/core/CachingDirectoryFactory.java
private static Directory injectLockFactory(Directory dir, String lockPath,
String rawLockType) throws IOException {
if (null == rawLockType) {
// we default to "simple" for backwards compatibility
log.warn("No lockType configured for " + dir + " assuming 'simple'");
rawLockType = "simple";
}
final String lockType = rawLockType.toLowerCase(Locale.ENGLISH).trim();
if ("simple".equals(lockType)) {
// multiple SimpleFSLockFactory instances should be OK
dir.setLockFactory(new SimpleFSLockFactory(lockPath));
} else if ("native".equals(lockType)) {
dir.setLockFactory(new NativeFSLockFactory(lockPath));
} else if ("single".equals(lockType)) {
if (!(dir.getLockFactory() instanceof SingleInstanceLockFactory)) dir
.setLockFactory(new SingleInstanceLockFactory());
} else if ("none".equals(lockType)) {
// Recipe for disaster
log.error("CONFIGURATION WARNING: locks are disabled on " + dir);
dir.setLockFactory(NoLockFactory.getNoLockFactory());
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Unrecognized lockType: " + rawLockType);
}
return dir;
}
// in core/src/java/org/apache/solr/core/NIOFSDirectoryFactory.java
@Override
protected Directory create(String path) throws IOException {
return new NIOFSDirectory(new File(path));
}
// in core/src/java/org/apache/solr/core/MMapDirectoryFactory.java
@Override
protected Directory create(String path) throws IOException {
MMapDirectory mapDirectory = new MMapDirectory(new File(path));
try {
mapDirectory.setUseUnmap(unmapHack);
} catch (Exception e) {
log.warn("Unmap not supported on this JVM, continuing on without setting unmap", e);
}
mapDirectory.setMaxChunkSize(maxChunk);
return mapDirectory;
}
// in core/src/java/org/apache/solr/core/SolrCore.java
public SolrCore reload(SolrResourceLoader resourceLoader) throws IOException,
ParserConfigurationException, SAXException {
// TODO - what if indexwriter settings have changed
SolrConfig config = new SolrConfig(resourceLoader,
getSolrConfig().getName(), null);
IndexSchema schema = new IndexSchema(config,
getSchema().getResourceName(), null);
updateHandler.incref();
SolrCore core = new SolrCore(getName(), null, config,
schema, coreDescriptor, updateHandler);
return core;
}
// in core/src/java/org/apache/solr/core/SolrCore.java
public SolrIndexSearcher newSearcher(String name) throws IOException {
return new SolrIndexSearcher(this, getNewIndexDir(), schema, getSolrConfig().indexConfig, name, false, directoryFactory);
}
// in core/src/java/org/apache/solr/core/SolrCore.java
public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher) throws IOException {
return getSearcher(forceNew, returnSearcher, waitSearcher, false);
}
// in core/src/java/org/apache/solr/core/SolrCore.java
public RefCounted<SolrIndexSearcher> getSearcher(boolean forceNew, boolean returnSearcher, final Future[] waitSearcher, boolean updateHandlerReopens) throws IOException {
// Opening an index may take some time, so make sure two threads
// aren't trying to open one at the same time if it isn't necessary.
synchronized (searcherLock) {
// see if we can return the current searcher
if (_searcher!=null && !forceNew) {
if (returnSearcher) {
_searcher.incref();
return _searcher;
} else {
return null;
}
}
// check to see if we can wait for someone else's searcher to be set
if (onDeckSearchers>0 && !forceNew && _searcher==null) {
try {
searcherLock.wait();
} catch (InterruptedException e) {
log.info(SolrException.toStr(e));
}
}
// check again: see if we can return right now
if (_searcher!=null && !forceNew) {
if (returnSearcher) {
_searcher.incref();
return _searcher;
} else {
return null;
}
}
// At this point, we know we need to open a new searcher...
// first: increment count to signal other threads that we are
// opening a new searcher.
onDeckSearchers++;
if (onDeckSearchers < 1) {
// should never happen... just a sanity check
log.error(logid+"ERROR!!! onDeckSearchers is " + onDeckSearchers);
onDeckSearchers=1; // reset
} else if (onDeckSearchers > maxWarmingSearchers) {
onDeckSearchers--;
String msg="Error opening new searcher. exceeded limit of maxWarmingSearchers="+maxWarmingSearchers + ", try again later.";
log.warn(logid+""+ msg);
// HTTP 503==service unavailable, or 409==Conflict
throw new SolrException(SolrException.ErrorCode.SERVICE_UNAVAILABLE,msg);
} else if (onDeckSearchers > 1) {
log.warn(logid+"PERFORMANCE WARNING: Overlapping onDeckSearchers=" + onDeckSearchers);
}
}
// a signal to decrement onDeckSearchers if something goes wrong.
final boolean[] decrementOnDeckCount=new boolean[]{true};
RefCounted<SolrIndexSearcher> currSearcherHolder = null; // searcher we are autowarming from
RefCounted<SolrIndexSearcher> searchHolder = null;
boolean success = false;
openSearcherLock.lock();
try {
searchHolder = openNewSearcher(updateHandlerReopens, false);
// the searchHolder will be incremented once already (and it will eventually be assigned to _searcher when registered)
// increment it again if we are going to return it to the caller.
if (returnSearcher) {
searchHolder.incref();
}
final RefCounted<SolrIndexSearcher> newSearchHolder = searchHolder;
final SolrIndexSearcher newSearcher = newSearchHolder.get();
boolean alreadyRegistered = false;
synchronized (searcherLock) {
if (_searcher == null) {
// if there isn't a current searcher then we may
// want to register this one before warming is complete instead of waiting.
if (solrConfig.useColdSearcher) {
registerSearcher(newSearchHolder);
decrementOnDeckCount[0]=false;
alreadyRegistered=true;
}
} else {
// get a reference to the current searcher for purposes of autowarming.
currSearcherHolder=_searcher;
currSearcherHolder.incref();
}
}
final SolrIndexSearcher currSearcher = currSearcherHolder==null ? null : currSearcherHolder.get();
Future future=null;
// warm the new searcher based on the current searcher.
// should this go before the other event handlers or after?
if (currSearcher != null) {
future = searcherExecutor.submit(
new Callable() {
public Object call() throws Exception {
try {
newSearcher.warm(currSearcher);
} catch (Throwable e) {
SolrException.log(log,e);
}
return null;
}
}
);
}
if (currSearcher==null && firstSearcherListeners.size() > 0) {
future = searcherExecutor.submit(
new Callable() {
public Object call() throws Exception {
try {
for (SolrEventListener listener : firstSearcherListeners) {
listener.newSearcher(newSearcher,null);
}
} catch (Throwable e) {
SolrException.log(log,null,e);
}
return null;
}
}
);
}
// in core/src/java/org/apache/solr/core/SolrCore.java
private void registerSearcher(RefCounted<SolrIndexSearcher> newSearcherHolder) throws IOException {
synchronized (searcherLock) {
try {
if (_searcher != null) {
_searcher.decref(); // dec refcount for this._searcher
_searcher=null;
}
_searcher = newSearcherHolder;
SolrIndexSearcher newSearcher = newSearcherHolder.get();
/***
// a searcher may have been warming asynchronously while the core was being closed.
// if this happens, just close the searcher.
if (isClosed()) {
// NOTE: this should not happen now - see close() for details.
// *BUT* if we left it enabled, this could still happen before
// close() stopped the executor - so disable this test for now.
log.error("Ignoring searcher register on closed core:" + newSearcher);
_searcher.decref();
}
***/
newSearcher.register(); // register subitems (caches)
log.info(logid+"Registered new searcher " + newSearcher);
} catch (Throwable e) {
// an exception in register() shouldn't be fatal.
log(e);
} finally {
// wake up anyone waiting for a searcher
// even in the face of errors.
onDeckSearchers--;
searcherLock.notifyAll();
}
}
}
// in core/src/java/org/apache/solr/core/SolrCore.java
@Override
public void write(Writer writer, SolrQueryRequest request, SolrQueryResponse response) throws IOException {
getWrappedWriter().write(writer, request, response);
}
// in core/src/java/org/apache/solr/core/SolrResourceLoader.java
public List<String> getLines(String resource) throws IOException {
return getLines(resource, UTF_8);
}
// in core/src/java/org/apache/solr/core/SolrResourceLoader.java
public List<String> getLines(String resource,
String encoding) throws IOException {
return getLines(resource, Charset.forName(encoding));
}
// in core/src/java/org/apache/solr/core/SolrResourceLoader.java
public List<String> getLines(String resource, Charset charset) throws IOException{
BufferedReader input = null;
ArrayList<String> lines;
try {
input = new BufferedReader(new InputStreamReader(openResource(resource),
charset.newDecoder()
.onMalformedInput(CodingErrorAction.REPORT)
.onUnmappableCharacter(CodingErrorAction.REPORT)));
lines = new ArrayList<String>();
for (String word=null; (word=input.readLine())!=null;) {
// skip initial BOM marker
if (lines.isEmpty() && word.length() > 0 && word.charAt(0) == '\uFEFF')
word = word.substring(1);
// skip comments
if (word.startsWith("#")) continue;
word=word.trim();
// skip blank lines
if (word.length()==0) continue;
lines.add(word);
}
} catch (CharacterCodingException ex) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Error loading resource (wrong encoding?): " + resource, ex);
} finally {
// always release the underlying stream
IOUtils.closeQuietly(input);
}
return lines;
}
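// usage sketch (not from the Solr sources): getLines() returns the resource
// line by line with the BOM, '#' comments, and blank lines stripped. The
// instance dir and resource name are illustrative assumptions.
import java.util.List;
import org.apache.solr.core.SolrResourceLoader;
public class GetLinesSketch {
public static void main(String[] args) throws Exception {
SolrResourceLoader loader = new SolrResourceLoader("/path/to/solr/home");
List<String> stopwords = loader.getLines("stopwords.txt"); // UTF-8 by default
for (String word : stopwords) System.out.println(word);
}
}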
// in core/src/java/org/apache/solr/core/CoreContainer.java
public CoreContainer initialize() throws IOException,
ParserConfigurationException, SAXException {
CoreContainer cores = null;
String solrHome = SolrResourceLoader.locateSolrHome();
File fconf = new File(solrHome, containerConfigFilename == null ? "solr.xml"
: containerConfigFilename);
log.info("looking for solr.xml: " + fconf.getAbsolutePath());
cores = new CoreContainer(solrHome);
if (fconf.exists()) {
cores.load(solrHome, fconf);
} else {
log.info("no solr.xml file found - using default");
cores.load(solrHome, new InputSource(new ByteArrayInputStream(DEF_SOLR_XML.getBytes("UTF-8"))));
cores.configFile = fconf;
}
containerConfigFilename = cores.getConfigFile().getName();
return cores;
}
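// usage sketch (not from the Solr sources): bootstrapping a container the way
// initialize() above does, then borrowing a core. The core name is an
// illustrative assumption.
import org.apache.solr.core.CoreContainer;
import org.apache.solr.core.SolrCore;
public class CoreContainerSketch {
public static void main(String[] args) throws Exception {
CoreContainer.Initializer init = new CoreContainer.Initializer();
CoreContainer cores = init.initialize(); // reads solr.xml, or falls back to the default
SolrCore core = cores.getCore("collection1"); // increments the core's reference count
try {
// ... use the core ...
} finally {
core.close(); // decrements the reference count
}
cores.shutdown();
}
}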
// in core/src/java/org/apache/solr/core/CoreContainer.java
public void load(String dir, File configFile ) throws ParserConfigurationException, IOException, SAXException {
this.configFile = configFile;
this.load(dir, new InputSource(configFile.toURI().toASCIIString()));
}
// in core/src/java/org/apache/solr/core/CoreContainer.java
public void load(String dir, InputSource cfgis)
throws ParserConfigurationException, IOException, SAXException {
if (null == dir) {
// don't rely on SolrResourceLoader(), determine explicitly first
dir = SolrResourceLoader.locateSolrHome();
}
log.info("Loading CoreContainer using Solr Home: '{}'", dir);
this.loader = new SolrResourceLoader(dir);
solrHome = loader.getInstanceDir();
Config cfg = new Config(loader, null, cfgis, null, false);
// keep orig config for persist to consult
try {
this.cfg = new Config(loader, null, copyDoc(cfg.getDocument()));
} catch (TransformerException e) {
throw new SolrException(ErrorCode.SERVER_ERROR, "", e);
}
cfg.substituteProperties();
// Initialize Logging
if(cfg.getBool("solr/logging/@enabled",true)) {
String slf4jImpl = null;
String fname = cfg.get("solr/logging/watcher/@class", null);
try {
slf4jImpl = StaticLoggerBinder.getSingleton().getLoggerFactoryClassStr();
if(fname==null) {
if( slf4jImpl.indexOf("Log4j") > 0) {
log.warn("Log watching is not yet implemented for log4j" );
}
else if( slf4jImpl.indexOf("JDK") > 0) {
fname = "JUL";
}
}
}
catch(Throwable ex) {
log.warn("Unable to read SLF4J version. LogWatcher will be disabled: "+ex);
}
// Now load the framework
if(fname!=null) {
if("JUL".equalsIgnoreCase(fname)) {
logging = new JulWatcher(slf4jImpl);
}
// else if( "Log4j".equals(fname) ) {
// logging = new Log4jWatcher(slf4jImpl);
// }
else {
try {
logging = loader.newInstance(fname, LogWatcher.class);
}
catch (Throwable e) {
log.warn("Unable to load LogWatcher", e);
}
}
if( logging != null ) {
ListenerConfig v = new ListenerConfig();
v.size = cfg.getInt("solr/logging/watcher/@size",50);
v.threshold = cfg.get("solr/logging/watcher/@threshold",null);
if(v.size>0) {
log.info("Registering Log Listener");
logging.registerListener(v, this);
}
}
}
}
String dcoreName = cfg.get("solr/cores/@defaultCoreName", null);
if(dcoreName != null && !dcoreName.isEmpty()) {
defaultCoreName = dcoreName;
}
persistent = cfg.getBool("solr/@persistent", false);
libDir = cfg.get("solr/@sharedLib", null);
zkHost = cfg.get("solr/@zkHost" , null);
adminPath = cfg.get("solr/cores/@adminPath", null);
shareSchema = cfg.getBool("solr/cores/@shareSchema", DEFAULT_SHARE_SCHEMA);
zkClientTimeout = cfg.getInt("solr/cores/@zkClientTimeout", DEFAULT_ZK_CLIENT_TIMEOUT);
hostPort = cfg.get("solr/cores/@hostPort", DEFAULT_HOST_PORT);
hostContext = cfg.get("solr/cores/@hostContext", DEFAULT_HOST_CONTEXT);
host = cfg.get("solr/cores/@host", null);
if(shareSchema){
indexSchemaCache = new ConcurrentHashMap<String ,IndexSchema>();
}
adminHandler = cfg.get("solr/cores/@adminHandler", null );
managementPath = cfg.get("solr/cores/@managementPath", null );
zkClientTimeout = Integer.parseInt(System.getProperty("zkClientTimeout", Integer.toString(zkClientTimeout)));
initZooKeeper(zkHost, zkClientTimeout);
if (libDir != null) {
File f = FileUtils.resolvePath(new File(dir), libDir);
log.info( "loading shared library: "+f.getAbsolutePath() );
libLoader = SolrResourceLoader.createClassLoader(f, null);
}
if (adminPath != null) {
if (adminHandler == null) {
coreAdminHandler = new CoreAdminHandler(this);
} else {
coreAdminHandler = this.createMultiCoreHandler(adminHandler);
}
}
try {
containerProperties = readProperties(cfg, ((NodeList) cfg.evaluate(DEFAULT_HOST_CONTEXT, XPathConstants.NODESET)).item(0));
} catch (Throwable e) {
SolrException.log(log,null,e);
}
NodeList nodes = (NodeList)cfg.evaluate("solr/cores/core", XPathConstants.NODESET);
for (int i=0; i<nodes.getLength(); i++) {
Node node = nodes.item(i);
try {
String rawName = DOMUtil.getAttr(node, "name", null);
if (null == rawName) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Each core in solr.xml must have a 'name'");
}
String name = rawName;
CoreDescriptor p = new CoreDescriptor(this, name, DOMUtil.getAttr(node, "instanceDir", null));
// deal with optional settings
String opt = DOMUtil.getAttr(node, "config", null);
if (opt != null) {
p.setConfigName(opt);
}
opt = DOMUtil.getAttr(node, "schema", null);
if (opt != null) {
p.setSchemaName(opt);
}
if (zkController != null) {
opt = DOMUtil.getAttr(node, "shard", null);
if (opt != null && opt.length() > 0) {
p.getCloudDescriptor().setShardId(opt);
}
opt = DOMUtil.getAttr(node, "collection", null);
if (opt != null) {
p.getCloudDescriptor().setCollectionName(opt);
}
opt = DOMUtil.getAttr(node, "roles", null);
if(opt != null){
p.getCloudDescriptor().setRoles(opt);
}
}
opt = DOMUtil.getAttr(node, "properties", null);
if (opt != null) {
p.setPropertiesName(opt);
}
opt = DOMUtil.getAttr(node, CoreAdminParams.DATA_DIR, null);
if (opt != null) {
p.setDataDir(opt);
}
p.setCoreProperties(readProperties(cfg, node));
SolrCore core = create(p);
register(name, core, false);
// track original names
coreToOrigName.put(core, rawName);
}
catch (Throwable ex) {
SolrException.log(log,null,ex);
}
}
}
// in core/src/java/org/apache/solr/core/CoreContainer.java
public SolrCore create(CoreDescriptor dcore) throws ParserConfigurationException, IOException, SAXException {
// Make the instanceDir relative to the cores instanceDir if not absolute
File idir = new File(dcore.getInstanceDir());
if (!idir.isAbsolute()) {
idir = new File(solrHome, dcore.getInstanceDir());
}
String instanceDir = idir.getPath();
log.info("Creating SolrCore '{}' using instanceDir: {}",
dcore.getName(), instanceDir);
// Initialize the solr config
SolrResourceLoader solrLoader = null;
SolrConfig config = null;
String zkConfigName = null;
if(zkController == null) {
solrLoader = new SolrResourceLoader(instanceDir, libLoader, getCoreProps(instanceDir, dcore.getPropertiesName(),dcore.getCoreProperties()));
config = new SolrConfig(solrLoader, dcore.getConfigName(), null);
} else {
try {
String collection = dcore.getCloudDescriptor().getCollectionName();
zkController.createCollectionZkNode(dcore.getCloudDescriptor());
zkConfigName = zkController.readConfigName(collection);
if (zkConfigName == null) {
log.error("Could not find config name for collection:" + collection);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"Could not find config name for collection:" + collection);
}
solrLoader = new ZkSolrResourceLoader(instanceDir, zkConfigName, libLoader, getCoreProps(instanceDir, dcore.getPropertiesName(),dcore.getCoreProperties()), zkController);
config = getSolrConfigFromZk(zkConfigName, dcore.getConfigName(), solrLoader);
} catch (KeeperException e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
}
}
IndexSchema schema = null;
if (indexSchemaCache != null) {
if (zkController != null) {
File schemaFile = new File(dcore.getSchemaName());
if (!schemaFile.isAbsolute()) {
schemaFile = new File(solrLoader.getInstanceDir() + "conf"
+ File.separator + dcore.getSchemaName());
}
if (schemaFile.exists()) {
String key = schemaFile.getAbsolutePath()
+ ":"
+ new SimpleDateFormat("yyyyMMddHHmmss", Locale.US).format(new Date(
schemaFile.lastModified()));
schema = indexSchemaCache.get(key);
if (schema == null) {
log.info("creating new schema object for core: " + dcore.name);
schema = new IndexSchema(config, dcore.getSchemaName(), null);
indexSchemaCache.put(key, schema);
} else {
log.info("re-using schema object for core: " + dcore.name);
}
}
} else {
// TODO: handle caching from ZooKeeper - perhaps using ZooKeepers versioning
// Don't like this cache though - how does it empty as last modified changes?
}
}
if(schema == null){
if(zkController != null) {
try {
schema = getSchemaFromZk(zkConfigName, dcore.getSchemaName(), config, solrLoader);
} catch (KeeperException e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
}
} else {
schema = new IndexSchema(config, dcore.getSchemaName(), null);
}
}
SolrCore core = new SolrCore(dcore.getName(), null, config, schema, dcore);
if (zkController == null && core.getUpdateHandler().getUpdateLog() != null) {
// always kick off recovery if we are in standalone mode.
core.getUpdateHandler().getUpdateLog().recoverFromLog();
}
return core;
}
// in core/src/java/org/apache/solr/core/CoreContainer.java
public void reload(String name) throws ParserConfigurationException, IOException, SAXException {
name= checkDefault(name);
SolrCore core;
synchronized(cores) {
core = cores.get(name);
}
if (core == null)
throw new SolrException( SolrException.ErrorCode.BAD_REQUEST, "No such core: " + name );
CoreDescriptor cd = core.getCoreDescriptor();
File instanceDir = new File(cd.getInstanceDir());
if (!instanceDir.isAbsolute()) {
instanceDir = new File(getSolrHome(), cd.getInstanceDir());
}
log.info("Reloading SolrCore '{}' using instanceDir: {}",
cd.getName(), instanceDir.getAbsolutePath());
SolrResourceLoader solrLoader;
if(zkController == null) {
solrLoader = new SolrResourceLoader(instanceDir.getAbsolutePath(), libLoader, getCoreProps(instanceDir.getAbsolutePath(), cd.getPropertiesName(),cd.getCoreProperties()));
} else {
try {
String collection = cd.getCloudDescriptor().getCollectionName();
zkController.createCollectionZkNode(cd.getCloudDescriptor());
String zkConfigName = zkController.readConfigName(collection);
if (zkConfigName == null) {
log.error("Could not find config name for collection:" + collection);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"Could not find config name for collection:" + collection);
}
solrLoader = new ZkSolrResourceLoader(instanceDir.getAbsolutePath(), zkConfigName, libLoader, getCoreProps(instanceDir.getAbsolutePath(), cd.getPropertiesName(),cd.getCoreProperties()), zkController);
} catch (KeeperException e) {
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
} catch (InterruptedException e) {
// Restore the interrupted status
Thread.currentThread().interrupt();
log.error("", e);
throw new ZooKeeperException(SolrException.ErrorCode.SERVER_ERROR,
"", e);
}
}
SolrCore newCore = core.reload(solrLoader);
// keep core to orig name link
String origName = coreToOrigName.remove(core);
if (origName != null) {
coreToOrigName.put(newCore, origName);
}
register(name, newCore, false);
}
// in core/src/java/org/apache/solr/core/CoreContainer.java
private SolrConfig getSolrConfigFromZk(String zkConfigName, String solrConfigFileName,
SolrResourceLoader resourceLoader) throws IOException,
ParserConfigurationException, SAXException, KeeperException,
InterruptedException {
byte[] config = zkController.getConfigFileData(zkConfigName, solrConfigFileName);
InputSource is = new InputSource(new ByteArrayInputStream(config));
is.setSystemId(SystemIdResolver.createSystemIdFromResourceName(solrConfigFileName));
SolrConfig cfg = solrConfigFileName == null ? new SolrConfig(
resourceLoader, SolrConfig.DEFAULT_CONF_FILE, is) : new SolrConfig(
resourceLoader, solrConfigFileName, is);
return cfg;
}
// in core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
public void onInit(List commits) throws IOException {
log.info("SolrDeletionPolicy.onInit: commits:" + str(commits));
updateCommits((List<IndexCommit>) commits);
}
// in core/src/java/org/apache/solr/core/SolrDeletionPolicy.java
public void onCommit(List commits) throws IOException {
log.info("SolrDeletionPolicy.onCommit: commits:" + str(commits));
updateCommits((List<IndexCommit>) commits);
}
// in core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
public void onInit(List list) throws IOException {
List<IndexCommitWrapper> wrapperList = wrap(list);
deletionPolicy.onInit(wrapperList);
updateCommitPoints(wrapperList);
cleanReserves();
}
// in core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
public void onCommit(List list) throws IOException {
List<IndexCommitWrapper> wrapperList = wrap(list);
deletionPolicy.onCommit(wrapperList);
updateCommitPoints(wrapperList);
cleanReserves();
}
// in core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@Override
public Collection getFileNames() throws IOException {
return delegate.getFileNames();
}
// in core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
@Override
public Map getUserData() throws IOException {
return delegate.getUserData();
}
// in core/src/java/org/apache/solr/core/IndexDeletionPolicyWrapper.java
public static long getCommitTimestamp(IndexCommit commit) throws IOException {
final Map<String,String> commitData = commit.getUserData();
String commitTime = commitData.get(SolrIndexWriter.COMMIT_TIME_MSEC_KEY);
if (commitTime != null) {
return Long.parseLong(commitTime);
} else {
return 0;
}
}
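// usage sketch (not from the Solr sources): reading the commit-time user data
// off the newest IndexCommit with getCommitTimestamp() above. The index path
// is an illustrative assumption.
import java.io.File;
import java.util.List;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexCommit;
import org.apache.lucene.store.FSDirectory;
import org.apache.solr.core.IndexDeletionPolicyWrapper;
public class CommitTimestampSketch {
public static void main(String[] args) throws Exception {
List<IndexCommit> commits = DirectoryReader.listCommits(FSDirectory.open(new File("/tmp/index")));
IndexCommit newest = commits.get(commits.size() - 1); // listCommits() orders oldest first
// returns 0 when the commit carries no commitTimeMSec entry in its user data
long timestamp = IndexDeletionPolicyWrapper.getCommitTimestamp(newest);
System.out.println("newest commit at " + timestamp);
}
}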
// in core/src/java/org/apache/solr/core/SolrXMLSerializer.java
void persist(Writer w, SolrXMLDef solrXMLDef) throws IOException {
w.write("<?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n");
w.write("<solr");
Map<String,String> rootSolrAttribs = solrXMLDef.solrAttribs;
Set<String> solrAttribKeys = rootSolrAttribs.keySet();
for (String key : solrAttribKeys) {
String value = rootSolrAttribs.get(key);
writeAttribute(w, key, value);
}
w.write(">\n");
Properties containerProperties = solrXMLDef.containerProperties;
if (containerProperties != null && !containerProperties.isEmpty()) {
writeProperties(w, containerProperties, " ");
}
w.write(INDENT + "<cores");
Map<String,String> coresAttribs = solrXMLDef.coresAttribs;
Set<String> coreAttribKeys = coresAttribs.keySet();
for (String key : coreAttribKeys) {
String value = coresAttribs.get(key);
writeAttribute(w, key, value);
}
w.write(">\n");
for (SolrCoreXMLDef coreDef : solrXMLDef.coresDefs) {
persist(w, coreDef);
}
w.write(INDENT + "</cores>\n");
w.write("</solr>\n");
}
// in core/src/java/org/apache/solr/core/SolrXMLSerializer.java
private void persist(Writer w, SolrCoreXMLDef coreDef) throws IOException {
w.write(INDENT + INDENT + "<core");
Set<String> keys = coreDef.coreAttribs.keySet();
for (String key : keys) {
writeAttribute(w, key, coreDef.coreAttribs.get(key));
}
Properties properties = coreDef.coreProperties;
if (properties == null || properties.isEmpty()) w.write("/>\n"); // core
else {
w.write(">\n");
writeProperties(w, properties, " ");
w.write(INDENT + INDENT + "</core>\n");
}
}
// in core/src/java/org/apache/solr/core/SolrXMLSerializer.java
private void writeProperties(Writer w, Properties props, String indent)
throws IOException {
for (Map.Entry<Object,Object> entry : props.entrySet()) {
w.write(indent + "<property");
writeAttribute(w, "name", entry.getKey());
writeAttribute(w, "value", entry.getValue());
w.write("/>\n");
}
}
// in core/src/java/org/apache/solr/core/SolrXMLSerializer.java
private void writeAttribute(Writer w, String name, Object value)
throws IOException {
if (value == null) return;
w.write(" ");
w.write(name);
w.write("=\"");
XML.escapeAttributeValue(value.toString(), w);
w.write("\"");
}
// in core/src/java/org/apache/solr/core/SolrXMLSerializer.java
private static void fileCopy(File src, File dest) throws IOException {
IOException xforward = null;
FileInputStream fis = null;
FileOutputStream fos = null;
FileChannel fcin = null;
FileChannel fcout = null;
try {
fis = new FileInputStream(src);
fos = new FileOutputStream(dest);
fcin = fis.getChannel();
fcout = fos.getChannel();
// do the file copy 32Mb at a time
final int MB32 = 32 * 1024 * 1024;
long size = fcin.size();
long position = 0;
while (position < size) {
position += fcin.transferTo(position, MB32, fcout);
}
} catch (IOException xio) {
xforward = xio;
} finally {
if (fis != null) try {
fis.close();
fis = null;
} catch (IOException xio) {}
if (fos != null) try {
fos.close();
fos = null;
} catch (IOException xio) {}
if (fcin != null && fcin.isOpen()) try {
fcin.close();
fcin = null;
} catch (IOException xio) {}
if (fcout != null && fcout.isOpen()) try {
fcout.close();
fcout = null;
} catch (IOException xio) {}
}
if (xforward != null) {
throw xforward;
}
}
// in core/src/java/org/apache/solr/core/SimpleFSDirectoryFactory.java
@Override
protected Directory create(String path) throws IOException {
return new SimpleFSDirectory(new File(path));
}
// in core/src/java/org/apache/solr/core/NRTCachingDirectoryFactory.java
@Override
protected Directory create(String path) throws IOException {
return new NRTCachingDirectory(FSDirectory.open(new File(path)), 4, 48);
}
// in core/src/java/org/apache/solr/core/StandardIndexReaderFactory.java
@Override
public DirectoryReader newReader(Directory indexDir, SolrCore core) throws IOException {
return DirectoryReader.open(indexDir, termInfosIndexDivisor);
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public static int numDocs(SolrIndexSearcher s, Query q, Query f)
throws IOException {
return (null == f) ? s.getDocSet(q).size() : s.numDocs(q,f);
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public static void optimizePreFetchDocs(ResponseBuilder rb,
DocList docs,
Query query,
SolrQueryRequest req,
SolrQueryResponse res) throws IOException {
SolrIndexSearcher searcher = req.getSearcher();
if(!searcher.enableLazyFieldLoading) {
// nothing to do
return;
}
ReturnFields returnFields = res.getReturnFields();
if(returnFields.getLuceneFieldNames() != null) {
Set<String> fieldFilter = returnFields.getLuceneFieldNames();
if (rb.doHighlights) {
// copy return fields list
fieldFilter = new HashSet<String>(fieldFilter);
// add highlight fields
SolrHighlighter highlighter = HighlightComponent.getHighlighter(req.getCore());
for (String field: highlighter.getHighlightFields(query, req, null))
fieldFilter.add(field);
// fetch unique key if one exists.
SchemaField keyField = req.getSearcher().getSchema().getUniqueKeyField();
if(null != keyField)
fieldFilter.add(keyField.getName());
}
// get documents
DocIterator iter = docs.iterator();
for (int i=0; i<docs.size(); i++) {
searcher.doc(iter.nextDoc(), fieldFilter);
}
}
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public static NamedList doStandardDebug(SolrQueryRequest req,
String userQuery,
Query query,
DocList results, boolean dbgQuery, boolean dbgResults)
throws IOException {
NamedList dbg = new SimpleOrderedMap();
SolrIndexSearcher searcher = req.getSearcher();
IndexSchema schema = req.getSchema();
boolean explainStruct
= req.getParams().getBool(CommonParams.EXPLAIN_STRUCT, false);
if (dbgQuery) {
/* userQuery may have been pre-processed .. expose that */
dbg.add("rawquerystring", req.getParams().get(CommonParams.Q));
dbg.add("querystring", userQuery);
/* QueryParsing.toString isn't perfect, use it to see converted
* values, use regular toString to see any attributes of the
* underlying Query it may have missed.
*/
dbg.add("parsedquery", QueryParsing.toString(query, schema));
dbg.add("parsedquery_toString", query.toString());
}
if (dbgResults) {
NamedList<Explanation> explain
= getExplanations(query, results, searcher, schema);
dbg.add("explain", explainStruct ?
explanationsToNamedLists(explain) :
explanationsToStrings(explain));
String otherQueryS = req.getParams().get(CommonParams.EXPLAIN_OTHER);
if (otherQueryS != null && otherQueryS.length() > 0) {
DocList otherResults = doSimpleQuery
(otherQueryS, req, 0, 10);
dbg.add("otherQuery", otherQueryS);
NamedList<Explanation> explainO
= getExplanations(query, otherResults, searcher, schema);
dbg.add("explainOther", explainStruct ?
explanationsToNamedLists(explainO) :
explanationsToStrings(explainO));
}
}
return dbg;
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public static NamedList<Explanation> getExplanations
(Query query,
DocList docs,
SolrIndexSearcher searcher,
IndexSchema schema) throws IOException {
NamedList<Explanation> explainList = new SimpleOrderedMap<Explanation>();
DocIterator iterator = docs.iterator();
for (int i=0; i<docs.size(); i++) {
int id = iterator.nextDoc();
Document doc = searcher.doc(id);
String strid = schema.printableUniqueKey(doc);
explainList.add(strid, searcher.explain(query, id) );
}
return explainList;
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public static DocList doSimpleQuery(String sreq,
SolrQueryRequest req,
int start, int limit) throws IOException {
List<String> commands = StrUtils.splitSmart(sreq,';');
String qs = commands.size() >= 1 ? commands.get(0) : "";
try {
Query query = QParser.getParser(qs, null, req).getQuery();
// If the first non-query, non-filter command is a simple sort on an indexed field, then
// we can use the Lucene sort ability.
Sort sort = null;
if (commands.size() >= 2) {
sort = QueryParsing.parseSort(commands.get(1), req);
}
DocList results = req.getSearcher().getDocList(query,(DocSet)null, sort, start, limit);
return results;
} catch (ParseException e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing query: " + qs);
}
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public boolean regenerateItem(SolrIndexSearcher newSearcher,
SolrCache newCache,
SolrCache oldCache,
Object oldKey,
Object oldVal)
throws IOException {
newCache.put(oldKey,oldVal);
return true;
}
// in core/src/java/org/apache/solr/util/SolrPluginUtils.java
public static SolrDocumentList docListToSolrDocumentList(
DocList docs,
SolrIndexSearcher searcher,
Set<String> fields,
Map<SolrDocument, Integer> ids ) throws IOException
{
IndexSchema schema = searcher.getSchema();
SolrDocumentList list = new SolrDocumentList();
list.setNumFound(docs.matches());
list.setMaxScore(docs.maxScore());
list.setStart(docs.offset());
DocIterator dit = docs.iterator();
while (dit.hasNext()) {
int docid = dit.nextDoc();
Document luceneDoc = searcher.doc(docid, fields);
SolrDocument doc = new SolrDocument();
for( IndexableField field : luceneDoc) {
if (null == fields || fields.contains(field.name())) {
SchemaField sf = schema.getField( field.name() );
doc.addField( field.name(), sf.getType().toObject( field ) );
}
}
if (docs.hasScores() && (null == fields || fields.contains("score"))) {
doc.addField("score", dit.score());
}
list.add( doc );
if( ids != null ) {
ids.put( doc, new Integer(docid) );
}
}
return list;
}
// in core/src/java/org/apache/solr/util/FastWriter.java
@Override
public void write(int c) throws IOException {
write((char)c);
}
// in core/src/java/org/apache/solr/util/FastWriter.java
public void write(char c) throws IOException {
if (pos >= buf.length) {
sink.write(buf,0,pos);
pos=0;
}
buf[pos++] = c;
}
// in core/src/java/org/apache/solr/util/FastWriter.java
@Override
public FastWriter append(char c) throws IOException {
if (pos >= buf.length) {
sink.write(buf,0,pos);
pos=0;
}
buf[pos++] = c;
return this;
}
// in core/src/java/org/apache/solr/util/FastWriter.java
@Override
public void write(char cbuf[], int off, int len) throws IOException {
int space = buf.length - pos;
if (len < space) {
System.arraycopy(cbuf, off, buf, pos, len);
pos += len;
} else if (len<BUFSIZE) {
// if the data to write is small enough, buffer it.
System.arraycopy(cbuf, off, buf, pos, space);
sink.write(buf, 0, buf.length);
pos = len-space;
System.arraycopy(cbuf, off+space, buf, 0, pos);
} else {
sink.write(buf,0,pos); // flush
pos=0;
// don't buffer, just write to sink
sink.write(cbuf, off, len);
}
}
// in core/src/java/org/apache/solr/util/FastWriter.java
@Override
public void write(String str, int off, int len) throws IOException {
int space = buf.length - pos;
if (len < space) {
str.getChars(off, off+len, buf, pos);
pos += len;
} else if (len<BUFSIZE) {
// if the data to write is small enough, buffer it.
str.getChars(off, off+space, buf, pos);
sink.write(buf, 0, buf.length);
str.getChars(off+space, off+len, buf, 0);
pos = len-space;
} else {
sink.write(buf,0,pos); // flush
pos=0;
// don't buffer, just write to sink
sink.write(str, off, len);
}
}
// in core/src/java/org/apache/solr/util/FastWriter.java
@Override
public void flush() throws IOException {
sink.write(buf,0,pos);
pos=0;
sink.flush();
}
// in core/src/java/org/apache/solr/util/FastWriter.java
@Override
public void close() throws IOException {
flush();
sink.close();
}
// in core/src/java/org/apache/solr/util/FastWriter.java
public void flushBuffer() throws IOException {
sink.write(buf, 0, pos);
pos=0;
}
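// usage sketch (not from the Solr sources): FastWriter's buffering as
// implemented above; the one-argument constructor taking the sink Writer is
// an assumption inferred from the fields used here.
import java.io.StringWriter;
import org.apache.solr.util.FastWriter;
public class FastWriterSketch {
public static void main(String[] args) throws Exception {
StringWriter sink = new StringWriter();
FastWriter writer = new FastWriter(sink);
writer.write("hello"); // small writes are buffered; nothing reaches the sink yet
writer.flushBuffer(); // pushes buffered chars through without flushing the sink itself
System.out.println(sink); // -> hello
writer.close(); // flush(), then sink.close()
}
}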
// in core/src/java/org/apache/solr/util/SystemIdResolver.java
URI resolveRelativeURI(String baseURI, String systemId) throws IOException,URISyntaxException {
URI uri;
// special case for backwards compatibility: if relative systemId starts with "/" (we convert that to an absolute solrres:-URI)
if (systemId.startsWith("/")) {
uri = new URI(RESOURCE_LOADER_URI_SCHEME, RESOURCE_LOADER_AUTHORITY_ABSOLUTE, "/", null, null).resolve(systemId);
} else {
// simply parse as URI
uri = new URI(systemId);
}
// do relative resolving
if (baseURI != null ) {
uri = new URI(baseURI).resolve(uri);
}
return uri;
}
// in core/src/java/org/apache/solr/util/SystemIdResolver.java
public InputSource resolveEntity(String name, String publicId, String baseURI, String systemId) throws IOException {
if (systemId == null)
return null;
try {
final URI uri = resolveRelativeURI(baseURI, systemId);
// check schema and resolve with ResourceLoader
if (RESOURCE_LOADER_URI_SCHEME.equals(uri.getScheme())) {
String path = uri.getPath(), authority = uri.getAuthority();
if (!RESOURCE_LOADER_AUTHORITY_ABSOLUTE.equals(authority)) {
path = path.substring(1);
}
try {
final InputSource is = new InputSource(loader.openResource(path));
is.setSystemId(uri.toASCIIString());
is.setPublicId(publicId);
return is;
} catch (RuntimeException re) {
// unfortunately XInclude fallback only works with IOException, but openResource() never throws that one
throw (IOException) (new IOException(re.getMessage()).initCause(re));
}
} else {
// resolve all other URIs using the standard resolver
return null;
}
} catch (URISyntaxException use) {
log.warn("An URI systax problem occurred during resolving SystemId, falling back to default resolver", use);
return null;
}
}
// in core/src/java/org/apache/solr/util/SystemIdResolver.java
public InputSource resolveEntity(String publicId, String systemId) throws IOException {
return resolveEntity(null, publicId, null, systemId);
}
// in core/src/java/org/apache/solr/util/xslt/TransformerProvider.java
public synchronized Transformer getTransformer(SolrConfig solrConfig, String filename,int cacheLifetimeSeconds) throws IOException {
// For now, the Templates are blindly reloaded once cacheExpires is over.
// It'd be better to check the file modification time to reload only if needed.
if(lastTemplates!=null && filename.equals(lastFilename) && System.currentTimeMillis() < cacheExpires) {
if(log.isDebugEnabled()) {
log.debug("Using cached Templates:" + filename);
}
} else {
lastTemplates = getTemplates(solrConfig.getResourceLoader(), filename,cacheLifetimeSeconds);
}
Transformer result = null;
try {
result = lastTemplates.newTransformer();
} catch(TransformerConfigurationException tce) {
log.error(getClass().getName(), "getTransformer", tce);
final IOException ioe = new IOException("newTransformer fails ( " + lastFilename + ")");
ioe.initCause(tce);
throw ioe;
}
return result;
}
// in core/src/java/org/apache/solr/util/xslt/TransformerProvider.java
private Templates getTemplates(ResourceLoader loader, String filename,int cacheLifetimeSeconds) throws IOException {
Templates result = null;
lastFilename = null;
try {
if(log.isDebugEnabled()) {
log.debug("compiling XSLT templates:" + filename);
}
final String fn = "xslt/" + filename;
final TransformerFactory tFactory = TransformerFactory.newInstance();
tFactory.setURIResolver(new SystemIdResolver(loader).asURIResolver());
tFactory.setErrorListener(xmllog);
final StreamSource src = new StreamSource(loader.openResource(fn),
SystemIdResolver.createSystemIdFromResourceName(fn));
try {
result = tFactory.newTemplates(src);
} finally {
// some XML parsers are broken and don't close the byte stream (but they should according to spec)
IOUtils.closeQuietly(src.getInputStream());
}
} catch (Exception e) {
log.error(getClass().getName(), "newTemplates", e);
final IOException ioe = new IOException("Unable to initialize Templates '" + filename + "'");
ioe.initCause(e);
throw ioe;
}
lastFilename = filename;
lastTemplates = result;
cacheExpires = System.currentTimeMillis() + (cacheLifetimeSeconds * 1000);
return result;
}
// in core/src/java/org/apache/solr/util/SuggestMissingFactories.java
public static void main(String[] args) throws ClassNotFoundException, IOException, NoSuchMethodException {
final File[] files = new File[args.length];
for (int i = 0; i < args.length; i++) {
files[i] = new File(args[i]);
}
final FindClasses finder = new FindClasses(files);
final ClassLoader cl = finder.getClassLoader();
final Class TOKENSTREAM
= cl.loadClass("org.apache.lucene.analysis.TokenStream");
final Class TOKENIZER
= cl.loadClass("org.apache.lucene.analysis.Tokenizer");
final Class TOKENFILTER
= cl.loadClass("org.apache.lucene.analysis.TokenFilter");
final Class TOKENIZERFACTORY
= cl.loadClass("org.apache.solr.analysis.TokenizerFactory");
final Class TOKENFILTERFACTORY
= cl.loadClass("org.apache.solr.analysis.TokenFilterFactory");
final HashSet<Class> result
= new HashSet<Class>(finder.findExtends(TOKENIZER));
result.addAll(finder.findExtends(TOKENFILTER));
result.removeAll(finder.findMethodReturns
(finder.findExtends(TOKENIZERFACTORY),
"create",
Reader.class).values());
result.removeAll(finder.findMethodReturns
(finder.findExtends(TOKENFILTERFACTORY),
"create",
TOKENSTREAM).values());
for (final Class c : result) {
System.out.println(c.getName());
}
}
// in core/src/java/org/apache/solr/util/SuggestMissingFactories.java
public static void main(String[] args)
throws ClassNotFoundException, IOException, NoSuchMethodException {
FindClasses finder = new FindClasses(new File(args[1]));
ClassLoader cl = finder.getClassLoader();
Class clazz = cl.loadClass(args[0]);
if (args.length == 2) {
System.out.println("Finding all extenders of " + clazz.getName());
for (Class c : finder.findExtends(clazz)) {
System.out.println(c.getName());
}
} else {
String methName = args[2];
System.out.println("Finding all extenders of " + clazz.getName() +
" with method: " + methName);
Class[] methArgs = new Class[args.length-3];
for (int i = 3; i < args.length; i++) {
methArgs[i-3] = cl.loadClass(args[i]);
}
Map<Class,Class> map = finder.findMethodReturns
(finder.findExtends(clazz),methName, methArgs);
for (Class key : map.keySet()) {
System.out.println(key.getName() + " => " + map.get(key).getName());
}
}
}
// in core/src/java/org/apache/solr/util/SimplePostTool.java
private static void pipe(InputStream source, OutputStream dest) throws IOException {
byte[] buf = new byte[1024];
int read = 0;
while ( (read = source.read(buf) ) >= 0) {
if (null != dest) dest.write(buf, 0, read);
}
if (null != dest) dest.flush();
}
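// usage sketch (not from the Solr sources): pipe() above copied into a
// standalone call site, since the original is private. A null dest simply
// consumes the source; the file name is an illustrative assumption.
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
public class PipeSketch {
private static void pipe(InputStream source, OutputStream dest) throws IOException {
byte[] buf = new byte[1024];
int read = 0;
while ((read = source.read(buf)) >= 0) {
if (null != dest) dest.write(buf, 0, read); // null dest just drains the source
}
if (null != dest) dest.flush();
}
public static void main(String[] args) throws Exception {
InputStream in = new FileInputStream("doc.xml");
try {
pipe(in, System.out); // copy the whole stream to stdout, then flush
} finally {
in.close();
}
}
}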
// in core/src/java/org/apache/solr/util/FileUtils.java
public static void copyFile(File src , File destination) throws IOException {
FileChannel in = null;
FileChannel out = null;
try {
in = new FileInputStream(src).getChannel();
out = new FileOutputStream(destination).getChannel();
in.transferTo(0, in.size(), out);
} finally {
try { if (in != null) in.close(); } catch (IOException e) {}
try { if (out != null) out.close(); } catch (IOException e) {}
}
}
// in core/src/java/org/apache/solr/util/FileUtils.java
public static void sync(File fullFile) throws IOException {
if (fullFile == null || !fullFile.exists())
throw new FileNotFoundException("File does not exist " + fullFile);
boolean success = false;
int retryCount = 0;
IOException exc = null;
while(!success && retryCount < 5) {
retryCount++;
RandomAccessFile file = null;
try {
try {
file = new RandomAccessFile(fullFile, "rw");
file.getFD().sync();
success = true;
} finally {
if (file != null)
file.close();
}
} catch (IOException ioe) {
if (exc == null)
exc = ioe;
try {
// Pause 5 msec
Thread.sleep(5);
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
}
}
}
if (!success)
// Throw original exception
throw exc;
}
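// usage sketch (not from the Solr sources): combining the two helpers above
// to copy a file and force the copy to stable storage. Paths are illustrative
// assumptions.
import java.io.File;
import org.apache.solr.util.FileUtils;
public class CopyAndSyncSketch {
public static void main(String[] args) throws Exception {
File src = new File("/tmp/solr/tlog.0000000000000000001");
File dest = new File("/tmp/solr-backup/tlog.0000000000000000001");
FileUtils.copyFile(src, dest); // single NIO channel transferTo
FileUtils.sync(dest); // retries getFD().sync() up to 5 times, 5 ms apart
}
}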