SolrClient | R Documentation |
Solr connection client
host |
(character) Host URL. Default: 127.0.0.1 |
path |
(character) url path. |
port |
(character/numeric) Port. Default: 8983 |
scheme |
(character) http scheme, one of http or https. Default: http |
proxy |
List of arguments for a proxy connection, including one or more of: url, port, username, password, and auth. See crul::proxy for help, which is used to construct the proxy connection. |
errors |
(character) One of simple or complete. Simple gives the http code and error message on an error, while complete also gives the stack trace, if available. |
auth |
an object of class auth, created by a call to crul::auth |
SolrClient
creates an R6 class object. The object is
not cloneable and is portable, so it can be inherited across packages
without complication.
SolrClient
is used to initialize a client that knows about your
Solr instance, with options for setting host, port, http scheme,
and simple vs. complete error reporting
Various output, see help files for each grouping of methods.
Each of these methods also has a matching standalone exported
function that you can use by passing in the connection object made
by calling SolrClient$new()
. Also, see the docs for each method for
parameter definitions and their default values.
ping(name, wt = 'json', raw = FALSE, ...)
schema(name, what = '', raw = FALSE, ...)
commit(name, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE, wt = 'json', raw = FALSE, ...)
optimize(name, max_segments = 1, wait_searcher = TRUE, soft_commit = FALSE, wt = 'json', raw = FALSE, ...)
config_get(name, what = NULL, wt = "json", raw = FALSE, ...)
config_params(name, param = NULL, set = NULL, unset = NULL, update = NULL, ...)
config_overlay(name, omitHeader = FALSE, ...)
config_set(name, set = NULL, unset = NULL, ...)
collection_exists(name, ...)
collection_list(raw = FALSE, ...)
collection_create(name, numShards = 1, maxShardsPerNode = 1, createNodeSet = NULL, collection.configName = NULL, replicationFactor = 1, router.name = NULL, shards = NULL, createNodeSet.shuffle = TRUE, router.field = NULL, autoAddReplicas = FALSE, async = NULL, raw = FALSE, callopts=list(), ...)
collection_addreplica(name, shard = NULL, route = NULL, node = NULL, instanceDir = NULL, dataDir = NULL, async = NULL, raw = FALSE, callopts=list(), ...)
collection_addreplicaprop(name, shard, replica, property, property.value, shardUnique = FALSE, raw = FALSE, callopts=list())
collection_addrole(role = "overseer", node, raw = FALSE, ...)
collection_balanceshardunique(name, property, onlyactivenodes = TRUE, shardUnique = NULL, raw = FALSE, ...)
collection_clusterprop(name, val, raw = FALSE, callopts=list())
collection_clusterstatus(name = NULL, shard = NULL, raw = FALSE, ...)
collection_createalias(alias, collections, raw = FALSE, ...)
collection_createshard(name, shard, createNodeSet = NULL, raw = FALSE, ...)
collection_delete(name, raw = FALSE, ...)
collection_deletealias(alias, raw = FALSE, ...)
collection_deletereplica(name, shard = NULL, replica = NULL, onlyIfDown = FALSE, raw = FALSE, callopts=list(), ...)
collection_deletereplicaprop(name, shard, replica, property, raw = FALSE, callopts=list())
collection_deleteshard(name, shard, raw = FALSE, ...)
collection_migrate(name, target.collection, split.key, forward.timeout = NULL, async = NULL, raw = FALSE, ...)
collection_overseerstatus(raw = FALSE, ...)
collection_rebalanceleaders(name, maxAtOnce = NULL, maxWaitSeconds = NULL, raw = FALSE, ...)
collection_reload(name, raw = FALSE, ...)
collection_removerole(role = "overseer", node, raw = FALSE, ...)
collection_requeststatus(requestid, raw = FALSE, ...)
collection_splitshard(name, shard, ranges = NULL, split.key = NULL, async = NULL, raw = FALSE, ...)
core_status(name = NULL, indexInfo = TRUE, raw = FALSE, callopts=list())
core_exists(name, callopts = list())
core_create(name, instanceDir = NULL, config = NULL, schema = NULL, dataDir = NULL, configSet = NULL, collection = NULL, shard = NULL, async=NULL, raw = FALSE, callopts=list(), ...)
core_unload(name, deleteIndex = FALSE, deleteDataDir = FALSE, deleteInstanceDir = FALSE, async = NULL, raw = FALSE, callopts = list())
core_rename(name, other, async = NULL, raw = FALSE, callopts=list())
core_reload(name, raw = FALSE, callopts=list())
core_swap(name, other, async = NULL, raw = FALSE, callopts=list())
core_mergeindexes(name, indexDir = NULL, srcCore = NULL, async = NULL, raw = FALSE, callopts = list())
core_requeststatus(requestid, raw = FALSE, callopts = list())
core_split(name, path = NULL, targetCore = NULL, ranges = NULL, split.key = NULL, async = NULL, raw = FALSE, callopts=list())
search(name = NULL, params = NULL, body = NULL, callopts = list(), raw = FALSE, parsetype = 'df', concat = ',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, progress = NULL, ...)
facet(name = NULL, params = NULL, body = NULL, callopts = list(), raw = FALSE, parsetype = 'df', concat = ',', progress = NULL, ...)
stats(name = NULL, params = list(q = '*:*', stats.field = NULL, stats.facet = NULL), body = NULL, callopts=list(), raw = FALSE, parsetype = 'df', progress = NULL, ...)
highlight(name = NULL, params = NULL, body = NULL, callopts=list(), raw = FALSE, parsetype = 'df', progress = NULL, ...)
group(name = NULL, params = NULL, body = NULL, callopts=list(), raw=FALSE, parsetype='df', concat=',', progress = NULL, ...)
mlt(name = NULL, params = NULL, body = NULL, callopts=list(), raw=FALSE, parsetype='df', concat=',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, progress = NULL, ...)
all(name = NULL, params = NULL, body = NULL, callopts=list(), raw=FALSE, parsetype='df', concat=',', optimizeMaxRows = TRUE, minOptimizedRows = 50000L, progress = NULL, ...)
json_request(name = NULL, body = NULL, callopts=list(), progress = NULL)
get(ids, name, fl = NULL, wt = 'json', raw = FALSE, ...)
add(x, name, commit = TRUE, commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)
delete_by_id(ids, name, commit = TRUE, commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)
delete_by_query(query, name, commit = TRUE, commit_within = NULL, overwrite = TRUE, boost = NULL, wt = 'json', raw = FALSE, ...)
update_json(files, name, commit = TRUE, optimize = FALSE, max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...)
update_xml(files, name, commit = TRUE, optimize = FALSE, max_segments = 1, expunge_deletes = FALSE, wait_searcher = TRUE, soft_commit = FALSE, prepare_commit = NULL, wt = 'json', raw = FALSE, ...)
update_csv(files, name, separator = ',', header = TRUE, fieldnames = NULL, skip = NULL, skipLines = 0, trim = FALSE, encapsulator = NULL, escape = NULL, keepEmpty = FALSE, literal = NULL, map = NULL, split = NULL, rowid = NULL, rowidOffset = NULL, overwrite = NULL, commit = NULL, wt = 'json', raw = FALSE, ...)
update_atomic_json(body, name, wt = 'json', raw = FALSE, ...)
update_atomic_xml(body, name, wt = 'json', raw = FALSE, ...)
When the $search()
method returns a data.frame, metadata doesn't fit
into the output data.frame itself. You can access number of results
(numFound
) in the attributes of the results. For example,
attr(x, "numFound")
for number of results, and attr(x, "start")
for the offset value (if one was given). Or you can get all
attributes like attributes(x)
. These metadata are not in the
attributes when requesting raw xml or json though as those metadata
are in the payload (unless wt="csv"
).
## Not run: # make a client (cli <- SolrClient$new()) # variables cli$host cli$port cli$path cli$scheme # ping ## ping to make sure it's up cli$ping("gettingstarted") # version ## get Solr version information cli$schema("gettingstarted") cli$schema("gettingstarted", "fields") cli$schema("gettingstarted", "name") cli$schema("gettingstarted", "version")$version # Search cli$search("gettingstarted", params = list(q = "*:*")) cli$search("gettingstarted", body = list(query = "*:*")) # set a different host SolrClient$new(host = 'stuff.com') # set a different port SolrClient$new(port = 3456) # set a different http scheme SolrClient$new(scheme = 'https') # set a proxy SolrClient$new(proxy = list(url = "187.62.207.130:3128")) prox <- list(url = "187.62.207.130:3128", user = "foo", pwd = "bar") cli <- SolrClient$new(proxy = prox) cli$proxy # set simple authentication details SolrClient$new(auth = crul::auth(user = "hello", pwd = "world")) # A remote Solr instance to which you don't have admin access (cli <- SolrClient$new(host = "api.plos.org", path = "search", port = NULL)) res <- cli$search(params = list(q = "memory")) res attr(res, "numFound") attr(res, "start") attr(res, "maxScore") ## End(Not run)
Add the following code to your website.
For more information on customizing the embed code, read Embedding Snippets.