bloodhound-0.24.0.0: Elasticsearch client library for Haskell
Copyright    (C) 2014, 2018 Chris Allen
License      BSD-style (see the file LICENSE)
Maintainer   Chris Allen <cma@bitemyapp.com>
Stability    provisional
Portability  GHC
Safe Haskell None
Language     Haskell2010

Database.Bloodhound.Common.Requests

Description

Requests to be run against Elasticsearch servers.

Synopsis

Bloodhound client functions

Indices

createIndex :: IndexSettings -> IndexName -> BHRequest StatusDependant Acknowledged Source #

createIndex will create an index given IndexSettings and an IndexName.

>>> response <- runBH' $ createIndex defaultIndexSettings (IndexName "didimakeanindex")
>>> isSuccess response
True
>>> runBH' $ indexExists (IndexName "didimakeanindex")
True

createIndexWith Source #

Create an index, providing it with any number of settings. This is more expressive than createIndex but is more verbose for the common case of configuring only the shard count and replica count. A hedged usage sketch follows.
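
A sketch assuming createIndexWith takes the list of UpdatableIndexSetting values, a shard count, and the IndexName (check the exact signature via the Source link); NumberOfReplicas is one such setting:

>>> response <- runBH' $ createIndexWith [NumberOfReplicas (ReplicaCount 0)] 1 (IndexName "tuned-index")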

deleteIndex :: IndexName -> BHRequest StatusDependant Acknowledged Source #

deleteIndex will delete an index given an IndexName.

>>> _ <- runBH' $ createIndex defaultIndexSettings (IndexName "didimakeanindex")
>>> response <- runBH' $ deleteIndex (IndexName "didimakeanindex")
>>> isSuccess response
True
>>> runBH' $ indexExists (IndexName "didimakeanindex")
False

updateIndexSettings :: NonEmpty UpdatableIndexSetting -> IndexName -> BHRequest StatusIndependant Acknowledged Source #

updateIndexSettings will apply a non-empty list of setting updates to an index.

>>> _ <- runBH' $ createIndex defaultIndexSettings (IndexName "unconfiguredindex")
>>> response <- runBH' $ updateIndexSettings (BlocksWrite False :| []) (IndexName "unconfiguredindex")
>>> isSuccess response
True

forceMergeIndex :: IndexSelection -> ForceMergeIndexSettings -> BHRequest StatusDependant ShardsResult Source #

forceMergeIndex

The force merge API allows forcing a merge of one or more indices. The merge relates to the number of segments a Lucene index holds within each shard; the force merge operation reduces the number of segments by merging them.

This call will block until the merge is complete. If the HTTP connection is lost, the request will continue in the background, and any new requests will block until the previous force merge is complete.
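
A hedged sketch, assuming the IndexList constructor of IndexSelection and the defaultForceMergeIndexSettings value exported by this library:

>>> response <- runBH' $ forceMergeIndex (IndexList (testIndex :| [])) defaultForceMergeIndexSettings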

indexExists :: IndexName -> BHRequest StatusDependant Bool Source #

indexExists enables you to check whether an index exists. Returns a Bool.

>>> exists <- runBH' $ indexExists testIndex

openIndex :: IndexName -> BHRequest StatusIndependant Acknowledged Source #

openIndex opens an index given an IndexName. Explained in further detail at http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html

>>> response <- runBH' $ openIndex testIndex

closeIndex :: IndexName -> BHRequest StatusIndependant Acknowledged Source #

closeIndex closes an index given an IndexName. Explained in further detail at http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html

>>> response <- runBH' $ closeIndex testIndex

listIndices :: BHRequest StatusDependant [IndexName] Source #

listIndices returns a list of all index names on the server.
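
For example, using the runBH' doctest helper used throughout this module:

>>> indexNames <- runBH' listIndices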

catIndices :: BHRequest StatusDependant [(IndexName, Int)] Source #

catIndices returns a list of all index names on the server along with their document counts.
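
For example:

>>> indexesAndCounts <- runBH' catIndices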

waitForYellowIndex :: IndexName -> BHRequest StatusIndependant HealthStatus Source #

Block until the index becomes available for indexing documents. This is useful for integration tests in which indices are rapidly created and deleted.
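
For example, using the testIndex defined in this module's doctest setup:

>>> _ <- runBH' $ createIndex defaultIndexSettings testIndex
>>> _ <- runBH' $ waitForYellowIndex testIndex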

Index Aliases

updateIndexAliases :: NonEmpty IndexAliasAction -> BHRequest StatusIndependant Acknowledged Source #

updateIndexAliases updates the server's index alias table. Operations are atomic. Explained in further detail at https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html

>>> let src = IndexName "a-real-index"
>>> let aliasName = IndexName "an-alias"
>>> let iAlias = IndexAlias src (IndexAliasName aliasName)
>>> let aliasCreate = IndexAliasCreate Nothing Nothing
>>> _ <- runBH' $ deleteIndex src
>>> isSuccess <$> runBH' (createIndex defaultIndexSettings src)
True
>>> runBH' $ indexExists src
True
>>> isSuccess <$> runBH' (updateIndexAliases (AddAlias iAlias aliasCreate :| []))
True
>>> runBH' $ indexExists aliasName
True

getIndexAliases :: BHRequest StatusDependant IndexAliasesSummary Source #

Get all aliases configured on the server.
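
For example:

>>> aliasesSummary <- runBH' getIndexAliases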

deleteIndexAlias :: IndexAliasName -> BHRequest StatusIndependant Acknowledged Source #

Delete a single alias, removing it from all indices it is currently associated with.
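
For example, removing the alias created in the updateIndexAliases example above:

>>> _ <- runBH' $ deleteIndexAlias (IndexAliasName (IndexName "an-alias"))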

Index Templates

putTemplate :: IndexTemplate -> TemplateName -> BHRequest StatusIndependant Acknowledged Source #

putTemplate creates a template given an IndexTemplate and a TemplateName. Explained in further detail at https://www.elastic.co/guide/en/elasticsearch/reference/1.7/indices-templates.html

>>> let idxTpl = IndexTemplate [IndexPattern "tweet-*"] (Just (IndexSettings (ShardCount 1) (ReplicaCount 1))) [toJSON TweetMapping]
>>> resp <- runBH' $ putTemplate idxTpl (TemplateName "tweet-tpl")

templateExists :: TemplateName -> BHRequest StatusDependant Bool Source #

templateExists checks to see if a template exists.

>>> exists <- runBH' $ templateExists (TemplateName "tweet-tpl")

deleteTemplate :: TemplateName -> BHRequest StatusIndependant Acknowledged Source #

deleteTemplate is an HTTP DELETE and deletes a template.

>>> let idxTpl = IndexTemplate [IndexPattern "tweet-*"] (Just (IndexSettings (ShardCount 1) (ReplicaCount 1))) [toJSON TweetMapping]
>>> _ <- runBH' $ putTemplate idxTpl (TemplateName "tweet-tpl")
>>> resp <- runBH' $ deleteTemplate (TemplateName "tweet-tpl")

Mapping

putMapping :: (FromJSON r, ToJSON a) => IndexName -> a -> BHRequest StatusDependant r Source #

putMapping is an HTTP PUT and has upsert semantics. Mappings are schemas for documents in indexes.

>>> _ <- runBH' $ createIndex defaultIndexSettings testIndex
>>> resp <- runBH' $ putMapping testIndex TweetMapping
>>> print resp
Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("content-type","application/json; charset=UTF-8"),("content-encoding","gzip"),("transfer-encoding","chunked")], responseBody = "{\"acknowledged\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}

Documents

indexDocument :: ToJSON doc => IndexName -> IndexDocumentSettings -> doc -> DocId -> BHRequest StatusDependant IndexedDocument Source #

indexDocument is the primary way to save a single document in Elasticsearch. The document itself is simply something we can convert into a JSON Value. The DocId will function as the primary key for the document. You are encouraged to generate your own ids and not rely on Elasticsearch's automatic id generation. Read more about it here: https://github.com/bitemyapp/bloodhound/issues/107

>>> resp <- runBH' $ indexDocument testIndex defaultIndexDocumentSettings exampleTweet (DocId "1")
>>> print resp
Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("content-type","application/json; charset=UTF-8"),("content-encoding","gzip"),("content-length","152")], responseBody = "{\"_index\":\"twitter\",\"_type\":\"_doc\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\",\"_shards\":{\"total\":1,\"successful\":1,\"failed\":0},\"_seq_no\":1,\"_primary_term\":1}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}

updateDocument :: ToJSON patch => IndexName -> IndexDocumentSettings -> patch -> DocId -> BHRequest StatusDependant IndexedDocument Source #

updateDocument provides a way to perform a partial update of an already indexed document.
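
A hedged sketch, where patchDoc stands for any value with a ToJSON instance describing the fields to change (a hypothetical binding, not defined here):

>>> resp <- runBH' $ updateDocument testIndex defaultIndexDocumentSettings patchDoc (DocId "1")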

getDocument :: FromJSON a => IndexName -> DocId -> BHRequest StatusIndependant (EsResult a) Source #

getDocument is a straightforward way to fetch a single document from Elasticsearch using an IndexName and a DocId. The DocId is the primary key for your Elasticsearch document.

>>> yourDoc <- runBH' $ getDocument testIndex (DocId "1")

documentExists :: IndexName -> DocId -> BHRequest StatusDependant Bool Source #

documentExists enables you to check if a document exists.
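
For example:

>>> exists <- runBH' $ documentExists testIndex (DocId "1")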

deleteDocument :: IndexName -> DocId -> BHRequest StatusDependant IndexedDocument Source #

deleteDocument is the primary way to delete a single document.

>>> _ <- runBH' $ deleteDocument testIndex (DocId "1")

deleteByQuery :: IndexName -> Query -> BHRequest StatusDependant DeletedDocuments Source #

deleteByQuery performs a deletion on every document that matches a query.

>>> let query = TermQuery (Term "user" "bitemyapp") Nothing
>>> _ <- runBH' $ deleteByQuery testIndex query

Searching

searchAll :: FromJSON a => Search -> BHRequest StatusDependant (SearchResult a) Source #

searchAll, given a Search, will perform that search against all indexes on an Elasticsearch server. Try to avoid doing this if it can be helped.

>>> let query = TermQuery (Term "user" "bitemyapp") Nothing
>>> let search = mkSearch (Just query) Nothing
>>> response <- runBH' $ searchAll search

searchByIndex :: FromJSON a => IndexName -> Search -> BHRequest StatusDependant (SearchResult a) Source #

searchByIndex, given a Search and an IndexName, will perform that search within an index on an Elasticsearch server.

>>> let query = TermQuery (Term "user" "bitemyapp") Nothing
>>> let search = mkSearch (Just query) Nothing
>>> response <- runBH' $ searchByIndex testIndex search

searchByIndices :: FromJSON a => NonEmpty IndexName -> Search -> BHRequest StatusDependant (SearchResult a) Source #

searchByIndices is a variant of searchByIndex that executes a Search over many indices. This is much faster than using mapM to searchByIndex over a collection since it only causes a single HTTP request to be emitted.
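
A hedged sketch, where "other-index" stands in for any additional index you want to search:

>>> let query = TermQuery (Term "user" "bitemyapp") Nothing
>>> let search = mkSearch (Just query) Nothing
>>> response <- runBH' $ searchByIndices (testIndex :| [IndexName "other-index"]) search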

searchByIndexTemplate :: FromJSON a => IndexName -> SearchTemplate -> BHRequest StatusDependant (SearchResult a) Source #

searchByIndexTemplate, given a SearchTemplate and an IndexName, will perform that search within an index on an Elasticsearch server.

>>> let query = SearchTemplateSource "{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } }, \"size\" : \"{{my_size}}\"}"
>>> let search = mkSearchTemplate (Right query) Nothing
>>> response <- runBH' $ searchByIndexTemplate testIndex search

searchByIndicesTemplate :: FromJSON a => NonEmpty IndexName -> SearchTemplate -> BHRequest StatusDependant (SearchResult a) Source #

searchByIndicesTemplate is a variant of searchByIndexTemplate that executes a SearchTemplate over many indices. This is much faster than using mapM to searchByIndexTemplate over a collection since it only causes a single HTTP request to be emitted.

getInitialScroll :: FromJSON a => IndexName -> Search -> BHRequest StatusDependant (ParsedEsResponse (SearchResult a)) Source #

For a given search, request a scroll for efficient streaming of search results. Note that the search is put into SearchTypeScan mode and thus results will not be sorted. Combine this with advanceScroll to efficiently stream through the full result set

getInitialSortedScroll :: FromJSON a => IndexName -> Search -> BHRequest StatusDependant (SearchResult a) Source #

For a given search, request a scroll for efficient streaming of search results. Combine this with advanceScroll to efficiently stream through the full result set. Note that this search respects sorting and may be less efficient than getInitialScroll.

advanceScroll Source #

Arguments

:: FromJSON a 
=> ScrollId 
-> NominalDiffTime

How long should the snapshot of data be kept around? This timeout is updated every time advanceScroll is used, so don't feel the need to set it to the entire duration of your search processing. Note that durations < 1s will be rounded up. Also note that NominalDiffTime is an instance of Num so literals like 60 will be interpreted as seconds. 60s is a reasonable default.

-> BHRequest StatusDependant (SearchResult a) 

Use the given scroll to fetch the next page of documents. If there are no further pages, 'SearchResult.searchHits.hits' will be '[]'.
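
A hedged sketch of paging through results, reusing the search defined in the searchByIndex example and assuming ParsedEsResponse is an Either over EsError and that the scrollId field of SearchResult carries the id for the next page:

>>> Right firstPage <- runBH' $ getInitialScroll testIndex search
>>> nextPage <- traverse (\sid -> runBH' $ advanceScroll sid 60) (scrollId firstPage)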

refreshIndex :: IndexName -> BHRequest StatusDependant ShardResult Source #

refreshIndex will force a refresh on an index. You must do this if you want to read what you wrote.

>>> _ <- runBH' $ createIndex defaultIndexSettings testIndex
>>> _ <- runBH' $ refreshIndex testIndex

mkSearch :: Maybe Query -> Maybe Filter -> Search Source #

mkSearch is a helper function for defaulting additional fields of a Search to Nothing in case you only care about your Query and Filter. Use record update syntax if you want to add things like aggregations or highlights while still using this helper function.

>>> let query = TermQuery (Term "user" "bitemyapp") Nothing
>>> mkSearch (Just query) Nothing
Search {queryBody = Just (TermQuery (Term {termField = "user", termValue = "bitemyapp"}) Nothing), filterBody = Nothing, searchAfterKey = Nothing, sortBody = Nothing, aggBody = Nothing, highlight = Nothing, trackSortScores = False, from = From 0, size = Size 10, searchType = SearchTypeQueryThenFetch, fields = Nothing, source = Nothing}

mkAggregateSearch :: Maybe Query -> Aggregations -> Search Source #

mkAggregateSearch is a helper function that defaults everything in a Search except for the Query and the Aggregation.

>>> let terms = TermsAgg $ (mkTermsAggregation "user") { termCollectMode = Just BreadthFirst }
>>> terms
TermsAgg (TermsAggregation {term = Left "user", termInclude = Nothing, termExclude = Nothing, termOrder = Nothing, termMinDocCount = Nothing, termSize = Nothing, termShardSize = Nothing, termCollectMode = Just BreadthFirst, termExecutionHint = Nothing, termAggs = Nothing})
>>> let myAggregation = mkAggregateSearch Nothing $ mkAggregations "users" terms

mkHighlightSearch :: Maybe Query -> Highlights -> Search Source #

mkHighlightSearch is a helper function that defaults everything in a Search except for the Query and the Aggregation.

>>> let query = QueryMatchQuery $ mkMatchQuery (FieldName "_all") (QueryString "haskell")
>>> let testHighlight = Highlights Nothing [FieldHighlight (FieldName "message") Nothing]
>>> let search = mkHighlightSearch (Just query) testHighlight

mkSearchTemplate :: Either SearchTemplateId SearchTemplateSource -> TemplateQueryKeyValuePairs -> SearchTemplate Source #

mkSearchTemplate is a helper function for defaulting additional fields of a SearchTemplate to Nothing. Use record update syntax if you want to add things.

bulk :: ParseBHResponse contextualized => Vector BulkOperation -> BHRequest contextualized BulkResponse Source #

bulk uses Elasticsearch's bulk API to perform bulk operations. The BulkOperation data type encodes the index/update/delete/create operations. You pass a Vector of BulkOperations to bulk in order to send those operations to your Elasticsearch server to be performed. A Vector is used instead of a list ([BulkOperation]) due to memory overhead.

>>> let stream = V.fromList [BulkIndex testIndex (DocId "2") (toJSON (BulkTest "blah"))]
>>> _ <- runBH' $ bulk stream
>>> _ <- runBH' $ refreshIndex testIndex

pageSearch Source #

Arguments

:: From

The result offset

-> Size

The number of results to return

-> Search

The current search

-> Search

The paged search

pageSearch is a helper function that takes a search and assigns the from and size fields for the search. The from parameter defines the offset from the first result you want to fetch. The size parameter allows you to configure the maximum number of hits to be returned.

>>> let query = QueryMatchQuery $ mkMatchQuery (FieldName "_all") (QueryString "haskell")
>>> let search = mkSearch (Just query) Nothing
>>> search
Search {queryBody = Just (QueryMatchQuery (MatchQuery {matchQueryField = FieldName "_all", matchQueryQueryString = QueryString "haskell", matchQueryOperator = Or, matchQueryZeroTerms = ZeroTermsNone, matchQueryCutoffFrequency = Nothing, matchQueryMatchType = Nothing, matchQueryAnalyzer = Nothing, matchQueryMaxExpansions = Nothing, matchQueryLenient = Nothing, matchQueryBoost = Nothing})), filterBody = Nothing, sortBody = Nothing, aggBody = Nothing, highlight = Nothing, trackSortScores = False, from = From 0, size = Size 10, searchType = SearchTypeQueryThenFetch, fields = Nothing, source = Nothing}
>>> pageSearch (From 10) (Size 100) search
Search {queryBody = Just (QueryMatchQuery (MatchQuery {matchQueryField = FieldName "_all", matchQueryQueryString = QueryString "haskell", matchQueryOperator = Or, matchQueryZeroTerms = ZeroTermsNone, matchQueryCutoffFrequency = Nothing, matchQueryMatchType = Nothing, matchQueryAnalyzer = Nothing, matchQueryMaxExpansions = Nothing, matchQueryLenient = Nothing, matchQueryBoost = Nothing})), filterBody = Nothing, sortBody = Nothing, aggBody = Nothing, highlight = Nothing, trackSortScores = False, from = From 10, size = Size 100, searchType = SearchTypeQueryThenFetch, fields = Nothing, source = Nothing}

mkShardCount :: Int -> Maybe ShardCount Source #

mkShardCount is a straightforward smart constructor for ShardCount which rejects Int values below 1 and above 1000.

>>> mkShardCount 10
Just (ShardCount 10)

mkReplicaCount :: Int -> Maybe ReplicaCount Source #

mkReplicaCount is a straightforward smart constructor for ReplicaCount which rejects Int values below 0 and above 1000.

>>> mkReplicaCount 10
Just (ReplicaCount 10)

getStatus :: BHRequest StatusDependant Status Source #

getStatus fetches the Status of a Server.

>>> serverStatus <- runBH' getStatus
>>> fmap tagline (serverStatus)
Just "You Know, for Search"

Templates

Snapshot/Restore

Snapshot Repos

getSnapshotRepos :: SnapshotRepoSelection -> BHRequest StatusDependant [GenericSnapshotRepo] Source #

getSnapshotRepos gets the definitions of a subset of the defined snapshot repos.
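
A hedged sketch, assuming AllSnapshotRepos as a SnapshotRepoSelection:

>>> repos <- runBH' $ getSnapshotRepos AllSnapshotRepos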

verifySnapshotRepo :: SnapshotRepoName -> BHRequest StatusDependant SnapshotVerification Source #

Verify if a snapshot repo is working. NOTE: this API did not make it into Elasticsearch until 1.4. If you use an older version, you will get an error here.

Snapshots

getSnapshots :: SnapshotRepoName -> SnapshotSelection -> BHRequest StatusDependant [SnapshotInfo] Source #

Get info about known snapshots given a pattern and repo name.
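
A hedged sketch, assuming AllSnapshots as a SnapshotSelection and a repo named "backups" (hypothetical):

>>> snapshots <- runBH' $ getSnapshots (SnapshotRepoName "backups") AllSnapshots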

deleteSnapshot :: SnapshotRepoName -> SnapshotName -> BHRequest StatusIndependant Acknowledged Source #

Delete a snapshot. Cancels if it is running.
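
A hedged sketch, assuming the same hypothetical "backups" repo and a snapshot named "snap-1":

>>> _ <- runBH' $ deleteSnapshot (SnapshotRepoName "backups") (SnapshotName "snap-1")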

Restoring Snapshots

Reindex

Task

Nodes

Request Utilities

encodeBulkOperations :: Vector BulkOperation -> ByteString Source #

encodeBulkOperations is a convenience function for dumping a vector of BulkOperation into a ByteString.

>>> let bulkOps = V.fromList [BulkIndex testIndex (DocId "2") (toJSON (BulkTest "blah"))]
>>> encodeBulkOperations bulkOps
"\n{\"index\":{\"_id\":\"2\",\"_index\":\"twitter\"}}\n{\"name\":\"blah\"}\n"

encodeBulkOperation :: BulkOperation -> ByteString Source #

encodeBulkOperation is a convenience function for dumping a single BulkOperation into a ByteString.

>>> let bulkOp = BulkIndex testIndex (DocId "2") (toJSON (BulkTest "blah"))
>>> encodeBulkOperation bulkOp
"{\"index\":{\"_id\":\"2\",\"_index\":\"twitter\"}}\n{\"name\":\"blah\"}"

BHResponse-handling tools

isVersionConflict :: BHResponse parsingContext a -> Bool Source #

Was there an optimistic concurrency control conflict when indexing a document? (Checks for a 409 status code.)

isSuccess :: BHResponse parsingContext a -> Bool Source #

Checks for a 2xx status code.

isCreated :: BHResponse parsingContext a -> Bool Source #

Checks for a 201 status code.

parseEsResponse :: FromJSON body => BHResponse parsingContext body -> Either EsProtocolException (ParsedEsResponse body) Source #

Tries to parse a response body as the expected type body and, failing that, tries to parse it as an EsError. All well-formed JSON responses from Elasticsearch should fall into these two categories. If they don't, an EsProtocolException will be thrown. If you encounter this, please report the full body it reports along with your Elasticsearch version.

parseEsResponseWith :: (MonadThrow m, FromJSON body) => (body -> Either String parsed) -> BHResponse parsingContext body -> m parsed Source #

Parse a BHResponse with an arbitrary parser.

Count

Generic

Performing Requests

tryPerformBHRequest :: (MonadBH m, MonadThrow m, ParseBHResponse contextualized) => BHRequest contextualized a -> m (ParsedEsResponse a) Source #

performBHRequest :: (MonadBH m, MonadThrow m, ParseBHResponse contextualized) => BHRequest contextualized a -> m a Source #
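
A hedged sketch of running a request without the runBH' doctest helper, assuming mkBHEnv, runBH, and Server from the client modules and an http-client Manager bound to manager (a hypothetical binding):

>>> let env = mkBHEnv (Server "http://localhost:9200") manager
>>> acked <- runBH env $ performBHRequest (createIndex defaultIndexSettings (IndexName "docs-example"))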

withBHResponse :: forall a parsingContext b. (Either EsProtocolException (ParsedEsResponse a) -> BHResponse StatusDependant a -> b) -> BHRequest parsingContext a -> BHRequest StatusDependant b Source #

Work with the full BHResponse as well as the parsed result.

withBHResponse_ :: forall a parsingContext b. (BHResponse StatusDependant a -> b) -> BHRequest parsingContext a -> BHRequest StatusDependant b Source #

Work with the full BHResponse, without the parsed result.

withBHResponseParsedEsResponse :: forall a parsingContext. BHRequest parsingContext a -> BHRequest StatusDependant (ParsedEsResponse a) Source #

Enable working with ParsedEsResponse

keepBHResponse :: forall a parsingContext. BHRequest parsingContext a -> BHRequest StatusDependant (BHResponse StatusDependant a, a) Source #

Keep the full BHResponse alongside the parsed result.

joinBHResponse :: forall a parsingContext. BHRequest parsingContext (Either EsProtocolException (ParsedEsResponse a)) -> BHRequest parsingContext a Source #