| Copyright | (C) 2014, 2018 Chris Allen |
|---|---|
| License | BSD-style (see the file LICENSE) |
| Maintainer | Chris Allen <cma@bitemyapp.com> |
| Stability | provisional |
| Portability | GHC |
| Safe Haskell | None |
| Language | Haskell2010 |
Database.Bloodhound.Common.Client
Description
Client side functions for talking to Elasticsearch servers.
Synopsis
- withBH :: ManagerSettings -> Server -> BH IO a -> IO a
- createIndex :: MonadBH m => IndexSettings -> IndexName -> m Acknowledged
- createIndexWith :: MonadBH m => [UpdatableIndexSetting] -> Int -> IndexName -> m Acknowledged
- flushIndex :: MonadBH m => IndexName -> m ShardResult
- deleteIndex :: MonadBH m => IndexName -> m Acknowledged
- updateIndexSettings :: MonadBH m => NonEmpty UpdatableIndexSetting -> IndexName -> m Acknowledged
- getIndexSettings :: MonadBH m => IndexName -> m IndexSettingsSummary
- forceMergeIndex :: MonadBH m => IndexSelection -> ForceMergeIndexSettings -> m ShardsResult
- indexExists :: MonadBH m => IndexName -> m Bool
- openIndex :: MonadBH m => IndexName -> m Acknowledged
- closeIndex :: MonadBH m => IndexName -> m Acknowledged
- listIndices :: MonadBH m => m [IndexName]
- catIndices :: MonadBH m => m [(IndexName, Int)]
- waitForYellowIndex :: MonadBH m => IndexName -> m HealthStatus
- data HealthStatus = HealthStatus {
- healthStatusClusterName :: Text
- healthStatusStatus :: Text
- healthStatusTimedOut :: Bool
- healthStatusNumberOfNodes :: Int
- healthStatusNumberOfDataNodes :: Int
- healthStatusActivePrimaryShards :: Int
- healthStatusActiveShards :: Int
- healthStatusRelocatingShards :: Int
- healthStatusInitializingShards :: Int
- healthStatusUnassignedShards :: Int
- healthStatusDelayedUnassignedShards :: Int
- healthStatusNumberOfPendingTasks :: Int
- healthStatusNumberOfInFlightFetch :: Int
- healthStatusTaskMaxWaitingInQueueMillis :: Int
- healthStatusActiveShardsPercentAsNumber :: Float
- updateIndexAliases :: MonadBH m => NonEmpty IndexAliasAction -> m Acknowledged
- getIndexAliases :: MonadBH m => m IndexAliasesSummary
- deleteIndexAlias :: MonadBH m => IndexAliasName -> m Acknowledged
- putTemplate :: MonadBH m => IndexTemplate -> TemplateName -> m Acknowledged
- templateExists :: MonadBH m => TemplateName -> m Bool
- deleteTemplate :: MonadBH m => TemplateName -> m Acknowledged
- putMapping :: forall r a m. (MonadBH m, FromJSON r, ToJSON a) => IndexName -> a -> m r
- indexDocument :: forall doc m. (MonadBH m, ToJSON doc) => IndexName -> IndexDocumentSettings -> doc -> DocId -> m IndexedDocument
- updateDocument :: forall patch m. (MonadBH m, ToJSON patch) => IndexName -> IndexDocumentSettings -> patch -> DocId -> m IndexedDocument
- updateByQuery :: (MonadBH m, FromJSON a) => IndexName -> Query -> Maybe Script -> m a
- getDocument :: FromJSON a => IndexName -> DocId -> BHRequest StatusIndependant (EsResult a)
- documentExists :: MonadBH m => IndexName -> DocId -> m Bool
- deleteDocument :: MonadBH m => IndexName -> DocId -> m IndexedDocument
- deleteByQuery :: MonadBH m => IndexName -> Query -> m DeletedDocuments
- data IndexedDocument = IndexedDocument {}
- data DeletedDocuments = DeletedDocuments {
- delDocsTook :: Int
- delDocsTimedOut :: Bool
- delDocsTotal :: Int
- delDocsDeleted :: Int
- delDocsBatches :: Int
- delDocsVersionConflicts :: Int
- delDocsNoops :: Int
- delDocsRetries :: DeletedDocumentsRetries
- delDocsThrottledMillis :: Int
- delDocsRequestsPerSecond :: Float
- delDocsThrottledUntilMillis :: Int
- delDocsFailures :: [Value]
- data DeletedDocumentsRetries = DeletedDocumentsRetries {}
- searchAll :: forall a m. (MonadBH m, FromJSON a) => Search -> m (SearchResult a)
- searchByIndex :: forall a m. (MonadBH m, FromJSON a) => IndexName -> Search -> m (SearchResult a)
- searchByIndices :: forall a m. (MonadBH m, FromJSON a) => NonEmpty IndexName -> Search -> m (SearchResult a)
- searchByIndexTemplate :: forall a m. (MonadBH m, FromJSON a) => IndexName -> SearchTemplate -> m (SearchResult a)
- searchByIndicesTemplate :: forall a m. (MonadBH m, FromJSON a) => NonEmpty IndexName -> SearchTemplate -> m (SearchResult a)
- scanSearch :: (FromJSON a, MonadBH m) => IndexName -> Search -> m [Hit a]
- getInitialScroll :: forall a m. (MonadBH m, FromJSON a) => IndexName -> Search -> m (ParsedEsResponse (SearchResult a))
- getInitialSortedScroll :: forall a m. (MonadBH m, FromJSON a) => IndexName -> Search -> m (SearchResult a)
- advanceScroll :: forall a m. (MonadBH m, FromJSON a) => ScrollId -> NominalDiffTime -> m (SearchResult a)
- refreshIndex :: MonadBH m => IndexName -> m ShardResult
- mkSearch :: Maybe Query -> Maybe Filter -> Search
- mkAggregateSearch :: Maybe Query -> Aggregations -> Search
- mkHighlightSearch :: Maybe Query -> Highlights -> Search
- mkSearchTemplate :: Either SearchTemplateId SearchTemplateSource -> TemplateQueryKeyValuePairs -> SearchTemplate
- bulk :: MonadBH m => Vector BulkOperation -> m BulkResponse
- pageSearch :: From -> Size -> Search -> Search
- mkShardCount :: Int -> Maybe ShardCount
- mkReplicaCount :: Int -> Maybe ReplicaCount
- getStatus :: MonadBH m => m Status
- storeSearchTemplate :: MonadBH m => SearchTemplateId -> SearchTemplateSource -> m Acknowledged
- getSearchTemplate :: MonadBH m => SearchTemplateId -> m GetTemplateScript
- deleteSearchTemplate :: MonadBH m => SearchTemplateId -> m Acknowledged
- getSnapshotRepos :: MonadBH m => SnapshotRepoSelection -> m [GenericSnapshotRepo]
- updateSnapshotRepo :: (MonadBH m, SnapshotRepo repo) => SnapshotRepoUpdateSettings -> repo -> m Acknowledged
- verifySnapshotRepo :: MonadBH m => SnapshotRepoName -> m SnapshotVerification
- deleteSnapshotRepo :: MonadBH m => SnapshotRepoName -> m Acknowledged
- createSnapshot :: MonadBH m => SnapshotRepoName -> SnapshotName -> SnapshotCreateSettings -> m Acknowledged
- getSnapshots :: MonadBH m => SnapshotRepoName -> SnapshotSelection -> m [SnapshotInfo]
- deleteSnapshot :: MonadBH m => SnapshotRepoName -> SnapshotName -> m Acknowledged
- restoreSnapshot :: MonadBH m => SnapshotRepoName -> SnapshotName -> SnapshotRestoreSettings -> m Accepted
- reindex :: MonadBH m => ReindexRequest -> m ReindexResponse
- reindexAsync :: MonadBH m => ReindexRequest -> m TaskNodeId
- getTask :: (MonadBH m, FromJSON a) => TaskNodeId -> m (TaskResponse a)
- getNodesInfo :: MonadBH m => NodeSelection -> m NodesInfo
- getNodesStats :: MonadBH m => NodeSelection -> m NodesStats
- encodeBulkOperations :: Vector BulkOperation -> ByteString
- encodeBulkOperation :: BulkOperation -> ByteString
- basicAuthHook :: Monad m => EsUsername -> EsPassword -> Request -> m Request
- countByIndex :: MonadBH m => IndexName -> CountQuery -> m CountResponse
- newtype Acknowledged = Acknowledged {}
- newtype Accepted = Accepted {
- isAccepted :: Bool
- data IgnoredBody = IgnoredBody
Bloodhound client functions
The examples in this module assume the following code has been run. The :{ and :} will only work in GHCi. You'll only need the data types and typeclass instances for the functions that make use of them.
>>>:set -XOverloadedStrings
>>>:set -XDeriveGeneric
>>>import Database.Bloodhound
>>>import Network.HTTP.Client
>>>let testServer = (Server "http://localhost:9200")
>>>let runBH' = withBH defaultManagerSettings testServer
>>>let testIndex = IndexName "twitter"
>>>let defaultIndexSettings = IndexSettings (ShardCount 1) (ReplicaCount 0)
>>>data TweetMapping = TweetMapping deriving stock (Eq, Show)
>>>_ <- runBH' $ deleteIndex testIndex
>>>_ <- runBH' $ deleteIndex (IndexName "didimakeanindex")
>>>import GHC.Generics
>>>import Data.Time.Calendar (Day (..))
>>>import Data.Time.Clock (UTCTime (..), secondsToDiffTime)
>>>:{
instance ToJSON TweetMapping where
  toJSON TweetMapping =
    object ["properties" .= object ["location" .= object ["type" .= ("geo_point" :: Text)]]]
data Location = Location
  { lat :: Double
  , lon :: Double
  }
  deriving stock (Eq, Generic, Show)
data Tweet = Tweet
  { user :: Text
  , postDate :: UTCTime
  , message :: Text
  , age :: Int
  , location :: Location
  }
  deriving stock (Eq, Generic, Show)
exampleTweet =
  Tweet
    { user = "bitemyapp"
    , postDate = UTCTime (ModifiedJulianDay 55000) (secondsToDiffTime 10)
    , message = "Use haskell!"
    , age = 10000
    , location = Location 40.12 (-71.34)
    }
instance ToJSON Tweet where
  toJSON = genericToJSON defaultOptions
instance FromJSON Tweet where
  parseJSON = genericParseJSON defaultOptions
instance ToJSON Location where
  toJSON = genericToJSON defaultOptions
instance FromJSON Location where
  parseJSON = genericParseJSON defaultOptions
data BulkTest = BulkTest
  { name :: Text
  }
  deriving stock (Eq, Generic, Show)
instance FromJSON BulkTest where
  parseJSON = genericParseJSON defaultOptions
instance ToJSON BulkTest where
  toJSON = genericToJSON defaultOptions
:}
withBH :: ManagerSettings -> Server -> BH IO a -> IO a Source #
Convenience function that sets up a manager and BHEnv and runs
the given set of bloodhound operations. Connections will be
pipelined automatically in accordance with the given manager
settings in IO. If you've got your own monad transformer stack, you
should use runBH directly.
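For example, a minimal sketch of running a few operations in a single withBH call, reusing testServer, defaultIndexSettings, and testIndex from the preamble above (the name checkTwitterIndex is introduced here for illustration):

-- Minimal sketch: one withBH call runs several BH IO actions in sequence.
checkTwitterIndex :: IO Bool
checkTwitterIndex =
  withBH defaultManagerSettings testServer $ do
    _ <- createIndex defaultIndexSettings testIndex  -- acknowledge and discard
    indexExists testIndex                            -- returned in IO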
Indices
createIndex :: MonadBH m => IndexSettings -> IndexName -> m Acknowledged Source #
createIndex will create an index given a Server, IndexSettings, and an IndexName.
>>>response <- runBH' $ createIndex defaultIndexSettings (IndexName "didimakeanindex")
>>>isSuccess response
True
>>>runBH' $ indexExists (IndexName "didimakeanindex")
True
createIndexWith Source #
Arguments
| :: MonadBH m | |
| => [UpdatableIndexSetting] | |
| -> Int | shard count |
| -> IndexName | |
| -> m Acknowledged |
Create an index, providing it with any number of settings. This
is more expressive than createIndex but is more verbose
for the common case of configuring only the shard count and
replica count.
flushIndex :: MonadBH m => IndexName -> m ShardResult Source #
flushIndex will flush an index given a Server and an IndexName.
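A hedged one-line sketch, reusing runBH' and testIndex from the preamble:

-- Sketch: flush the example index and return the per-shard result.
flushTwitter :: IO ShardResult
flushTwitter = runBH' (flushIndex testIndex)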
deleteIndex :: MonadBH m => IndexName -> m Acknowledged Source #
deleteIndex will delete an index given a Server and an IndexName.
>>>_ <- runBH' $ createIndex defaultIndexSettings (IndexName "didimakeanindex")
>>>response <- runBH' $ deleteIndex (IndexName "didimakeanindex")
>>>isSuccess response
True
>>>runBH' $ indexExists (IndexName "didimakeanindex")
False
updateIndexSettings :: MonadBH m => NonEmpty UpdatableIndexSetting -> IndexName -> m Acknowledged Source #
updateIndexSettings will apply a non-empty list of setting updates to an index
>>>_ <- runBH' $ createIndex defaultIndexSettings (IndexName "unconfiguredindex")
>>>response <- runBH' $ updateIndexSettings (BlocksWrite False :| []) (IndexName "unconfiguredindex")
>>>isSuccess response
True
getIndexSettings :: MonadBH m => IndexName -> m IndexSettingsSummary Source #
forceMergeIndex :: MonadBH m => IndexSelection -> ForceMergeIndexSettings -> m ShardsResult Source #
The force merge API allows you to force a merge of one or more indices. The merge relates to the number of segments a Lucene index holds within each shard. The force merge operation reduces the number of segments by merging them.
This call will block until the merge is complete. If the HTTP connection is lost, the request will continue in the background, and any new requests will block until the previous force merge is complete.
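A hedged sketch of force-merging the example index, assuming IndexList and defaultForceMergeIndexSettings are exported as in recent Bloodhound releases (runBH' and testIndex come from the preamble):

-- Sketch: force merge a single index down to fewer segments.
-- (:|) builds the NonEmpty list of index names.
mergeTwitter :: IO ShardsResult
mergeTwitter =
  runBH' $ forceMergeIndex (IndexList (testIndex :| [])) defaultForceMergeIndexSettings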
indexExists :: MonadBH m => IndexName -> m Bool Source #
indexExists enables you to check whether an index exists. Returns a Bool
in IO.
>>>exists <- runBH' $ indexExists testIndex
openIndex :: MonadBH m => IndexName -> m Acknowledged Source #
openIndex opens an index given a Server and an IndexName. Explained in further detail at
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
>>>response <- runBH' $ openIndex testIndex
closeIndex :: MonadBH m => IndexName -> m Acknowledged Source #
closeIndex closes an index given a Server and an IndexName. Explained in further detail at
http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-open-close.html
>>>response <- runBH' $ closeIndex testIndex
listIndices :: MonadBH m => m [IndexName] Source #
listIndices returns a list of all index names on a given Server
catIndices :: MonadBH m => m [(IndexName, Int)] Source #
catIndices returns a list of all index names on a given Server as well as their doc counts
waitForYellowIndex :: MonadBH m => IndexName -> m HealthStatus Source #
Block until the index becomes available for indexing documents. This is useful for integration tests in which indices are rapidly created and deleted.
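A sketch of the intended test-setup usage, reusing runBH' and testIndex from the preamble:

-- Sketch: create the index, then block until it is at least yellow
-- before indexing documents into it.
setUpIndex :: IO HealthStatus
setUpIndex = runBH' $ do
  _ <- createIndex defaultIndexSettings testIndex
  waitForYellowIndex testIndex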
data HealthStatus Source #
Constructors
Instances
| FromJSON HealthStatus Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes | |
| Show HealthStatus Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods showsPrec :: Int -> HealthStatus -> ShowS # show :: HealthStatus -> String # showList :: [HealthStatus] -> ShowS # | |
| Eq HealthStatus Source # | |
Index Aliases
updateIndexAliases :: MonadBH m => NonEmpty IndexAliasAction -> m Acknowledged Source #
updateIndexAliases updates the server's index alias
table. Operations are atomic. Explained in further detail at
https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html
>>>let src = IndexName "a-real-index"
>>>let aliasName = IndexName "an-alias"
>>>let iAlias = IndexAlias src (IndexAliasName aliasName)
>>>let aliasCreate = IndexAliasCreate Nothing Nothing
>>>_ <- runBH' $ deleteIndex src
>>>isSuccess <$> runBH' (createIndex defaultIndexSettings src)
True
>>>runBH' $ indexExists src
True
>>>isSuccess <$> runBH' (updateIndexAliases (AddAlias iAlias aliasCreate :| []))
True
>>>runBH' $ indexExists aliasName
True
getIndexAliases :: MonadBH m => m IndexAliasesSummary Source #
Get all aliases configured on the server.
deleteIndexAlias :: MonadBH m => IndexAliasName -> m Acknowledged Source #
Delete a single alias, removing it from all indices it is currently associated with.
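A sketch that removes the alias created in the updateIndexAliases example above, using the same IndexAliasName wrapping as that example:

-- Sketch: drop the "an-alias" alias from every index it points at.
dropAlias :: IO Acknowledged
dropAlias = runBH' $ deleteIndexAlias (IndexAliasName (IndexName "an-alias"))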
Index Templates
putTemplate :: MonadBH m => IndexTemplate -> TemplateName -> m Acknowledged Source #
putTemplate creates a template given an IndexTemplate and a TemplateName.
Explained in further detail at
https://www.elastic.co/guide/en/elasticsearch/reference/1.7/indices-templates.html
>>>let idxTpl = IndexTemplate [IndexPattern "tweet-*"] (Just (IndexSettings (ShardCount 1) (ReplicaCount 1))) [toJSON TweetMapping]
>>>resp <- runBH' $ putTemplate idxTpl (TemplateName "tweet-tpl")
templateExists :: MonadBH m => TemplateName -> m Bool Source #
templateExists checks to see if a template exists.
>>>exists <- runBH' $ templateExists (TemplateName "tweet-tpl")
deleteTemplate :: MonadBH m => TemplateName -> m Acknowledged Source #
deleteTemplate is an HTTP DELETE and deletes a template.
>>>let idxTpl = IndexTemplate [IndexPattern "tweet-*"] (Just (IndexSettings (ShardCount 1) (ReplicaCount 1))) [toJSON TweetMapping]
>>>_ <- runBH' $ putTemplate idxTpl (TemplateName "tweet-tpl")
>>>resp <- runBH' $ deleteTemplate (TemplateName "tweet-tpl")
Mapping
putMapping :: forall r a m. (MonadBH m, FromJSON r, ToJSON a) => IndexName -> a -> m r Source #
putMapping is an HTTP PUT and has upsert semantics. Mappings are schemas
for documents in indexes.
>>>_ <- runBH' $ createIndex defaultIndexSettings testIndex
>>>resp <- runBH' $ putMapping testIndex TweetMapping
>>>print resp
Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("content-type","application/json; charset=UTF-8"),("content-encoding","gzip"),("transfer-encoding","chunked")], responseBody = "{\"acknowledged\":true}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
Documents
indexDocument :: forall doc m. (MonadBH m, ToJSON doc) => IndexName -> IndexDocumentSettings -> doc -> DocId -> m IndexedDocument Source #
indexDocument is the primary way to save a single document in
Elasticsearch. The document itself is simply something we can
convert into a JSON Value. The DocId will function as the
primary key for the document. You are encouraged to generate
your own ids and not rely on Elasticsearch's automatic id
generation. Read more about it here:
https://github.com/bitemyapp/bloodhound/issues/107
>>>resp <- runBH' $ indexDocument testIndex defaultIndexDocumentSettings exampleTweet (DocId "1")
>>>print resp
Response {responseStatus = Status {statusCode = 200, statusMessage = "OK"}, responseVersion = HTTP/1.1, responseHeaders = [("content-type","application/json; charset=UTF-8"),("content-encoding","gzip"),("content-length","152")], responseBody = "{\"_index\":\"twitter\",\"_type\":\"_doc\",\"_id\":\"1\",\"_version\":2,\"result\":\"updated\",\"_shards\":{\"total\":1,\"successful\":1,\"failed\":0},\"_seq_no\":1,\"_primary_term\":1}", responseCookieJar = CJ {expose = []}, responseClose' = ResponseClose}
updateDocument :: forall patch m. (MonadBH m, ToJSON patch) => IndexName -> IndexDocumentSettings -> patch -> DocId -> m IndexedDocument Source #
updateDocument provides a way to perform a partial update of an
already indexed document.
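A hedged sketch of a partial update that uses a raw Aeson object as the patch so only the age field changes (assumes the preamble's runBH' and testIndex, plus the document indexed under DocId "1" in the indexDocument example above):

-- Sketch: the patch only needs a ToJSON instance covering the fields to change.
bumpTweetAge :: IO IndexedDocument
bumpTweetAge =
  runBH' $
    updateDocument testIndex defaultIndexDocumentSettings
      (object ["age" .= (10001 :: Int)])
      (DocId "1")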
getDocument :: FromJSON a => IndexName -> DocId -> BHRequest StatusIndependant (EsResult a) Source #
getDocument is a straight-forward way to fetch a single document from
Elasticsearch using a Server, IndexName, and a DocId.
The DocId is the primary key for your Elasticsearch document.
>>>yourDoc <- runBH' $ getDocument testIndex (DocId "1")
documentExists :: MonadBH m => IndexName -> DocId -> m Bool Source #
documentExists enables you to check if a document exists.
deleteDocument :: MonadBH m => IndexName -> DocId -> m IndexedDocument Source #
deleteDocument is the primary way to delete a single document.
>>>_ <- runBH' $ deleteDocument testIndex (DocId "1")
deleteByQuery :: MonadBH m => IndexName -> Query -> m DeletedDocuments Source #
deleteByQuery performs a deletion on every document that matches a query.
>>>let query = TermQuery (Term "user" "bitemyapp") Nothing
>>>_ <- runBH' $ deleteByQuery testIndex query
data IndexedDocument Source #
Constructors
| IndexedDocument | |
Fields
| |
Instances
| FromJSON IndexedDocument Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods parseJSON :: Value -> Parser IndexedDocument # parseJSONList :: Value -> Parser [IndexedDocument] # | |
| Show IndexedDocument Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods showsPrec :: Int -> IndexedDocument -> ShowS # show :: IndexedDocument -> String # showList :: [IndexedDocument] -> ShowS # | |
| Eq IndexedDocument Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods (==) :: IndexedDocument -> IndexedDocument -> Bool # (/=) :: IndexedDocument -> IndexedDocument -> Bool # | |
data DeletedDocuments Source #
Constructors
Instances
| FromJSON DeletedDocuments Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods parseJSON :: Value -> Parser DeletedDocuments # parseJSONList :: Value -> Parser [DeletedDocuments] # | |
| Show DeletedDocuments Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods showsPrec :: Int -> DeletedDocuments -> ShowS # show :: DeletedDocuments -> String # showList :: [DeletedDocuments] -> ShowS # | |
| Eq DeletedDocuments Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods (==) :: DeletedDocuments -> DeletedDocuments -> Bool # (/=) :: DeletedDocuments -> DeletedDocuments -> Bool # | |
data DeletedDocumentsRetries Source #
Constructors
| DeletedDocumentsRetries | |
Fields | |
Instances
| FromJSON DeletedDocumentsRetries Source # | |
| Show DeletedDocumentsRetries Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods showsPrec :: Int -> DeletedDocumentsRetries -> ShowS # show :: DeletedDocumentsRetries -> String # showList :: [DeletedDocumentsRetries] -> ShowS # | |
| Eq DeletedDocumentsRetries Source # | |
Defined in Database.Bloodhound.Internal.Versions.Common.Types.Nodes Methods (==) :: DeletedDocumentsRetries -> DeletedDocumentsRetries -> Bool # (/=) :: DeletedDocumentsRetries -> DeletedDocumentsRetries -> Bool # | |
Searching
searchByIndex :: forall a m. (MonadBH m, FromJSON a) => IndexName -> Search -> m (SearchResult a) Source #
searchByIndex, given a Search and an IndexName, will perform that search
within an index on an Elasticsearch server.
>>>let query = TermQuery (Term "user" "bitemyapp") Nothing
>>>let search = mkSearch (Just query) Nothing
>>>response <- runBH' $ searchByIndex testIndex search
searchByIndices :: forall a m. (MonadBH m, FromJSON a) => NonEmpty IndexName -> Search -> m (SearchResult a) Source #
searchByIndices is a variant of searchByIndex that executes a
Search over many indices. This is much faster than using
mapM to searchByIndex over a collection since it only
causes a single HTTP request to be emitted.
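A sketch of querying two indices in one request (the second index name is hypothetical; the Tweet type and runBH' come from the preamble):

-- Sketch: one HTTP request searching "twitter" and "twitter-archive" together.
searchBoth :: IO (SearchResult Tweet)
searchBoth =
  runBH' $
    searchByIndices
      (IndexName "twitter" :| [IndexName "twitter-archive"])
      (mkSearch (Just (TermQuery (Term "user" "bitemyapp") Nothing)) Nothing)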
searchByIndexTemplate :: forall a m. (MonadBH m, FromJSON a) => IndexName -> SearchTemplate -> m (SearchResult a) Source #
searchByIndexTemplate, given a SearchTemplate and an IndexName, will perform that search
within an index on an Elasticsearch server.
>>>let query = SearchTemplateSource "{\"query\": { \"match\" : { \"{{my_field}}\" : \"{{my_value}}\" } }, \"size\" : \"{{my_size}}\"}"
>>>let search = mkSearchTemplate (Right query) Nothing
>>>response <- runBH' $ searchByIndexTemplate testIndex search
searchByIndicesTemplate :: forall a m. (MonadBH m, FromJSON a) => NonEmpty IndexName -> SearchTemplate -> m (SearchResult a) Source #
searchByIndicesTemplate is a variant of searchByIndexTemplate that executes a
SearchTemplate over many indices. This is much faster than using
mapM to searchByIndexTemplate over a collection since it only
causes a single HTTP request to be emitted.
scanSearch :: (FromJSON a, MonadBH m) => IndexName -> Search -> m [Hit a] Source #
scanSearch uses Elasticsearch's scroll API
for a given IndexName and Search. Note that this will
consume the entire search result set and will be doing O(n) list
appends so this may not be suitable for large result sets. In that
case, getInitialScroll and advanceScroll are good low level
tools. You should be able to hook them up trivially to conduit,
pipes, or your favorite streaming IO abstraction of choice. Note
that ordering on the search would destroy performance and thus is
ignored.
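A sketch of collecting every matching hit in one call (assumes the preamble's runBH', testIndex, and Tweet type):

-- Sketch: scanSearch handles the scroll bookkeeping and returns all hits.
allBitemyappTweets :: IO [Hit Tweet]
allBitemyappTweets =
  runBH' $
    scanSearch testIndex (mkSearch (Just (TermQuery (Term "user" "bitemyapp") Nothing)) Nothing)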
getInitialScroll :: forall a m. (MonadBH m, FromJSON a) => IndexName -> Search -> m (ParsedEsResponse (SearchResult a)) Source #
For a given search, request a scroll for efficient streaming of
search results. Note that the search is put into SearchTypeScan
mode and thus results will not be sorted. Combine this with
advanceScroll to efficiently stream through the full result set
getInitialSortedScroll :: forall a m. (MonadBH m, FromJSON a) => IndexName -> Search -> m (SearchResult a) Source #
For a given search, request a scroll for efficient streaming of
search results. Combine this with advanceScroll to efficiently
stream through the full result set. Note that this search respects
sorting and may be less efficient than getInitialScroll.
advanceScroll Source #
Arguments
| :: forall a m. (MonadBH m, FromJSON a) | |
| => ScrollId | |
| -> NominalDiffTime | How long should the snapshot of data be kept around? This timeout is updated every time |
| -> m (SearchResult a) |
Use the given scroll to fetch the next page of documents. If there are no further pages, 'SearchResult.searchHits.hits' will be '[]'.
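A hedged sketch of driving the scroll manually with getInitialScroll and advanceScroll, assuming ParsedEsResponse is an Either over EsError and that SearchResult exposes scrollId, searchHits, and hits as in recent Bloodhound releases (runBH', testIndex, and Tweet come from the preamble):

-- Sketch: accumulate pages until a page comes back empty.
collectAllHits :: IO [Hit Tweet]
collectAllHits = runBH' $ do
  let search = mkSearch (Just (TermQuery (Term "user" "bitemyapp") Nothing)) Nothing
  initial <- getInitialScroll testIndex search
  case initial of
    Left _err -> pure []
    Right result -> go result (hits (searchHits result))
  where
    go result acc =
      case scrollId result of
        Nothing -> pure acc
        Just sid -> do
          next <- advanceScroll sid 60 -- keep the scroll snapshot alive for 60 seconds
          case hits (searchHits next) of
            [] -> pure acc
            newHits -> go next (acc <> newHits)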
refreshIndex :: MonadBH m => IndexName -> m ShardResult Source #
refreshIndex will force a refresh on an index. You must
do this if you want to read what you wrote.
>>>_ <- runBH' $ createIndex defaultIndexSettings testIndex
>>>_ <- runBH' $ refreshIndex testIndex
mkSearch :: Maybe Query -> Maybe Filter -> Search Source #
mkSearch is a helper function for defaulting additional fields of a Search
to Nothing in case you only care about your Query and Filter. Use record update
syntax if you want to add things like aggregations or highlights while still using
this helper function.
>>>let query = TermQuery (Term "user" "bitemyapp") Nothing
>>>mkSearch (Just query) Nothing
Search {queryBody = Just (TermQuery (Term {termField = "user", termValue = "bitemyapp"}) Nothing), filterBody = Nothing, searchAfterKey = Nothing, sortBody = Nothing, aggBody = Nothing, highlight = Nothing, trackSortScores = False, from = From 0, size = Size 10, searchType = SearchTypeQueryThenFetch, fields = Nothing, source = Nothing}
mkAggregateSearch :: Maybe Query -> Aggregations -> Search Source #
mkAggregateSearch is a helper function that defaults everything in a Search except for
the Query and the Aggregation.
>>>let terms = TermsAgg $ (mkTermsAggregation "user") { termCollectMode = Just BreadthFirst }
>>>terms
TermsAgg (TermsAggregation {term = Left "user", termInclude = Nothing, termExclude = Nothing, termOrder = Nothing, termMinDocCount = Nothing, termSize = Nothing, termShardSize = Nothing, termCollectMode = Just BreadthFirst, termExecutionHint = Nothing, termAggs = Nothing})
>>>let myAggregation = mkAggregateSearch Nothing $ mkAggregations "users" terms
mkHighlightSearch :: Maybe Query -> Highlights -> Search Source #
mkHighlightSearch is a helper function that defaults everything in a Search except for
the Query and the Highlights.
>>>let query = QueryMatchQuery $ mkMatchQuery (FieldName "_all") (QueryString "haskell")
>>>let testHighlight = Highlights Nothing [FieldHighlight (FieldName "message") Nothing]
>>>let search = mkHighlightSearch (Just query) testHighlight
mkSearchTemplate :: Either SearchTemplateId SearchTemplateSource -> TemplateQueryKeyValuePairs -> SearchTemplate Source #
mkSearchTemplate is a helper function for defaulting additional fields of a SearchTemplate
to Nothing. Use record update syntax if you want to add things.
bulk :: MonadBH m => Vector BulkOperation -> m BulkResponse Source #
bulk uses
Elasticsearch's bulk API
to perform bulk operations. The BulkOperation data type encodes the
index/update/delete/create operations. You pass a Vector of BulkOperations
and a Server to bulk in order to send those operations up to your Elasticsearch
server to be performed. I changed from [BulkOperation] to a Vector due to memory overhead.
>>>let stream = V.fromList [BulkIndex testIndex (DocId "2") (toJSON (BulkTest "blah"))]
>>>_ <- runBH' $ bulk stream
>>>_ <- runBH' $ refreshIndex testIndex
pageSearch Source #
Arguments
| :: From | The result offset |
| -> Size | The number of results to return |
| -> Search | The current seach |
| -> Search | The paged search |
pageSearch is a helper function that takes a search and assigns the from
and size fields for the search. The from parameter defines the offset
from the first result you want to fetch. The size parameter allows you to
configure the maximum number of hits to be returned.
>>>let query = QueryMatchQuery $ mkMatchQuery (FieldName "_all") (QueryString "haskell")
>>>let search = mkSearch (Just query) Nothing
>>>search
Search {queryBody = Just (QueryMatchQuery (MatchQuery {matchQueryField = FieldName "_all", matchQueryQueryString = QueryString "haskell", matchQueryOperator = Or, matchQueryZeroTerms = ZeroTermsNone, matchQueryCutoffFrequency = Nothing, matchQueryMatchType = Nothing, matchQueryAnalyzer = Nothing, matchQueryMaxExpansions = Nothing, matchQueryLenient = Nothing, matchQueryBoost = Nothing})), filterBody = Nothing, sortBody = Nothing, aggBody = Nothing, highlight = Nothing, trackSortScores = False, from = From 0, size = Size 10, searchType = SearchTypeQueryThenFetch, fields = Nothing, source = Nothing}
>>>pageSearch (From 10) (Size 100) search
Search {queryBody = Just (QueryMatchQuery (MatchQuery {matchQueryField = FieldName "_all", matchQueryQueryString = QueryString "haskell", matchQueryOperator = Or, matchQueryZeroTerms = ZeroTermsNone, matchQueryCutoffFrequency = Nothing, matchQueryMatchType = Nothing, matchQueryAnalyzer = Nothing, matchQueryMaxExpansions = Nothing, matchQueryLenient = Nothing, matchQueryBoost = Nothing})), filterBody = Nothing, sortBody = Nothing, aggBody = Nothing, highlight = Nothing, trackSortScores = False, from = From 10, size = Size 100, searchType = SearchTypeQueryThenFetch, fields = Nothing, source = Nothing}
mkShardCount :: Int -> Maybe ShardCount Source #
mkShardCount is a straight-forward smart constructor for ShardCount
which rejects Int values below 1 and above 1000.
>>>mkShardCount 10
Just (ShardCount 10)
mkReplicaCount :: Int -> Maybe ReplicaCount Source #
mkReplicaCount is a straight-forward smart constructor for ReplicaCount
which rejects Int values below 0 and above 1000.
>>>mkReplicaCount 10
Just (ReplicaCount 10)
Templates
storeSearchTemplate :: MonadBH m => SearchTemplateId -> SearchTemplateSource -> m Acknowledged Source #
storeSearchTemplate saves a SearchTemplateSource to be used later.
getSearchTemplate :: MonadBH m => SearchTemplateId -> m GetTemplateScript Source #
getSearchTemplate retrieves information about a stored SearchTemplateSource.
deleteSearchTemplate :: MonadBH m => SearchTemplateId -> m Acknowledged Source #
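A hedged sketch of the full template lifecycle (SearchTemplateId is assumed to be a Text newtype, mirroring SearchTemplateSource used elsewhere in this module; runBH' comes from the preamble):

-- Sketch: store a template, look it up, then delete it.
templateLifecycle :: IO Acknowledged
templateLifecycle = runBH' $ do
  let tid = SearchTemplateId "tweet-search"
      src = SearchTemplateSource "{\"query\": {\"match\": {\"user\": \"{{username}}\"}}}"
  _ <- storeSearchTemplate tid src
  _ <- getSearchTemplate tid
  deleteSearchTemplate tid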
Snapshot/Restore
Snapshot Repos
getSnapshotRepos :: MonadBH m => SnapshotRepoSelection -> m [GenericSnapshotRepo] Source #
getSnapshotRepos gets the definitions of a subset of the
defined snapshot repos.
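A sketch, assuming AllSnapshotRepos is the SnapshotRepoSelection constructor that selects every configured repo:

-- Sketch: fetch the definitions of all snapshot repos.
listRepos :: IO [GenericSnapshotRepo]
listRepos = runBH' (getSnapshotRepos AllSnapshotRepos)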
updateSnapshotRepo Source #
Arguments
| :: (MonadBH m, SnapshotRepo repo) | |
| => SnapshotRepoUpdateSettings | Use |
| -> repo | |
| -> m Acknowledged |
Create or update a snapshot repo
verifySnapshotRepo :: MonadBH m => SnapshotRepoName -> m SnapshotVerification Source #
Verify if a snapshot repo is working. NOTE: this API did not make it into Elasticsearch until 1.4. If you use an older version, you will get an error here.
deleteSnapshotRepo :: MonadBH m => SnapshotRepoName -> m Acknowledged Source #
Snapshots
createSnapshot :: MonadBH m => SnapshotRepoName -> SnapshotName -> SnapshotCreateSettings -> m Acknowledged Source #
Create and start a snapshot
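A hedged sketch, assuming a repo named "backups" has already been registered and that defaultSnapshotCreateSettings is exported as in recent Bloodhound releases:

-- Sketch: start a snapshot into the "backups" repo.
takeSnapshot :: IO Acknowledged
takeSnapshot =
  runBH' $
    createSnapshot (SnapshotRepoName "backups") (SnapshotName "snap-1") defaultSnapshotCreateSettings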
getSnapshots :: MonadBH m => SnapshotRepoName -> SnapshotSelection -> m [SnapshotInfo] Source #
Get info about known snapshots given a pattern and repo name.
deleteSnapshot :: MonadBH m => SnapshotRepoName -> SnapshotName -> m Acknowledged Source #
Delete a snapshot. Cancels if it is running.
Restoring Snapshots
restoreSnapshot Source #
Arguments
| :: MonadBH m | |
| => SnapshotRepoName | |
| -> SnapshotName | |
| -> SnapshotRestoreSettings | Start with |
| -> m Accepted |
Restore a snapshot to the cluster. See https://www.elastic.co/guide/en/elasticsearch/reference/1.7/modules-snapshots.html#_restore for more details.
Reindex
reindex :: MonadBH m => ReindexRequest -> m ReindexResponse Source #
reindexAsync :: MonadBH m => ReindexRequest -> m TaskNodeId Source #
Task
getTask :: (MonadBH m, FromJSON a) => TaskNodeId -> m (TaskResponse a) Source #
Nodes
getNodesInfo :: MonadBH m => NodeSelection -> m NodesInfo Source #
getNodesStats :: MonadBH m => NodeSelection -> m NodesStats Source #
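A sketch of pulling stats for the node the client is connected to, assuming LocalNode is a NodeSelection constructor as in recent Bloodhound releases:

-- Sketch: stats for the locally connected node only.
localStats :: IO NodesStats
localStats = runBH' (getNodesStats LocalNode)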
Request Utilities
encodeBulkOperations :: Vector BulkOperation -> ByteString Source #
encodeBulkOperations is a convenience function for dumping a vector of BulkOperation
into a ByteString.
>>>let bulkOps = V.fromList [BulkIndex testIndex (DocId "2") (toJSON (BulkTest "blah"))]
>>>encodeBulkOperations bulkOps
"\n{\"index\":{\"_id\":\"2\",\"_index\":\"twitter\"}}\n{\"name\":\"blah\"}\n"
encodeBulkOperation :: BulkOperation -> ByteString Source #
encodeBulkOperation is a convenience function for dumping a single BulkOperation
into a ByteString.
>>>let bulkOp = BulkIndex testIndex (DocId "2") (toJSON (BulkTest "blah"))
>>>encodeBulkOperation bulkOp
"{\"index\":{\"_id\":\"2\",\"_index\":\"twitter\"}}\n{\"name\":\"blah\"}"
Authentication
basicAuthHook :: Monad m => EsUsername -> EsPassword -> Request -> m Request Source #
This is a hook that can be set via the bhRequestHook function
that will authenticate all requests using an HTTP Basic
Authentication header. Note that it is *strongly* recommended that
this option only be used over an SSL connection.
> (mkBHEnv myServer myManager) { bhRequestHook = basicAuthHook (EsUsername "myuser") (EsPassword "mypass") }
Count
countByIndex :: MonadBH m => IndexName -> CountQuery -> m CountResponse Source #
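A hedged sketch, assuming CountQuery simply wraps a Query (runBH' and testIndex come from the preamble):

-- Sketch: count the documents matching a term query without fetching them.
countTweets :: IO CountResponse
countTweets =
  runBH' $ countByIndex testIndex (CountQuery (TermQuery (Term "user" "bitemyapp") Nothing))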
Generic
newtype Acknowledged Source #
Constructors
| Acknowledged | |
Fields | |
Instances
| FromJSON Acknowledged Source # | |
| Show Acknowledged Source # | |
Defined in Database.Bloodhound.Internal.Client.BHRequest Methods showsPrec :: Int -> Acknowledged -> ShowS # show :: Acknowledged -> String # showList :: [Acknowledged] -> ShowS # | |
| Eq Acknowledged Source # | |
newtype Accepted Source #
Constructors
| Accepted | |
Fields
| |
data IgnoredBody Source #
Constructors
| IgnoredBody |
Instances
| FromJSON IgnoredBody Source # | |
| Show IgnoredBody Source # | |
Defined in Database.Bloodhound.Internal.Client.BHRequest Methods showsPrec :: Int -> IgnoredBody -> ShowS # show :: IgnoredBody -> String # showList :: [IgnoredBody] -> ShowS # | |
| Eq IgnoredBody Source # | |