package face
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"github.com/Azure/go-autorest/tracing"
"io"
"net/http"
)
// Client is an API client for face detection, verification, and identification.
type Client struct {
BaseClient
}
// NewClient creates an instance of the Client client.
func NewClient(endpoint string) Client {
return Client{New(endpoint)}
}
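
// A minimal construction sketch (not generated code, written from a consumer
// package): the endpoint and subscription key are placeholders, and the
// authorizer shown is go-autorest's Cognitive Services key authorizer.
//
//	client := face.NewClient("https://westus.api.cognitive.microsoft.com")
//	client.Authorizer = autorest.NewCognitiveServicesAuthorizer("<subscription-key>")
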
// DetectWithStream detects human faces in an image and returns face rectangles, optionally with faceIds, landmarks,
// and attributes.
// * No image will be stored. Only the extracted face feature(s) will be stored on the server. The faceId is an
// identifier of the face feature and will be used in [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). The stored face feature(s)
// will expire and be deleted 24 hours after the original detection call.
// * Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile,
// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results
// returned for specific attributes may not be highly accurate.
// * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.
// * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.
// * For optimal results when querying [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) ('returnFaceId' is true),
// please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).
// * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with
// dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.
// * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to
// [How to specify a detection
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'detection_01': | The default detection model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). Recommended for near-frontal
// face detection. Faces may not be detected in scenarios with exceptionally large-angle (head-pose) faces, occluded
// faces or wrong image orientation. |
// | 'detection_02': | Detection model released in May 2019 with improved accuracy, especially on small, side-view and
// blurry faces. |
//
// * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify or Find Similar
// are needed, please specify the recognition model with the 'recognitionModel' parameter. The default value for
// 'recognitionModel' is 'recognition_01'; if the latest model is needed, please explicitly specify the model you need
// in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. For
// more details, please refer to [How to specify a recognition
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'recognition_01': | The default recognition model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). All faceIds created before
// March 2019 are bound to this recognition model. |
// | 'recognition_02': | Recognition model released in March 2019. |
// | 'recognition_03': | Recognition model released in May 2020. 'recognition_03' is recommended since its overall
// accuracy is improved compared with 'recognition_01' and 'recognition_02'. |
// Parameters:
// imageParameter - an image stream.
// returnFaceID - a value indicating whether the operation should return faceIds of detected faces.
// returnFaceLandmarks - a value indicating whether the operation should return landmarks of the detected
// faces.
// returnFaceAttributes - analyze and return one or more specified face attributes as a comma-separated
// string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose,
// smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational
// and time cost.
// recognitionModel - name of the recognition model. The recognition model is used when the face features are
// extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model
// name can be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup -
// Create. The default value is 'recognition_01'; if the latest model is needed, please explicitly specify the
// model you need.
// returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in the
// response.
// detectionModel - name of the detection model. The detection model is used to detect faces in the submitted
// image. A detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or
// (Large)PersonGroup - Add Face. The default value is 'detection_01'; if another model is needed, please
// explicitly specify it.
func (client Client) DetectWithStream(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (result ListDetectedFace, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.DetectWithStream")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
req, err := client.DetectWithStreamPreparer(ctx, imageParameter, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", nil, "Failure preparing request")
return
}
resp, err := client.DetectWithStreamSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", resp, "Failure sending request")
return
}
result, err = client.DetectWithStreamResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "DetectWithStream", resp, "Failure responding to request")
return
}
return
}
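
// A hedged usage sketch for DetectWithStream (not generated code, written from
// a consumer package with a configured client): open a local image and send it
// as the request body. Passing nil pointers and empty model names falls back
// to the defaults applied by the preparer (returnFaceId=true,
// returnFaceLandmarks=false, recognition_01, detection_01). The file path is a
// placeholder.
//
//	f, err := os.Open("photo.jpg")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer f.Close()
//	list, err := client.DetectWithStream(context.Background(), f, nil, nil, nil, "", nil, "")
//	if err != nil {
//		log.Fatal(err)
//	}
//	if list.Value != nil {
//		log.Printf("detected %d face(s)", len(*list.Value))
//	}
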
// DetectWithStreamPreparer prepares the DetectWithStream request.
func (client Client) DetectWithStreamPreparer(ctx context.Context, imageParameter io.ReadCloser, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
queryParameters := map[string]interface{}{}
if returnFaceID != nil {
queryParameters["returnFaceId"] = autorest.Encode("query", *returnFaceID)
} else {
queryParameters["returnFaceId"] = autorest.Encode("query", true)
}
if returnFaceLandmarks != nil {
queryParameters["returnFaceLandmarks"] = autorest.Encode("query", *returnFaceLandmarks)
} else {
queryParameters["returnFaceLandmarks"] = autorest.Encode("query", false)
}
if len(returnFaceAttributes) > 0 {
queryParameters["returnFaceAttributes"] = autorest.Encode("query", returnFaceAttributes, ",")
}
if len(string(recognitionModel)) > 0 {
queryParameters["recognitionModel"] = autorest.Encode("query", recognitionModel)
} else {
queryParameters["recognitionModel"] = autorest.Encode("query", "recognition_01")
}
if returnRecognitionModel != nil {
queryParameters["returnRecognitionModel"] = autorest.Encode("query", *returnRecognitionModel)
} else {
queryParameters["returnRecognitionModel"] = autorest.Encode("query", false)
}
if len(string(detectionModel)) > 0 {
queryParameters["detectionModel"] = autorest.Encode("query", detectionModel)
} else {
queryParameters["detectionModel"] = autorest.Encode("query", "detection_01")
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/octet-stream"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/detect"),
autorest.WithFile(imageParameter),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DetectWithStreamSender sends the DetectWithStream request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DetectWithStreamSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DetectWithStreamResponder handles the response to the DetectWithStream request. The method always
// closes the http.Response Body.
func (client Client) DetectWithStreamResponder(resp *http.Response) (result ListDetectedFace, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result.Value),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// DetectWithURL detects human faces in an image and returns face rectangles, optionally with faceIds, landmarks, and
// attributes.
// * No image will be stored. Only the extracted face feature(s) will be stored on the server. The faceId is an
// identifier of the face feature and will be used in [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar). The stored face feature(s)
// will expire and be deleted 24 hours after the original detection call.
// * Optional parameters include faceId, landmarks, and attributes. Attributes include age, gender, headPose, smile,
// facialHair, glasses, emotion, hair, makeup, occlusion, accessories, blur, exposure and noise. Some of the results
// returned for specific attributes may not be highly accurate.
// * JPEG, PNG, GIF (the first frame), and BMP format are supported. The allowed image file size is from 1KB to 6MB.
// * Up to 100 faces can be returned for an image. Faces are ranked by face rectangle size from large to small.
// * For optimal results when querying [Face -
// Identify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/identify), [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface), and [Face - Find
// Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) ('returnFaceId' is true),
// please use faces that are: frontal, clear, and with a minimum size of 200x200 pixels (100 pixels between eyes).
// * The minimum detectable face size is 36x36 pixels in an image no larger than 1920x1080 pixels. Images with
// dimensions higher than 1920x1080 pixels will need a proportionally larger minimum face size.
// * Different 'detectionModel' values can be provided. To use and compare different detection models, please refer to
// [How to specify a detection
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-detection-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'detection_01': | The default detection model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). Recommended for near-frontal
// face detection. Faces may not be detected in scenarios with exceptionally large-angle (head-pose) faces, occluded
// faces or wrong image orientation. |
// | 'detection_02': | Detection model released in May 2019 with improved accuracy, especially on small, side-view and
// blurry faces. |
//
// * Different 'recognitionModel' values are provided. If follow-up operations like Verify, Identify or Find Similar
// are needed, please specify the recognition model with the 'recognitionModel' parameter. The default value for
// 'recognitionModel' is 'recognition_01'; if the latest model is needed, please explicitly specify the model you need
// in this parameter. Once specified, the detected faceIds will be associated with the specified recognition model. For
// more details, please refer to [How to specify a recognition
// model](https://docs.microsoft.com/azure/cognitive-services/face/face-api-how-to-topics/specify-recognition-model)
// | Model | Recommended use-case(s) |
// | ---------- | -------- |
// | 'recognition_01': | The default recognition model for [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl). All faceIds created before
// March 2019 are bound to this recognition model. |
// | 'recognition_02': | Recognition model released in March 2019. |
// | 'recognition_03': | Recognition model released in May 2020. 'recognition_03' is recommended since its overall
// accuracy is improved compared with 'recognition_01' and 'recognition_02'. |
// Parameters:
// imageURL - a JSON document with a URL pointing to the image that is to be analyzed.
// returnFaceID - a value indicating whether the operation should return faceIds of detected faces.
// returnFaceLandmarks - a value indicating whether the operation should return landmarks of the detected
// faces.
// returnFaceAttributes - analyze and return one or more specified face attributes as a comma-separated
// string like "returnFaceAttributes=age,gender". Supported face attributes include age, gender, headPose,
// smile, facialHair, glasses and emotion. Note that each face attribute analysis has additional computational
// and time cost.
// recognitionModel - name of the recognition model. The recognition model is used when the face features are
// extracted and associated with detected faceIds, (Large)FaceList or (Large)PersonGroup. A recognition model
// name can be provided when performing Face - Detect or (Large)FaceList - Create or (Large)PersonGroup -
// Create. The default value is 'recognition_01'; if the latest model is needed, please explicitly specify the
// model you need.
// returnRecognitionModel - a value indicating whether the operation should return 'recognitionModel' in the
// response.
// detectionModel - name of the detection model. The detection model is used to detect faces in the submitted
// image. A detection model name can be provided when performing Face - Detect or (Large)FaceList - Add Face or
// (Large)PersonGroup - Add Face. The default value is 'detection_01'; if another model is needed, please
// explicitly specify it.
func (client Client) DetectWithURL(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (result ListDetectedFace, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.DetectWithURL")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: imageURL,
Constraints: []validation.Constraint{{Target: "imageURL.URL", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.Client", "DetectWithURL", err.Error())
}
req, err := client.DetectWithURLPreparer(ctx, imageURL, returnFaceID, returnFaceLandmarks, returnFaceAttributes, recognitionModel, returnRecognitionModel, detectionModel)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", nil, "Failure preparing request")
return
}
resp, err := client.DetectWithURLSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", resp, "Failure sending request")
return
}
result, err = client.DetectWithURLResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "DetectWithURL", resp, "Failure responding to request")
return
}
return
}
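
// A hedged usage sketch for DetectWithURL (not generated code, written from a
// consumer package with a configured client): the image URL is a placeholder,
// and the recognition and detection models are pinned explicitly so that
// follow-up Identify/Verify calls use the same models.
//
//	returnID := true
//	imgURL := "https://example.com/photo.jpg"
//	list, err := client.DetectWithURL(context.Background(), face.ImageURL{URL: &imgURL},
//		&returnID, nil, nil, "recognition_03", nil, "detection_02")
//	if err != nil {
//		log.Fatal(err)
//	}
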
// DetectWithURLPreparer prepares the DetectWithURL request.
func (client Client) DetectWithURLPreparer(ctx context.Context, imageURL ImageURL, returnFaceID *bool, returnFaceLandmarks *bool, returnFaceAttributes []AttributeType, recognitionModel RecognitionModel, returnRecognitionModel *bool, detectionModel DetectionModel) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
queryParameters := map[string]interface{}{}
if returnFaceID != nil {
queryParameters["returnFaceId"] = autorest.Encode("query", *returnFaceID)
} else {
queryParameters["returnFaceId"] = autorest.Encode("query", true)
}
if returnFaceLandmarks != nil {
queryParameters["returnFaceLandmarks"] = autorest.Encode("query", *returnFaceLandmarks)
} else {
queryParameters["returnFaceLandmarks"] = autorest.Encode("query", false)
}
if len(returnFaceAttributes) > 0 {
queryParameters["returnFaceAttributes"] = autorest.Encode("query", returnFaceAttributes, ",")
}
if len(string(recognitionModel)) > 0 {
queryParameters["recognitionModel"] = autorest.Encode("query", recognitionModel)
} else {
queryParameters["recognitionModel"] = autorest.Encode("query", "recognition_01")
}
if returnRecognitionModel != nil {
queryParameters["returnRecognitionModel"] = autorest.Encode("query", *returnRecognitionModel)
} else {
queryParameters["returnRecognitionModel"] = autorest.Encode("query", false)
}
if len(string(detectionModel)) > 0 {
queryParameters["detectionModel"] = autorest.Encode("query", detectionModel)
} else {
queryParameters["detectionModel"] = autorest.Encode("query", "detection_01")
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/detect"),
autorest.WithJSON(imageURL),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// DetectWithURLSender sends the DetectWithURL request. The method will close the
// http.Response Body if it receives an error.
func (client Client) DetectWithURLSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// DetectWithURLResponder handles the response to the DetectWithURL request. The method always
// closes the http.Response Body.
func (client Client) DetectWithURLResponder(resp *http.Response) (result ListDetectedFace, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result.Value),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// FindSimilar searches for faces similar to a query face, given its faceId, in a faceId array, a face list or a
// large face list. The faceId array contains the faces created by [Face -
// Detect](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/detectwithurl), which will expire 24 hours
// after creation. A "faceListId" is created by [FaceList -
// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/facelist/create) containing persistedFaceIds that
// will not expire. And a "largeFaceListId" is created by [LargeFaceList -
// Create](https://docs.microsoft.com/rest/api/cognitiveservices/face/largefacelist/create) containing persistedFaceIds
// that will also not expire. Depending on the input, the returned similar faces list contains faceIds or
// persistedFaceIds ranked by similarity.
//
// Find Similar has two working modes, "matchPerson" and "matchFace". "matchPerson" is the default mode; it tries to
// find faces of the same person as the query face by using internal same-person thresholds. It is useful for finding a
// known person's other photos. Note that an empty list will be returned if no faces pass the internal thresholds.
// "matchFace" mode ignores same-person thresholds and returns ranked similar faces anyway, even if the similarity is
// low. It can be used in cases like searching for celebrity-looking faces.
//
// The 'recognitionModel' associated with the query face's faceId should be the same as the 'recognitionModel'
// used by the target faceId array, face list or large face list.
// Parameters:
// body - request body for Find Similar.
func (client Client) FindSimilar(ctx context.Context, body FindSimilarRequest) (result ListSimilarFace, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.FindSimilar")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.FaceID", Name: validation.Null, Rule: true, Chain: nil},
{Target: "body.FaceListID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.FaceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "body.FaceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
}},
{Target: "body.LargeFaceListID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.LargeFaceListID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "body.LargeFaceListID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
}},
{Target: "body.FaceIds", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.FaceIds", Name: validation.MaxItems, Rule: 1000, Chain: nil}}},
{Target: "body.MaxNumOfCandidatesReturned", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMaximum, Rule: int64(1000), Chain: nil},
{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("face.Client", "FindSimilar", err.Error())
}
req, err := client.FindSimilarPreparer(ctx, body)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "FindSimilar", nil, "Failure preparing request")
return
}
resp, err := client.FindSimilarSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "FindSimilar", resp, "Failure sending request")
return
}
result, err = client.FindSimilarResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "FindSimilar", resp, "Failure responding to request")
return
}
return
}
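
// A hedged usage sketch for FindSimilar (not generated code, written from a
// consumer package with a configured client): search a large face list for
// faces matching a previously detected faceId. queryFaceID is assumed to come
// from a prior Detect call; the list id is a placeholder. Field names follow
// the FindSimilarRequest model referenced by the validation rules above.
//
//	listID := "my-large-face-list"
//	max := int32(5)
//	similar, err := client.FindSimilar(context.Background(), face.FindSimilarRequest{
//		FaceID:                     &queryFaceID,
//		LargeFaceListID:            &listID,
//		MaxNumOfCandidatesReturned: &max,
//	})
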
// FindSimilarPreparer prepares the FindSimilar request.
func (client Client) FindSimilarPreparer(ctx context.Context, body FindSimilarRequest) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/findsimilars"),
autorest.WithJSON(body))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// FindSimilarSender sends the FindSimilar request. The method will close the
// http.Response Body if it receives an error.
func (client Client) FindSimilarSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// FindSimilarResponder handles the response to the FindSimilar request. The method always
// closes the http.Response Body.
func (client Client) FindSimilarResponder(resp *http.Response) (result ListSimilarFace, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result.Value),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Group divides candidate faces into groups based on face similarity.
// * The output is one or more disjoint face groups and a messyGroup. A face group contains faces that look similar,
// often of the same person. Face groups are ranked by group size, i.e. number of faces. Notice that faces
// belonging to the same person might be split into several groups in the result.
// * MessyGroup is a special face group containing faces that cannot find any similar counterpart face among the
// original faces. The messyGroup will not appear in the result if all faces found their counterparts.
// * The Group API needs at least 2 candidate faces and at most 1000. We suggest trying [Face -
// Verify](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/verifyfacetoface) when you only have 2
// candidate faces.
// * The 'recognitionModel' associated with the query faces' faceIds should be the same.
// Parameters:
// body - request body for grouping.
func (client Client) Group(ctx context.Context, body GroupRequest) (result GroupResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Group")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.FaceIds", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "body.FaceIds", Name: validation.MaxItems, Rule: 1000, Chain: nil}}}}}}); err != nil {
return result, validation.NewError("face.Client", "Group", err.Error())
}
req, err := client.GroupPreparer(ctx, body)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "Group", nil, "Failure preparing request")
return
}
resp, err := client.GroupSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "Group", resp, "Failure sending request")
return
}
result, err = client.GroupResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "Group", resp, "Failure responding to request")
return
}
return
}
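
// A hedged usage sketch for Group (not generated code, written from a consumer
// package with a configured client): cluster a batch of detected faceIds by
// similarity. faceIDs is assumed to be a []uuid.UUID collected from earlier
// Detect calls (between 2 and 1000 entries), and the Groups field is assumed
// from the GroupResult model.
//
//	res, err := client.Group(context.Background(), face.GroupRequest{FaceIds: &faceIDs})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if res.Groups != nil {
//		log.Printf("%d disjoint group(s)", len(*res.Groups))
//	}
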
// GroupPreparer prepares the Group request.
func (client Client) GroupPreparer(ctx context.Context, body GroupRequest) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/group"),
autorest.WithJSON(body))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// GroupSender sends the Group request. The method will close the
// http.Response Body if it receives an error.
func (client Client) GroupSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// GroupResponder handles the response to the Group request. The method always
// closes the http.Response Body.
func (client Client) GroupResponder(resp *http.Response) (result GroupResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// Identify performs 1-to-many identification to find the closest matches of the specific query person face from a
// person group or large person group.
//
// For each face in the faceIds array, Face Identify will compute similarities between the query face and all the
// faces in the person group (given by personGroupId) or large person group (given by largePersonGroupId), and return
// candidate person(s) for that face ranked by similarity confidence. The person group/large person group should be
// trained to make it ready for identification. See more in [PersonGroup -
// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/persongroup/train) and [LargePersonGroup -
// Train](https://docs.microsoft.com/rest/api/cognitiveservices/face/largepersongroup/train).
//
//
// Remarks:
// * The algorithm allows more than one face to be identified independently in the same request, but no more than 10
// faces.
// * Each person in the person group/large person group could have more than one face, but no more than 248 faces.
// * Higher face image quality means better identification precision. Please use high-quality faces: frontal, clear,
// and with a face size of 200x200 pixels (100 pixels between eyes) or bigger.
// * Number of candidates returned is restricted by maxNumOfCandidatesReturned and confidenceThreshold. If no person is
// identified, the returned candidates will be an empty array.
// * Try [Face - Find Similar](https://docs.microsoft.com/rest/api/cognitiveservices/face/face/findsimilar) when you
// need to find similar faces from a face list/large face list instead of a person group/large person group.
// * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used
// by the target person group or large person group.
// Parameters:
// body - request body for identify operation.
func (client Client) Identify(ctx context.Context, body IdentifyRequest) (result ListIdentifyResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.Identify")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.FaceIds", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "body.FaceIds", Name: validation.MaxItems, Rule: 10, Chain: nil}}},
{Target: "body.PersonGroupID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.PersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "body.PersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
}},
{Target: "body.LargePersonGroupID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.LargePersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "body.LargePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
}},
{Target: "body.MaxNumOfCandidatesReturned", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMaximum, Rule: int64(5), Chain: nil},
{Target: "body.MaxNumOfCandidatesReturned", Name: validation.InclusiveMinimum, Rule: int64(1), Chain: nil},
}}}}}); err != nil {
return result, validation.NewError("face.Client", "Identify", err.Error())
}
req, err := client.IdentifyPreparer(ctx, body)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "Identify", nil, "Failure preparing request")
return
}
resp, err := client.IdentifySender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "Identify", resp, "Failure sending request")
return
}
result, err = client.IdentifyResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "Identify", resp, "Failure responding to request")
return
}
return
}
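
// A hedged usage sketch for Identify (not generated code, written from a
// consumer package with a configured client): identify up to 10 detected
// faceIds against a trained person group. The group id is a placeholder;
// field names follow the IdentifyRequest model referenced by the validation
// rules above.
//
//	groupID := "employees"
//	results, err := client.Identify(context.Background(), face.IdentifyRequest{
//		FaceIds:       &faceIDs, // []uuid.UUID from Detect, at most 10 per request
//		PersonGroupID: &groupID,
//	})
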
// IdentifyPreparer prepares the Identify request.
func (client Client) IdentifyPreparer(ctx context.Context, body IdentifyRequest) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/identify"),
autorest.WithJSON(body))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// IdentifySender sends the Identify request. The method will close the
// http.Response Body if it receives an error.
func (client Client) IdentifySender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// IdentifyResponder handles the response to the Identify request. The method always
// closes the http.Response Body.
func (client Client) IdentifyResponder(resp *http.Response) (result ListIdentifyResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result.Value),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// VerifyFaceToFace verifies whether two faces belong to the same person or whether one face belongs to a person.
//
// Remarks:
// * Higher face image quality means better identification precision. Please use high-quality faces: frontal, clear,
// and with a face size of 200x200 pixels (100 pixels between eyes) or bigger.
// * For scenarios that are sensitive to accuracy, please make your own judgment.
// * The 'recognitionModel' associated with the query faces' faceIds should be the same as the 'recognitionModel' used
// by the target face, person group or large person group.
// Parameters:
// body - request body for face to face verification.
func (client Client) VerifyFaceToFace(ctx context.Context, body VerifyFaceToFaceRequest) (result VerifyResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.VerifyFaceToFace")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.FaceID1", Name: validation.Null, Rule: true, Chain: nil},
{Target: "body.FaceID2", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.Client", "VerifyFaceToFace", err.Error())
}
req, err := client.VerifyFaceToFacePreparer(ctx, body)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToFace", nil, "Failure preparing request")
return
}
resp, err := client.VerifyFaceToFaceSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToFace", resp, "Failure sending request")
return
}
result, err = client.VerifyFaceToFaceResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToFace", resp, "Failure responding to request")
return
}
return
}
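
// A hedged usage sketch for VerifyFaceToFace (not generated code, written from
// a consumer package with a configured client): compare two faceIds obtained
// from prior Detect calls. The request field names follow the validation rules
// above; the IsIdentical and Confidence result fields are assumed from the
// VerifyResult model.
//
//	res, err := client.VerifyFaceToFace(context.Background(),
//		face.VerifyFaceToFaceRequest{FaceID1: &id1, FaceID2: &id2})
//	if err != nil {
//		log.Fatal(err)
//	}
//	if res.IsIdentical != nil && *res.IsIdentical {
//		log.Printf("same person (confidence %v)", *res.Confidence)
//	}
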
// VerifyFaceToFacePreparer prepares the VerifyFaceToFace request.
func (client Client) VerifyFaceToFacePreparer(ctx context.Context, body VerifyFaceToFaceRequest) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/verify"),
autorest.WithJSON(body))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// VerifyFaceToFaceSender sends the VerifyFaceToFace request. The method will close the
// http.Response Body if it receives an error.
func (client Client) VerifyFaceToFaceSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// VerifyFaceToFaceResponder handles the response to the VerifyFaceToFace request. The method always
// closes the http.Response Body.
func (client Client) VerifyFaceToFaceResponder(resp *http.Response) (result VerifyResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// VerifyFaceToPerson verifies whether one face belongs to a person by comparing a faceId with a personId.
// Parameters:
// body - request body for face to person verification.
func (client Client) VerifyFaceToPerson(ctx context.Context, body VerifyFaceToPersonRequest) (result VerifyResult, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/Client.VerifyFaceToPerson")
defer func() {
sc := -1
if result.Response.Response != nil {
sc = result.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.FaceID", Name: validation.Null, Rule: true, Chain: nil},
{Target: "body.PersonGroupID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.PersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "body.PersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
}},
{Target: "body.LargePersonGroupID", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "body.LargePersonGroupID", Name: validation.MaxLength, Rule: 64, Chain: nil},
{Target: "body.LargePersonGroupID", Name: validation.Pattern, Rule: `^[a-z0-9-_]+$`, Chain: nil},
}},
{Target: "body.PersonID", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
return result, validation.NewError("face.Client", "VerifyFaceToPerson", err.Error())
}
req, err := client.VerifyFaceToPersonPreparer(ctx, body)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToPerson", nil, "Failure preparing request")
return
}
resp, err := client.VerifyFaceToPersonSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToPerson", resp, "Failure sending request")
return
}
result, err = client.VerifyFaceToPersonResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "face.Client", "VerifyFaceToPerson", resp, "Failure responding to request")
return
}
return
}
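
// A hedged usage sketch for VerifyFaceToPerson (not generated code, written
// from a consumer package with a configured client): check whether a detected
// face matches a known person in a person group. The ids are placeholders;
// field names follow the VerifyFaceToPersonRequest model referenced by the
// validation rules above.
//
//	groupID := "employees"
//	res, err := client.VerifyFaceToPerson(context.Background(), face.VerifyFaceToPersonRequest{
//		FaceID:        &faceID,   // *uuid.UUID from a prior Detect call
//		PersonGroupID: &groupID,
//		PersonID:      &personID, // *uuid.UUID of the target person
//	})
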
// VerifyFaceToPersonPreparer prepares the VerifyFaceToPerson request.
func (client Client) VerifyFaceToPersonPreparer(ctx context.Context, body VerifyFaceToPersonRequest) (*http.Request, error) {
urlParameters := map[string]interface{}{
"Endpoint": client.Endpoint,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithCustomBaseURL("{Endpoint}/face/v1.0", urlParameters),
autorest.WithPath("/verify"),
autorest.WithJSON(body))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// VerifyFaceToPersonSender sends the VerifyFaceToPerson request. The method will close the
// http.Response Body if it receives an error.
func (client Client) VerifyFaceToPersonSender(req *http.Request) (*http.Response, error) {
return client.Send(req, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
}
// VerifyFaceToPersonResponder handles the response to the VerifyFaceToPerson request. The method always
// closes the http.Response Body.
func (client Client) VerifyFaceToPersonResponder(resp *http.Response) (result VerifyResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}