POST /evaluation/metrics/alignment
# To use the Python SDK, install the plugin:
# pip install --upgrade pinecone pinecone-plugin-assistant
# pip install requests

import os

import requests
from pinecone import Pinecone
from pinecone_plugins.assistant.models.chat import Message

pc = Pinecone(api_key=os.environ["PINECONE_API_KEY"])

# Target an existing assistant; replace "example-assistant" with your assistant's name.
assistant = pc.assistant.Assistant(assistant_name="example-assistant")

# Each entry pairs a question with its ground truth answer.
qa_data = [
    {
        "question": "What are the capital cities of France, England and Spain?",
        "ground_truth_answer": "Paris is the capital city of France, London of England and Madrid of Spain"
    }
]

for qa in qa_data:
    chat_context = [Message(role="user", content=qa["question"])]
    response = assistant.chat(messages=chat_context)

    answer = response.message.content  # The answer from the Assistant - see https://docs.pinecone.io/guides/assistant/chat-with-assistant

    eval_data = {
        "question": qa["question"],
        "answer": answer,
        "ground_truth_answer": qa["ground_truth_answer"]
    }

    response = requests.post(
        "https://prod-1-data.ke.pinecone.io/assistant/evaluation/metrics/alignment",
        headers={
            "Api-Key": os.environ["PINECONE_API_KEY"],
            "Content-Type": "application/json"
        },
        json=eval_data
    )

    print(response.text)
{
  "metrics": {
    "correctness": 1.0,
    "completeness": 1.0,
    "alignment": 1.0
  },
  "reasoning": {
    "evaluated_facts": [
      {
        "fact": {
          "content": "<string>"
        },
        "entailment": "entailed"
      }
    ]
  },
  "usage": {
    "prompt_tokens": 123,
    "completion_tokens": 123,
    "total_tokens": 123
  }
}

Authorizations

Api-Key (string, header, required)
An API Key is required to call Pinecone APIs. Get yours from the console.

Body (application/json)

The request body for the alignment evaluation.

question (string, required)
The question for which the answer was generated.

answer (string, required)
The generated answer.

ground_truth_answer (string, required)
The ground truth answer to the question.
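
For illustration, a request body containing all three required fields might look like the following; the answer value here is a made-up placeholder, whereas in the code example above it comes from the Assistant's chat response:

{
  "question": "What are the capital cities of France, England and Spain?",
  "answer": "The capital of France is Paris, the capital of England is London, and the capital of Spain is Madrid.",
  "ground_truth_answer": "Paris is the capital city of France, London of England and Madrid of Spain"
}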

Response (200, application/json)

The evaluation metrics and reasoning for the generated answer.

metrics (object, required)
The metrics returned for the alignment evaluation.

reasoning (object, required)
The reasoning behind the alignment evaluation.

usage (object, required)
Token counts for the input prompt and completion.
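
As a minimal sketch of how these fields can be read from the 200 response shown above, assuming response is the requests response returned by the alignment call in the Python example:

result = response.json()

# Scores computed for the generated answer against the ground truth answer.
metrics = result["metrics"]
print(metrics["correctness"], metrics["completeness"], metrics["alignment"])

# Per-fact entailment judgments behind the scores.
for item in result["reasoning"]["evaluated_facts"]:
    print(item["fact"]["content"], "->", item["entailment"])

# Token usage for the evaluation request.
print("total tokens:", result["usage"]["total_tokens"])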