Skip to content

Instantly share code, notes, and snippets.

@LucioD3v
Last active September 24, 2025 20:43
Show Gist options
  • Select an option

  • Save LucioD3v/dae03b3ef42f19125cfa073097a2f8c3 to your computer and use it in GitHub Desktop.

Select an option

Save LucioD3v/dae03b3ef42f19125cfa073097a2f8c3 to your computer and use it in GitHub Desktop.
Código para integrar Amazon Nova Lite en una Alexa Skill
import json
import boto3

# Module-level Bedrock runtime client: created once per Lambda container
# and reused across warm invocations.
bedrock = boto3.client('bedrock-runtime')
def lambda_handler(event, context):
    """Alexa Skill entry point: route the GenerarTextoNova intent to Bedrock.

    Args:
        event: Alexa request envelope (dict).
        context: Lambda context object (unused).

    Returns:
        An Alexa v1.0 response dict with PlainText speech and the session
        flagged to end.
    """
    try:
        intent = event['request'].get('intent', {})
        if intent.get('name') == 'GenerarTextoNova':
            # The slot may have no value when Alexa failed to capture one;
            # guard with .get so we answer gracefully instead of raising
            # a KeyError into the generic error path.
            prompt = intent.get('slots', {}).get('consulta', {}).get('value')
            if prompt:
                speech_text = invoke_nova_micro(prompt)
            else:
                speech_text = "Lo siento, no entiendo esa solicitud."
        else:
            speech_text = "Lo siento, no entiendo esa solicitud."
        return _build_response(speech_text)
    except Exception as e:
        # Top-level boundary: log and return a safe message so the skill
        # never surfaces a raw Lambda failure to the user.
        print(f"Error: {e}")
        return _build_response("Lo siento, hubo un problema al procesar tu solicitud.")


def _build_response(speech_text):
    """Build an Alexa v1.0 PlainText response that ends the session."""
    return {
        'version': '1.0',
        'response': {
            'outputSpeech': {
                'type': 'PlainText',
                'text': speech_text
            },
            'shouldEndSession': True
        }
    }
def invoke_nova_micro(prompt):
    """Invoke the configured Bedrock text model and return the generated text.

    Args:
        prompt: User utterance sent as the model's input text.

    Returns:
        The generated output text (str) from the first result.
    """
    model_id = 'amazon.titan-text-lite-v1'  # Replace with the exact Nova model ID if different
    body = json.dumps({
        "inputText": prompt,
        "textGenerationConfig": {
            # Bug fix: the Titan text API expects "maxTokenCount", not
            # "maxTokens" — the wrong key is rejected with a
            # ValidationException at invoke time.
            "maxTokenCount": 200,  # Adjust to your needs
            "temperature": 0.7,    # Controls randomness
            "topP": 0.9
        }
    })
    response = bedrock.invoke_model(
        body=body,
        modelId=model_id,
        accept='application/json',
        contentType='application/json'
    )
    response_body = json.loads(response.get('body').read())
    return response_body.get('results')[0].get('outputText')
import json
import boto3

# Module-level Bedrock runtime client pinned to us-east-1 (where the
# Titan/Nova models are available); created once per Lambda container.
bedrock = boto3.client('bedrock-runtime', region_name="us-east-1")
def lambda_handler(event, context):
    """Handle an Alexa request: answer GenerarTextoNova via Bedrock Titan.

    Returns an Alexa v1.0 response dict; any failure is logged and turned
    into a generic Spanish error message instead of propagating.
    """
    def alexa_reply(text):
        # Alexa v1.0 envelope: plain-text speech, session closed.
        return {
            'version': '1.0',
            'response': {
                'outputSpeech': {'type': 'PlainText', 'text': text},
                'shouldEndSession': True,
            },
        }

    try:
        request = event['request']
        if request['intent']['name'] == 'GenerarTextoNova':
            consulta = request['intent']['slots']['consulta']['value']
            speech = invoke_titan_text(consulta)
        else:
            speech = "Lo siento, no entiendo esa solicitud."
        return alexa_reply(speech)
    except Exception as e:
        print(f"Error: {e}")
        return alexa_reply("Lo siento, hubo un problema al procesar tu solicitud.")
def invoke_titan_text(prompt):
    """Send *prompt* to the Titan Text Lite model and return its reply.

    The generated text is prefixed with "Nova Lite responde: " before
    being returned to the caller.
    """
    payload = {
        "inputText": prompt,
        "textGenerationConfig": {
            "maxTokenCount": 200,  # correct parameter name for Titan text models
            "temperature": 0.7,
            "topP": 0.9,
        },
    }
    result = bedrock.invoke_model(
        body=json.dumps(payload),
        modelId='amazon.titan-text-lite-v1',
        accept='application/json',
        contentType='application/json',
    )
    parsed = json.loads(result['body'].read())
    return "Nova Lite responde: " + parsed['results'][0]['outputText']
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment