diff --git a/tuts/090-amazon-comprehend-gs/README.md b/tuts/090-amazon-comprehend-gs/README.md
new file mode 100644
index 0000000..6e7a0e4
--- /dev/null
+++ b/tuts/090-amazon-comprehend-gs/README.md
@@ -0,0 +1,59 @@
+# Comprehend: Detect sentiment, entities, and key phrases
+
+Analyze text using Amazon Comprehend's real-time APIs for language detection, sentiment analysis, entity recognition, key phrase extraction, and PII detection.
+
+## Source
+
+https://docs.aws.amazon.com/comprehend/latest/dg/get-started-api.html
+
+## Use case
+
+- ID: comprehend/getting-started
+- Phase: create
+- Complexity: beginner
+- Core actions: comprehend:DetectSentiment, comprehend:DetectEntities, comprehend:DetectKeyPhrases
+
+## What it does
+
+1. Detects the dominant language of sample text
+2. Analyzes sentiment (positive, negative, neutral, mixed)
+3. Extracts named entities (people, organizations, dates)
+4. Extracts key phrases
+5. Detects PII entities (names, emails, phone numbers)
+
+## Running
+
+```bash
+bash amazon-comprehend-gs.sh
+```
+
+## Resources created
+
+None. Comprehend is a stateless API.
+
+## Estimated time
+
+- Run: ~5 seconds
+
+## Cost
+
+Comprehend pricing is per unit (100 characters). This tutorial analyzes ~500 characters, costing less than $0.01.
+
+## Related docs
+
+- [Real-time analysis with Amazon Comprehend](https://docs.aws.amazon.com/comprehend/latest/dg/get-started-api.html)
+- [Sentiment analysis](https://docs.aws.amazon.com/comprehend/latest/dg/how-sentiment.html)
+- [Entity recognition](https://docs.aws.amazon.com/comprehend/latest/dg/how-entities.html)
+- [PII detection](https://docs.aws.amazon.com/comprehend/latest/dg/how-pii.html)
+
+---
+
+## Appendix: Generation details
+
+| Field | Value |
+|-------|-------|
+| Generation date | 2026-04-14 |
+| Source script | New, 57 lines |
+| Script test result | EXIT 0, 5s, 5 steps, stateless API |
+| Issues encountered | None — stateless API, no resource management needed |
+| Iterations | v1 (direct to publish) |
diff --git a/tuts/090-amazon-comprehend-gs/amazon-comprehend-gs.md b/tuts/090-amazon-comprehend-gs/amazon-comprehend-gs.md
new file mode 100644
index 0000000..20763e5
--- /dev/null
+++ b/tuts/090-amazon-comprehend-gs/amazon-comprehend-gs.md
@@ -0,0 +1,57 @@
+# Detect sentiment, entities, and key phrases with Amazon Comprehend
+
+This tutorial shows you how to use the Amazon Comprehend real-time analysis APIs to detect the dominant language, sentiment, entities, key phrases, and PII in text.
+
+## Prerequisites
+
+- AWS CLI configured with credentials and a default region
+- Permissions to call Amazon Comprehend APIs
+
+## Step 1: Detect the dominant language
+
+```bash
+aws comprehend detect-dominant-language --text "Your text here" \
+ --query 'Languages[0].{Language:LanguageCode,Confidence:Score}' --output table
+```
+
+## Step 2: Detect sentiment
+
+```bash
+aws comprehend detect-sentiment --text "Your text here" --language-code en \
+ --query '{Sentiment:Sentiment,Positive:SentimentScore.Positive,Negative:SentimentScore.Negative}' --output table
+```
+
+## Step 3: Detect entities
+
+Identifies people, places, organizations, dates, and other entity types.
+
+```bash
+aws comprehend detect-entities --text "Your text here" --language-code en \
+ --query 'Entities[].{Text:Text,Type:Type,Score:Score}' --output table
+```
+
+## Step 4: Detect key phrases
+
+```bash
+aws comprehend detect-key-phrases --text "Your text here" --language-code en \
+ --query 'KeyPhrases[].{Text:Text,Score:Score}' --output table
+```
+
+## Step 5: Detect PII entities
+
+Identifies personally identifiable information such as names, email addresses, phone numbers, and account numbers.
+
+```bash
+aws comprehend detect-pii-entities --text "Contact Jane at jane@example.com" --language-code en \
+ --query 'Entities[].{Type:Type,Score:Score}' --output table
+```
+
+## Cleanup
+
+No cleanup needed. Comprehend is a stateless API — no resources are created.
+
+The script automates all steps:
+
+```bash
+bash amazon-comprehend-gs.sh
+```
diff --git a/tuts/090-amazon-comprehend-gs/amazon-comprehend-gs.sh b/tuts/090-amazon-comprehend-gs/amazon-comprehend-gs.sh
new file mode 100644
index 0000000..6e61ee0
--- /dev/null
+++ b/tuts/090-amazon-comprehend-gs/amazon-comprehend-gs.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+# Tutorial: Detect sentiment, entities, and key phrases with Amazon Comprehend
+# Source: https://docs.aws.amazon.com/comprehend/latest/dg/get-started-api.html
+
+WORK_DIR=$(mktemp -d)  # scratch dir that holds only the session log
+LOG_FILE="$WORK_DIR/comprehend-$(date +%Y%m%d-%H%M%S).log"
+exec > >(tee -a "$LOG_FILE") 2>&1  # mirror all stdout/stderr to the log
+
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}  # env vars take precedence over CLI config
+if [ -z "$REGION" ]; then
+  echo "ERROR: No AWS region configured. Set one with: export AWS_DEFAULT_REGION=us-east-1"
+  exit 1
+fi
+export AWS_DEFAULT_REGION="$REGION"  # pin the region for every aws call below
+echo "Region: $REGION"
+
+TEXT="Amazon Comprehend is a natural language processing service that uses machine learning to find insights and relationships in text. The service can identify the language of the text, extract key phrases, places, people, brands, or events, and understand how positive or negative the text is."
+
+echo ""
+echo "Sample text:"
+echo " $TEXT"
+echo ""
+
+# Step 1: Detect dominant language (no --language-code here; Comprehend infers it)
+echo "Step 1: Detecting dominant language"
+aws comprehend detect-dominant-language --text "$TEXT" \
+  --query 'Languages[0].{Language:LanguageCode,Confidence:Score}' --output table
+
+# Step 2: Detect sentiment (requires an explicit --language-code)
+echo ""
+echo "Step 2: Detecting sentiment"
+aws comprehend detect-sentiment --text "$TEXT" --language-code en \
+  --query '{Sentiment:Sentiment,Positive:SentimentScore.Positive,Negative:SentimentScore.Negative,Neutral:SentimentScore.Neutral}' --output table
+
+# Step 3: Detect entities (people, places, organizations, dates, ...)
+echo ""
+echo "Step 3: Detecting entities"
+aws comprehend detect-entities --text "$TEXT" --language-code en \
+  --query 'Entities[].{Text:Text,Type:Type,Score:Score}' --output table
+
+# Step 4: Detect key phrases
+echo ""
+echo "Step 4: Detecting key phrases"
+aws comprehend detect-key-phrases --text "$TEXT" --language-code en \
+  --query 'KeyPhrases[].{Text:Text,Score:Score}' --output table
+
+# Step 5: Detect PII entities (separate sample text that actually contains PII)
+echo ""
+echo "Step 5: Detecting PII entities"
+PII_TEXT="Please contact Jane Smith at jane.smith@example.com or call 555-0123. Her account number is 1234567890."
+echo " PII sample: $PII_TEXT"
+aws comprehend detect-pii-entities --text "$PII_TEXT" --language-code en \
+  --query 'Entities[].{Type:Type,Score:Score}' --output table  # Type/Score only — avoids echoing the detected PII values again
+
+echo ""
+echo "Tutorial complete. No resources were created — Comprehend is a stateless API."
+rm -rf "$WORK_DIR"  # NOTE(review): this also deletes the log written above — copy it first if you need it
diff --git a/tuts/091-amazon-translate-gs/README.md b/tuts/091-amazon-translate-gs/README.md
new file mode 100644
index 0000000..4df39f1
--- /dev/null
+++ b/tuts/091-amazon-translate-gs/README.md
@@ -0,0 +1,59 @@
+# Translate: Translate text between languages
+
+Translate text between languages using Amazon Translate, with auto-detection of the source language.
+
+## Source
+
+https://docs.aws.amazon.com/translate/latest/dg/get-started.html
+
+## Use case
+
+- ID: translate/getting-started
+- Phase: create
+- Complexity: beginner
+- Core actions: translate:TranslateText, translate:ListLanguages
+
+## What it does
+
+1. Translates English text to Spanish
+2. Translates English text to French
+3. Translates English text to Japanese
+4. Auto-detects source language (German → English)
+5. Lists supported languages
+
+## Running
+
+```bash
+bash amazon-translate-gs.sh
+```
+
+## Resources created
+
+None. Translate is a stateless API.
+
+## Estimated time
+
+- Run: ~5 seconds
+
+## Cost
+
+Translate pricing is per character. This tutorial translates ~600 characters, costing less than $0.01.
+
+## Related docs
+
+- [Getting started with Amazon Translate](https://docs.aws.amazon.com/translate/latest/dg/get-started.html)
+- [Translating text using the API](https://docs.aws.amazon.com/translate/latest/dg/get-started-api.html)
+- [Supported languages](https://docs.aws.amazon.com/translate/latest/dg/what-is-languages.html)
+- [Automatic source language detection](https://docs.aws.amazon.com/translate/latest/dg/auto-detect.html)
+
+---
+
+## Appendix: Generation details
+
+| Field | Value |
+|-------|-------|
+| Generation date | 2026-04-14 |
+| Source script | New, 60 lines |
+| Script test result | EXIT 0, 5s, 5 steps, stateless API |
+| Issues encountered | None — stateless API, no resource management needed |
+| Iterations | v1 (direct to publish) |
diff --git a/tuts/091-amazon-translate-gs/amazon-translate-gs.md b/tuts/091-amazon-translate-gs/amazon-translate-gs.md
new file mode 100644
index 0000000..63dce28
--- /dev/null
+++ b/tuts/091-amazon-translate-gs/amazon-translate-gs.md
@@ -0,0 +1,63 @@
+# Translate text between languages with Amazon Translate
+
+This tutorial shows you how to use Amazon Translate to translate text between languages, auto-detect the source language, and list supported languages.
+
+## Prerequisites
+
+- AWS CLI configured with credentials and a default region
+- Permissions to call Amazon Translate APIs
+
+## Step 1: Translate English to Spanish
+
+```bash
+aws translate translate-text \
+ --text "Your text here" \
+ --source-language-code en --target-language-code es \
+ --query 'TranslatedText' --output text
+```
+
+## Step 2: Translate English to French
+
+```bash
+aws translate translate-text \
+ --text "Your text here" \
+ --source-language-code en --target-language-code fr \
+ --query 'TranslatedText' --output text
+```
+
+## Step 3: Translate English to Japanese
+
+```bash
+aws translate translate-text \
+ --text "Your text here" \
+ --source-language-code en --target-language-code ja \
+ --query 'TranslatedText' --output text
+```
+
+## Step 4: Auto-detect source language
+
+Use `auto` as the source language code to let Translate detect the language:
+
+```bash
+aws translate translate-text \
+ --text "Amazon Translate ist ein neuronaler maschineller Übersetzungsdienst." \
+ --source-language-code auto --target-language-code en \
+ --query '{Translation:TranslatedText,DetectedLanguage:SourceLanguageCode}' --output table
+```
+
+## Step 5: List supported languages
+
+```bash
+aws translate list-languages \
+ --query 'Languages[:10].{Name:LanguageName,Code:LanguageCode}' --output table
+```
+
+## Cleanup
+
+No cleanup needed. Translate is a stateless API — no resources are created.
+
+The script automates all steps:
+
+```bash
+bash amazon-translate-gs.sh
+```
diff --git a/tuts/091-amazon-translate-gs/amazon-translate-gs.sh b/tuts/091-amazon-translate-gs/amazon-translate-gs.sh
new file mode 100644
index 0000000..600da5b
--- /dev/null
+++ b/tuts/091-amazon-translate-gs/amazon-translate-gs.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Tutorial: Translate text between languages with Amazon Translate
+# Source: https://docs.aws.amazon.com/translate/latest/dg/get-started.html
+
+WORK_DIR=$(mktemp -d)  # scratch dir that holds only the session log
+LOG_FILE="$WORK_DIR/translate-$(date +%Y%m%d-%H%M%S).log"
+exec > >(tee -a "$LOG_FILE") 2>&1  # mirror all stdout/stderr to the log
+
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}  # env vars take precedence over CLI config
+if [ -z "$REGION" ]; then
+  echo "ERROR: No AWS region configured. Set one with: export AWS_DEFAULT_REGION=us-east-1"
+  exit 1
+fi
+export AWS_DEFAULT_REGION="$REGION"  # pin the region for every aws call below
+echo "Region: $REGION"
+
+TEXT="Amazon Translate is a neural machine translation service that delivers fast, high-quality, affordable, and customizable language translation."
+
+echo ""
+echo "Source text (English):"
+echo " $TEXT"
+echo ""
+
+# Step 1: Translate English to Spanish
+echo "Step 1: English → Spanish"
+aws translate translate-text --text "$TEXT" \
+  --source-language-code en --target-language-code es \
+  --query 'TranslatedText' --output text
+echo ""
+
+# Step 2: Translate English to French
+echo "Step 2: English → French"
+aws translate translate-text --text "$TEXT" \
+  --source-language-code en --target-language-code fr \
+  --query 'TranslatedText' --output text
+echo ""
+
+# Step 3: Translate English to Japanese
+echo "Step 3: English → Japanese"
+aws translate translate-text --text "$TEXT" \
+  --source-language-code en --target-language-code ja \
+  --query 'TranslatedText' --output text
+echo ""
+
+# Step 4: Auto-detect source language (source code `auto`; response reports the detected code)
+echo "Step 4: Auto-detect source language (German input)"
+GERMAN="Amazon Translate ist ein neuronaler maschineller Übersetzungsdienst."
+echo " Input: $GERMAN"
+aws translate translate-text --text "$GERMAN" \
+  --source-language-code auto --target-language-code en \
+  --query '{Translation:TranslatedText,DetectedLanguage:SourceLanguageCode}' --output table
+echo ""
+
+# Step 5: List supported languages (slice to the first 10 for readable output)
+echo "Step 5: Listing supported languages (first 10)"
+aws translate list-languages --query 'Languages[:10].{Name:LanguageName,Code:LanguageCode}' --output table
+
+echo ""
+echo "Tutorial complete. No resources were created — Translate is a stateless API."
+rm -rf "$WORK_DIR"  # NOTE(review): this also deletes the log written above — copy it first if you need it
diff --git a/tuts/103-amazon-textract-gs/README.md b/tuts/103-amazon-textract-gs/README.md
new file mode 100644
index 0000000..415a872
--- /dev/null
+++ b/tuts/103-amazon-textract-gs/README.md
@@ -0,0 +1,54 @@
+# Textract: Extract text from documents
+
+## Source
+
+https://docs.aws.amazon.com/textract/latest/dg/getting-started.html
+
+## Use case
+
+- **ID**: textract/getting-started
+- **Level**: beginner
+- **Core actions**: `textract:DetectDocumentText`, `textract:AnalyzeDocument`
+
+## Steps
+
+1. Create a sample PNG image
+2. Upload the document to S3
+3. Detect text in the document
+4. Analyze document for forms and tables
+5. Detect text from local file bytes
+
+## Resources created
+
+| Resource | Type |
+|----------|------|
+| S3 bucket | `AWS::S3::Bucket` |
+
+## Duration
+
+~10 seconds
+
+## Cost
+
+Textract charges per page analyzed. `DetectDocumentText` costs $1.50 per 1,000 pages; `AnalyzeDocument` costs $50 per 1,000 pages for forms and $15 per 1,000 pages for tables. This tutorial analyzes one page, costing less than $0.01. The S3 bucket is deleted during cleanup.
+
+## Related docs
+
+- [Getting started with Amazon Textract](https://docs.aws.amazon.com/textract/latest/dg/getting-started.html)
+- [Detecting text](https://docs.aws.amazon.com/textract/latest/dg/detecting-document-text.html)
+- [Analyzing documents](https://docs.aws.amazon.com/textract/latest/dg/analyzing-document-text.html)
+- [Amazon Textract quotas](https://docs.aws.amazon.com/textract/latest/dg/limits.html)
+
+---
+
+## Appendix
+
+| Field | Value |
+|-------|-------|
+| Date | 2026-04-14 |
+| Script lines | 100 |
+| Exit code | 0 |
+| Runtime | 10s |
+| Steps | 5 |
+| Issues | Fixed duplicate python block |
+| Version | v1 |
diff --git a/tuts/103-amazon-textract-gs/amazon-textract-gs.md b/tuts/103-amazon-textract-gs/amazon-textract-gs.md
new file mode 100644
index 0000000..bf0d11a
--- /dev/null
+++ b/tuts/103-amazon-textract-gs/amazon-textract-gs.md
@@ -0,0 +1,109 @@
+# Extract text from documents with Amazon Textract
+
+This tutorial shows you how to upload a document image to Amazon S3, use Amazon Textract to detect text and analyze the document for forms and tables, and detect text directly from local file bytes.
+
+## Prerequisites
+
+- AWS CLI configured with credentials and a default region
+- Python 3 installed (used to generate a sample PNG image)
+- Permissions for `s3:CreateBucket`, `s3:PutObject`, `s3:DeleteObject`, `s3:DeleteBucket`, `textract:DetectDocumentText`, and `textract:AnalyzeDocument`
+
+## Step 1: Create a sample document image
+
+Generate a minimal PNG image to use as a test document. In practice, you would use a scanned document or photograph containing text.
+
+```bash
+WORK_DIR=$(mktemp -d)
+
+python3 -c "
+import struct, zlib
+w,h=200,50
+row=b'\x00'+b'\xff\xff\xff'*w
+raw=row*h
+comp=zlib.compress(raw)
+def ch(t,d):
+ c=t+d
+ return struct.pack('>I',len(d))+c+struct.pack('>I',zlib.crc32(c)&0xffffffff)
+with open('$WORK_DIR/sample.png','wb') as f:
+ f.write(b'\x89PNG\r\n\x1a\n')
+ f.write(ch(b'IHDR',struct.pack('>IIBBBBB',w,h,8,2,0,0,0)))
+ f.write(ch(b'IDAT',comp))
+ f.write(ch(b'IEND',b''))
+"
+echo "Created sample.png (200x50 white image)"
+```
+
+This creates a blank white PNG. Textract won't find text in it, but it demonstrates the API calls. Replace it with a real document to see text extraction in action.
+
+## Step 2: Upload the document to S3
+
+Create an S3 bucket and upload the sample image.
+
+```bash
+ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
+RANDOM_ID=$(openssl rand -hex 4)
+BUCKET_NAME="textract-tut-${RANDOM_ID}-${ACCOUNT_ID}"
+
+aws s3api create-bucket --bucket "$BUCKET_NAME" \
+ --create-bucket-configuration LocationConstraint="$AWS_DEFAULT_REGION"
+aws s3 cp "$WORK_DIR/sample.png" "s3://$BUCKET_NAME/sample.png" --quiet
+echo "Uploaded to s3://$BUCKET_NAME/sample.png"
+```
+
+Textract reads documents directly from S3. For `us-east-1`, omit the `--create-bucket-configuration` parameter.
+
+## Step 3: Detect text in the document
+
+```bash
+aws textract detect-document-text \
+ --document '{"S3Object":{"Bucket":"'"$BUCKET_NAME"'","Name":"sample.png"}}' \
+ --query 'Blocks[?BlockType==`LINE`].{Text:Text,Confidence:Confidence}' --output table
+```
+
+`detect-document-text` returns `LINE` and `WORD` blocks. Each block includes the detected text and a confidence score. With the blank sample image, no text lines are returned.
+
+## Step 4: Analyze document for forms and tables
+
+```bash
+aws textract analyze-document \
+ --document '{"S3Object":{"Bucket":"'"$BUCKET_NAME"'","Name":"sample.png"}}' \
+ --feature-types '["FORMS","TABLES"]' \
+ --query '{Pages:DocumentMetadata.Pages,Blocks:Blocks|length(@)}' --output table
+```
+
+`analyze-document` goes beyond text detection. With `FORMS`, it identifies key-value pairs (like form fields). With `TABLES`, it identifies rows and columns. You can request both features in a single call.
+
+## Step 5: Detect text from local file bytes
+
+Send the document directly as base64-encoded bytes instead of referencing S3.
+
+```bash
+aws textract detect-document-text \
+  --document '{"Bytes":"'"$(base64 < "$WORK_DIR/sample.png" | tr -d '\n')"'"}' \
+ --query '{Pages:DocumentMetadata.Pages,BlockCount:Blocks|length(@)}' --output table
+```
+
+The `Bytes` option is useful for quick tests or when you don't want to upload to S3 first. The document size limit for synchronous operations is 10 MB.
+
+## Cleanup
+
+Delete the S3 bucket and its contents, then remove the temporary directory.
+
+```bash
+aws s3 rm "s3://$BUCKET_NAME" --recursive --quiet
+aws s3 rb "s3://$BUCKET_NAME"
+rm -rf "$WORK_DIR"
+```
+
+The script automates all steps including cleanup:
+
+```bash
+bash amazon-textract-gs.sh
+```
+
+## Related resources
+
+- [Getting started with Amazon Textract](https://docs.aws.amazon.com/textract/latest/dg/getting-started.html)
+- [Detecting text](https://docs.aws.amazon.com/textract/latest/dg/detecting-document-text.html)
+- [Analyzing documents](https://docs.aws.amazon.com/textract/latest/dg/analyzing-document-text.html)
+- [Amazon Textract quotas](https://docs.aws.amazon.com/textract/latest/dg/limits.html)
diff --git a/tuts/103-amazon-textract-gs/amazon-textract-gs.sh b/tuts/103-amazon-textract-gs/amazon-textract-gs.sh
new file mode 100644
index 0000000..65a5526
--- /dev/null
+++ b/tuts/103-amazon-textract-gs/amazon-textract-gs.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+# Tutorial: Extract text from documents with Amazon Textract
+# Source: https://docs.aws.amazon.com/textract/latest/dg/getting-started.html
+
+WORK_DIR=$(mktemp -d)  # holds the generated sample image and the session log
+LOG_FILE="$WORK_DIR/textract-$(date +%Y%m%d-%H%M%S).log"
+exec > >(tee -a "$LOG_FILE") 2>&1  # mirror all stdout/stderr to the log
+
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}  # env vars take precedence over CLI config
+if [ -z "$REGION" ]; then
+  echo "ERROR: No AWS region configured. Set one with: export AWS_DEFAULT_REGION=us-east-1"
+  exit 1
+fi
+export AWS_DEFAULT_REGION="$REGION"
+ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
+echo "Region: $REGION"
+
+RANDOM_ID=$(openssl rand -hex 4)
+BUCKET_NAME="textract-tut-${RANDOM_ID}-${ACCOUNT_ID}"  # random hex + account ID keeps the bucket name globally unique
+
+handle_error() { echo "ERROR on line $1"; trap - ERR; cleanup; exit 1; }  # disarm the trap first so cleanup failures can't recurse
+trap 'handle_error $LINENO' ERR  # any unguarded command failure triggers cleanup and a non-zero exit
+
+cleanup() {
+  echo ""
+  echo "Cleaning up resources..."
+  if aws s3 ls "s3://$BUCKET_NAME" > /dev/null 2>&1; then
+    aws s3 rm "s3://$BUCKET_NAME" --recursive --quiet 2>/dev/null
+    aws s3 rb "s3://$BUCKET_NAME" 2>/dev/null && echo " Deleted bucket $BUCKET_NAME"
+  fi
+  rm -rf "$WORK_DIR"
+  echo "Cleanup complete."
+}
+
+# Step 1: Create a sample document image (minimal valid PNG, no external dependencies)
+echo "Step 1: Creating a sample document image"
+python3 -c "
+import struct, zlib
+w,h=200,50
+row=b'\x00'+b'\xff\xff\xff'*w
+raw=row*h
+comp=zlib.compress(raw)
+def ch(t,d):
+ c=t+d
+ return struct.pack('>I',len(d))+c+struct.pack('>I',zlib.crc32(c)&0xffffffff)
+with open('$WORK_DIR/sample.png','wb') as f:
+ f.write(b'\x89PNG\r\n\x1a\n')
+ f.write(ch(b'IHDR',struct.pack('>IIBBBBB',w,h,8,2,0,0,0)))
+ f.write(ch(b'IDAT',comp))
+ f.write(ch(b'IEND',b''))
+"
+echo " Created sample.png (200x50 white image)"
+
+# Step 2: Create S3 bucket and upload (us-east-1 must not pass a LocationConstraint)
+echo "Step 2: Uploading document to S3"
+if [ "$REGION" = "us-east-1" ]; then
+  aws s3api create-bucket --bucket "$BUCKET_NAME" > /dev/null
+else
+  aws s3api create-bucket --bucket "$BUCKET_NAME" \
+    --create-bucket-configuration LocationConstraint="$REGION" > /dev/null
+fi
+aws s3 cp "$WORK_DIR/sample.png" "s3://$BUCKET_NAME/sample.png" --quiet
+echo " Uploaded to s3://$BUCKET_NAME/sample.png"
+
+# Step 3: Detect text in the document
+echo "Step 3: Detecting text in document"
+aws textract detect-document-text \
+  --document "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+  --query 'Blocks[?BlockType==`LINE`].{Text:Text,Confidence:Confidence}' --output table 2>/dev/null || \
+  echo " No text detected (expected — the sample image is blank)"
+
+# Step 4: Analyze document (forms and tables)
+echo "Step 4: Analyzing document for forms and tables"
+aws textract analyze-document \
+  --document "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+  --feature-types '["FORMS","TABLES"]' \
+  --query '{Pages:DocumentMetadata.Pages,Blocks:Blocks|length(@)}' --output table
+
+# Step 5: Detect text using bytes (base64 | tr is portable; GNU-only "base64 -w0" fails on macOS/BSD)
+echo "Step 5: Detecting text from local file (bytes)"
+aws textract detect-document-text \
+  --document "{\"Bytes\":\"$(base64 < "$WORK_DIR/sample.png" | tr -d '\n')\"}" \
+  --query '{Pages:DocumentMetadata.Pages,BlockCount:Blocks|length(@)}' --output table
+
+echo ""
+echo "Tutorial complete."
+echo "Note: The sample image is blank, so no text was detected."
+echo "Try with a real document image to see Textract extract text, forms, and tables."
+echo ""
+echo "Do you want to clean up all resources? (y/n): "
+read -r CHOICE || CHOICE=y  # EOF (non-interactive run) would fire the ERR trap; default to cleaning up instead
+if [[ "$CHOICE" =~ ^[Yy]$ ]]; then
+  cleanup
+else
+  echo "Manual cleanup:"
+  echo " aws s3 rm s3://$BUCKET_NAME --recursive && aws s3 rb s3://$BUCKET_NAME"
+fi
diff --git a/tuts/104-amazon-polly-gs/README.md b/tuts/104-amazon-polly-gs/README.md
new file mode 100644
index 0000000..476c734
--- /dev/null
+++ b/tuts/104-amazon-polly-gs/README.md
@@ -0,0 +1,53 @@
+# Polly: Synthesize speech from text
+
+## Source
+
+https://docs.aws.amazon.com/polly/latest/dg/getting-started-cli.html
+
+## Use case
+
+- **ID**: polly/getting-started
+- **Level**: beginner
+- **Core actions**: `polly:DescribeVoices`, `polly:SynthesizeSpeech`
+
+## Steps
+
+1. List available English voices
+2. Synthesize speech with the standard engine
+3. Synthesize speech with the neural engine
+4. Synthesize with SSML markup
+5. List available languages
+6. Synthesize in Spanish
+
+## Resources created
+
+None. Polly is a stateless API.
+
+## Cost
+
+Polly pricing is per character. This tutorial synthesizes ~300 characters, costing less than $0.01.
+
+## Duration
+
+~5 seconds
+
+## Related docs
+
+- [Getting started with Amazon Polly](https://docs.aws.amazon.com/polly/latest/dg/getting-started-cli.html)
+- [Voices in Amazon Polly](https://docs.aws.amazon.com/polly/latest/dg/voicelist.html)
+- [Using SSML](https://docs.aws.amazon.com/polly/latest/dg/ssml.html)
+- [Supported languages](https://docs.aws.amazon.com/polly/latest/dg/SupportedLanguage.html)
+
+---
+
+## Appendix
+
+| Field | Value |
+|-------|-------|
+| Date | 2026-04-14 |
+| Script lines | 68 |
+| Exit code | 0 |
+| Runtime | 5s |
+| Steps | 6 |
+| Issues | None |
+| Version | v1 |
diff --git a/tuts/104-amazon-polly-gs/amazon-polly-gs.md b/tuts/104-amazon-polly-gs/amazon-polly-gs.md
new file mode 100644
index 0000000..bbbcba0
--- /dev/null
+++ b/tuts/104-amazon-polly-gs/amazon-polly-gs.md
@@ -0,0 +1,107 @@
+# Synthesize speech from text with Amazon Polly
+
+## Overview
+
+In this tutorial, you use the AWS CLI to synthesize speech from text using Amazon Polly. You list available voices, generate audio with the standard and neural engines, use SSML markup to control speech, list supported languages, and synthesize speech in Spanish.
+
+## Prerequisites
+
+- AWS CLI installed and configured with appropriate permissions.
+- An IAM principal with permissions for `polly:DescribeVoices` and `polly:SynthesizeSpeech`.
+
+## Step 1: List available voices
+
+List English voices available in Amazon Polly.
+
+```bash
+aws polly describe-voices --language-code en-US \
+ --query 'Voices[:5].{Name:Name,Gender:Gender,Engine:SupportedEngines[0]}' --output table
+```
+
+Each voice supports one or more engines: `standard`, `neural`, or `generative`. Neural voices sound more natural but are available for fewer languages.
+
+## Step 2: Synthesize speech with the standard engine
+
+Generate an MP3 file from plain text using the Joanna voice.
+
+```bash
+aws polly synthesize-speech \
+ --text "Hello! This is Amazon Polly synthesizing speech from text." \
+ --output-format mp3 \
+ --voice-id Joanna \
+ standard.mp3 > /dev/null
+```
+
+The `synthesize-speech` command writes audio directly to the output file. Supported formats are `mp3`, `ogg_vorbis`, and `pcm`.
+
+## Step 3: Synthesize speech with the neural engine
+
+Use the `--engine neural` flag for more natural-sounding speech.
+
+```bash
+aws polly synthesize-speech \
+ --text "This is the neural engine. It sounds more natural and expressive." \
+ --output-format mp3 \
+ --voice-id Joanna \
+ --engine neural \
+ neural.mp3 > /dev/null
+```
+
+Neural voices use a different pricing tier than standard voices. Not all voices support the neural engine.
+
+## Step 4: Synthesize with SSML markup
+
+Use SSML to control emphasis, pauses, speech rate, and pitch.
+
+```bash
+aws polly synthesize-speech \
+ --text-type ssml \
+  --text '<speak>Welcome to <emphasis>Amazon Polly</emphasis>. <break time="300ms"/>You can control speech rate and pitch.</speak>' \
+ --output-format mp3 \
+ --voice-id Joanna \
+ ssml.mp3 > /dev/null
+```
+
+SSML tags supported by Polly include `<speak>`, `<break>`, `<emphasis>`, `<prosody>`, and `<say-as>`. Set `--text-type ssml` when using SSML input, and wrap the document in a single `<speak>` root element.
+
+## Step 5: List available languages
+
+Query the unique languages supported by Polly.
+
+```bash
+aws polly describe-voices --query 'Voices[].LanguageName' --output text \
+ | tr '\t' '\n' | sort -u | head -10
+```
+
+Polly supports dozens of languages and regional variants. Each language has one or more voices.
+
+## Step 6: Synthesize in Spanish
+
+Use a Spanish voice to synthesize text in another language.
+
+```bash
+aws polly synthesize-speech \
+ --text "Hola, esto es Amazon Polly hablando en español." \
+ --output-format mp3 \
+ --voice-id Lucia \
+ spanish.mp3 > /dev/null
+```
+
+Match the voice to the language of the text. Lucia is a Castilian Spanish voice.
+
+## Cleanup
+
+No cleanup needed. Polly is a stateless API — no AWS resources are created. Delete the local MP3 files when you no longer need them.
+
+The script automates all steps:
+
+```bash
+bash amazon-polly-gs.sh
+```
+
+## Related resources
+
+- [Getting started with Amazon Polly](https://docs.aws.amazon.com/polly/latest/dg/getting-started-cli.html)
+- [Voices in Amazon Polly](https://docs.aws.amazon.com/polly/latest/dg/voicelist.html)
+- [Using SSML](https://docs.aws.amazon.com/polly/latest/dg/ssml.html)
+- [Supported languages](https://docs.aws.amazon.com/polly/latest/dg/SupportedLanguage.html)
diff --git a/tuts/104-amazon-polly-gs/amazon-polly-gs.sh b/tuts/104-amazon-polly-gs/amazon-polly-gs.sh
new file mode 100644
index 0000000..8169ecf
--- /dev/null
+++ b/tuts/104-amazon-polly-gs/amazon-polly-gs.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+# Tutorial: Synthesize speech from text with Amazon Polly
+# Source: https://docs.aws.amazon.com/polly/latest/dg/getting-started-cli.html
+
+WORK_DIR=$(mktemp -d)  # audio files and the session log are written here, then copied/removed at the end
+LOG_FILE="$WORK_DIR/polly-$(date +%Y%m%d-%H%M%S).log"
+exec > >(tee -a "$LOG_FILE") 2>&1  # mirror all stdout/stderr to the log
+
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}  # env vars take precedence over CLI config
+if [ -z "$REGION" ]; then
+  echo "ERROR: No AWS region configured. Set one with: export AWS_DEFAULT_REGION=us-east-1"
+  exit 1
+fi
+export AWS_DEFAULT_REGION="$REGION"
+echo "Region: $REGION"
+
+# Step 1: List available voices
+echo "Step 1: Listing available English voices"
+aws polly describe-voices --language-code en-US \
+  --query 'Voices[:5].{Name:Name,Gender:Gender,Engine:SupportedEngines[0]}' --output table
+
+# Step 2: Synthesize speech (standard engine; audio goes to the positional output file, metadata JSON to stdout)
+echo "Step 2: Synthesizing speech with standard engine"
+aws polly synthesize-speech \
+  --text "Hello! This is Amazon Polly synthesizing speech from text." \
+  --output-format mp3 \
+  --voice-id Joanna \
+  "$WORK_DIR/standard.mp3" > /dev/null
+echo " Output: standard.mp3 ($(wc -c < "$WORK_DIR/standard.mp3") bytes)"
+
+# Step 3: Synthesize with neural engine
+echo "Step 3: Synthesizing speech with neural engine"
+aws polly synthesize-speech \
+  --text "This is the neural engine. It sounds more natural and expressive." \
+  --output-format mp3 \
+  --voice-id Joanna \
+  --engine neural \
+  "$WORK_DIR/neural.mp3" > /dev/null
+echo " Output: neural.mp3 ($(wc -c < "$WORK_DIR/neural.mp3") bytes)"
+
+# Step 4: Synthesize with SSML (SSML input must be wrapped in a <speak> root element)
+echo "Step 4: Synthesizing with SSML markup"
+aws polly synthesize-speech \
+  --text-type ssml \
+  --text '<speak>Welcome to <emphasis>Amazon Polly</emphasis>. <break time="300ms"/>You can control speech rate and pitch.</speak>' \
+  --output-format mp3 \
+  --voice-id Joanna \
+  "$WORK_DIR/ssml.mp3" > /dev/null
+echo " Output: ssml.mp3 ($(wc -c < "$WORK_DIR/ssml.mp3") bytes)"
+
+# Step 5: List available languages
+echo "Step 5: Available languages (first 10)"
+aws polly describe-voices --query 'Voices[].LanguageName' --output text | tr '\t' '\n' | sort -u | head -10
+
+# Step 6: Synthesize in another language (Lucia is a Castilian Spanish voice)
+echo "Step 6: Synthesizing in Spanish"
+aws polly synthesize-speech \
+  --text "Hola, esto es Amazon Polly hablando en español." \
+  --output-format mp3 \
+  --voice-id Lucia \
+  "$WORK_DIR/spanish.mp3" > /dev/null
+echo " Output: spanish.mp3 ($(wc -c < "$WORK_DIR/spanish.mp3") bytes)"
+
+echo ""
+echo "Tutorial complete. Audio files copied to the current directory."
+echo "No AWS resources were created — Polly is a stateless API."
+cp "$WORK_DIR"/*.mp3 . && ls -lh ./*.mp3  # keep the audio as the tutorial doc promises; temp dir (and log) is removed next
+rm -rf "$WORK_DIR"
diff --git a/tuts/105-amazon-rekognition-gs/README.md b/tuts/105-amazon-rekognition-gs/README.md
new file mode 100644
index 0000000..2ed4cc0
--- /dev/null
+++ b/tuts/105-amazon-rekognition-gs/README.md
@@ -0,0 +1,55 @@
+# Rekognition: Detect labels in images
+
+## Source
+
+https://docs.aws.amazon.com/rekognition/latest/dg/getting-started.html
+
+## Use case
+
+- **ID**: rekognition/getting-started
+- **Level**: beginner
+- **Core actions**: `rekognition:DetectLabels`, `rekognition:DetectText`
+
+## Steps
+
+1. Create a sample gradient PNG image
+2. Upload the image to S3
+3. Detect labels in the image
+4. Detect labels from local bytes
+5. Detect text in the image
+6. Detect image properties
+
+## Resources created
+
+| Resource | Type |
+|----------|------|
+| `rekognition-tut-<random-hex>-<account-id>` | S3 bucket |
+
+## Cost
+
+Rekognition pricing is per image analyzed. This tutorial analyzes ~4 images, costing less than $0.01.
+
+## Duration
+
+~10 seconds
+
+## Related docs
+
+- [Getting started with Amazon Rekognition](https://docs.aws.amazon.com/rekognition/latest/dg/getting-started.html)
+- [Detecting labels](https://docs.aws.amazon.com/rekognition/latest/dg/labels-detect-labels-image.html)
+- [Detecting text](https://docs.aws.amazon.com/rekognition/latest/dg/text-detecting-text-procedure.html)
+- [Image properties](https://docs.aws.amazon.com/rekognition/latest/dg/image-properties.html)
+
+---
+
+## Appendix
+
+| Field | Value |
+|-------|-------|
+| Date | 2026-04-14 |
+| Script lines | 107 |
+| Exit code | 0 |
+| Runtime | 10s |
+| Steps | 6 |
+| Issues | None |
+| Version | v1 |
diff --git a/tuts/105-amazon-rekognition-gs/amazon-rekognition-gs.md b/tuts/105-amazon-rekognition-gs/amazon-rekognition-gs.md
new file mode 100644
index 0000000..5293590
--- /dev/null
+++ b/tuts/105-amazon-rekognition-gs/amazon-rekognition-gs.md
@@ -0,0 +1,126 @@
+# Detect labels in images with Amazon Rekognition
+
+## Overview
+
+In this tutorial, you use the AWS CLI to analyze images with Amazon Rekognition. You create a sample PNG image, upload it to S3, detect labels, detect labels from local bytes, detect text, and detect image properties. You then clean up the S3 bucket.
+
+## Prerequisites
+
+- AWS CLI installed and configured with appropriate permissions.
+- Python 3 (to generate the sample image).
+- An IAM principal with permissions for `rekognition:DetectLabels`, `rekognition:DetectText`, `s3:CreateBucket`, `s3:PutObject`, `s3:DeleteObject`, and `s3:DeleteBucket`.
+
+## Step 1: Create a sample image
+
+Generate a 100×100 gradient PNG using Python. The gradient gives Rekognition color data to analyze.
+
+```bash
+python3 -c "
+import struct, zlib
+w,h=100,100
+rows=b''
+for y in range(h):
+ rows+=b'\x00'
+ for x in range(w):
+ rows+=bytes([int(x*2.55), int(y*2.55), 128])
+comp=zlib.compress(rows)
+def ch(t,d):
+ c=t+d
+ return struct.pack('>I',len(d))+c+struct.pack('>I',zlib.crc32(c)&0xffffffff)
+with open('sample.png','wb') as f:
+ f.write(b'\x89PNG\r\n\x1a\n')
+ f.write(ch(b'IHDR',struct.pack('>IIBBBBB',w,h,8,2,0,0,0)))
+ f.write(ch(b'IDAT',comp))
+ f.write(ch(b'IEND',b''))
+"
+```
+
+This creates a minimal valid PNG without any external dependencies.
+
+## Step 2: Upload to S3
+
+Create an S3 bucket and upload the image.
+
+```bash
+RANDOM_ID=$(openssl rand -hex 4)
+ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
+BUCKET_NAME="rekognition-tut-${RANDOM_ID}-${ACCOUNT_ID}"
+
+aws s3api create-bucket --bucket "$BUCKET_NAME"
+aws s3 cp sample.png "s3://$BUCKET_NAME/sample.png"
+```
+
+For regions other than `us-east-1`, add `--create-bucket-configuration LocationConstraint=$AWS_DEFAULT_REGION`.
+
+## Step 3: Detect labels
+
+Detect labels in the image stored in S3.
+
+```bash
+aws rekognition detect-labels \
+ --image "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+ --max-labels 10 \
+ --query 'Labels[].{Label:Name,Confidence:Confidence}' --output table
+```
+
+Rekognition returns labels with confidence scores. For a gradient image, expect generic labels like "Pattern" or "Art".
+
+## Step 4: Detect labels from local bytes
+
+Pass the image directly from the local filesystem instead of S3.
+
+```bash
+aws rekognition detect-labels \
+ --image-bytes "fileb://sample.png" \
+ --max-labels 5 \
+ --query 'Labels[:5].{Label:Name,Confidence:Confidence}' --output table
+```
+
+The `fileb://` prefix sends the file as raw bytes. This avoids the S3 upload step for quick tests.
+
+## Step 5: Detect text in image
+
+Look for text in the image.
+
+```bash
+aws rekognition detect-text \
+ --image "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+ --query 'TextDetections[:5].{Text:DetectedText,Type:Type,Confidence:Confidence}' --output table
+```
+
+The gradient image contains no text, so this returns an empty result. With a real image containing text, Rekognition returns each detected word and line.
+
+## Step 6: Detect image properties
+
+Use the `IMAGE_PROPERTIES` feature to get dominant colors and quality metrics.
+
+```bash
+aws rekognition detect-labels \
+ --image "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+ --features GENERAL_LABELS IMAGE_PROPERTIES \
+ --query 'ImageProperties.{Quality:Quality,DominantColors:DominantColors[:3]}' --output json
+```
+
+Image properties include sharpness, brightness, and dominant colors.
+
+## Cleanup
+
+Delete the S3 bucket and its contents.
+
+```bash
+aws s3 rm "s3://$BUCKET_NAME" --recursive
+aws s3 rb "s3://$BUCKET_NAME"
+```
+
+The script automates all steps including cleanup:
+
+```bash
+bash amazon-rekognition-gs.sh
+```
+
+## Related resources
+
+- [Getting started with Amazon Rekognition](https://docs.aws.amazon.com/rekognition/latest/dg/getting-started.html)
+- [Detecting labels](https://docs.aws.amazon.com/rekognition/latest/dg/labels-detect-labels-image.html)
+- [Detecting text](https://docs.aws.amazon.com/rekognition/latest/dg/text-detecting-text-procedure.html)
+- [Image properties](https://docs.aws.amazon.com/rekognition/latest/dg/image-properties.html)
diff --git a/tuts/105-amazon-rekognition-gs/amazon-rekognition-gs.sh b/tuts/105-amazon-rekognition-gs/amazon-rekognition-gs.sh
new file mode 100644
index 0000000..c26ea29
--- /dev/null
+++ b/tuts/105-amazon-rekognition-gs/amazon-rekognition-gs.sh
@@ -0,0 +1,107 @@
+#!/bin/bash
+# Tutorial: Detect labels in images with Amazon Rekognition
+# Source: https://docs.aws.amazon.com/rekognition/latest/dg/getting-started.html
+
+WORK_DIR=$(mktemp -d)
+LOG_FILE="$WORK_DIR/rekognition-$(date +%Y%m%d-%H%M%S).log"
+exec > >(tee -a "$LOG_FILE") 2>&1
+
+REGION=${AWS_DEFAULT_REGION:-${AWS_REGION:-$(aws configure get region 2>/dev/null)}}
+if [ -z "$REGION" ]; then
+ echo "ERROR: No AWS region configured. Set one with: export AWS_DEFAULT_REGION=us-east-1"
+ exit 1
+fi
+export AWS_DEFAULT_REGION="$REGION"
+ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
+echo "Region: $REGION"
+
+RANDOM_ID=$(openssl rand -hex 4)
+BUCKET_NAME="rekognition-tut-${RANDOM_ID}-${ACCOUNT_ID}"
+
+handle_error() { echo "ERROR on line $1"; trap - ERR; cleanup; exit 1; }
+trap 'handle_error $LINENO' ERR
+
+cleanup() {
+ echo ""
+ echo "Cleaning up resources..."
+ if aws s3 ls "s3://$BUCKET_NAME" > /dev/null 2>&1; then
+ aws s3 rm "s3://$BUCKET_NAME" --recursive --quiet 2>/dev/null
+ aws s3 rb "s3://$BUCKET_NAME" 2>/dev/null && echo " Deleted bucket $BUCKET_NAME"
+ fi
+ rm -rf "$WORK_DIR"
+ echo "Cleanup complete."
+}
+
+# Step 1: Create a sample image (simple colored PNG)
+echo "Step 1: Creating sample image"
+python3 -c "
+import struct, zlib
+w,h=100,100
+# Blue/green gradient - gives Rekognition something to analyze
+rows=b''
+for y in range(h):
+ rows+=b'\x00' # filter byte
+ for x in range(w):
+ rows+=bytes([int(x*2.55), int(y*2.55), 128])
+comp=zlib.compress(rows)
+def ch(t,d):
+ c=t+d
+ return struct.pack('>I',len(d))+c+struct.pack('>I',zlib.crc32(c)&0xffffffff)
+with open('$WORK_DIR/sample.png','wb') as f:
+ f.write(b'\x89PNG\r\n\x1a\n')
+ f.write(ch(b'IHDR',struct.pack('>IIBBBBB',w,h,8,2,0,0,0)))
+ f.write(ch(b'IDAT',comp))
+ f.write(ch(b'IEND',b''))
+"
+echo " Created sample.png (100x100 gradient)"
+
+# Step 2: Upload to S3
+echo "Step 2: Uploading to S3"
+if [ "$REGION" = "us-east-1" ]; then
+ aws s3api create-bucket --bucket "$BUCKET_NAME" > /dev/null
+else
+ aws s3api create-bucket --bucket "$BUCKET_NAME" \
+ --create-bucket-configuration LocationConstraint="$REGION" > /dev/null
+fi
+aws s3 cp "$WORK_DIR/sample.png" "s3://$BUCKET_NAME/sample.png" --quiet
+echo " Uploaded to s3://$BUCKET_NAME/sample.png"
+
+# Step 3: Detect labels
+echo "Step 3: Detecting labels in image"
+aws rekognition detect-labels \
+ --image "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+ --max-labels 10 \
+ --query 'Labels[].{Label:Name,Confidence:Confidence}' --output table
+
+# Step 4: Detect labels from bytes (local file)
+echo "Step 4: Detecting labels from local file"
+aws rekognition detect-labels \
+ --image-bytes "fileb://$WORK_DIR/sample.png" \
+ --max-labels 5 \
+ --query 'Labels[:5].{Label:Name,Confidence:Confidence}' --output table
+
+# Step 5: Detect text in image
+echo "Step 5: Detecting text in image"
+aws rekognition detect-text \
+ --image "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+ --query 'TextDetections[:5].{Text:DetectedText,Type:Type,Confidence:Confidence}' --output table 2>/dev/null || \
+ echo " No text detected (expected — the sample is a gradient)"
+
+# Step 6: Detect image properties
+echo "Step 6: Detecting image properties"
+aws rekognition detect-labels \
+ --image "{\"S3Object\":{\"Bucket\":\"$BUCKET_NAME\",\"Name\":\"sample.png\"}}" \
+ --features GENERAL_LABELS IMAGE_PROPERTIES \
+ --query 'ImageProperties.{Quality:Quality,DominantColors:DominantColors[:3]}' --output json 2>/dev/null | python3 -m json.tool 2>/dev/null || \
+ echo " Image properties not available for this image"
+
+echo ""
+echo "Tutorial complete."
+echo "Do you want to clean up all resources? (y/n): "
+read -r CHOICE
+if [[ "$CHOICE" =~ ^[Yy]$ ]]; then
+ cleanup
+else
+  echo "Manual cleanup: aws s3 rm s3://$BUCKET_NAME --recursive && aws s3 rb s3://$BUCKET_NAME"
+  rm -rf "$WORK_DIR"
+fi