feat: Complete Smart Resume Formatter with R2 and Gemini AI integration
Some checks failed
Profile Linker Docker Build / Build and push Docker image (push) Failing after 3s

- Integrated Cloudflare R2 for template storage and converted file management
- Added Google Gemini AI for resume parsing and HTML generation
- Created backend API endpoints for templates, conversion, and history
- Refactored frontend to use real API instead of mock data
- Fixed Docker networking issues (IPv6/IPv4) for R2 connectivity
- Added resumeService.ts for frontend API integration
- Updated Vite configuration for proper asset serving in Docker
- Successfully tested with 13 templates from R2 bucket
This commit is contained in:
Laxmi Khilnani
2025-10-14 21:43:41 +05:30
parent ee030b70bc
commit cda50356b4
34 changed files with 2604 additions and 360 deletions

81
test_r2_direct.py Executable file
View File

@@ -0,0 +1,81 @@
#!/usr/bin/env python3
"""
Test R2 connection directly.

Connects to a Cloudflare R2 bucket via the S3-compatible API and runs three
listing checks: all objects, objects under the ``templates/`` prefix, and
``.html`` files anywhere in the bucket. Intended to be run as a script.
"""
import os
import traceback

import boto3
from botocore.client import Config

# R2 Configuration.
# SECURITY NOTE (review): these credentials were previously hard-coded and
# committed to version control — they should be considered leaked and rotated.
# Values are now read from the environment, falling back to the original
# literals so the script keeps working until the env vars are provisioned.
R2_ENDPOINT = os.environ.get(
    "R2_ENDPOINT",
    "https://cba4afd7666247724ece1f34e1aace6c.r2.cloudflarestorage.com",
)
R2_ACCESS_KEY_ID = os.environ.get(
    "R2_ACCESS_KEY_ID", "8f7244b0e7f9c8297a606af0073d4a5a"
)
R2_SECRET_ACCESS_KEY = os.environ.get(
    "R2_SECRET_ACCESS_KEY",
    "17845714ff4c2e5f33f09740112be47925d0fab93d27b26982964cd14808b60b",
)
R2_BUCKET_NAME = os.environ.get("R2_BUCKET_NAME", "e-teams")


def _make_client():
    """Return an S3-compatible boto3 client pointed at the R2 endpoint.

    R2 requires SigV4 signing and path-style addressing; region is 'auto'.
    """
    return boto3.client(
        's3',
        endpoint_url=R2_ENDPOINT,
        aws_access_key_id=R2_ACCESS_KEY_ID,
        aws_secret_access_key=R2_SECRET_ACCESS_KEY,
        config=Config(
            signature_version='s3v4',
            s3={'addressing_style': 'path'}
        ),
        region_name='auto'
    )


def _list_all_objects(s3_client):
    """Test 1: list every object in the bucket and print a 10-item preview.

    Returns the raw list_objects_v2 response so later checks can reuse it.
    NOTE: list_objects_v2 returns at most 1000 keys per call; this probe
    does not paginate.
    """
    print("Test 1: Listing ALL objects in bucket...")
    response = s3_client.list_objects_v2(Bucket=R2_BUCKET_NAME)
    if 'Contents' in response:
        print(f"✅ Found {len(response['Contents'])} objects:")
        for obj in response['Contents'][:10]:  # Show first 10
            print(f"  - {obj['Key']} ({obj['Size']} bytes)")
        if len(response['Contents']) > 10:
            print(f"  ... and {len(response['Contents']) - 10} more")
    else:
        print("⚠️ Bucket is empty or no objects found")
    return response


def _list_templates(s3_client):
    """Test 2: list objects stored under the ``templates/`` prefix."""
    print("Test 2: Listing objects with 'templates/' prefix...")
    response = s3_client.list_objects_v2(
        Bucket=R2_BUCKET_NAME,
        Prefix="templates/"
    )
    if 'Contents' in response:
        print(f"✅ Found {len(response['Contents'])} template(s):")
        for obj in response['Contents']:
            print(f"  - {obj['Key']}")
    else:
        print("⚠️ No templates found with prefix 'templates/'")


def _list_html_files(response):
    """Test 3: report ``.html`` keys from a full-bucket listing response.

    Takes the response from _list_all_objects instead of re-issuing the
    identical unfiltered list call the original script made twice.
    """
    print("Test 3: Checking if templates exist without prefix...")
    if 'Contents' in response:
        html_files = [obj['Key'] for obj in response['Contents'] if obj['Key'].endswith('.html')]
        if html_files:
            print(f"✅ Found {len(html_files)} HTML file(s):")
            for file in html_files:
                print(f"  - {file}")
        else:
            print("⚠️ No .html files found in bucket")


def main():
    """Run the three R2 connectivity probes, printing results as we go."""
    print("Testing R2 Connection...")
    print(f"Endpoint: {R2_ENDPOINT}")
    print(f"Bucket: {R2_BUCKET_NAME}\n")
    try:
        s3_client = _make_client()
        full_listing = _list_all_objects(s3_client)
        print("\n" + "="*50 + "\n")
        _list_templates(s3_client)
        print("\n" + "="*50 + "\n")
        _list_html_files(full_listing)
        print("\n✅ R2 Connection Test Complete!")
    except Exception as e:
        # Broad catch is intentional: this is a diagnostic script and any
        # failure (auth, DNS, TLS, SDK) should be printed, not propagated.
        print(f"❌ Error: {e}")
        traceback.print_exc()


if __name__ == "__main__":
    main()