fix urls fft search

apikeys
Razmig Sarkissian 4 months ago
parent f988fc1c06
commit c167f21a96
  api/urls.py   (1 line changed)
  api/utils.py  (479 lines changed)
  api/views.py  (249 lines changed)

api/urls.py
@@ -45,7 +45,6 @@ urlpatterns = [
     path('config/tournament/', views.get_tournament_config, name='tournament-config'),
     path('config/payment/', views.get_payment_config, name='payment-config'),
     path('fft/club-tournaments/', views.get_fft_club_tournaments, name='get-fft-club-tournaments'),
-    path('fft/club-tournaments-complete/', views.get_fft_club_tournaments_with_umpire_data, name='get-fft-club-tournaments-complete'),
     path('fft/all-tournaments/', views.get_fft_all_tournaments, name='get-fft-all-tournaments'),
     path('fft/umpire/<str:tournament_id>/', views.get_fft_umpire_data, name='get-fft-umpire-data'),
     path('fft/federal-clubs/', views.get_fft_federal_clubs, name='get-fft-federal-clubs'),

api/utils.py
@@ -1,4 +1,3 @@
-import time
 import logging
 import requests
 import re
@@ -6,6 +5,7 @@ from playwright.sync_api import sync_playwright
 from datetime import datetime, timedelta
 import json
 import traceback
+from concurrent.futures import ThreadPoolExecutor, as_completed

 logger = logging.getLogger(__name__)
@@ -212,49 +212,6 @@ def scrape_fft_club_tournaments_all_pages(club_code, club_name, start_date=None,
         'pages_scraped': page + 1
     }
-
-def get_umpire_data(tournament_id):
-    """
-    Scrapes umpire data for a specific tournament
-    """
-    logger.info(f"Getting umpire data for tournament {tournament_id}")
-    try:
-        url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Safari/605.1.15'
-        }
-        response = requests.get(url, headers=headers, timeout=30)
-        if response.status_code != 200:
-            logger.error(f"Failed to fetch tournament page: {response.status_code}")
-            return None, None, None
-
-        html_content = response.text
-
-        # Extract name
-        name_pattern = r'tournoi-detail-page-inscription-responsable-title">\s*([^<]+)\s*<'
-        name_match = re.search(name_pattern, html_content)
-        name = name_match.group(1).strip() if name_match else None
-
-        # Extract email
-        email_pattern = r'mailto:([^"]+)"'
-        email_match = re.search(email_pattern, html_content)
-        email = email_match.group(1) if email_match else None
-
-        # Extract phone
-        phone_pattern = r'<div class="details-bloc">\s*(\d{2}\s+\d{2}\s+\d{2}\s+\d{2}\s+\d{2})\s*</div>'
-        phone_match = re.search(phone_pattern, html_content)
-        phone = phone_match.group(1).strip() if phone_match else None
-
-        logger.info(f"Extracted umpire data: name={name}, email={email}, phone={phone}")
-        return name, email, phone
-
-    except Exception as e:
-        logger.error(f"Error getting umpire data: {e}")
-        return None, None, None

 def _parse_ajax_response(commands):
     """
     Parse the AJAX response commands to extract tournament data
@@ -555,3 +512,437 @@ def scrape_fft_all_tournaments(sorting_option=None, page=0, start_date=None, end
         logger.error(f"Error in Playwright scraping: {e}")
         logger.error(f"Traceback: {traceback.format_exc()}")
         return None
+
+def get_umpire_data(tournament_id):
+    """
+    Fast umpire data extraction using Playwright (optimized for speed)
+    """
+    logger.info(f"Getting umpire data for tournament {tournament_id}")
+    try:
+        with sync_playwright() as p:
+            browser = p.chromium.launch(
+                headless=True,
+                args=[
+                    '--no-sandbox',
+                    '--disable-dev-shm-usage',
+                    '--disable-images',  # Don't load images
+                    '--disable-javascript',  # Disable JS for faster loading
+                    '--disable-plugins',
+                    '--disable-extensions'
+                ]
+            )
+            page = browser.new_page()
+
+            # Navigate to tournament page quickly
+            url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
+            logger.info(f"Navigating to tournament page: {url}")
+
+            try:
+                # Fast navigation - don't wait for everything to load
+                page.goto(url, timeout=15000, wait_until="domcontentloaded")
+
+                # Quick Queue-It check
+                if "queue-it.net" in page.url.lower():
+                    logger.warning("Hit Queue-It on tournament page")
+                    browser.close()
+                    return None, None, None
+
+                # Extract data using the fastest method - regex on HTML content
+                html_content = page.content()
+
+                # Extract name
+                name_pattern = r'tournoi-detail-page-inscription-responsable-title">\s*([^<]+)\s*<'
+                name_match = re.search(name_pattern, html_content)
+                name = name_match.group(1).strip() if name_match else None
+
+                # Extract email
+                email_pattern = r'mailto:([^"]+)"'
+                email_match = re.search(email_pattern, html_content)
+                email = email_match.group(1) if email_match else None
+
+                # Extract phone
+                phone_pattern = r'<div class="details-bloc">\s*(\d{2}\s+\d{2}\s+\d{2}\s+\d{2}\s+\d{2})\s*</div>'
+                phone_match = re.search(phone_pattern, html_content)
+                phone = phone_match.group(1).strip() if phone_match else None
+
+                browser.close()
+                logger.info(f"Extracted umpire data: name={name}, email={email}, phone={phone}")
+                return name, email, phone
+
+            except Exception as page_error:
+                logger.error(f"Error loading tournament page: {page_error}")
+                browser.close()
+                return None, None, None
+
+    except Exception as e:
+        logger.error(f"Error in umpire data extraction: {e}")
+        return None, None, None
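Review note on the launch args above: '--disable-images' and '--disable-javascript' are not switches Chromium recognizes, so this launch very likely still loads images and runs JS. If the speed claim in the docstring matters, the Playwright-documented route is a context option plus request routing; a minimal sketch (the tournament id is hypothetical, and it assumes the regexes only need the server-rendered HTML):

    from playwright.sync_api import sync_playwright

    with sync_playwright() as p:
        browser = p.chromium.launch(headless=True, args=['--no-sandbox'])
        # java_script_enabled is the supported way to turn off JS per context
        context = browser.new_context(java_script_enabled=False)
        page = context.new_page()
        # Abort heavy resource types instead of passing unrecognized flags
        page.route("**/*", lambda r: r.abort()
                   if r.request.resource_type in ("image", "font", "stylesheet")
                   else r.continue_())
        page.goto("https://tenup.fft.fr/tournoi/12345", wait_until="domcontentloaded")
        html = page.content()
        browser.close()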
+
+def _get_umpire_data_requests_fallback(tournament_id):
+    """
+    Fallback method using requests (may hit Queue-It)
+    """
+    logger.info(f"Using requests fallback for tournament {tournament_id}")
+    try:
+        url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Safari/605.1.15'
+        }
+        response = requests.get(url, headers=headers, timeout=30)
+        if response.status_code != 200:
+            logger.error(f"Failed to fetch tournament page: {response.status_code}")
+            return None, None, None
+
+        html_content = response.text
+
+        # Extract using regex (original method)
+        name_pattern = r'tournoi-detail-page-inscription-responsable-title">\s*([^<]+)\s*<'
+        name_match = re.search(name_pattern, html_content)
+        name = name_match.group(1).strip() if name_match else None
+
+        email_pattern = r'mailto:([^"]+)"'
+        email_match = re.search(email_pattern, html_content)
+        email = email_match.group(1) if email_match else None
+
+        phone_pattern = r'<div class="details-bloc">\s*(\d{2}\s+\d{2}\s+\d{2}\s+\d{2}\s+\d{2})\s*</div>'
+        phone_match = re.search(phone_pattern, html_content)
+        phone = phone_match.group(1).strip() if phone_match else None
+
+        logger.info(f"Extracted umpire data (requests): name={name}, email={email}, phone={phone}")
+        return name, email, phone
+
+    except Exception as e:
+        logger.error(f"Error getting umpire data with requests: {e}")
+        return None, None, None
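As added, _get_umpire_data_requests_fallback has no caller anywhere in this diff. If the intent was Playwright-first with a plain-requests fallback, a wrapper along these lines would wire it up (hypothetical, not part of the commit):

    def get_umpire_data_with_fallback(tournament_id):
        # Try the Playwright path first; fall back to plain requests if it
        # came back empty (e.g. browser launch failure or Queue-It).
        name, email, phone = get_umpire_data(tournament_id)
        if name or email or phone:
            return name, email, phone
        return _get_umpire_data_requests_fallback(tournament_id)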
+
+def _scrape_single_page(sorting_option, page, start_date, end_date, city, distance,
+                        categories, levels, lat, lng, ages, tournament_types, national_cup):
+    """
+    Helper function to scrape a single page of tournaments
+    """
+    return scrape_fft_all_tournaments(
+        sorting_option=sorting_option,
+        page=page,
+        start_date=start_date,
+        end_date=end_date,
+        city=city,
+        distance=distance,
+        categories=categories,
+        levels=levels,
+        lat=lat,
+        lng=lng,
+        ages=ages,
+        tournament_types=tournament_types,
+        national_cup=national_cup
+    )
+
+def scrape_fft_all_tournaments_concurrent(sorting_option=None, start_date=None, end_date=None,
+                                          city='', distance=15, categories=None, levels=None,
+                                          lat=None, lng=None, ages=None, tournament_types=None,
+                                          national_cup=False, max_workers=5):
+    """
+    Scrapes all remaining pages of FFT tournaments concurrently (pages 1 to end).
+    This assumes page 0 was already fetched by the client.
+    """
+    logger.info("Starting concurrent scraping for remaining tournament pages")
+
+    # First, get the first page to determine total results and pages
+    first_page_result = scrape_fft_all_tournaments(
+        sorting_option=sorting_option,
+        page=0,
+        start_date=start_date,
+        end_date=end_date,
+        city=city,
+        distance=distance,
+        categories=categories,
+        levels=levels,
+        lat=lat,
+        lng=lng,
+        ages=ages,
+        tournament_types=tournament_types,
+        national_cup=national_cup
+    )
+
+    if not first_page_result:
+        logger.error("Failed to get first page results for pagination info")
+        return None
+
+    total_results = first_page_result.get('total_results', 0)
+    first_page_tournaments = first_page_result.get('tournaments', [])
+    results_per_page = len(first_page_tournaments)
+    logger.info(f"Total results: {total_results}, Results per page: {results_per_page}")
+
+    if total_results == 0:
+        return {'tournaments': [], 'total_results': 0, 'current_count': 0, 'pages_scraped': 0}
+
+    # Calculate number of pages needed (ceiling division)
+    if results_per_page > 0:
+        total_pages = (total_results + results_per_page - 1) // results_per_page
+    else:
+        total_pages = 1
+    logger.info(f"Total pages: {total_pages}")
+
+    # If only one page total, return empty since page 0 was already handled
+    if total_pages <= 1:
+        return {'tournaments': [], 'total_results': total_results, 'current_count': 0, 'pages_scraped': 0}
+
+    # Scrape all remaining pages concurrently (pages 1 to total_pages-1)
+    all_tournaments = []
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = []
+        for page in range(1, total_pages):
+            future = executor.submit(
+                _scrape_single_page,
+                sorting_option, page, start_date, end_date, city, distance,
+                categories, levels, lat, lng, ages, tournament_types, national_cup
+            )
+            futures.append((page, future))
+
+        # Collect results as they complete
+        for page, future in futures:
+            try:
+                result = future.result(timeout=60)  # 60 second timeout per page
+                if result and result.get('tournaments'):
+                    tournaments = result.get('tournaments', [])
+                    all_tournaments.extend(tournaments)
+                    logger.info(f"Page {page} completed: {len(tournaments)} tournaments")
+                else:
+                    logger.warning(f"Page {page} returned no results")
+            except Exception as e:
+                logger.error(f"Error processing page {page}: {e}")
+
+    logger.info(f"Concurrent scraping completed: {len(all_tournaments)} tournaments from {total_pages-1} remaining pages")
+    return {
+        'tournaments': all_tournaments,
+        'total_results': total_results,
+        'current_count': len(all_tournaments),
+        'pages_scraped': total_pages - 1  # Excluding page 0 which was handled separately
+    }
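Two small observations on the function above. The page count uses integer ceiling division: with total_results = 137 and 25 results per page, (137 + 25 - 1) // 25 = 6, so the executor is handed pages 1 through 5 while page 0 is left to the caller. A quick self-check:

    def total_pages(total, per_page):
        # Ceiling division: smallest page count that holds `total` items
        return (total + per_page - 1) // per_page

    assert total_pages(137, 25) == 6   # pages 0..5
    assert total_pages(125, 25) == 5   # exact multiple, no extra page
    assert total_pages(1, 25) == 1

Also, as_completed is imported at the top of the file but the loop collects futures in submission order via future.result(timeout=60); either drop the import or iterate with as_completed(...) if out-of-order collection was intended.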
+
+def _parse_clubs_ajax_response(json_data):
+    """
+    Parse the clubs AJAX response to match the Swift FederalClubResponse structure
+    """
+    try:
+        # Log the raw response structure to understand what we're getting
+        logger.info(f"Raw clubs response structure: {json_data}")
+
+        club_markers = []
+        total_results = 0
+
+        # Try to extract clubs data from different possible response structures
+        if isinstance(json_data, dict):
+            # Pattern 1: Direct club_markers array
+            if 'club_markers' in json_data:
+                clubs_data = json_data['club_markers']
+                total_results = json_data.get('nombreResultat', len(clubs_data))
+            # Pattern 2: Results wrapper
+            elif 'results' in json_data:
+                results = json_data['results']
+                clubs_data = results.get('clubs', results.get('items', results.get('club_markers', [])))
+                total_results = results.get('nombreResultat', results.get('total', results.get('nb_results', len(clubs_data))))
+            # Pattern 3: Direct array in response
+            elif 'data' in json_data:
+                clubs_data = json_data['data']
+                total_results = len(clubs_data)
+            # Pattern 4: Response is the clubs array directly
+            else:
+                clubs_data = json_data if isinstance(json_data, list) else []
+                total_results = len(clubs_data)
+        elif isinstance(json_data, list):
+            clubs_data = json_data
+            total_results = len(clubs_data)
+        else:
+            logger.error(f"Unexpected response format: {type(json_data)}")
+            clubs_data = []
+            total_results = 0
+
+        # Parse each club to match the ClubMarker structure
+        for item in clubs_data:
+            if isinstance(item, dict):
+                # Extract pratiques array
+                pratiques = []
+                if 'pratiques' in item:
+                    pratiques = item['pratiques']
+                elif 'practices' in item:
+                    pratiques = item['practices']
+                else:
+                    # Default to PADEL if not specified
+                    pratiques = ["PADEL"]
+
+                # Ensure pratiques are uppercase strings
+                pratiques = [p.upper() if isinstance(p, str) else str(p).upper() for p in pratiques]
+
+                club_marker = {
+                    "nom": item.get('nom', item.get('name', '')),
+                    "clubId": str(item.get('clubId', item.get('id', item.get('code', '')))),
+                    "ville": item.get('ville', item.get('city', '')),
+                    "distance": str(item.get('distance', '0')),
+                    "terrainPratiqueLibelle": item.get('terrainPratiqueLibelle', item.get('courtsInfo', '')),
+                    "pratiques": pratiques,
+                    "lat": float(item.get('lat', item.get('latitude', 0.0))),
+                    "lng": float(item.get('lng', item.get('longitude', 0.0)))
+                }
+                club_markers.append(club_marker)
+
+        logger.info(f"Successfully parsed {len(club_markers)} club markers from response")
+
+        # Return the response in the format expected by the Swift FederalClubResponse
+        return {
+            "typeRecherche": "clubs",
+            "nombreResultat": total_results,
+            "club_markers": club_markers
+        }
+
+    except Exception as e:
+        logger.error(f"Error parsing clubs AJAX response: {e}")
+        return {
+            "typeRecherche": "clubs",
+            "nombreResultat": 0,
+            "club_markers": []
+        }
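To make the fallback chains above concrete, here is a hypothetical payload (field names taken from the fallbacks in the parser, not from a captured FFT response) and the marker it would produce:

    raw = {
        "results": {
            "items": [
                {"name": "TC Exemple", "id": 62130180, "city": "Cassis",
                 "distance": 3.2, "practices": ["padel"],
                 "latitude": 43.21, "longitude": 5.53}
            ],
            "total": 1
        }
    }
    parsed = _parse_clubs_ajax_response(raw)
    # parsed == {"typeRecherche": "clubs", "nombreResultat": 1, "club_markers": [{
    #     "nom": "TC Exemple", "clubId": "62130180", "ville": "Cassis",
    #     "distance": "3.2", "terrainPratiqueLibelle": "", "pratiques": ["PADEL"],
    #     "lat": 43.21, "lng": 5.53}]}

Note that Pattern 4 can never yield a non-empty list: it only runs when json_data is a dict, in which branch isinstance(json_data, list) is always False.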
+
+def scrape_federal_clubs(country=None, city='', latitude=None, longitude=None,
+                         radius=15, max_workers=5):
+    """
+    Scrapes FFT federal clubs by extracting data from the HTML response
+    """
+    logger.info(f"Starting federal clubs scraping for city: {city}, country: {country}")
+
+    try:
+        with sync_playwright() as p:
+            browser = p.chromium.launch(headless=True)
+            page_obj = browser.new_page()
+            page_obj.set_extra_http_headers({
+                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Safari/605.1.15"
+            })
+
+            # Clean up city name - remove zip code and extra info
+            clean_city = city
+            if city:
+                clean_city = re.sub(r'[,\s]*\d{5}.*$', '', city).strip()
+                clean_city = clean_city.rstrip(',').strip()
+                logger.info(f"Cleaned city name: '{city}' -> '{clean_city}'")
+
+            # Build the results URL directly
+            params = f"ville={clean_city}&pratique=PADEL&distance={int(radius)}&country={country or 'fr'}"
+            results_url = f"https://tenup.fft.fr/recherche/clubs/resultats?{params}"
+            logger.info(f"Requesting results URL: {results_url}")
+
+            # Navigate to the results page
+            page_obj.goto(results_url)
+
+            # Wait for the page to load
+            page_obj.wait_for_timeout(3000)
+
+            # Check if we're in queue
+            if "queue-it.net" in page_obj.url.lower():
+                logger.warning("Hit Queue-It on results page")
+                browser.close()
+                return {
+                    "typeRecherche": "clubs",
+                    "nombreResultat": 0,
+                    "club_markers": []
+                }
+
+            # Use JavaScript to extract the data directly from the page
+            extraction_script = """
+            () => {
+                try {
+                    // Check if Drupal.settings exists and has the data
+                    if (typeof Drupal !== 'undefined' &&
+                        Drupal.settings &&
+                        Drupal.settings.fft_recherche_club) {
+                        const data = Drupal.settings.fft_recherche_club;
+                        return {
+                            success: true,
+                            typeRecherche: data.typeRecherche || 'club',
+                            total: data.total || 0,
+                            resultat: data.resultat || []
+                        };
+                    }
+                    return {
+                        success: false,
+                        error: 'Drupal.settings.fft_recherche_club not found'
+                    };
+                } catch (error) {
+                    return {
+                        success: false,
+                        error: error.message
+                    };
+                }
+            }
+            """
+
+            result = page_obj.evaluate(extraction_script)
+            browser.close()
+
+            if result.get('success'):
+                type_recherche = result.get('typeRecherche', 'club')
+                total = result.get('total', 0)
+                resultat = result.get('resultat', [])
+                logger.info(f"Successfully extracted {total} clubs")
+
+                # Convert resultat to club_markers format
+                club_markers = []
+                for club in resultat:
+                    club_markers.append({
+                        "nom": club.get('nom', ''),
+                        "clubId": club.get('clubId', ''),
+                        "ville": club.get('ville', ''),
+                        "distance": club.get('distance', ''),
+                        "terrainPratiqueLibelle": club.get('terrainPratiqueLibelle', ''),
+                        "pratiques": club.get('pratiques', []),
+                        "lat": club.get('lat', 0.0),
+                        "lng": club.get('lng', 0.0)
+                    })
+
+                return {
+                    "typeRecherche": type_recherche,
+                    "nombreResultat": total,
+                    "club_markers": club_markers
+                }
+            else:
+                logger.error(f"Failed to extract data: {result.get('error')}")
+                return {
+                    "typeRecherche": "clubs",
+                    "nombreResultat": 0,
+                    "club_markers": []
+                }
+
+    except Exception as e:
+        logger.error(f"Error in federal clubs scraping: {e}")
+        logger.error(f"Traceback: {traceback.format_exc()}")
+        return {
+            "typeRecherche": "clubs",
+            "nombreResultat": 0,
+            "club_markers": []
+        }
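The extraction script depends on the results page exposing its data through Drupal.settings.fft_recherche_club, an observed detail of tenup.fft.fr that can change without notice. Call-side, the return value is already shaped like the Swift FederalClubResponse that the view below serializes; a usage sketch:

    result = scrape_federal_clubs(country='fr', city='Cassis', radius=15)
    if result:
        print(result["nombreResultat"], "clubs")
        for marker in result["club_markers"]:
            print(marker["nom"], marker["ville"], marker["distance"])

(max_workers is accepted in the signature but unused in this function as committed.)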

api/views.py
@@ -16,8 +16,7 @@ from .serializers import ClubSerializer, CourtSerializer, DateIntervalSerializer
 from tournaments.models import Club, Tournament, CustomUser, Event, Round, GroupStage, Match, TeamScore, TeamRegistration, PlayerRegistration, Court, DateInterval, Purchase, FailedApiCall, Log, DeviceToken, DrawLog, UnregisteredTeam, UnregisteredPlayer, Image
 from .permissions import IsClubOwner
-from .utils import check_version_smaller_than_1_1_12, scrape_fft_club_tournaments, scrape_fft_club_tournaments_all_pages, get_umpire_data, scrape_fft_all_tournaments
+from .utils import check_version_smaller_than_1_1_12, scrape_fft_club_tournaments, scrape_fft_club_tournaments_all_pages, get_umpire_data, scrape_fft_all_tournaments, scrape_fft_all_tournaments_concurrent, scrape_federal_clubs
 from shared.discord import send_discord_log_message
 from tournaments.services.payment_service import PaymentService
@@ -655,14 +654,9 @@ def get_fft_umpire_data(request, tournament_id):
         name, email, phone = get_umpire_data(tournament_id)

         return JsonResponse({
-            'success': True,
-            'umpire': {
-                'name': name,
-                'email': email,
-                'phone': phone
-            },
-            'japPhoneNumber': phone,  # Direct field for updating FederalTournament
-            'message': 'Umpire data retrieved successfully'
+            'name': name,
+            'email': email,
+            'phone': phone
         }, status=status.HTTP_200_OK)

     except Exception as e:
@@ -678,7 +672,9 @@ def get_fft_umpire_data(request, tournament_id):
 @permission_classes([])
 def get_fft_all_tournaments(request):
     """
-    API endpoint to get all tournaments with filters
+    API endpoint to get tournaments with smart pagination:
+    - page=0: Returns first page + metadata about total pages
+    - page>0: Returns all remaining pages concurrently
     """
     try:
         if request.method == 'POST':
@@ -692,7 +688,7 @@ def get_fft_all_tournaments(request):
         start_date = data.get('start_date')
         end_date = data.get('end_date')
         city = data.get('city', '')
-        distance = float(data.get('distance', 15))
+        distance = int(data.get('distance', 15))
         categories = data.getlist('categories') if hasattr(data, 'getlist') else data.get('categories', [])
         levels = data.getlist('levels') if hasattr(data, 'getlist') else data.get('levels', [])
         lat = data.get('lat')
@@ -700,40 +696,95 @@ def get_fft_all_tournaments(request):
         ages = data.getlist('ages') if hasattr(data, 'getlist') else data.get('ages', [])
         tournament_types = data.getlist('types') if hasattr(data, 'getlist') else data.get('types', [])
         national_cup = data.get('national_cup', 'false').lower() == 'true'
+        max_workers = int(data.get('max_workers', 5))

-        result = scrape_fft_all_tournaments(
-            sorting_option=sorting_option,
-            page=page,
-            start_date=start_date,
-            end_date=end_date,
-            city=city,
-            distance=distance,
-            categories=categories,
-            levels=levels,
-            lat=lat,
-            lng=lng,
-            ages=ages,
-            tournament_types=tournament_types,
-            national_cup=national_cup
-        )
-
-        if result:
-            return JsonResponse({
-                'success': True,
-                'tournaments': result.get('tournaments', []),
-                'total_results': result.get('total_results', 0),
-                'current_count': result.get('current_count', 0),
-                'page': page,
-                'message': f'Successfully scraped {len(result.get("tournaments", []))} tournaments'
-            }, status=status.HTTP_200_OK)
-        else:
-            return JsonResponse({
-                'success': False,
-                'tournaments': [],
-                'total_results': 0,
-                'current_count': 0,
-                'message': 'Failed to scrape all tournaments'
-            }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+        if page == 0:
+            # Handle first page individually
+            result = scrape_fft_all_tournaments(
+                sorting_option=sorting_option,
+                page=0,
+                start_date=start_date,
+                end_date=end_date,
+                city=city,
+                distance=distance,
+                categories=categories,
+                levels=levels,
+                lat=lat,
+                lng=lng,
+                ages=ages,
+                tournament_types=tournament_types,
+                national_cup=national_cup
+            )
+
+            if result:
+                tournaments = result.get('tournaments', [])
+                total_results = result.get('total_results', 0)
+                results_per_page = len(tournaments)
+
+                # Calculate total pages (ceiling division)
+                if results_per_page > 0:
+                    total_pages = (total_results + results_per_page - 1) // results_per_page
+                else:
+                    total_pages = 1
+
+                return JsonResponse({
+                    'success': True,
+                    'tournaments': tournaments,
+                    'total_results': total_results,
+                    'current_count': len(tournaments),
+                    'page': 0,
+                    'total_pages': total_pages,
+                    'has_more_pages': total_pages > 1,
+                    'message': f'Successfully scraped page 0: {len(tournaments)} tournaments. Total: {total_results} across {total_pages} pages.'
+                }, status=status.HTTP_200_OK)
+            else:
+                return JsonResponse({
+                    'success': False,
+                    'tournaments': [],
+                    'total_results': 0,
+                    'current_count': 0,
+                    'page': 0,
+                    'total_pages': 0,
+                    'has_more_pages': False,
+                    'message': 'Failed to scrape first page'
+                }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+        else:
+            # Handle all remaining pages concurrently
+            result = scrape_fft_all_tournaments_concurrent(
+                sorting_option=sorting_option,
+                start_date=start_date,
+                end_date=end_date,
+                city=city,
+                distance=distance,
+                categories=categories,
+                levels=levels,
+                lat=lat,
+                lng=lng,
+                ages=ages,
+                tournament_types=tournament_types,
+                national_cup=national_cup,
+                max_workers=max_workers
+            )
+
+            if result:
+                return JsonResponse({
+                    'success': True,
+                    'tournaments': result.get('tournaments', []),
+                    'total_results': result.get('total_results', 0),
+                    'current_count': result.get('current_count', 0),
+                    'pages_scraped': result.get('pages_scraped', 0),
+                    'message': f'Successfully scraped {result.get("pages_scraped", 0)} remaining pages concurrently: {len(result.get("tournaments", []))} tournaments'
+                }, status=status.HTTP_200_OK)
+            else:
+                return JsonResponse({
+                    'success': False,
+                    'tournaments': [],
+                    'total_results': 0,
+                    'current_count': 0,
+                    'pages_scraped': 0,
+                    'message': 'Failed to scrape remaining pages'
+                }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
     except Exception as e:
         logger.error(f"Error in get_fft_all_tournaments endpoint: {e}")
@@ -747,7 +798,7 @@ def get_fft_all_tournaments(request):
 @permission_classes([])
 def get_fft_federal_clubs(request):
     """
-    API endpoint to get federal clubs
+    API endpoint to get federal clubs with filters
     """
     try:
         if request.method == 'POST':
@@ -755,12 +806,14 @@ def get_fft_federal_clubs(request):
         else:
             data = request.GET

+        # Extract parameters - matching the Swift query parameters
         country = data.get('country', 'fr')
         city = data.get('city', '')
         radius = float(data.get('radius', 15))
-        latitude = data.get('latitude')
-        longitude = data.get('longitude')
+        latitude = data.get('lat')
+        longitude = data.get('lng')

+        # Convert latitude and longitude to float if provided
         if latitude:
             latitude = float(latitude)
         if longitude:
@@ -769,108 +822,26 @@ def get_fft_federal_clubs(request):
         result = scrape_federal_clubs(
             country=country,
             city=city,
-            radius=radius,
             latitude=latitude,
-            longitude=longitude
+            longitude=longitude,
+            radius=radius
         )

         if result:
-            return JsonResponse({
-                'success': True,
-                'clubs': result,
-                'message': 'Federal clubs retrieved successfully'
-            }, status=status.HTTP_200_OK)
+            # Return the result directly as JSON (already in correct format)
+            return JsonResponse(result, status=status.HTTP_200_OK)
         else:
+            # Return error in expected format
             return JsonResponse({
-                'success': False,
-                'clubs': [],
-                'message': 'Failed to retrieve federal clubs'
+                "typeRecherche": "clubs",
+                "nombreResultat": 0,
+                "club_markers": []
             }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)

     except Exception as e:
         logger.error(f"Error in get_fft_federal_clubs endpoint: {e}")
         return JsonResponse({
-            'success': False,
-            'clubs': [],
-            'message': f'Unexpected error: {str(e)}'
+            "typeRecherche": "clubs",
+            "nombreResultat": 0,
+            "club_markers": []
         }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
-
-@api_view(['GET', 'POST'])
-@permission_classes([])
-def get_fft_club_tournaments_with_umpire_data(request):
-    """
-    Combined endpoint that gets club tournaments and enriches them with umpire data
-    This matches the complete workflow from your Swift code
-    """
-    try:
-        if request.method == 'POST':
-            data = request.data
-        else:
-            data = request.GET
-
-        club_code = data.get('club_code', '62130180')
-        club_name = data.get('club_name', 'TENNIS SPORTING CLUB DE CASSIS')
-        start_date = data.get('start_date')
-        end_date = data.get('end_date')
-        include_umpire_data = data.get('include_umpire_data', 'false').lower() == 'true'
-
-        # Get all tournaments for the club
-        result = scrape_fft_club_tournaments_all_pages(
-            club_code=club_code,
-            club_name=club_name,
-            start_date=start_date,
-            end_date=end_date
-        )
-
-        if not result:
-            return JsonResponse({
-                'success': False,
-                'tournaments': [],
-                'message': 'Failed to scrape club tournaments'
-            }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
-        tournaments = result.get('tournaments', [])
-
-        # Enrich with umpire data if requested
-        if include_umpire_data:
-            logger.info(f"Enriching {len(tournaments)} tournaments with umpire data...")
-            for tournament in tournaments:
-                try:
-                    tournament_id = tournament.get('id')
-                    if tournament_id:
-                        name, email, phone = get_umpire_data(tournament_id)
-                        tournament['japPhoneNumber'] = phone
-
-                        # Also update jugeArbitre if we got more data
-                        if name and not tournament.get('jugeArbitre'):
-                            tournament['jugeArbitre'] = {
-                                'nom': name.split(' ')[-1] if name else None,
-                                'prenom': ' '.join(name.split(' ')[:-1]) if name and ' ' in name else name
-                            }
-
-                    # Small delay to avoid rate limiting
-                    time.sleep(0.5)
-
-                except Exception as e:
-                    logger.warning(f"Failed to get umpire data for tournament {tournament_id}: {e}")
-                    continue
-
-        return JsonResponse({
-            'success': True,
-            'tournaments': tournaments,
-            'total_results': result.get('total_results', 0),
-            'current_count': len(tournaments),
-            'pages_scraped': result.get('pages_scraped', 1),
-            'umpire_data_included': include_umpire_data,
-            'message': f'Successfully scraped {len(tournaments)} tournaments' +
-                       (' with umpire data' if include_umpire_data else '')
-        }, status=status.HTTP_200_OK)
-
-    except Exception as e:
-        logger.error(f"Error in get_fft_club_tournaments_with_umpire_data endpoint: {e}")
-        return JsonResponse({
-            'success': False,
-            'tournaments': [],
-            'message': f'Unexpected error: {str(e)}'
-        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
