Laurent 4 months ago
commit 16a233f977
Changed files:
  1. api/urls.py (1)
  2. api/utils.py (479)
  3. api/views.py (189)
  4. tournaments/models/match.py (24)
  5. tournaments/models/tournament.py (57)
  6. tournaments/templates/tournaments/broadcast/broadcasted_planning.html (20)

@@ -49,7 +49,6 @@ urlpatterns = [
     path('config/tournament/', views.get_tournament_config, name='tournament-config'),
     path('config/payment/', views.get_payment_config, name='payment-config'),
     path('fft/club-tournaments/', views.get_fft_club_tournaments, name='get-fft-club-tournaments'),
-    path('fft/club-tournaments-complete/', views.get_fft_club_tournaments_with_umpire_data, name='get-fft-club-tournaments-complete'),
     path('fft/all-tournaments/', views.get_fft_all_tournaments, name='get-fft-all-tournaments'),
     path('fft/umpire/<str:tournament_id>/', views.get_fft_umpire_data, name='get-fft-umpire-data'),
     path('fft/federal-clubs/', views.get_fft_federal_clubs, name='get-fft-federal-clubs'),

@@ -1,4 +1,3 @@
-import time
 import logging
 import requests
 import re
@@ -6,6 +5,7 @@ from playwright.sync_api import sync_playwright
 from datetime import datetime, timedelta
 import json
 import traceback
+from concurrent.futures import ThreadPoolExecutor, as_completed

 logger = logging.getLogger(__name__)
@@ -212,49 +212,6 @@ def scrape_fft_club_tournaments_all_pages(club_code, club_name, start_date=None,
         'pages_scraped': page + 1
     }

-def get_umpire_data(tournament_id):
-    """
-    Scrapes umpire data for a specific tournament
-    """
-    logger.info(f"Getting umpire data for tournament {tournament_id}")
-    try:
-        url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
-        headers = {
-            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Safari/605.1.15'
-        }
-        response = requests.get(url, headers=headers, timeout=30)
-        if response.status_code != 200:
-            logger.error(f"Failed to fetch tournament page: {response.status_code}")
-            return None, None, None
-        html_content = response.text
-        # Extract name
-        name_pattern = r'tournoi-detail-page-inscription-responsable-title">\s*([^<]+)\s*<'
-        name_match = re.search(name_pattern, html_content)
-        name = name_match.group(1).strip() if name_match else None
-        # Extract email
-        email_pattern = r'mailto:([^"]+)"'
-        email_match = re.search(email_pattern, html_content)
-        email = email_match.group(1) if email_match else None
-        # Extract phone
-        phone_pattern = r'<div class="details-bloc">\s*(\d{2}\s+\d{2}\s+\d{2}\s+\d{2}\s+\d{2})\s*</div>'
-        phone_match = re.search(phone_pattern, html_content)
-        phone = phone_match.group(1).strip() if phone_match else None
-        logger.info(f"Extracted umpire data: name={name}, email={email}, phone={phone}")
-        return name, email, phone
-    except Exception as e:
-        logger.error(f"Error getting umpire data: {e}")
-        return None, None, None

 def _parse_ajax_response(commands):
     """
     Parse the AJAX response commands to extract tournament data
@@ -555,3 +512,437 @@ def scrape_fft_all_tournaments(sorting_option=None, page=0, start_date=None, end
         logger.error(f"Error in Playwright scraping: {e}")
         logger.error(f"Traceback: {traceback.format_exc()}")
         return None
+def get_umpire_data(tournament_id):
+    """
+    Fast umpire data extraction using Playwright (optimized for speed)
+    """
+    logger.info(f"Getting umpire data for tournament {tournament_id}")
+    try:
+        with sync_playwright() as p:
+            browser = p.chromium.launch(
+                headless=True,
+                args=[
+                    '--no-sandbox',
+                    '--disable-dev-shm-usage',
+                    '--disable-images',  # Don't load images
+                    '--disable-javascript',  # Disable JS for faster loading
+                    '--disable-plugins',
+                    '--disable-extensions'
+                ]
+            )
+            page = browser.new_page()
+            # Navigate to tournament page quickly
+            url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
+            logger.info(f"Navigating to tournament page: {url}")
+            try:
+                # Fast navigation - don't wait for everything to load
+                page.goto(url, timeout=15000, wait_until="domcontentloaded")
+                # Quick Queue-It check
+                if "queue-it.net" in page.url.lower():
+                    logger.warning("Hit Queue-It on tournament page")
+                    browser.close()
+                    return None, None, None
+                # Extract data using the fastest method - regex on HTML content
+                html_content = page.content()
+                # Extract name
+                name_pattern = r'tournoi-detail-page-inscription-responsable-title">\s*([^<]+)\s*<'
+                name_match = re.search(name_pattern, html_content)
+                name = name_match.group(1).strip() if name_match else None
+                # Extract email
+                email_pattern = r'mailto:([^"]+)"'
+                email_match = re.search(email_pattern, html_content)
+                email = email_match.group(1) if email_match else None
+                # Extract phone
+                phone_pattern = r'<div class="details-bloc">\s*(\d{2}\s+\d{2}\s+\d{2}\s+\d{2}\s+\d{2})\s*</div>'
+                phone_match = re.search(phone_pattern, html_content)
+                phone = phone_match.group(1).strip() if phone_match else None
+                browser.close()
+                logger.info(f"Extracted umpire data: name={name}, email={email}, phone={phone}")
+                return name, email, phone
+            except Exception as page_error:
+                logger.error(f"Error loading tournament page: {page_error}")
+                browser.close()
+                return None, None, None
+    except Exception as e:
+        logger.error(f"Error in umpire data extraction: {e}")
+        return None, None, None
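Reviewer note: `--disable-images` and `--disable-javascript` are not documented Chromium switches, so they are likely ignored here. If skipping JS and images really is the goal, Playwright's own context options can do it; a minimal sketch under that assumption (same URL pattern as above, helper name hypothetical):

    # Sketch: disable JS and block images via Playwright context options
    # rather than Chromium CLI flags (which may be silently ignored).
    from playwright.sync_api import sync_playwright

    def fetch_tournament_html(tournament_id):  # hypothetical helper
        url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
        with sync_playwright() as p:
            browser = p.chromium.launch(headless=True, args=['--no-sandbox'])
            context = browser.new_context(java_script_enabled=False)
            context.route('**/*.{png,jpg,jpeg,gif,svg,webp}', lambda route: route.abort())
            page = context.new_page()
            page.goto(url, timeout=15000, wait_until='domcontentloaded')
            html = page.content()
            browser.close()
            return html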
+def _get_umpire_data_requests_fallback(tournament_id):
+    """
+    Fallback method using requests (may hit Queue-It)
+    """
+    logger.info(f"Using requests fallback for tournament {tournament_id}")
+    try:
+        url = f"https://tenup.fft.fr/tournoi/{tournament_id}"
+        headers = {
+            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Safari/605.1.15'
+        }
+        response = requests.get(url, headers=headers, timeout=30)
+        if response.status_code != 200:
+            logger.error(f"Failed to fetch tournament page: {response.status_code}")
+            return None, None, None
+        html_content = response.text
+        # Extract using regex (original method)
+        name_pattern = r'tournoi-detail-page-inscription-responsable-title">\s*([^<]+)\s*<'
+        name_match = re.search(name_pattern, html_content)
+        name = name_match.group(1).strip() if name_match else None
+        email_pattern = r'mailto:([^"]+)"'
+        email_match = re.search(email_pattern, html_content)
+        email = email_match.group(1) if email_match else None
+        phone_pattern = r'<div class="details-bloc">\s*(\d{2}\s+\d{2}\s+\d{2}\s+\d{2}\s+\d{2})\s*</div>'
+        phone_match = re.search(phone_pattern, html_content)
+        phone = phone_match.group(1).strip() if phone_match else None
+        logger.info(f"Extracted umpire data (requests): name={name}, email={email}, phone={phone}")
+        return name, email, phone
+    except Exception as e:
+        logger.error(f"Error getting umpire data with requests: {e}")
+        return None, None, None
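Reviewer note: `_get_umpire_data_requests_fallback` is defined but nothing in this commit calls it. Presumably the intent is to fall back when the Playwright path extracts nothing; a hedged sketch of that wiring (the wrapper name is hypothetical):

    def get_umpire_data_with_fallback(tournament_id):  # hypothetical wrapper
        # Try the Playwright scraper first, then the requests-based
        # fallback if no field could be extracted.
        name, email, phone = get_umpire_data(tournament_id)
        if not any((name, email, phone)):
            name, email, phone = _get_umpire_data_requests_fallback(tournament_id)
        return name, email, phone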
+def _scrape_single_page(sorting_option, page, start_date, end_date, city, distance,
+                        categories, levels, lat, lng, ages, tournament_types, national_cup):
+    """
+    Helper function to scrape a single page of tournaments
+    """
+    return scrape_fft_all_tournaments(
+        sorting_option=sorting_option,
+        page=page,
+        start_date=start_date,
+        end_date=end_date,
+        city=city,
+        distance=distance,
+        categories=categories,
+        levels=levels,
+        lat=lat,
+        lng=lng,
+        ages=ages,
+        tournament_types=tournament_types,
+        national_cup=national_cup
+    )
+def scrape_fft_all_tournaments_concurrent(sorting_option=None, start_date=None, end_date=None,
+                                          city='', distance=15, categories=None, levels=None,
+                                          lat=None, lng=None, ages=None, tournament_types=None,
+                                          national_cup=False, max_workers=5):
+    """
+    Scrapes all remaining pages of FFT tournaments concurrently (pages 1 to end).
+    Assumes page 0 was already fetched by the client.
+    """
+    logger.info("Starting concurrent scraping for remaining tournament pages")
+    # First, get the first page to determine total results and pages
+    first_page_result = scrape_fft_all_tournaments(
+        sorting_option=sorting_option,
+        page=0,
+        start_date=start_date,
+        end_date=end_date,
+        city=city,
+        distance=distance,
+        categories=categories,
+        levels=levels,
+        lat=lat,
+        lng=lng,
+        ages=ages,
+        tournament_types=tournament_types,
+        national_cup=national_cup
+    )
+    if not first_page_result:
+        logger.error("Failed to get first page results for pagination info")
+        return None
+
+    total_results = first_page_result.get('total_results', 0)
+    first_page_tournaments = first_page_result.get('tournaments', [])
+    results_per_page = len(first_page_tournaments)
+    logger.info(f"Total results: {total_results}, Results per page: {results_per_page}")
+    if total_results == 0:
+        return {'tournaments': [], 'total_results': 0, 'current_count': 0, 'pages_scraped': 0}
+
+    # Calculate number of pages needed (ceiling division)
+    if results_per_page > 0:
+        total_pages = (total_results + results_per_page - 1) // results_per_page
+    else:
+        total_pages = 1
+    logger.info(f"Total pages: {total_pages}")
+
+    # If only one page total, return empty since page 0 was already handled
+    if total_pages <= 1:
+        return {'tournaments': [], 'total_results': total_results, 'current_count': 0, 'pages_scraped': 0}
+
+    # Scrape all remaining pages concurrently (pages 1 to total_pages-1)
+    all_tournaments = []
+    with ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = []
+        for page in range(1, total_pages):
+            future = executor.submit(
+                _scrape_single_page,
+                sorting_option, page, start_date, end_date, city, distance,
+                categories, levels, lat, lng, ages, tournament_types, national_cup
+            )
+            futures.append((page, future))
+        # Collect results in submission order (each waits up to 60 seconds)
+        for page, future in futures:
+            try:
+                result = future.result(timeout=60)  # 60 second timeout per page
+                if result and result.get('tournaments'):
+                    tournaments = result.get('tournaments', [])
+                    all_tournaments.extend(tournaments)
+                    logger.info(f"Page {page} completed: {len(tournaments)} tournaments")
+                else:
+                    logger.warning(f"Page {page} returned no results")
+            except Exception as e:
+                logger.error(f"Error processing page {page}: {e}")
+
+    logger.info(f"Concurrent scraping completed: {len(all_tournaments)} tournaments from {total_pages-1} remaining pages")
+    return {
+        'tournaments': all_tournaments,
+        'total_results': total_results,
+        'current_count': len(all_tournaments),
+        'pages_scraped': total_pages - 1  # Excluding page 0 which was handled separately
+    }
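Reviewer note: the helper re-scrapes page 0 solely to derive pagination metadata, so page 0 is fetched twice overall, and although `as_completed` is imported, the loop collects futures in submission order. The ceiling division itself is standard; a quick worked check with made-up figures:

    total_results, results_per_page = 87, 25               # example figures
    total_pages = (total_results + results_per_page - 1) // results_per_page
    assert total_pages == 4                                 # ceil(87 / 25)
    assert list(range(1, total_pages)) == [1, 2, 3]         # pages sent to the pool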
+def _parse_clubs_ajax_response(json_data):
+    """
+    Parse the clubs AJAX response to match Swift FederalClubResponse structure
+    """
+    try:
+        # Log the raw response structure to understand what we're getting
+        logger.info(f"Raw clubs response structure: {json_data}")
+        club_markers = []
+        total_results = 0
+        # Try to extract clubs data from different possible response structures
+        if isinstance(json_data, dict):
+            # Pattern 1: Direct club_markers array
+            if 'club_markers' in json_data:
+                clubs_data = json_data['club_markers']
+                total_results = json_data.get('nombreResultat', len(clubs_data))
+            # Pattern 2: Results wrapper
+            elif 'results' in json_data:
+                results = json_data['results']
+                clubs_data = results.get('clubs', results.get('items', results.get('club_markers', [])))
+                total_results = results.get('nombreResultat', results.get('total', results.get('nb_results', len(clubs_data))))
+            # Pattern 3: Direct array in response
+            elif 'data' in json_data:
+                clubs_data = json_data['data']
+                total_results = len(clubs_data)
+            # Pattern 4: Response is the clubs array directly
+            else:
+                clubs_data = json_data if isinstance(json_data, list) else []
+                total_results = len(clubs_data)
+        elif isinstance(json_data, list):
+            clubs_data = json_data
+            total_results = len(clubs_data)
+        else:
+            logger.error(f"Unexpected response format: {type(json_data)}")
+            clubs_data = []
+            total_results = 0
+
+        # Parse each club to match ClubMarker structure
+        for item in clubs_data:
+            if isinstance(item, dict):
+                # Extract pratiques array
+                pratiques = []
+                if 'pratiques' in item:
+                    pratiques = item['pratiques']
+                elif 'practices' in item:
+                    pratiques = item['practices']
+                else:
+                    # Default to PADEL if not specified
+                    pratiques = ["PADEL"]
+                # Ensure pratiques are uppercase strings
+                pratiques = [p.upper() if isinstance(p, str) else str(p).upper() for p in pratiques]
+                club_marker = {
+                    "nom": item.get('nom', item.get('name', '')),
+                    "clubId": str(item.get('clubId', item.get('id', item.get('code', '')))),
+                    "ville": item.get('ville', item.get('city', '')),
+                    "distance": str(item.get('distance', '0')),
+                    "terrainPratiqueLibelle": item.get('terrainPratiqueLibelle', item.get('courtsInfo', '')),
+                    "pratiques": pratiques,
+                    "lat": float(item.get('lat', item.get('latitude', 0.0))),
+                    "lng": float(item.get('lng', item.get('longitude', 0.0)))
+                }
+                club_markers.append(club_marker)
+
+        logger.info(f"Successfully parsed {len(club_markers)} club markers from response")
+        # Return the response in the format expected by Swift FederalClubResponse
+        return {
+            "typeRecherche": "clubs",
+            "nombreResultat": total_results,
+            "club_markers": club_markers
+        }
+    except Exception as e:
+        logger.error(f"Error parsing clubs AJAX response: {e}")
+        return {
+            "typeRecherche": "clubs",
+            "nombreResultat": 0,
+            "club_markers": []
+        }
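For reference, a worked example of what the parser accepts and emits (input dict invented for illustration; it exercises the Pattern 2 'results' branch):

    sample = {
        "results": {
            "clubs": [{"nom": "TC Exemple", "id": "12345", "city": "Marseille",
                       "distance": 3.2, "practices": ["padel"],
                       "latitude": 43.3, "longitude": 5.4}],
            "total": 1
        }
    }
    parsed = _parse_clubs_ajax_response(sample)
    # parsed == {"typeRecherche": "clubs", "nombreResultat": 1,
    #            "club_markers": [{"nom": "TC Exemple", "clubId": "12345",
    #                              "ville": "Marseille", "distance": "3.2",
    #                              "terrainPratiqueLibelle": "", "pratiques": ["PADEL"],
    #                              "lat": 43.3, "lng": 5.4}]}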
+def scrape_federal_clubs(country=None, city='', latitude=None, longitude=None,
+                         radius=15, max_workers=5):
+    """
+    Scrapes FFT federal clubs by extracting data from the HTML response
+    """
+    logger.info(f"Starting federal clubs scraping for city: {city}, country: {country}")
+    try:
+        with sync_playwright() as p:
+            browser = p.chromium.launch(headless=True)
+            page_obj = browser.new_page()
+            page_obj.set_extra_http_headers({
+                "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/18.5 Safari/605.1.15"
+            })
+            # Clean up city name - remove zip code and extra info
+            clean_city = city
+            if city:
+                import re
+                clean_city = re.sub(r'[,\s]*\d{5}.*$', '', city).strip()
+                clean_city = clean_city.rstrip(',').strip()
+                logger.info(f"Cleaned city name: '{city}' -> '{clean_city}'")
+            # Build the results URL directly
+            params = f"ville={clean_city}&pratique=PADEL&distance={int(radius)}&country={country or 'fr'}"
+            results_url = f"https://tenup.fft.fr/recherche/clubs/resultats?{params}"
+            logger.info(f"Requesting results URL: {results_url}")
+            # Navigate to the results page
+            page_obj.goto(results_url)
+            # Wait for the page to load
+            page_obj.wait_for_timeout(3000)
+            # Check if we're in queue
+            if "queue-it.net" in page_obj.url.lower():
+                logger.warning("Hit Queue-It on results page")
+                browser.close()
+                return {
+                    "typeRecherche": "clubs",
+                    "nombreResultat": 0,
+                    "club_markers": []
+                }
+            # Use JavaScript to extract the data directly from the page
+            extraction_script = """
+            () => {
+                try {
+                    // Check if Drupal.settings exists and has the data
+                    if (typeof Drupal !== 'undefined' &&
+                        Drupal.settings &&
+                        Drupal.settings.fft_recherche_club) {
+                        const data = Drupal.settings.fft_recherche_club;
+                        return {
+                            success: true,
+                            typeRecherche: data.typeRecherche || 'club',
+                            total: data.total || 0,
+                            resultat: data.resultat || []
+                        };
+                    }
+                    return {
+                        success: false,
+                        error: 'Drupal.settings.fft_recherche_club not found'
+                    };
+                } catch (error) {
+                    return {
+                        success: false,
+                        error: error.message
+                    };
+                }
+            }
+            """
+            result = page_obj.evaluate(extraction_script)
+            browser.close()
+
+            if result.get('success'):
+                type_recherche = result.get('typeRecherche', 'club')
+                total = result.get('total', 0)
+                resultat = result.get('resultat', [])
+                logger.info(f"Successfully extracted {total} clubs")
+                # Convert resultat to club_markers format
+                club_markers = []
+                for club in resultat:
+                    club_markers.append({
+                        "nom": club.get('nom', ''),
+                        "clubId": club.get('clubId', ''),
+                        "ville": club.get('ville', ''),
+                        "distance": club.get('distance', ''),
+                        "terrainPratiqueLibelle": club.get('terrainPratiqueLibelle', ''),
+                        "pratiques": club.get('pratiques', []),
+                        "lat": club.get('lat', 0.0),
+                        "lng": club.get('lng', 0.0)
+                    })
+                return {
+                    "typeRecherche": type_recherche,
+                    "nombreResultat": total,
+                    "club_markers": club_markers
+                }
+            else:
+                logger.error(f"Failed to extract data: {result.get('error')}")
+                return {
+                    "typeRecherche": "clubs",
+                    "nombreResultat": 0,
+                    "club_markers": []
+                }
+    except Exception as e:
+        logger.error(f"Error in federal clubs scraping: {e}")
+        logger.error(f"Traceback: {traceback.format_exc()}")
+        return {
+            "typeRecherche": "clubs",
+            "nombreResultat": 0,
+            "club_markers": []
+        }
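A usage sketch for the function above (sample values); note the cleanup regex strips a trailing zip code from the city before the results URL is built:

    result = scrape_federal_clubs(country='fr', city='Marseille, 13001', radius=15)
    # city is cleaned to 'Marseille' before building the results URL
    print(result['nombreResultat'])
    for marker in result['club_markers']:
        print(marker['nom'], marker['ville'], marker['pratiques'])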

@@ -28,8 +28,7 @@ from django.http import Http404
 from django.db.models import Q
 from .permissions import IsClubOwner
-from .utils import check_version_smaller_than_1_1_12, scrape_fft_club_tournaments, scrape_fft_club_tournaments_all_pages, get_umpire_data, scrape_fft_all_tournaments
+from .utils import check_version_smaller_than_1_1_12, scrape_fft_club_tournaments, scrape_fft_club_tournaments_all_pages, get_umpire_data, scrape_fft_all_tournaments, scrape_fft_all_tournaments_concurrent, scrape_federal_clubs
 from shared.discord import send_discord_log_message
 from tournaments.services.payment_service import PaymentService
@@ -667,14 +666,9 @@ def get_fft_umpire_data(request, tournament_id):
         name, email, phone = get_umpire_data(tournament_id)
         return JsonResponse({
             'success': True,
-            'umpire': {
-                'name': name,
-                'email': email,
-                'phone': phone
-            },
+            'japPhoneNumber': phone,  # Direct field for updating FederalTournament
             'message': 'Umpire data retrieved successfully'
         }, status=status.HTTP_200_OK)
     except Exception as e:
@@ -690,7 +684,9 @@ def get_fft_umpire_data(request, tournament_id):
 @permission_classes([])
 def get_fft_all_tournaments(request):
     """
-    API endpoint to get all tournaments with filters
+    API endpoint to get tournaments with smart pagination:
+    - page=0: returns the first page plus metadata about total pages
+    - page>0: returns all remaining pages, scraped concurrently
     """
     try:
         if request.method == 'POST':
@@ -704,7 +700,7 @@ def get_fft_all_tournaments(request):
         start_date = data.get('start_date')
         end_date = data.get('end_date')
         city = data.get('city', '')
-        distance = float(data.get('distance', 15))
+        distance = int(data.get('distance', 15))
         categories = data.getlist('categories') if hasattr(data, 'getlist') else data.get('categories', [])
         levels = data.getlist('levels') if hasattr(data, 'getlist') else data.get('levels', [])
         lat = data.get('lat')
@@ -712,10 +708,13 @@ def get_fft_all_tournaments(request):
         ages = data.getlist('ages') if hasattr(data, 'getlist') else data.get('ages', [])
         tournament_types = data.getlist('types') if hasattr(data, 'getlist') else data.get('types', [])
         national_cup = data.get('national_cup', 'false').lower() == 'true'
+        max_workers = int(data.get('max_workers', 5))

+        if page == 0:
+            # Handle first page individually
             result = scrape_fft_all_tournaments(
                 sorting_option=sorting_option,
-                page=page,
+                page=0,
                 start_date=start_date,
                 end_date=end_date,
                 city=city,
@@ -729,14 +728,65 @@ def get_fft_all_tournaments(request):
                 national_cup=national_cup
             )
             if result:
+                tournaments = result.get('tournaments', [])
+                total_results = result.get('total_results', 0)
+                results_per_page = len(tournaments)
+                # Calculate total pages
+                if results_per_page > 0:
+                    total_pages = (total_results + results_per_page - 1) // results_per_page
+                else:
+                    total_pages = 1
+                return JsonResponse({
+                    'success': True,
+                    'tournaments': tournaments,
+                    'total_results': total_results,
+                    'current_count': len(tournaments),
+                    'page': 0,
+                    'total_pages': total_pages,
+                    'has_more_pages': total_pages > 1,
+                    'message': f'Successfully scraped page 0: {len(tournaments)} tournaments. Total: {total_results} across {total_pages} pages.'
+                }, status=status.HTTP_200_OK)
+            else:
+                return JsonResponse({
+                    'success': False,
+                    'tournaments': [],
+                    'total_results': 0,
+                    'current_count': 0,
+                    'page': 0,
+                    'total_pages': 0,
+                    'has_more_pages': False,
+                    'message': 'Failed to scrape first page'
+                }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+        else:
+            # Handle all remaining pages concurrently
+            result = scrape_fft_all_tournaments_concurrent(
+                sorting_option=sorting_option,
+                start_date=start_date,
+                end_date=end_date,
+                city=city,
+                distance=distance,
+                categories=categories,
+                levels=levels,
+                lat=lat,
+                lng=lng,
+                ages=ages,
+                tournament_types=tournament_types,
+                national_cup=national_cup,
+                max_workers=max_workers
+            )
             if result:
                 return JsonResponse({
                     'success': True,
                     'tournaments': result.get('tournaments', []),
                     'total_results': result.get('total_results', 0),
                     'current_count': result.get('current_count', 0),
                     'page': page,
-                    'message': f'Successfully scraped {len(result.get("tournaments", []))} tournaments'
+                    'pages_scraped': result.get('pages_scraped', 0),
+                    'message': f'Successfully scraped {result.get("pages_scraped", 0)} remaining pages concurrently: {len(result.get("tournaments", []))} tournaments'
                 }, status=status.HTTP_200_OK)
             else:
                 return JsonResponse({
@@ -744,7 +794,8 @@ def get_fft_all_tournaments(request):
                     'tournaments': [],
                     'total_results': 0,
                     'current_count': 0,
-                    'message': 'Failed to scrape all tournaments'
+                    'pages_scraped': 0,
+                    'message': 'Failed to scrape remaining pages'
                 }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
     except Exception as e:
@@ -759,7 +810,7 @@ def get_fft_all_tournaments(request):
 @permission_classes([])
 def get_fft_federal_clubs(request):
     """
-    API endpoint to get federal clubs
+    API endpoint to get federal clubs with filters
     """
     try:
         if request.method == 'POST':
@@ -767,12 +818,14 @@ def get_fft_federal_clubs(request):
         else:
             data = request.GET

         # Extract parameters - matching the Swift query parameters
+        country = data.get('country', 'fr')
         city = data.get('city', '')
         radius = float(data.get('radius', 15))
-        latitude = data.get('latitude')
-        longitude = data.get('longitude')
+        latitude = data.get('lat')
+        longitude = data.get('lng')

+        # Convert latitude and longitude to float if provided
         if latitude:
             latitude = float(latitude)
         if longitude:
@@ -781,110 +834,28 @@ def get_fft_federal_clubs(request):
         result = scrape_federal_clubs(
             country=country,
             city=city,
-            radius=radius,
             latitude=latitude,
-            longitude=longitude
+            longitude=longitude,
+            radius=radius
         )
         if result:
-            return JsonResponse({
-                'success': True,
-                'clubs': result,
-                'message': 'Federal clubs retrieved successfully'
-            }, status=status.HTTP_200_OK)
+            # Return the result directly as JSON (already in correct format)
+            return JsonResponse(result, status=status.HTTP_200_OK)
         else:
+            # Return error in expected format
             return JsonResponse({
-                'success': False,
-                'clubs': [],
-                'message': 'Failed to retrieve federal clubs'
+                "typeRecherche": "clubs",
+                "nombreResultat": 0,
+                "club_markers": []
             }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
     except Exception as e:
         logger.error(f"Error in get_fft_federal_clubs endpoint: {e}")
         return JsonResponse({
-            'success': False,
-            'clubs': [],
-            'message': f'Unexpected error: {str(e)}'
+            "typeRecherche": "clubs",
+            "nombreResultat": 0,
+            "club_markers": []
         }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-@api_view(['GET', 'POST'])
-@permission_classes([])
-def get_fft_club_tournaments_with_umpire_data(request):
-    """
-    Combined endpoint that gets club tournaments and enriches them with umpire data
-    This matches the complete workflow from your Swift code
-    """
-    try:
-        if request.method == 'POST':
-            data = request.data
-        else:
-            data = request.GET
-
-        club_code = data.get('club_code', '62130180')
-        club_name = data.get('club_name', 'TENNIS SPORTING CLUB DE CASSIS')
-        start_date = data.get('start_date')
-        end_date = data.get('end_date')
-        include_umpire_data = data.get('include_umpire_data', 'false').lower() == 'true'
-
-        # Get all tournaments for the club
-        result = scrape_fft_club_tournaments_all_pages(
-            club_code=club_code,
-            club_name=club_name,
-            start_date=start_date,
-            end_date=end_date
-        )
-        if not result:
-            return JsonResponse({
-                'success': False,
-                'tournaments': [],
-                'message': 'Failed to scrape club tournaments'
-            }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
-
-        tournaments = result.get('tournaments', [])
-
-        # Enrich with umpire data if requested
-        if include_umpire_data:
-            logger.info(f"Enriching {len(tournaments)} tournaments with umpire data...")
-            for tournament in tournaments:
-                try:
-                    tournament_id = tournament.get('id')
-                    if tournament_id:
-                        name, email, phone = get_umpire_data(tournament_id)
-                        tournament['japPhoneNumber'] = phone
-                        # Also update jugeArbitre if we got more data
-                        if name and not tournament.get('jugeArbitre'):
-                            tournament['jugeArbitre'] = {
-                                'nom': name.split(' ')[-1] if name else None,
-                                'prenom': ' '.join(name.split(' ')[:-1]) if name and ' ' in name else name
-                            }
-                        # Small delay to avoid rate limiting
-                        time.sleep(0.5)
-                except Exception as e:
-                    logger.warning(f"Failed to get umpire data for tournament {tournament_id}: {e}")
-                    continue
-
-        return JsonResponse({
-            'success': True,
-            'tournaments': tournaments,
-            'total_results': result.get('total_results', 0),
-            'current_count': len(tournaments),
-            'pages_scraped': result.get('pages_scraped', 1),
-            'umpire_data_included': include_umpire_data,
-            'message': f'Successfully scraped {len(tournaments)} tournaments' +
-                       (' with umpire data' if include_umpire_data else '')
-        }, status=status.HTTP_200_OK)
-    except Exception as e:
-        logger.error(f"Error in get_fft_club_tournaments_with_umpire_data endpoint: {e}")
-        return JsonResponse({
-            'success': False,
-            'tournaments': [],
-            'message': f'Unexpected error: {str(e)}'
-        }, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
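A hedged sketch of the client flow the smart pagination implies (host and filter values invented): request page 0 for the first batch plus `total_pages`/`has_more_pages`, then a single page=1 request to let the server collect every remaining page concurrently:

    import requests

    BASE = 'https://example.com/api'  # hypothetical host
    params = {'city': 'Marseille', 'distance': 15}

    first = requests.get(f'{BASE}/fft/all-tournaments/',
                         params={**params, 'page': 0}).json()
    tournaments = first['tournaments']
    if first.get('has_more_pages'):
        rest = requests.get(f'{BASE}/fft/all-tournaments/',
                            params={**params, 'page': 1}).json()
        tournaments += rest['tournaments']
    print(len(tournaments), 'of', first['total_results'])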

@@ -307,12 +307,22 @@ class Match(TournamentSubModel):
         return teams

     def local_start_date(self):
-        timezone = self.get_tournament().timezone()
+        tournament = self.get_tournament()
+        if tournament is None:
+            return self.start_date
         if self.start_date is not None:
+            timezone = tournament.timezone()
             return self.start_date.astimezone(timezone)
         return None

     def local_planned_start_date(self):
-        timezone = self.get_tournament().timezone()
+        tournament = self.get_tournament()
+        if tournament is None:
+            return self.start_date
         if self.planned_start_date is not None:
+            timezone = tournament.timezone()
             return self.planned_start_date.astimezone(timezone)
         return None

     def formatted_start_date(self):
         if self.start_date:
@@ -335,10 +345,14 @@ class Match(TournamentSubModel):
             return 'À suivre'
         else:
             # timezoned_datetime = timezone.localtime(self.start_date)
-            timezone = self.get_tournament().timezone()
-            local_start = self.start_date.astimezone(timezone)
+            tournament = self.get_tournament()
+            day_duration = 3
+            if tournament:
+                day_duration = tournament.day_duration
+            local_start = self.local_start_date()
             time_format = 'l H:i'
-            if self.get_tournament().day_duration >= 7:
+            if day_duration >= 7:
                 time_format = 'D. d F à H:i'
             if self.confirmed:
                 return formats.date_format(local_start, format=time_format)
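Reviewer note: in `local_planned_start_date`, the `tournament is None` guard returns `self.start_date`; `self.planned_start_date` was presumably intended. The guard itself avoids calling `.timezone()` on None for matches without a tournament; a small zoneinfo illustration of the conversion it protects (sample values):

    from datetime import datetime, timezone as tz
    from zoneinfo import ZoneInfo

    start = datetime(2025, 3, 1, 13, 0, tzinfo=tz.utc)
    print(start.astimezone(ZoneInfo('Europe/Paris')))  # 2025-03-01 14:00:00+01:00
    # With no tournament attached, the methods now return the raw value instead
    # of raising AttributeError.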

@@ -1953,21 +1953,26 @@ class Tournament(BaseModel):
         - days: List of unique days found (datetime.date objects)
         - match_groups: Dictionary of match groups by date and hour or just for the selected day
         """
+        event = self.event
+        tournaments_count = 1
+        if event:
+            tournaments_count = event.tournaments.count()
-        if event_mode is True and self.event.tournaments.count() == 1:
+        if event_mode is True and tournaments_count == 1:
             event_mode = False
+        show_teams_in_prog = False
-        if self.event.tournaments.count() == 1:
+        if tournaments_count == 1:
             show_teams_in_prog = self.show_teams_in_prog
-        else:
-            show_teams_in_prog = self.event.tournaments.filter(show_teams_in_prog=True).first() is not None
+        elif event:
+            show_teams_in_prog = event.tournaments.filter(show_teams_in_prog=True).first() is not None

         # Get all matches from rounds and group stages - use a set to avoid duplicates
         all_matches = set()
         tournaments = [self]
-        if event_mode is True:
-            tournaments = self.event.tournaments.all()
+        if event_mode is True and event:
+            tournaments = event.tournaments.all()

         # Check if all tournaments have started - if so, always show teams
         all_started = True
@@ -2016,6 +2021,44 @@ class Tournament(BaseModel):
         sorted_days = sorted(list(days))

         # Create match groups for the selected day
         match_groups = []
+        hide_teams = show_teams_in_prog == False
+
+        # When broadcast=True, handle all days with matches
+        if broadcast:
+            today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0).date()
+            sorted_days = [day for day in sorted(list(days)) if day >= today]
+            # Process all days with matches
+            for selected_day in sorted_days:
+                # Group matches by hour
+                matches_by_hour = {}
+                for match in matches_by_day[selected_day]:
+                    local_time = timezone.localtime(match.planned_start_date)
+                    hour_key = local_time.strftime('%H:%M')
+                    if hour_key not in matches_by_hour:
+                        matches_by_hour[hour_key] = []
+                    matches_by_hour[hour_key].append(match)
+                # Create match groups for each hour
+                for hour, matches in sorted(matches_by_hour.items()):
+                    # Sort matches by court if available
+                    matches.sort(key=lambda m: (m.court_index if m.court_index is not None else 999))
+                    local_date = matches[0].local_planned_start_date()
+                    formatted_name = formats.date_format(local_date, format='l j F à H:i').capitalize()
+                    mg = self.create_match_group(
+                        name=formatted_name,
+                        matches=matches,
+                        round_id=None,
+                        round_index=None,
+                        hide_teams=hide_teams,
+                        event_mode=event_mode,
+                        broadcast=broadcast
+                    )
+                    match_groups.append(mg)
+            return sorted_days, match_groups

         if all or day is None:
             today = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0).date()
@@ -2039,7 +2082,6 @@ class Tournament(BaseModel):
                     matches_by_hour[hour_key].append(match)

-            hide_teams = show_teams_in_prog == False
             # Create match groups for each hour
             for hour, matches in sorted(matches_by_hour.items()):
                 # Sort matches by court if available
@@ -2084,7 +2126,6 @@ class Tournament(BaseModel):
                     matches_by_hour[hour_key].append(match)

-            hide_teams = show_teams_in_prog == False
             # Create match groups for each hour
             for hour, matches in sorted(matches_by_hour.items()):
                 # Sort matches by court if available
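The broadcast branch buckets each remaining day's matches by local HH:MM and emits one group per timeslot; a standalone model of that bucketing (times invented):

    from collections import defaultdict
    from datetime import datetime

    planned = [datetime(2025, 3, 1, 9, 0), datetime(2025, 3, 1, 9, 0),
               datetime(2025, 3, 1, 10, 30)]
    matches_by_hour = defaultdict(list)
    for dt in planned:
        matches_by_hour[dt.strftime('%H:%M')].append(dt)
    print(sorted(matches_by_hour))  # ['09:00', '10:30']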

@@ -194,6 +194,15 @@
             // Rearrange groups in vertical order
             if (columns === 2) {
+                if (this.courtCount < 5 && pageGroups.length < groupsPerPageThreshold / 2) {
+                    const pageLength = pageGroups.length;
+                    const rearrangedGroups = new Array(pageGroups.length);
+                    for (let col = 0; col < pageLength; col++) {
+                        rearrangedGroups[col * 2] = pageGroups[col]; // First column: indices 0, 2, 4, etc.
+                    }
+                    paginatedGroups.push(rearrangedGroups);
+                } else {
                     const rearrangedGroups = [];
                     const halfPageLength = Math.ceil(pageGroups.length / 2);
@@ -205,6 +214,7 @@
                     }
                 }
                 paginatedGroups.push(rearrangedGroups);
+                }
             } else {
                 paginatedGroups.push(pageGroups);
             }
@@ -217,7 +227,11 @@
             return courtIndex;
         },
-        organizeMatchesByCourt(matches) {
+        organizeMatchesByCourt(group) {
+            if (!group || !group.matches) {
+                return Array(this.courtCount).fill(null);
+            }
+            const matches = group.matches;
             const courtMatches = Array(this.courtCount).fill(null);
             if (matches && matches.length > 0) {
                 matches.forEach(match => {
@@ -326,11 +340,11 @@
 <div class="cell" :class="{'large-12': courtCount >= 5, 'large-6': courtCount < 5}">
     <div style="display: flex; margin-bottom: 10px;">
         <div class="bubble-timeslot" style="align-items: center; justify-content: center; margin-right: 10px; width: 6vw;">
-            <h1 class="timeslot" x-text="group.name.slice(-5)"></h1>
+            <h1 class="timeslot" x-text="group && group.name ? group.name.slice(-5) : ''"></h1>
         </div>
         <div class="matches-row">
-            <template x-for="(match, courtIndex) in organizeMatchesByCourt(group.matches)" :key="courtIndex">
+            <template x-for="(match, courtIndex) in organizeMatchesByCourt(group)" :key="courtIndex">
                 <div class="match-cell" :style="{'width': calculateFractionWidth()}">
                     <template x-if="match">
                         <div class="bubble" :class="{'running': !match.ended && match.started, 'even': courtIndex % 2 === 1, 'ended': match.ended}" style="text-align: center;">
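The new early branch in the template script renders a short page as a single visual column by placing every group at an even index, leaving the odd second-column slots empty; a quick Python model of that index mapping (sample data):

    page_groups = ['09:00', '10:30', '12:00']        # sample timeslot groups
    rearranged = [None] * (2 * len(page_groups) - 1)
    for col, group in enumerate(page_groups):
        rearranged[col * 2] = group                  # first column: 0, 2, 4, ...
    print(rearranged)  # ['09:00', None, '10:30', None, '12:00']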
