make the retry logic faster
This commit is contained in:
parent f5d7a5ab5b
commit 2dd0626a3a
1 changed file with 50 additions and 1 deletion
@@ -6,7 +6,7 @@ from typing import Any, Awaitable, Callable, Optional, TypeVar
 
 from pydantic import BaseModel
 
-from ..types.errors import NetworkError
+from ..types.errors import NetworkError, AuthenticationError, GhostApiError, ValidationError
 from .logging import get_logger
 
 T = TypeVar("T")
@@ -22,6 +22,44 @@ class RetryConfig(BaseModel):
     jitter: bool = True
+
+
+def _should_retry(exception: Exception) -> bool:
+    """Determine if an exception should trigger a retry.
+
+    Only retry transient network errors, not client errors or authentication issues.
+    """
+    # Retry network errors (connection issues, timeouts)
+    if isinstance(exception, NetworkError):
+        return True
+
+    # Don't retry authentication errors - these need manual intervention
+    if isinstance(exception, AuthenticationError):
+        return False
+
+    # Don't retry validation errors - the request is malformed
+    if isinstance(exception, ValidationError):
+        return False
+
+    # For Ghost API errors, only retry 5xx server errors, not 4xx client errors
+    if isinstance(exception, GhostApiError):
+        # Check if the error context indicates a server error (5xx)
+        if exception.context and "HTTP 5" in exception.context:
+            return True
+        # Check if it's a rate limiting error (429) - should be retried
+        if exception.context and "HTTP 429" in exception.context:
+            return True
+        # All other Ghost API errors (4xx) should not be retried
+        return False
+
+    # For unknown exceptions, be conservative and retry (could be network issues)
+    # but log a warning so we can identify what should/shouldn't be retried
+    logger.warning(
+        "Unknown exception type encountered in retry logic",
+        exception_type=type(exception).__name__,
+        exception=str(exception)
+    )
+    return True
 
 
 async def with_retry(
     operation: Callable[[], Awaitable[T]],
     config: Optional[RetryConfig] = None,
@@ -39,6 +77,17 @@ async def with_retry(
         except Exception as e:
             last_exception = e
+
+            # Check if this exception should trigger a retry
+            if not _should_retry(e):
+                logger.debug(
+                    "Exception not suitable for retry, failing immediately",
+                    attempt=attempt,
+                    exception_type=type(e).__name__,
+                    error=str(e),
+                    request_id=request_id,
+                )
+                break
 
             if attempt == config.max_retries:
                 logger.error(
                     "Operation failed after all retries",
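
At a call site nothing changes syntactically; only the failure mode differs. A hypothetical example follows: fetch_posts and client.get are illustrative stand-ins, and RetryConfig fields other than max_retries and jitter are not visible in this diff.

# Hypothetical usage; names other than with_retry and RetryConfig are stand-ins.
async def fetch_posts(client):
    config = RetryConfig(max_retries=3, jitter=True)
    # NetworkError and GhostApiError with "HTTP 5xx"/"HTTP 429" context still
    # retry up to max_retries; AuthenticationError, ValidationError, and other
    # 4xx GhostApiError responses now raise on the first attempt.
    return await with_retry(lambda: client.get("/posts/"), config=config)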