import io
import os
import random
import re
import sys
import threading
import time
import warnings
import zlib
from abc import ABC, abstractmethod
from contextlib import contextmanager
from datetime import datetime, timezone
from functools import wraps, partial

import sentry_sdk
from sentry_sdk.utils import (
    ContextVar,
    now,
    nanosecond_time,
    to_timestamp,
    serialize_frame,
    json_dumps,
)
from sentry_sdk.envelope import Envelope, Item
from sentry_sdk.tracing import TransactionSource

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from typing import Any
    from typing import Callable
    from typing import Dict
    from typing import Generator
    from typing import Iterable
    from typing import List
    from typing import Optional
    from typing import Set
    from typing import Tuple
    from typing import Union

    from sentry_sdk._types import BucketKey
    from sentry_sdk._types import DurationUnit
    from sentry_sdk._types import FlushedMetricValue
    from sentry_sdk._types import MeasurementUnit
    from sentry_sdk._types import MetricMetaKey
    from sentry_sdk._types import MetricTagValue
    from sentry_sdk._types import MetricTags
    from sentry_sdk._types import MetricTagsInternal
    from sentry_sdk._types import MetricType
    from sentry_sdk._types import MetricValue

warnings.warn(
    "The sentry_sdk.metrics module is deprecated and will be removed in the next major release. "
    "Sentry will reject all metrics sent after October 7, 2024. "
    "Learn more: https://sentry.zendesk.com/hc/en-us/articles/26369339769883-Upcoming-API-Changes-to-Metrics",
    DeprecationWarning,
    stacklevel=2,
)

_in_metrics = ContextVar("in_metrics", default=False)
_set = set  # set is shadowed below

GOOD_TRANSACTION_SOURCES = frozenset(
    [
        TransactionSource.ROUTE,
        TransactionSource.VIEW,
        TransactionSource.COMPONENT,
        TransactionSource.TASK,
    ]
)

_sanitize_unit = partial(re.compile(r"[^a-zA-Z0-9_]+").sub, "")
_sanitize_metric_key = partial(re.compile(r"[^a-zA-Z0-9_\-.]+").sub, "_")
_sanitize_tag_key = partial(re.compile(r"[^a-zA-Z0-9_\-.\/]+").sub, "")


def _sanitize_tag_value(value):
    # type: (str) -> str
    table = str.maketrans(
        {
            "\n": "\\n",
            "\r": "\\r",
            "\t": "\\t",
            "\\": "\\\\",
            "|": "\\u{7c}",
            ",": "\\u{2c}",
        }
    )
    return value.translate(table)
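
# Example (illustrative): tag values keep their content, but characters that
# would break the statsd line protocol are escaped. Commas and pipes become
# "\u{2c}" / "\u{7c}" escapes; newlines, carriage returns, tabs and
# backslashes are backslash-escaped:
#
#     _sanitize_tag_value("foo,bar|baz\n") == "foo\\u{2c}bar\\u{7c}baz\\n"
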
def get_code_location(stacklevel):
    # type: (int) -> Optional[Dict[str, Any]]
    try:
        frm = sys._getframe(stacklevel)
    except Exception:
        return None

    return serialize_frame(
        frm, include_local_variables=False, include_source_context=True
    )


@contextmanager
def recursion_protection():
    # type: () -> Generator[bool, None, None]
    """Enters recursion protection and returns the old flag."""
    old_in_metrics = _in_metrics.get()
    _in_metrics.set(True)
    try:
        yield old_in_metrics
    finally:
        _in_metrics.set(old_in_metrics)


def metrics_noop(func):
    # type: (Any) -> Any
    """Convenient decorator that uses `recursion_protection` to
    make a function a noop.
    """

    @wraps(func)
    def new_func(*args, **kwargs):
        # type: (*Any, **Any) -> Any
        with recursion_protection() as in_metrics:
            if not in_metrics:
                return func(*args, **kwargs)

    return new_func
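
# Example (illustrative): a function decorated with `metrics_noop` silently
# returns None instead of running when metrics code is already on the stack,
# which keeps the SDK from recursing into itself (e.g. when emitting a metric
# would itself cause another metric to be emitted):
#
#     @metrics_noop
#     def record_something():
#         ...  # runs normally from user code; becomes a no-op when re-entered
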
class Metric(ABC):
    __slots__ = ()

    @abstractmethod
    def __init__(self, first):
        # type: (MetricValue) -> None
        pass

    @property
    @abstractmethod
    def weight(self):
        # type: () -> int
        pass

    @abstractmethod
    def add(self, value):
        # type: (MetricValue) -> None
        pass

    @abstractmethod
    def serialize_value(self):
        # type: () -> Iterable[FlushedMetricValue]
        pass


class CounterMetric(Metric):
    __slots__ = ("value",)

    def __init__(
        self, first  # type: MetricValue
    ):
        # type: (...) -> None
        self.value = float(first)

    @property
    def weight(self):
        # type: (...) -> int
        return 1

    def add(
        self, value  # type: MetricValue
    ):
        # type: (...) -> None
        self.value += float(value)

    def serialize_value(self):
        # type: (...) -> Iterable[FlushedMetricValue]
        return (self.value,)


class GaugeMetric(Metric):
    __slots__ = (
        "last",
        "min",
        "max",
        "sum",
        "count",
    )

    def __init__(
        self, first  # type: MetricValue
    ):
        # type: (...) -> None
        first = float(first)
        self.last = first
        self.min = first
        self.max = first
        self.sum = first
        self.count = 1

    @property
    def weight(self):
        # type: (...) -> int
        # Number of elements.
        return 5

    def add(
        self, value  # type: MetricValue
    ):
        # type: (...) -> None
        value = float(value)
        self.last = value
        self.min = min(self.min, value)
        self.max = max(self.max, value)
        self.sum += value
        self.count += 1

    def serialize_value(self):
        # type: (...) -> Iterable[FlushedMetricValue]
        return (
            self.last,
            self.min,
            self.max,
            self.sum,
            self.count,
        )

class DistributionMetric(Metric):
    __slots__ = ("value",)

    def __init__(
        self, first  # type: MetricValue
    ):
        # type: (...) -> None
        self.value = [float(first)]

    @property
    def weight(self):
        # type: (...) -> int
        return len(self.value)

    def add(
        self, value  # type: MetricValue
    ):
        # type: (...) -> None
        self.value.append(float(value))

    def serialize_value(self):
        # type: (...) -> Iterable[FlushedMetricValue]
        return self.value

class SetMetric(Metric):
    __slots__ = ("value",)

    def __init__(
        self, first  # type: MetricValue
    ):
        # type: (...) -> None
        self.value = {first}

    @property
    def weight(self):
        # type: (...) -> int
        return len(self.value)

    def add(
        self, value  # type: MetricValue
    ):
        # type: (...) -> None
        self.value.add(value)

    def serialize_value(self):
        # type: (...) -> Iterable[FlushedMetricValue]
        def _hash(x):
            # type: (MetricValue) -> int
            if isinstance(x, str):
                return zlib.crc32(x.encode("utf-8")) & 0xFFFFFFFF
            return int(x)

        return (_hash(value) for value in self.value)
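
# Example (illustrative): sets store the raw values but serialize only stable
# 32-bit hashes, so distinct members can be counted without shipping string
# contents:
#
#     m = SetMetric("jane")
#     m.add("joe")
#     m.add(42)
#     list(m.serialize_value())
#     # -> 42 plus the crc32 hashes of "jane" and "joe" (order unspecified)
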
def _encode_metrics(flushable_buckets):
    # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]]) -> bytes
    out = io.BytesIO()
    _write = out.write

    # Note on sanitization: we intentionally sanitize in emission (serialization)
    # and not during aggregation for performance reasons. This means that the
    # envelope can in fact have duplicate buckets stored. This is acceptable for
    # relay side emission and should not happen commonly.

    for timestamp, buckets in flushable_buckets:
        for bucket_key, metric in buckets.items():
            metric_type, metric_name, metric_unit, metric_tags = bucket_key
            metric_name = _sanitize_metric_key(metric_name)
            metric_unit = _sanitize_unit(metric_unit)
            _write(metric_name.encode("utf-8"))
            _write(b"@")
            _write(metric_unit.encode("utf-8"))

            for serialized_value in metric.serialize_value():
                _write(b":")
                _write(str(serialized_value).encode("utf-8"))

            _write(b"|")
            _write(metric_type.encode("ascii"))

            if metric_tags:
                _write(b"|#")
                first = True
                for tag_key, tag_value in metric_tags:
                    tag_key = _sanitize_tag_key(tag_key)
                    if not tag_key:
                        continue
                    if first:
                        first = False
                    else:
                        _write(b",")
                    _write(tag_key.encode("utf-8"))
                    _write(b":")
                    _write(_sanitize_tag_value(tag_value).encode("utf-8"))

            _write(b"|T")
            _write(str(timestamp).encode("ascii"))
            _write(b"\n")

    return out.getvalue()
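
# Example (illustrative): each flushed bucket becomes one statsd-style line.
# A counter "button_click" with value 3.0, no unit, one tag and a bucket
# timestamp of 1615889440 would serialize as:
#
#     button_click@none:3.0|c|#browser:Chrome|T1615889440
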
def _encode_locations(timestamp, code_locations):
    # type: (int, Iterable[Tuple[MetricMetaKey, Dict[str, Any]]]) -> bytes
    mapping = {}  # type: Dict[str, List[Any]]

    for key, loc in code_locations:
        metric_type, name, unit = key
        mri = "{}:{}@{}".format(
            metric_type, _sanitize_metric_key(name), _sanitize_unit(unit)
        )
        loc["type"] = "location"
        mapping.setdefault(mri, []).append(loc)

    return json_dumps({"timestamp": timestamp, "mapping": mapping})


METRIC_TYPES = {
    "c": CounterMetric,
    "g": GaugeMetric,
    "d": DistributionMetric,
    "s": SetMetric,
}  # type: dict[MetricType, type[Metric]]

# Map from a duration unit to a function returning "now" expressed in that unit.
TIMING_FUNCTIONS = {
    "nanosecond": nanosecond_time,
    "microsecond": lambda: nanosecond_time() / 1000.0,
    "millisecond": lambda: nanosecond_time() / 1000000.0,
    "second": now,
    "minute": lambda: now() / 60.0,
    "hour": lambda: now() / 3600.0,
    "day": lambda: now() / 3600.0 / 24.0,
    "week": lambda: now() / 3600.0 / 24.0 / 7.0,
}
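
# Example (illustrative): elapsed time in a given unit is computed by simple
# subtraction of two samples, e.g. TIMING_FUNCTIONS["millisecond"]() is
# nanosecond_time() / 1e6 and TIMING_FUNCTIONS["day"]() is now() / 86400.0.
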
class LocalAggregator:
    __slots__ = ("_measurements",)

    def __init__(self):
        # type: (...) -> None
        self._measurements = (
            {}
        )  # type: Dict[Tuple[str, MetricTagsInternal], Tuple[float, float, int, float]]

    def add(
        self,
        ty,  # type: MetricType
        key,  # type: str
        value,  # type: float
        unit,  # type: MeasurementUnit
        tags,  # type: MetricTagsInternal
    ):
        # type: (...) -> None
        export_key = "%s:%s@%s" % (ty, key, unit)
        bucket_key = (export_key, tags)

        old = self._measurements.get(bucket_key)
        if old is not None:
            v_min, v_max, v_count, v_sum = old
            v_min = min(v_min, value)
            v_max = max(v_max, value)
            v_count += 1
            v_sum += value
        else:
            v_min = v_max = v_sum = value
            v_count = 1

        self._measurements[bucket_key] = (v_min, v_max, v_count, v_sum)

    def to_json(self):
        # type: (...) -> Dict[str, Any]
        rv = {}  # type: Any
        for (export_key, tags), (
            v_min,
            v_max,
            v_count,
            v_sum,
        ) in self._measurements.items():
            rv.setdefault(export_key, []).append(
                {
                    "tags": _tags_to_dict(tags),
                    "min": v_min,
                    "max": v_max,
                    "count": v_count,
                    "sum": v_sum,
                }
            )
        return rv
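
# Example (illustrative): `LocalAggregator.to_json` keys measurements by their
# exported MRI and emits one min/max/count/sum summary per tag set, roughly:
#
#     {
#         "d:request.duration@millisecond": [
#             {"tags": {"endpoint": "/home"}, "min": 12.0, "max": 43.0,
#              "count": 2, "sum": 55.0}
#         ]
#     }
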
class MetricsAggregator:
    ROLLUP_IN_SECONDS = 10.0
    MAX_WEIGHT = 100000
    FLUSHER_SLEEP_TIME = 5.0

    def __init__(
        self,
        capture_func,  # type: Callable[[Envelope], None]
        enable_code_locations=False,  # type: bool
    ):
        # type: (...) -> None
        self.buckets = {}  # type: Dict[int, Any]
        self._enable_code_locations = enable_code_locations
        self._seen_locations = _set()  # type: Set[Tuple[int, MetricMetaKey]]
        self._pending_locations = {}  # type: Dict[int, List[Tuple[MetricMetaKey, Any]]]
        self._buckets_total_weight = 0
        self._capture_func = capture_func
        self._running = True
        self._lock = threading.Lock()

        self._flush_event = threading.Event()  # type: threading.Event
        self._force_flush = False

        # The aggregator shifts its flushing by up to an entire rollup window to
        # avoid multiple clients trampling on the end of a 10 second window, as
        # all the buckets are anchored to multiples of ROLLUP seconds. We
        # randomize this number once per aggregator boot to achieve some level
        # of offsetting across a fleet of deployed SDKs. Relay itself will also
        # apply independent jittering.
        self._flush_shift = random.random() * self.ROLLUP_IN_SECONDS

        self._flusher = None  # type: Optional[threading.Thread]
        self._flusher_pid = None  # type: Optional[int]

    def _ensure_thread(self):
        # type: (...) -> bool
        """For forking processes we might need to restart this thread.
        This ensures that our process actually has that thread running.
        """
        if not self._running:
            return False

        pid = os.getpid()
        if self._flusher_pid == pid:
            return True

        with self._lock:
            # Recheck to make sure another thread didn't get here and start
            # the flusher in the meantime.
            if self._flusher_pid == pid:
                return True

            self._flusher_pid = pid

            self._flusher = threading.Thread(target=self._flush_loop)
            self._flusher.daemon = True

            try:
                self._flusher.start()
            except RuntimeError:
                # Unfortunately at this point the interpreter is in a state that no
                # longer allows us to spawn a thread and we have to bail.
                self._running = False
                return False

        return True

    def _flush_loop(self):
        # type: (...) -> None
        _in_metrics.set(True)
        while self._running or self._force_flush:
            if self._running:
                self._flush_event.wait(self.FLUSHER_SLEEP_TIME)
            self._flush()

    def _flush(self):
        # type: (...) -> None
        self._emit(self._flushable_buckets(), self._flushable_locations())

    def _flushable_buckets(self):
        # type: (...) -> (Iterable[Tuple[int, Dict[BucketKey, Metric]]])
        with self._lock:
            force_flush = self._force_flush
            cutoff = time.time() - self.ROLLUP_IN_SECONDS - self._flush_shift
            flushable_buckets = ()  # type: Iterable[Tuple[int, Dict[BucketKey, Metric]]]
            weight_to_remove = 0

            if force_flush:
                flushable_buckets = self.buckets.items()
                self.buckets = {}
                self._buckets_total_weight = 0
                self._force_flush = False
            else:
                flushable_buckets = []
                for buckets_timestamp, buckets in self.buckets.items():
                    # If the timestamp of the bucket is newer than the rollup we want to skip it.
                    if buckets_timestamp <= cutoff:
                        flushable_buckets.append((buckets_timestamp, buckets))

                # We will clear the elements while holding the lock, in order to avoid requesting it downstream again.
                for buckets_timestamp, buckets in flushable_buckets:
                    for metric in buckets.values():
                        weight_to_remove += metric.weight
                    del self.buckets[buckets_timestamp]

                self._buckets_total_weight -= weight_to_remove

        return flushable_buckets
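
    # Example (illustrative): with ROLLUP_IN_SECONDS = 10.0, a metric recorded
    # at time 1615889444.7 lands in the bucket anchored at 1615889440 (see
    # `add` below), and that bucket only becomes flushable once
    # `time.time() - 10.0 - self._flush_shift` has moved past it, unless a
    # force flush drains everything at once.
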
    def _flushable_locations(self):
        # type: (...) -> Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
        with self._lock:
            locations = self._pending_locations
            self._pending_locations = {}
        return locations

    @metrics_noop
    def add(
        self,
        ty,  # type: MetricType
        key,  # type: str
        value,  # type: MetricValue
        unit,  # type: MeasurementUnit
        tags,  # type: Optional[MetricTags]
        timestamp=None,  # type: Optional[Union[float, datetime]]
        local_aggregator=None,  # type: Optional[LocalAggregator]
        stacklevel=0,  # type: Optional[int]
    ):
        # type: (...) -> None
        if not self._ensure_thread() or self._flusher is None:
            return None

        if timestamp is None:
            timestamp = time.time()
        elif isinstance(timestamp, datetime):
            timestamp = to_timestamp(timestamp)

        bucket_timestamp = int(
            (timestamp // self.ROLLUP_IN_SECONDS) * self.ROLLUP_IN_SECONDS
        )
        serialized_tags = _serialize_tags(tags)
        bucket_key = (
            ty,
            key,
            unit,
            serialized_tags,
        )

        with self._lock:
            local_buckets = self.buckets.setdefault(bucket_timestamp, {})
            metric = local_buckets.get(bucket_key)
            if metric is not None:
                previous_weight = metric.weight
                metric.add(value)
            else:
                metric = local_buckets[bucket_key] = METRIC_TYPES[ty](value)
                previous_weight = 0

            added = metric.weight - previous_weight

            if stacklevel is not None:
                self.record_code_location(ty, key, unit, stacklevel + 2, timestamp)

            # Given the new weight we consider whether we want to force flush.
            self._buckets_total_weight += added
            self._consider_force_flush()

        # For sets, we only record that a value has been added to the set but not which one.
        # See develop docs: https://develop.sentry.dev/sdk/metrics/#sets
        if local_aggregator is not None:
            local_value = float(added if ty == "s" else value)
            local_aggregator.add(ty, key, local_value, unit, serialized_tags)

    def record_code_location(
        self,
        ty,  # type: MetricType
        key,  # type: str
        unit,  # type: MeasurementUnit
        stacklevel,  # type: int
        timestamp=None,  # type: Optional[float]
    ):
        # type: (...) -> None
        if not self._enable_code_locations:
            return
        if timestamp is None:
            timestamp = time.time()
        meta_key = (ty, key, unit)
        start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
            hour=0, minute=0, second=0, microsecond=0, tzinfo=None
        )
        start_of_day = int(to_timestamp(start_of_day))

        if (start_of_day, meta_key) not in self._seen_locations:
            self._seen_locations.add((start_of_day, meta_key))
            loc = get_code_location(stacklevel + 3)
            if loc is not None:
                # Group metadata by day to make flushing more efficient.
                # There needs to be one envelope item per timestamp.
                self._pending_locations.setdefault(start_of_day, []).append(
                    (meta_key, loc)
                )

    @metrics_noop
    def need_code_location(
        self,
        ty,  # type: MetricType
        key,  # type: str
        unit,  # type: MeasurementUnit
        timestamp,  # type: float
    ):
        # type: (...) -> bool
        if self._enable_code_locations:
            return False
        meta_key = (ty, key, unit)
        start_of_day = datetime.fromtimestamp(timestamp, timezone.utc).replace(
            hour=0, minute=0, second=0, microsecond=0, tzinfo=None
        )
        start_of_day = int(to_timestamp(start_of_day))
        return (start_of_day, meta_key) not in self._seen_locations

    def kill(self):
        # type: (...) -> None
        if self._flusher is None:
            return

        self._running = False
        self._flush_event.set()
        self._flusher = None

    @metrics_noop
    def flush(self):
        # type: (...) -> None
        self._force_flush = True
        self._flush()

    def _consider_force_flush(self):
        # type: (...) -> None
        # It's important to acquire a lock around this method, since it will touch shared data structures.
        total_weight = len(self.buckets) + self._buckets_total_weight
        if total_weight >= self.MAX_WEIGHT:
            self._force_flush = True
            self._flush_event.set()

    def _emit(
        self,
        flushable_buckets,  # type: (Iterable[Tuple[int, Dict[BucketKey, Metric]]])
        code_locations,  # type: Dict[int, List[Tuple[MetricMetaKey, Dict[str, Any]]]]
    ):
        # type: (...) -> Optional[Envelope]
        envelope = Envelope()

        if flushable_buckets:
            encoded_metrics = _encode_metrics(flushable_buckets)
            envelope.add_item(Item(payload=encoded_metrics, type="statsd"))

        for timestamp, locations in code_locations.items():
            encoded_locations = _encode_locations(timestamp, locations)
            envelope.add_item(Item(payload=encoded_locations, type="metric_meta"))

        if envelope.items:
            self._capture_func(envelope)
            return envelope
        return None

def _serialize_tags(
    tags,  # type: Optional[MetricTags]
):
    # type: (...) -> MetricTagsInternal
    if not tags:
        return ()

    rv = []
    for key, value in tags.items():
        # If the value is a collection, we want to flatten it.
        if isinstance(value, (list, tuple)):
            for inner_value in value:
                if inner_value is not None:
                    rv.append((key, str(inner_value)))
        elif value is not None:
            rv.append((key, str(value)))

    # It's very important to sort the tags in order to obtain the
    # same bucket key.
    return tuple(sorted(rv))
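
# Example (illustrative): tags are flattened into a sorted tuple of string
# pairs so that logically equal tag dicts always map to the same bucket key:
#
#     _serialize_tags({"release": "1.0", "browser": ["Chrome", "Firefox"]})
#     # == (("browser", "Chrome"), ("browser", "Firefox"), ("release", "1.0"))
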
def _tags_to_dict(tags):
    # type: (MetricTagsInternal) -> Dict[str, Any]
    rv = {}  # type: Dict[str, Any]
    for tag_name, tag_value in tags:
        old_value = rv.get(tag_name)
        if old_value is not None:
            if isinstance(old_value, list):
                old_value.append(tag_value)
            else:
                rv[tag_name] = [old_value, tag_value]
        else:
            rv[tag_name] = tag_value
    return rv


def _get_aggregator():
    # type: () -> Optional[MetricsAggregator]
    client = sentry_sdk.get_client()
    return (
        client.metrics_aggregator
        if client.is_active() and client.metrics_aggregator is not None
        else None
    )

def _get_aggregator_and_update_tags(key, value, unit, tags):
    # type: (str, Optional[MetricValue], MeasurementUnit, Optional[MetricTags]) -> Tuple[Optional[MetricsAggregator], Optional[LocalAggregator], Optional[MetricTags]]
    client = sentry_sdk.get_client()
    if not client.is_active() or client.metrics_aggregator is None:
        return None, None, tags

    updated_tags = dict(tags or ())  # type: Dict[str, MetricTagValue]
    updated_tags.setdefault("release", client.options["release"])
    updated_tags.setdefault("environment", client.options["environment"])

    scope = sentry_sdk.get_current_scope()
    local_aggregator = None

    # We go with the low-level API here to access transaction information, as
    # it is the same between errors-only and errors + performance setups.
    transaction_source = scope._transaction_info.get("source")
    if transaction_source in GOOD_TRANSACTION_SOURCES:
        transaction_name = scope._transaction
        if transaction_name:
            updated_tags.setdefault("transaction", transaction_name)
        if scope._span is not None:
            local_aggregator = scope._span._get_local_aggregator()

    experiments = client.options.get("_experiments", {})
    before_emit_callback = experiments.get("before_emit_metric")
    if before_emit_callback is not None:
        with recursion_protection() as in_metrics:
            if not in_metrics:
                if not before_emit_callback(key, value, unit, updated_tags):
                    return None, None, updated_tags

    return client.metrics_aggregator, local_aggregator, updated_tags

def increment(
    key,  # type: str
    value=1.0,  # type: float
    unit="none",  # type: MeasurementUnit
    tags=None,  # type: Optional[MetricTags]
    timestamp=None,  # type: Optional[Union[float, datetime]]
    stacklevel=0,  # type: int
):
    # type: (...) -> None
    """Increments a counter."""
    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
        key, value, unit, tags
    )
    if aggregator is not None:
        aggregator.add(
            "c", key, value, unit, tags, timestamp, local_aggregator, stacklevel
        )


# Alias, as `incr` is relatively common in Python.
incr = increment
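
# Usage sketch (illustrative; assumes an initialized SDK with metrics enabled):
#
#     from sentry_sdk import metrics
#
#     metrics.incr("button_click", 1, tags={"browser": "Firefox"})
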
class _Timing:
    def __init__(
        self,
        key,  # type: str
        tags,  # type: Optional[MetricTags]
        timestamp,  # type: Optional[Union[float, datetime]]
        value,  # type: Optional[float]
        unit,  # type: DurationUnit
        stacklevel,  # type: int
    ):
        # type: (...) -> None
        self.key = key
        self.tags = tags
        self.timestamp = timestamp
        self.value = value
        self.unit = unit
        self.entered = None  # type: Optional[float]
        self._span = None  # type: Optional[sentry_sdk.tracing.Span]
        self.stacklevel = stacklevel

    def _validate_invocation(self, context):
        # type: (str) -> None
        if self.value is not None:
            raise TypeError(
                "cannot use timing as %s when a value is provided" % context
            )

    def __enter__(self):
        # type: (...) -> _Timing
        self.entered = TIMING_FUNCTIONS[self.unit]()
        self._validate_invocation("context-manager")
        self._span = sentry_sdk.start_span(op="metric.timing", name=self.key)
        if self.tags:
            for key, value in self.tags.items():
                if isinstance(value, (tuple, list)):
                    value = ",".join(sorted(map(str, value)))
                self._span.set_tag(key, value)
        self._span.__enter__()

        # Report code locations here for better accuracy.
        aggregator = _get_aggregator()
        if aggregator is not None:
            aggregator.record_code_location("d", self.key, self.unit, self.stacklevel)

        return self

    def __exit__(self, exc_type, exc_value, tb):
        # type: (Any, Any, Any) -> None
        assert self._span, "did not enter"
        aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
            self.key,
            self.value,
            self.unit,
            self.tags,
        )
        if aggregator is not None:
            elapsed = TIMING_FUNCTIONS[self.unit]() - self.entered  # type: ignore
            aggregator.add(
                "d",
                self.key,
                elapsed,
                self.unit,
                tags,
                self.timestamp,
                local_aggregator,
                None,  # code locations are reported in __enter__
            )

        self._span.__exit__(exc_type, exc_value, tb)
        self._span = None

    def __call__(self, f):
        # type: (Any) -> Any
        self._validate_invocation("decorator")

        @wraps(f)
        def timed_func(*args, **kwargs):
            # type: (*Any, **Any) -> Any
            with timing(
                key=self.key,
                tags=self.tags,
                timestamp=self.timestamp,
                unit=self.unit,
                stacklevel=self.stacklevel + 1,
            ):
                return f(*args, **kwargs)

        return timed_func

def timing(
    key,  # type: str
    value=None,  # type: Optional[float]
    unit="second",  # type: DurationUnit
    tags=None,  # type: Optional[MetricTags]
    timestamp=None,  # type: Optional[Union[float, datetime]]
    stacklevel=0,  # type: int
):
    # type: (...) -> _Timing
    """Emits a distribution with the time it takes to run the given code block.

    This method supports three forms of invocation:

    - when a `value` is provided, it functions similar to `distribution` but with
      the default unit of `second`,
    - it can be used as a context manager,
    - it can be used as a decorator.
    """
    if value is not None:
        aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
            key, value, unit, tags
        )
        if aggregator is not None:
            aggregator.add(
                "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
            )
    return _Timing(key, tags, timestamp, value, unit, stacklevel)
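
# Usage sketch (illustrative; assumes an initialized SDK with metrics enabled):
#
#     from sentry_sdk import metrics
#
#     # as a context manager
#     with metrics.timing("process_batch"):
#         process_batch()
#
#     # as a decorator
#     @metrics.timing("process_batch")
#     def process_batch():
#         ...
#
#     # with an explicit, pre-measured value
#     metrics.timing("process_batch", value=17.2, unit="second")
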
def distribution(
    key,  # type: str
    value,  # type: float
    unit="none",  # type: MeasurementUnit
    tags=None,  # type: Optional[MetricTags]
    timestamp=None,  # type: Optional[Union[float, datetime]]
    stacklevel=0,  # type: int
):
    # type: (...) -> None
    """Emits a distribution."""
    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
        key, value, unit, tags
    )
    if aggregator is not None:
        aggregator.add(
            "d", key, value, unit, tags, timestamp, local_aggregator, stacklevel
        )


def set(
    key,  # type: str
    value,  # type: Union[int, str]
    unit="none",  # type: MeasurementUnit
    tags=None,  # type: Optional[MetricTags]
    timestamp=None,  # type: Optional[Union[float, datetime]]
    stacklevel=0,  # type: int
):
    # type: (...) -> None
    """Emits a set."""
    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
        key, value, unit, tags
    )
    if aggregator is not None:
        aggregator.add(
            "s", key, value, unit, tags, timestamp, local_aggregator, stacklevel
        )


def gauge(
    key,  # type: str
    value,  # type: float
    unit="none",  # type: MeasurementUnit
    tags=None,  # type: Optional[MetricTags]
    timestamp=None,  # type: Optional[Union[float, datetime]]
    stacklevel=0,  # type: int
):
    # type: (...) -> None
    """Emits a gauge."""
    aggregator, local_aggregator, tags = _get_aggregator_and_update_tags(
        key, value, unit, tags
    )
    if aggregator is not None:
        aggregator.add(
            "g", key, value, unit, tags, timestamp, local_aggregator, stacklevel
        )
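
# Usage sketch (illustrative; assumes an initialized SDK with metrics enabled):
#
#     from sentry_sdk import metrics
#
#     metrics.distribution("page.load", 15.0, unit="millisecond")
#     metrics.set("user_view", "jane", tags={"page": "/home"})
#     metrics.gauge("queue.depth", 42.0)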