Compare commits

..

1 Commit

Author SHA1 Message Date
Nathaniel May
d342165f6d add exception level 2021-10-29 15:08:07 -04:00
15 changed files with 72 additions and 633 deletions

View File

@@ -2,12 +2,7 @@ import re
import os.path
from dbt.clients.system import run_cmd, rmdir
from dbt.events.functions import fire_event
from dbt.events.types import (
GitSparseCheckoutSubdirectory, GitProgressCheckoutRevision,
GitProgressUpdatingExistingDependency, GitProgressPullingNewDependency,
GitNothingToDo, GitProgressUpdatedCheckoutRange, GitProgressCheckedOutAt
)
from dbt.logger import GLOBAL_LOGGER as logger
import dbt.exceptions
from packaging import version
@@ -23,7 +18,7 @@ def clone(repo, cwd, dirname=None, remove_git_dir=False, revision=None, subdirec
clone_cmd = ['git', 'clone', '--depth', '1']
if subdirectory:
fire_event(GitSparseCheckoutSubdirectory(subdir=subdirectory))
logger.debug(' Subdirectory specified: {}, using sparse checkout.'.format(subdirectory))
out, _ = run_cmd(cwd, ['git', '--version'], env={'LC_ALL': 'C'})
git_version = version.parse(re.search(r"\d+\.\d+\.\d+", out.decode("utf-8")).group(0))
if not git_version >= version.parse("2.25.0"):
@@ -59,7 +54,7 @@ def list_tags(cwd):
def _checkout(cwd, repo, revision):
fire_event(GitProgressCheckoutRevision(revision=revision))
logger.debug(' Checking out revision {}.'.format(revision))
fetch_cmd = ["git", "fetch", "origin", "--depth", "1"]
@@ -123,7 +118,7 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
start_sha = None
if exists:
directory = exists.group(1)
fire_event(GitProgressUpdatingExistingDependency(dir=directory))
logger.debug('Updating existing dependency {}.', directory)
else:
matches = re.match("Cloning into '(.+)'", err.decode('utf-8'))
if matches is None:
@@ -131,18 +126,17 @@ def clone_and_checkout(repo, cwd, dirname=None, remove_git_dir=False,
f'Error cloning {repo} - never saw "Cloning into ..." from git'
)
directory = matches.group(1)
fire_event(GitProgressPullingNewDependency(dir=directory))
logger.debug('Pulling new dependency {}.', directory)
full_path = os.path.join(cwd, directory)
start_sha = get_current_sha(full_path)
checkout(full_path, repo, revision)
end_sha = get_current_sha(full_path)
if exists:
if start_sha == end_sha:
fire_event(GitNothingToDo(sha=start_sha[:7]))
logger.debug(' Already at {}, nothing to do.', start_sha[:7])
else:
fire_event(GitProgressUpdatedCheckoutRange(
start_sha=start_sha[:7], end_sha=end_sha[:7]
))
logger.debug(' Updated checkout from {} to {}.',
start_sha[:7], end_sha[:7])
else:
fire_event(GitProgressCheckedOutAt(end_sha=end_sha[:7]))
logger.debug(' Checked out at {}.', end_sha[:7])
return os.path.join(directory, subdirectory or '')

View File

@@ -33,6 +33,7 @@ from dbt.exceptions import (
UndefinedMacroException
)
from dbt import flags
from dbt.logger import GLOBAL_LOGGER as logger # noqa
def _linecache_inject(source, write):

View File

@@ -1,11 +1,7 @@
import functools
import requests
from dbt.events.functions import fire_event
from dbt.events.types import (
RegistryProgressMakingGETRequest,
RegistryProgressGETResponse
)
from dbt.utils import memoized, _connection_exception_retry as connection_exception_retry
from dbt.logger import GLOBAL_LOGGER as logger
from dbt import deprecations
import os
@@ -29,9 +25,10 @@ def _get_with_retries(path, registry_base_url=None):
def _get(path, registry_base_url=None):
url = _get_url(path, registry_base_url)
fire_event(RegistryProgressMakingGETRequest(url=url))
logger.debug('Making package registry request: GET {}'.format(url))
resp = requests.get(url, timeout=30)
fire_event(RegistryProgressGETResponse(url=url, resp_code=resp.status_code))
logger.debug('Response from registry: GET {} {}'.format(url,
resp.status_code))
resp.raise_for_status()
return resp.json()

View File

@@ -15,12 +15,8 @@ from typing import (
Type, NoReturn, List, Optional, Dict, Any, Tuple, Callable, Union
)
from dbt.events.functions import fire_event
from dbt.events.types import (
SystemErrorRetrievingModTime, SystemCouldNotWrite, SystemExecutingCmd, SystemStdOutMsg,
SystemStdErrMsg, SystemReportReturnCode
)
import dbt.exceptions
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.utils import _connection_exception_retry as connection_exception_retry
if sys.platform == 'win32':
@@ -69,7 +65,9 @@ def find_matching(
try:
modification_time = os.path.getmtime(absolute_path)
except OSError:
fire_event(SystemErrorRetrievingModTime(path=absolute_path))
logger.exception(
f"Error retrieving modification time for file {absolute_path}"
)
if reobj.match(local_file):
matching.append({
'searched_path': relative_path_to_search,
@@ -163,7 +161,10 @@ def write_file(path: str, contents: str = '') -> bool:
reason = 'Path was possibly too long'
# all our hard work and the path was still too long. Log and
# continue.
fire_event(SystemCouldNotWrite(path=path, reason=reason, exc=exc))
logger.debug(
f'Could not write to path {path}({len(path)} characters): '
f'{reason}\nexception: {exc}'
)
else:
raise
return True
@@ -411,7 +412,7 @@ def _interpret_oserror(exc: OSError, cwd: str, cmd: List[str]) -> NoReturn:
def run_cmd(
cwd: str, cmd: List[str], env: Optional[Dict[str, Any]] = None
) -> Tuple[bytes, bytes]:
fire_event(SystemExecutingCmd(cmd=cmd))
logger.debug('Executing "{}"'.format(' '.join(cmd)))
if len(cmd) == 0:
raise dbt.exceptions.CommandError(cwd, cmd)
@@ -437,11 +438,11 @@ def run_cmd(
except OSError as exc:
_interpret_oserror(exc, cwd, cmd)
fire_event(SystemStdOutMsg(bmsg=out))
fire_event(SystemStdErrMsg(bmsg=err))
logger.debug('STDOUT: "{!s}"'.format(out))
logger.debug('STDERR: "{!s}"'.format(err))
if proc.returncode != 0:
fire_event(SystemReportReturnCode(code=proc.returncode))
logger.debug('command return code={}'.format(proc.returncode))
raise dbt.exceptions.CommandResultError(cwd, cmd, proc.returncode,
out, err)

View File

@@ -7,16 +7,3 @@ The event module provides types that represent what is happening in dbt in `even
# Adding a New Event
In `events.types`, add a new class that represents the new event. This may be a simple class with no values, or a dataclass carrying the values needed to construct downstream messaging. Include only the data necessary to construct this message within the class. You must extend all destinations (e.g., if your log message belongs on the CLI, extend `CliEventABC`) as well as the log level this event belongs to.
# Adapter Maintainers
To integrate existing log messages from adapters, you likely have a line of code like this in your adapter already:
```python
from dbt.logger import GLOBAL_LOGGER as logger
```
Simply change it to these two lines with your adapter's database name, and all your existing call sites will now use the new system for v1.0:
```python
from dbt.events import AdapterLogger
logger = AdapterLogger("<database name>")
# e.g. AdapterLogger("Snowflake")
```

View File

@@ -1 +0,0 @@
from .adapter_endpoint import AdapterLogger # noqa: F401

View File

@@ -1,86 +0,0 @@
from dataclasses import dataclass
from dbt.events.functions import fire_event
from dbt.events.types import (
AdapterEventDebug, AdapterEventInfo, AdapterEventWarning, AdapterEventError
)
from typing import Any
@dataclass
class AdapterLogger():
    """logging.Logger-compatible shim for adapter plugins.

    Adapter maintainers construct one with their database name (e.g.
    ``AdapterLogger("Snowflake")``) and call it like a stdlib logger;
    each call builds the matching Adapter event and hands it to
    ``fire_event``.
    """
    # adapter/database name prefixed onto every message downstream
    name: str

    def _fire(
        self,
        event_cls,
        msg: str,
        exc_info: Any,
        stack_info: Any,
        extra: Any,
    ) -> None:
        # Shared path for every level: build the event, attach the
        # optional exception context, and send it to the event bus.
        event = event_cls(name=self.name, raw_msg=msg)
        event.exc_info = exc_info
        event.stack_info = stack_info
        event.extra = extra
        fire_event(event)

    def debug(
        self,
        msg: str,
        exc_info: Any = None,
        stack_info: Any = None,
        extra: Any = None
    ):
        self._fire(AdapterEventDebug, msg, exc_info, stack_info, extra)

    def info(
        self,
        msg: str,
        exc_info: Any = None,
        stack_info: Any = None,
        extra: Any = None
    ):
        self._fire(AdapterEventInfo, msg, exc_info, stack_info, extra)

    def warning(
        self,
        msg: str,
        exc_info: Any = None,
        stack_info: Any = None,
        extra: Any = None
    ):
        self._fire(AdapterEventWarning, msg, exc_info, stack_info, extra)

    def error(
        self,
        msg: str,
        exc_info: Any = None,
        stack_info: Any = None,
        extra: Any = None
    ):
        self._fire(AdapterEventError, msg, exc_info, stack_info, extra)

    def exception(
        self,
        msg: str,
        exc_info: Any = True,  # this default is what makes this method different
        stack_info: Any = None,
        extra: Any = None
    ):
        # Mirrors logging.Logger.exception: same as error() but with
        # exc_info defaulting to True so the active traceback is attached.
        self._fire(AdapterEventError, msg, exc_info, stack_info, extra)

View File

@@ -1,162 +1,7 @@
import dbt.logger as logger # type: ignore # TODO eventually remove dependency on this logger
from dbt.events.history import EVENT_HISTORY
from dbt.events.types import CliEventABC, Event, ShowException
import dbt.logger as logger # TODO remove references to this logger
import dbt.flags as flags
import logging.config
import logging
import os
import structlog
import sys
# these two loggers be set up with CLI inputs via setup_event_logger
# DO NOT IMPORT AND USE THESE DIRECTLY
global STDOUT_LOGGER
STDOUT_LOGGER = structlog.get_logger()
global FILE_LOGGER
FILE_LOGGER = structlog.get_logger()
def setup_event_logger(log_path):
logger.make_log_dir_if_missing(log_path)
json: bool = flags.LOG_FORMAT == 'json'
# USE_COLORS can be None if the app just started and the cli flags
# havent been applied yet
colors: bool = True if flags.USE_COLORS else False
# TODO this default should live somewhere better
log_dest = os.path.join(logger.LOG_DIR, 'dbt.log')
# see: https://docs.python.org/3/library/logging.config.html#logging-config-dictschema
# logging.config.dictConfig({
# "version": 1,
# "disable_existing_loggers": False,
# "formatters": {
# "plain": {
# "()": structlog.stdlib.ProcessorFormatter,
# "processor": structlog.dev.ConsoleRenderer(colors=False),
# "foreign_pre_chain": pre_chain,
# },
# "colored": {
# "()": structlog.stdlib.ProcessorFormatter,
# "processor": structlog.dev.ConsoleRenderer(colors=True),
# "foreign_pre_chain": pre_chain,
# },
# "json": {
# "()": structlog.stdlib.ProcessorFormatter,
# "processor": structlog.processors.JSONRenderer(),
# "foreign_pre_chain": pre_chain,
# },
# },
# "handlers": {
# "console": {
# "level": "DEBUG",
# "class": "logging.StreamHandler",
# "formatter": "colored",
# },
# "file": {
# "level": "DEBUG",
# "class": "logging.handlers.WatchedFileHandler",
# # TODO this default should live somewhere better
# "filename": os.path.join(logger.LOG_DIR, 'dbt.log'),
# "formatter": "plain",
# },
# "json-console": {
# "level": "DEBUG",
# "class": "logging.StreamHandler",
# "formatter": "json",
# },
# "json-file": {
# "level": "DEBUG",
# "class": "logging.handlers.WatchedFileHandler",
# # TODO this default should live somewhere better
# "filename": os.path.join(logger.LOG_DIR, 'dbt.log.json'),
# "formatter": "json",
# },
# },
# "loggers": {
# "": {
# "handlers": ["json-console", "json-file"] if json else ["console", "file"],
# "level": "DEBUG" if flags.DEBUG else "INFO",
# "propagate": True,
# },
# }
# })
# set-up global logging configurations
structlog.configure(
wrapper_class=structlog.stdlib.BoundLogger,
logger_factory=structlog.stdlib.LoggerFactory(),
cache_logger_on_first_use=False,
)
# configure the stdout logger
STDOUT_LOGGER = structlog.wrap_logger(
logger=logging.Logger('console logger'),
processors=[
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.TimeStamper("%H:%M:%S"),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
)
formatter = structlog.stdlib.ProcessorFormatter(
processor=structlog.dev.ConsoleRenderer(colors=colors),
)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
STDOUT_LOGGER.addHandler(handler)
# configure the json file handler
if json:
FILE_LOGGER = structlog.wrap_logger(
logger=logging.Logger('json file logger'),
processors=[
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.TimeStamper(fmt="iso"),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.processors.UnicodeDecoder(),
structlog.processors.JSONRenderer()
]
)
formatter = structlog.stdlib.ProcessorFormatter(
processor=structlog.processors.JSONRenderer(),
)
handler = logging.handlers.WatchedFileHandler(filename=log_dest)
handler.setFormatter(formatter)
FILE_LOGGER.addHandler(handler)
# configure the plaintext file handler
else:
# TODO follow pattern from above ^^
FILE_LOGGER = structlog.wrap_logger(
logger=logging.Logger('plaintext file logger'),
processors=[
structlog.stdlib.filter_by_level,
structlog.stdlib.add_log_level,
structlog.stdlib.PositionalArgumentsFormatter(),
structlog.processors.TimeStamper("%H:%M:%S"),
structlog.processors.StackInfoRenderer(),
structlog.processors.format_exc_info,
structlog.stdlib.ProcessorFormatter.wrap_for_formatter,
]
)
formatter = structlog.stdlib.ProcessorFormatter(
processor=structlog.dev.ConsoleRenderer(colors=False),
)
handler = logging.handlers.WatchedFileHandler(filename=log_dest)
handler.setFormatter(formatter)
FILE_LOGGER.addHandler(handler)
from dbt.events.types import CliEventABC, Event
# top-level method for accessing the new eventing system
@@ -165,54 +10,21 @@ def setup_event_logger(log_path):
# to files, etc.)
def fire_event(e: Event) -> None:
    """Top-level entry point of the eventing system.

    Appends the event to the in-memory history and, for CLI events,
    forwards its message to the global logger at the level the event
    declares via ``level_tag()``.
    """
    EVENT_HISTORY.append(e)
    if isinstance(e, CliEventABC):
        # Hoist the repeated work out of the level dispatch below.
        line = logger.timestamped_line(e.cli_msg())
        tag = e.level_tag()
        if tag == 'test':
            # TODO after implementing #3977 send to new test level
            logger.GLOBAL_LOGGER.debug(line)
        elif tag == 'debug':
            logger.GLOBAL_LOGGER.debug(line)
        elif tag == 'info':
            logger.GLOBAL_LOGGER.info(line)
        elif tag == 'warn':
            # BUG FIX: was `warning()(line)` — called warning() with no
            # arguments and then tried to call its return value, which
            # would raise TypeError on every warn-level event.
            logger.GLOBAL_LOGGER.warning(line)
        elif tag == 'error':
            logger.GLOBAL_LOGGER.error(line)
        elif tag == 'exception':
            logger.GLOBAL_LOGGER.exception(line)
        else:
            raise AssertionError(
                f"Event type {type(e).__name__} has unhandled level: {e.level_tag()}"
            )

View File

@@ -1,9 +1,5 @@
from abc import ABCMeta, abstractmethod
import argparse
from dataclasses import dataclass
import datetime
from dbt.semver import Matchers, VersionSpecifier
from typing import Any, List
# types to represent log levels
@@ -34,11 +30,9 @@ class ErrorLevel():
return "error"
@dataclass
class ShowException():
exc_info: Any = None
stack_info: Any = None
extra: Any = None
class ExceptionLevel():
def level_tag(self) -> str:
return "exception"
# The following classes represent the data necessary to describe a
@@ -50,11 +44,6 @@ class ShowException():
# top-level superclass for all events
class Event(metaclass=ABCMeta):
ts: datetime.date
def __init__(self):
self.ts = datetime.datetime.now()
# do not define this yourself. inherit it from one of the above level types.
@abstractmethod
def level_tag(self) -> str:
@@ -68,47 +57,6 @@ class CliEventABC(Event, metaclass=ABCMeta):
raise Exception("cli_msg not implemented for cli event")
@dataclass
class AdapterEventBase():
    """Payload shared by every adapter-originated event: the adapter's
    name and the already-formatted message text."""
    name: str
    raw_msg: str

    def cli_msg(self) -> str:
        # Prefix the message with which adapter produced it.
        return f"{self.name} adapter: {self.raw_msg}"
# Concrete adapter events, one per log level. Each combines a level
# mixin, the shared name/raw_msg payload (AdapterEventBase), the CLI
# destination (CliEventABC), and exception context (ShowException).
class AdapterEventDebug(DebugLevel, AdapterEventBase, CliEventABC, ShowException):
    pass


class AdapterEventInfo(InfoLevel, AdapterEventBase, CliEventABC, ShowException):
    pass


class AdapterEventWarning(WarnLevel, AdapterEventBase, CliEventABC, ShowException):
    pass


class AdapterEventError(ErrorLevel, AdapterEventBase, CliEventABC, ShowException):
    pass
@dataclass
class ReportVersion(InfoLevel, CliEventABC):
v: VersionSpecifier
def cli_msg(self):
return f"Running with dbt{str(self.v)}"
@dataclass
class ReportArgs(DebugLevel, CliEventABC):
args: argparse.Namespace
def cli_msg(self):
return f"running dbt with arguments {str(self.args)}"
class ParsingStart(InfoLevel, CliEventABC):
def cli_msg(self) -> str:
return "Start parsing."
@@ -162,176 +110,6 @@ class ReportPerformancePath(InfoLevel, CliEventABC):
return f"Performance info: {self.path}"
# Events emitted while resolving git package dependencies and while
# talking to the package registry.
@dataclass
class GitSparseCheckoutSubdirectory(DebugLevel, CliEventABC):
    """A package subdirectory was specified, so sparse checkout is used."""
    subdir: str

    def cli_msg(self) -> str:
        return f" Subdirectory specified: {self.subdir}, using sparse checkout."


@dataclass
class GitProgressCheckoutRevision(DebugLevel, CliEventABC):
    """A specific git revision is being checked out."""
    revision: str

    def cli_msg(self) -> str:
        return f" Checking out revision {self.revision}."


@dataclass
class GitProgressUpdatingExistingDependency(DebugLevel, CliEventABC):
    """An already-cloned dependency is being updated in place."""
    dir: str

    def cli_msg(self) -> str:
        return f"Updating existing dependency {self.dir}."


@dataclass
class GitProgressPullingNewDependency(DebugLevel, CliEventABC):
    """A dependency not present locally is being cloned."""
    dir: str

    def cli_msg(self) -> str:
        return f"Pulling new dependency {self.dir}."


@dataclass
class GitNothingToDo(DebugLevel, CliEventABC):
    """The checkout was already at the requested sha."""
    sha: str

    def cli_msg(self) -> str:
        return f"Already at {self.sha}, nothing to do."


@dataclass
class GitProgressUpdatedCheckoutRange(DebugLevel, CliEventABC):
    """The checkout moved from one sha to another."""
    start_sha: str
    end_sha: str

    def cli_msg(self) -> str:
        return f" Updated checkout from {self.start_sha} to {self.end_sha}."


@dataclass
class GitProgressCheckedOutAt(DebugLevel, CliEventABC):
    """A fresh clone finished checking out at the given sha."""
    end_sha: str

    def cli_msg(self) -> str:
        return f" Checked out at {self.end_sha}."


@dataclass
class RegistryProgressMakingGETRequest(DebugLevel, CliEventABC):
    """A GET request is about to be sent to the package registry."""
    url: str

    def cli_msg(self) -> str:
        return f"Making package registry request: GET {self.url}"


@dataclass
class RegistryProgressGETResponse(DebugLevel, CliEventABC):
    """The package registry answered a GET request."""
    url: str
    resp_code: int

    def cli_msg(self) -> str:
        return f"Response from registry: GET {self.url} {self.resp_code}"
# Events emitted by the low-level system helpers (file access and
# subprocess execution).

# TODO this was actually `logger.exception(...)` not `logger.error(...)`
@dataclass
class SystemErrorRetrievingModTime(ErrorLevel, CliEventABC):
    """os.path.getmtime raised while scanning files."""
    path: str

    def cli_msg(self) -> str:
        return f"Error retrieving modification time for file {self.path}"


@dataclass
class SystemCouldNotWrite(DebugLevel, CliEventABC):
    """A write failed (e.g. path too long) and was skipped, not raised."""
    path: str
    reason: str
    exc: Exception

    def cli_msg(self) -> str:
        return (
            f"Could not write to path {self.path}({len(self.path)} characters): "
            f"{self.reason}\nexception: {self.exc}"
        )


@dataclass
class SystemExecutingCmd(DebugLevel, CliEventABC):
    """A subprocess command is about to run."""
    cmd: List[str]

    def cli_msg(self) -> str:
        return f'Executing "{" ".join(self.cmd)}"'


@dataclass
class SystemStdOutMsg(DebugLevel, CliEventABC):
    """Raw stdout captured from a subprocess."""
    bmsg: bytes

    def cli_msg(self) -> str:
        return f'STDOUT: "{str(self.bmsg)}"'


@dataclass
class SystemStdErrMsg(DebugLevel, CliEventABC):
    """Raw stderr captured from a subprocess."""
    bmsg: bytes

    def cli_msg(self) -> str:
        return f'STDERR: "{str(self.bmsg)}"'


@dataclass
class SystemReportReturnCode(DebugLevel, CliEventABC):
    """A subprocess exited with a non-zero return code."""
    code: int

    def cli_msg(self) -> str:
        return f"command return code={self.code}"
# Events emitted by node selection when tests are excluded or a
# selector is invalid.
@dataclass
class SelectorAlertUpto3UnusedNodes(InfoLevel, CliEventABC):
    """Summarizes up to three excluded tests, plus a count of the rest."""
    node_names: List[str]

    def cli_msg(self) -> str:
        summary_nodes_str = ("\n  - ").join(self.node_names[:3])
        and_more_str = (
            f"\n  - and {len(self.node_names) - 3} more" if len(self.node_names) > 4 else ""
        )
        return (
            f"\nSome tests were excluded because at least one parent is not selected. "
            f"Use the --greedy flag to include them."
            f"\n  - {summary_nodes_str}{and_more_str}"
        )


@dataclass
class SelectorAlertAllUnusedNodes(DebugLevel, CliEventABC):
    """Debug-level companion listing every excluded test."""
    node_names: List[str]

    def cli_msg(self) -> str:
        debug_nodes_str = ("\n  - ").join(self.node_names)
        return (
            f"Full list of tests that were excluded:"
            f"\n  - {debug_nodes_str}"
        )


@dataclass
class SelectorReportInvalidSelector(InfoLevel, CliEventABC):
    """An unknown selector method was used in a selection spec."""
    selector_methods: dict
    spec_method: str
    raw_spec: str

    def cli_msg(self) -> str:
        valid_selectors = ", ".join(self.selector_methods)
        return (
            f"The '{self.spec_method}' selector specified in {self.raw_spec} is "
            f"invalid. Must be one of [{valid_selectors}]"
        )
@dataclass
class MacroEventInfo(InfoLevel, CliEventABC):
msg: str
@@ -355,14 +133,6 @@ class MacroEventDebug(DebugLevel, CliEventABC):
#
# TODO remove these lines once we run mypy everywhere.
if 1 == 0:
ReportVersion(VersionSpecifier(
build=None,
major='0',
matcher=Matchers['='],
minor='1',
patch='2',
prerelease=None,
))
ParsingStart()
ParsingCompiling()
ParsingWritingManifest()
@@ -373,21 +143,5 @@ if 1 == 0:
ManifestChecked()
ManifestFlatGraphBuilt()
ReportPerformancePath(path='')
GitSparseCheckoutSubdirectory(subdir='')
GitProgressCheckoutRevision(revision='')
GitProgressUpdatingExistingDependency(dir='')
GitProgressPullingNewDependency(dir='')
GitNothingToDo(sha='')
GitProgressUpdatedCheckoutRange(start_sha='', end_sha='')
GitProgressCheckedOutAt(end_sha='')
SystemErrorRetrievingModTime(path='')
SystemCouldNotWrite(path='', reason='', exc=Exception(''))
SystemExecutingCmd(cmd=[''])
SystemStdOutMsg(bmsg=b'')
SystemStdErrMsg(bmsg=b'')
SystemReportReturnCode(code=0)
SelectorAlertUpto3UnusedNodes(node_names=[])
SelectorAlertAllUnusedNodes(node_names=[])
SelectorReportInvalidSelector(selector_methods={'': ''}, spec_method='', raw_spec='')
MacroEventInfo(msg='')
MacroEventDebug(msg='')

View File

@@ -5,10 +5,7 @@ from .queue import GraphQueue
from .selector_methods import MethodManager
from .selector_spec import SelectionCriteria, SelectionSpec
from dbt.events.functions import fire_event
from dbt.events.types import (
SelectorAlertUpto3UnusedNodes, SelectorAlertAllUnusedNodes, SelectorReportInvalidSelector
)
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.node_types import NodeType
from dbt.exceptions import (
InternalException,
@@ -33,9 +30,21 @@ def alert_non_existence(raw_spec, nodes):
def alert_unused_nodes(raw_spec, node_names):
fire_event(SelectorAlertUpto3UnusedNodes(node_names=node_names))
summary_nodes_str = ("\n - ").join(node_names[:3])
debug_nodes_str = ("\n - ").join(node_names)
and_more_str = f"\n - and {len(node_names) - 3} more" if len(node_names) > 4 else ""
summary_msg = (
f"\nSome tests were excluded because at least one parent is not selected. "
f"Use the --greedy flag to include them."
f"\n - {summary_nodes_str}{and_more_str}"
)
logger.info(summary_msg)
if len(node_names) > 4:
fire_event(SelectorAlertAllUnusedNodes(node_names=node_names))
debug_msg = (
f"Full list of tests that were excluded:"
f"\n - {debug_nodes_str}"
)
logger.debug(debug_msg)
def can_select_indirectly(node):
@@ -94,11 +103,11 @@ class NodeSelector(MethodManager):
try:
collected = self.select_included(nodes, spec)
except InvalidSelectorException:
fire_event(SelectorReportInvalidSelector(
selector_methods=self.SELECTOR_METHODS,
spec_method=spec.method,
raw_spec=spec.raw
))
valid_selectors = ", ".join(self.SELECTOR_METHODS)
logger.info(
f"The '{spec.method}' selector specified in {spec.raw} is "
f"invalid. Must be one of [{valid_selectors}]"
)
return set(), set()
neighbors = self.collect_specified_neighbors(spec, collected)

View File

@@ -45,10 +45,6 @@ DEBUG_LOG_FORMAT = (
SECRET_ENV_PREFIX = 'DBT_ENV_SECRET_'
# TODO this is a terrible place for a default like this
# fix via issue #4179
LOG_DIR: str = "logs"
def get_secret_env() -> List[str]:
return [

View File

@@ -9,8 +9,6 @@ from contextlib import contextmanager
from pathlib import Path
import dbt.version
from dbt.events.functions import fire_event, setup_event_logger
from dbt.events.types import ReportArgs, ReportVersion
import dbt.flags as flags
import dbt.task.build as build_task
import dbt.task.clean as clean_task
@@ -230,18 +228,18 @@ def run_from_args(parsed):
# set log_format in the logger
parsed.cls.pre_init_hook(parsed)
logger.info("Running with dbt{}".format(dbt.version.installed))
# this will convert DbtConfigErrors into RuntimeExceptions
# task could be any one of the task objects
task = parsed.cls.from_args(args=parsed)
logger.debug("running dbt with arguments {parsed}", parsed=str(parsed))
log_path = None
if task.config is not None:
log_path = getattr(task.config, 'log_path', None)
# we can finally set the file logger up
# TODO move as a part of #4179
setup_event_logger(log_path or 'logs')
fire_event(ReportVersion(v=dbt.version.installed))
fire_event(ReportArgs(args=parsed))
log_manager.set_path(log_path)
if dbt.tracking.active_user is not None: # mypy appeasement, always true
logger.debug("Tracking: {}".format(dbt.tracking.active_user.state()))

View File

@@ -65,7 +65,6 @@ setup(
'dbt-extractor==0.4.0',
'typing-extensions>=3.7.4,<3.11',
'werkzeug>=1,<3',
'structlog>=21.2.0,<21.3.0'
# the following are all to match snowflake-connector-python
'requests<3.0.0',
'idna>=2.5,<4',

View File

@@ -6,16 +6,13 @@ import dbt.exceptions
from dbt.adapters.base import Credentials
from dbt.adapters.sql import SQLConnectionManager
from dbt.contracts.connection import AdapterResponse
from dbt.events import AdapterLogger
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.helper_types import Port
from dataclasses import dataclass
from typing import Optional
logger = AdapterLogger("Postgres")
@dataclass
class PostgresCredentials(Credentials):
host: str

View File

@@ -1,19 +0,0 @@
from unittest import mock, TestCase
class TestFlags(TestCase):
def setUp(self):
pass
# this interface is documented for adapter maintainers to plug into
# so we should test that it at the very least doesn't explode.
def test_adapter_logging_interface(self):
from dbt.events import AdapterLogger
logger = AdapterLogger("dbt_tests")
logger.debug("debug message")
logger.info("info message")
logger.warning("warning message")
logger.error("error message")
logger.exception("exception message")
self.assertTrue(True)