Skip to content

Docstrings guide

Implementing Docstrings in Python Code

When you write or modify Python code in the codebase, it's important to add or update the docstrings accordingly. If you wish to display these docstrings in the documentation, follow these steps.

Suppose the docstrings are located at the following path: docs/Submodules/IntelOwl/api_app/analyzers_manager/classes, and you want to show the description of a class, such as BaseAnalyzerMixin.

To include this in the documentation, use the following command:

:::docs.Submodules.IntelOwl.api_app.analyzers_manager.classes.BaseAnalyzerMixin

Warning

Make sure both your path and your syntax are correct. If you still face issues even though the path is correct, read the Submodules Guide.

This is how it would look in documentation:

Bases: Plugin

Abstract Base class for Analyzers. Never inherit from this branch, always use either one of ObservableAnalyzer or FileAnalyzer classes.

Source code in docs/Submodules/IntelOwl/api_app/analyzers_manager/classes.py
class BaseAnalyzerMixin(Plugin, metaclass=ABCMeta):
    """
    Abstract Base class for Analyzers.
    Never inherit from this branch,
    always use either one of ObservableAnalyzer or FileAnalyzer classes.
    """

    # Enum shortcuts re-exported on the class so subclasses and callers can
    # reference them without importing the enums separately.
    HashChoices = HashChoices
    ObservableTypes = ObservableTypes
    TypeChoices = TypeChoices

    # Threat-level thresholds used by threat_to_evaluation (see note there).
    MALICIOUS_EVALUATION = 75
    SUSPICIOUS_EVALUATION = 35
    FALSE_POSITIVE = -50

    def threat_to_evaluation(self, threat_level):
        """
        Map a numeric threat level onto the report's data-model evaluation.

        Args:
            threat_level: numeric score; >= MALICIOUS_EVALUATION maps to
                MALICIOUS, >= SUSPICIOUS_EVALUATION to SUSPICIOUS,
                <= FALSE_POSITIVE to TRUSTED, anything in between to CLEAN.

        Returns:
            The ``value`` of the matching ``EVALUATIONS`` member of the
            report's data-model class.
        """
        # MAGIC NUMBERS HERE!!!
        # I know, it should be 25-50-75-100. We raised it a bit because
        # too many false positives were generated at the lower cut-offs.
        self.report: AnalyzerReport  # annotation only; narrows the type for readers/IDEs
        if threat_level >= self.MALICIOUS_EVALUATION:
            evaluation = self.report.data_model_class.EVALUATIONS.MALICIOUS.value
        elif threat_level >= self.SUSPICIOUS_EVALUATION:
            evaluation = self.report.data_model_class.EVALUATIONS.SUSPICIOUS.value
        elif threat_level <= self.FALSE_POSITIVE:
            evaluation = self.report.data_model_class.EVALUATIONS.TRUSTED.value
        else:
            evaluation = self.report.data_model_class.EVALUATIONS.CLEAN.value
        return evaluation

    def _do_create_data_model(self) -> bool:
        """
        Decide whether a data model should be created for this report.

        Returns:
            False for GENERIC observables, and False when the analyzer has
            no mapping configured AND neither data-model hook is overridden
            by the subclass (nothing meaningful could be stored);
            True otherwise.
        """
        if self.report.job.observable_classification == ObservableTypes.GENERIC:
            return False
        # Unbound-method comparison: if the subclass did not override either
        # hook below and no mapping is configured, skip data-model creation.
        if (
            not self._config.mapping_data_model
            and self.__class__._create_data_model_mtm
            == BaseAnalyzerMixin._create_data_model_mtm
            and self.__class__._update_data_model
            == BaseAnalyzerMixin._update_data_model
        ):
            return False
        return True

    def _create_data_model_mtm(self) -> dict:
        # Hook for subclasses: return {field_name: iterable_of_related_objects}
        # for many-to-many fields. Default: nothing to add.
        return {}

    def _update_data_model(self, data_model) -> None:
        """Populate the data model's many-to-many fields from _create_data_model_mtm."""
        mtm = self._create_data_model_mtm()
        for field_name, value in mtm.items():
            field = getattr(data_model, field_name)
            # value is expected to be an iterable of related objects
            field.add(*value)

    def create_data_model(self):
        """
        Create, populate and save the data model for this report, if applicable.

        Returns:
            The saved data-model instance, or None when creation is skipped
            (see _do_create_data_model) or the report produced no data model.
        """
        self.report: AnalyzerReport  # annotation only; narrows the type for readers/IDEs
        if self._do_create_data_model():
            data_model = self.report.create_data_model()
            if data_model:
                self._update_data_model(data_model)
                data_model.save()
            return data_model
        return None

    @classmethod
    @property
    def config_exception(cls):
        """Returns the AnalyzerConfigurationException class."""
        # NOTE(review): chaining @classmethod with @property is deprecated in
        # Python 3.11 and no longer works from 3.13 — confirm the supported
        # runtime, or replace with a custom classproperty descriptor.
        return AnalyzerConfigurationException

    @property
    def analyzer_name(self) -> str:
        """Returns the name of the analyzer."""
        return self._config.name

    @classmethod
    @property
    def report_model(cls):
        """Returns the AnalyzerReport model."""
        # NOTE(review): same @classmethod/@property chaining caveat as
        # config_exception above.
        return AnalyzerReport

    @classmethod
    @property
    def config_model(cls):
        """Returns the AnalyzerConfig model."""
        # NOTE(review): same @classmethod/@property chaining caveat as
        # config_exception above.
        return AnalyzerConfig

    def get_exceptions_to_catch(self):
        """
        Returns additional exceptions to catch when running *start* fn
        """
        return (
            AnalyzerConfigurationException,
            AnalyzerRunException,
        )

    def _validate_result(self, result, level=0, max_recursion=190):
        """
        Recursively sanitize ``result`` so it can be stored without errors.

        - Strings: NUL characters (U+0000) are stripped, because postgres
          rejects them in text values.
        - Integers: values above the signed 8-byte maximum are clamped
          (historically required because MongoDB could not store them —
          TODO confirm this limit is still needed for the current backend).
        - Nesting: recursion stops at ``max_recursion`` levels (default 190,
          kept below the ~200-level limit at which encoders raise
          maximum-nested-object errors); anything deeper is pruned to None.

        Dicts and lists are sanitized in place and also returned; strings
        and ints are returned as new/clamped values.
        """
        if level == max_recursion:
            logger.info(
                f"We have reached max_recursion {max_recursion} level. "
                f"The following object will be pruned {result} "
            )
            return None
        if isinstance(result, dict):
            for key, values in result.items():
                result[key] = self._validate_result(
                    values, level=level + 1, max_recursion=max_recursion
                )
        elif isinstance(result, list):
            for i, _ in enumerate(result):
                result[i] = self._validate_result(
                    result[i], level=level + 1, max_recursion=max_recursion
                )
        elif isinstance(result, str):
            return result.replace("\u0000", "")
        elif isinstance(result, int) and result > 9223372036854775807:  # max int 8bytes
            result = 9223372036854775807
        return result

    def after_run_success(self, content):
        """
        Handles actions after a successful run.

        Sanitizes ``content`` (with a much tighter recursion cap of 15)
        before handing it to the parent implementation, then attempts to
        create the data model; failures there are logged and appended to
        the job's errors instead of failing the run.

        Args:
            content (any): The content to process after a successful run.
        """
        super().after_run_success(self._validate_result(content, max_recursion=15))
        try:
            self.create_data_model()
        except Exception as e:
            # Best-effort: data-model creation must not fail the analyzer run.
            logger.exception(e)
            self._job.errors.append(
                f"Data model creation failed for {self._config.name}"
            )

analyzer_name property

Returns the name of the analyzer.

config_exception classmethod property

Returns the AnalyzerConfigurationException class.

config_model classmethod property

Returns the AnalyzerConfig model.

report_model classmethod property

Returns the AnalyzerReport model.

after_run_success(content)

Handles actions after a successful run.

Parameters:

Name Type Description Default
content any

The content to process after a successful run.

required
Source code in docs/Submodules/IntelOwl/api_app/analyzers_manager/classes.py
def after_run_success(self, content):
    """
    Handles actions after a successful run.

    Args:
        content (any): The content to process after a successful run.
    """
    super().after_run_success(self._validate_result(content, max_recursion=15))
    try:
        self.create_data_model()
    except Exception as e:
        logger.exception(e)
        self._job.errors.append(
            f"Data model creation failed for {self._config.name}"
        )

get_exceptions_to_catch()

Returns additional exceptions to catch when running start fn

Source code in docs/Submodules/IntelOwl/api_app/analyzers_manager/classes.py
def get_exceptions_to_catch(self):
    """
    Returns additional exceptions to catch when running *start* fn
    """
    return (
        AnalyzerConfigurationException,
        AnalyzerRunException,
    )