
Greenplum connection

Bases: JDBCMixin, DBConnection

Greenplum connection. |support_hooks|

Based on package io.pivotal:greenplum-spark:2.2.0 (VMware Greenplum connector for Spark: https://docs.vmware.com/en/VMware-Greenplum-Connector-for-Apache-Spark/index.html).

.. seealso::

Before using this connector please take into account :ref:`greenplum-prerequisites`

.. versionadded:: 0.5.0

Parameters

host : str
    Host of Greenplum master. For example: test.greenplum.domain.com or 193.168.1.17

port : int, default: 5432
    Port of Greenplum master

user : str
    User which has proper access to the database. For example: some_user

password : str
    Password for database connection

database : str
    Database in RDBMS, NOT schema.

    See this page for more details: https://www.educba.com/postgresql-database-vs-schema/

spark : pyspark.sql.SparkSession
    Spark session.

extra : dict, default: None
    Specifies one or more extra parameters by which clients can connect to the instance.

    For example: {"tcpKeepAlive": "true", "server.port": "50000-65535"}

    Supported options are:

    * All Postgres JDBC driver properties: https://jdbc.postgresql.org/documentation/use/
    * Properties from the Greenplum connector for Spark documentation page (https://docs.vmware.com/en/VMware-Greenplum-Connector-for-Apache-Spark/2.3/greenplum-connector-spark/options.html), but only those starting with server. or pool.
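
For illustration, both option families can be mixed in a single extra dict; judging by the connector parameter handling in the source below, keys without a server. or pool. prefix are treated as JDBC driver properties. The values here are placeholders, not recommendations:

.. code:: python

    extra = {
        # Postgres JDBC driver property, passed via the JDBC URL
        "tcpKeepAlive": "true",
        # Greenplum connector property, passed to the connector itself
        "server.port": "49152-65535",
    }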

Examples

Create and check Greenplum connection:

.. code:: python

    from onetl.connection import Greenplum
    from pyspark.sql import SparkSession

    # Create Spark session with Greenplum connector loaded
    maven_packages = Greenplum.get_packages(spark_version="3.2")
    spark = (
        SparkSession.builder.appName("spark-app-name")
        .config("spark.jars.packages", ",".join(maven_packages))
        .config("spark.executor.allowSparkContext", "true")
        # IMPORTANT!!!
        # Set number of executors according to "Prerequisites" -> "Number of executors"
        .config("spark.dynamicAllocation.maxExecutors", 10)
        .config("spark.executor.cores", 1)
        .getOrCreate()
    )

    # IMPORTANT!!!
    # Set port range of executors according to "Prerequisites" -> "Network ports"
    extra = {
        "server.port": "41000-42000",
    }

    # Create connection
    greenplum = Greenplum(
        host="master.host.or.ip",
        user="user",
        password="*****",
        database="target_database",
        extra=extra,
        spark=spark,
    ).check()
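
For a full read, the connection object is normally handed to onetl's high-level DBReader. Below is a minimal sketch, assuming onetl's DBReader API and a hypothetical schema_name.table_name source:

.. code:: python

    from onetl.db import DBReader

    # `greenplum` is the connection created above
    reader = DBReader(
        connection=greenplum,
        source="schema_name.table_name",  # hypothetical "<schema>.<table>" name
    )
    df = reader.run()  # Spark DataFrame read through the Greenplum connector
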
Source code in onetl/connection/db_connection/greenplum/connection.py
@support_hooks
class Greenplum(JDBCMixin, DBConnection):  # noqa: WPS338
    """Greenplum connection. |support_hooks|

    Based on package ``io.pivotal:greenplum-spark:2.2.0``
    (`VMware Greenplum connector for Spark <https://docs.vmware.com/en/VMware-Greenplum-Connector-for-Apache-Spark/index.html>`_).

    .. seealso::

        Before using this connector please take into account :ref:`greenplum-prerequisites`

    .. versionadded:: 0.5.0

    Parameters
    ----------
    host : str
        Host of Greenplum master. For example: ``test.greenplum.domain.com`` or ``193.168.1.17``

    port : int, default: ``5432``
        Port of Greenplum master

    user : str
        User, which have proper access to the database. For example: ``some_user``

    password : str
        Password for database connection

    database : str
        Database in RDBMS, NOT schema.

        See `this page <https://www.educba.com/postgresql-database-vs-schema/>`_ for more details

    spark : :obj:`pyspark.sql.SparkSession`
        Spark session.

    extra : dict, default: ``None``
        Specifies one or more extra parameters by which clients can connect to the instance.

        For example: ``{"tcpKeepAlive": "true", "server.port": "50000-65535"}``

        Supported options are:
            * All `Postgres JDBC driver properties <https://jdbc.postgresql.org/documentation/use/>`_
            * Properties from `Greenplum connector for Spark documentation <https://docs.vmware.com/en/VMware-Greenplum-Connector-for-Apache-Spark/2.3/greenplum-connector-spark/options.html>`_ page, but only starting with ``server.`` or ``pool.``

    Examples
    --------

    Create and check Greenplum connection:

    .. code:: python

        from onetl.connection import Greenplum
        from pyspark.sql import SparkSession

        # Create Spark session with Greenplum connector loaded
        maven_packages = Greenplum.get_packages(spark_version="3.2")
        spark = (
            SparkSession.builder.appName("spark-app-name")
            .config("spark.jars.packages", ",".join(maven_packages))
            .config("spark.executor.allowSparkContext", "true")
            # IMPORTANT!!!
            # Set number of executors according to "Prerequisites" -> "Number of executors"
            .config("spark.dynamicAllocation.maxExecutors", 10)
            .config("spark.executor.cores", 1)
            .getOrCreate()
        )

        # IMPORTANT!!!
        # Set port range of executors according to "Prerequisites" -> "Network ports"
        extra = {
            "server.port": "41000-42000",
        }

        # Create connection
        greenplum = Greenplum(
            host="master.host.or.ip",
            user="user",
            password="*****",
            database="target_database",
            extra=extra,
            spark=spark,
        ).check()
    """

    host: Host
    user: str
    password: SecretStr
    database: str
    port: int = 5432
    extra: GreenplumExtra = GreenplumExtra()

    ReadOptions = GreenplumReadOptions
    WriteOptions = GreenplumWriteOptions
    SQLOptions = GreenplumSQLOptions
    FetchOptions = GreenplumFetchOptions
    ExecuteOptions = GreenplumExecuteOptions
    JDBCOptions = JDBCMixinOptions

    Extra = GreenplumExtra
    Dialect = GreenplumDialect

    DRIVER: ClassVar[str] = "org.postgresql.Driver"
    CONNECTIONS_WARNING_LIMIT: ClassVar[int] = 31
    CONNECTIONS_EXCEPTION_LIMIT: ClassVar[int] = 100

    _CHECK_QUERY: ClassVar[str] = "SELECT 1"
    # any small table with always present in db, and which any user can access
    # https://www.greenplumdba.com/pg-catalog-tables-and-views
    _CHECK_DUMMY_TABLE: ClassVar[str] = "pg_catalog.gp_id"
    _CHECK_DUMMY_PARTITION_COLUMN: ClassVar[str] = "dbid"

    @slot
    @classmethod
    def get_packages(
        cls,
        *,
        scala_version: str | None = None,
        spark_version: str | None = None,
        package_version: str | None = None,
    ) -> list[str]:
        """
        Get package names to be downloaded by Spark. |support_hooks|

        .. warning::

            You should pass either ``scala_version`` or ``spark_version``.

        .. versionadded:: 0.9.0

        Parameters
        ----------
        scala_version : str, optional
            Scala version in format ``major.minor``.

            If ``None``, ``spark_version`` is used to determine Scala version.

        spark_version : str, optional
            Spark version in format ``major.minor``.

            Used only if ``scala_version=None``.

        package_version : str, optional, default ``2.2.0``
            Package version in format ``major.minor.patch``

            .. versionadded:: 0.10.1

        Examples
        --------

        .. code:: python

            from onetl.connection import Greenplum

            Greenplum.get_packages(scala_version="2.12")
            Greenplum.get_packages(spark_version="3.2", package_version="2.3.0")

        """

        # Connector version is fixed, so we can perform checks for Scala/Spark version
        if package_version:
            package_ver = Version(package_version)
        else:
            package_ver = Version("2.2.0")

        if scala_version:
            scala_ver = Version(scala_version).min_digits(2)
        elif spark_version:
            spark_ver = Version(spark_version).min_digits(2)
            if spark_ver >= Version("3.3"):
                raise ValueError(f"Spark version must be 3.2.x or less, got {spark_ver}")
            scala_ver = get_default_scala_version(spark_ver)
        else:
            raise ValueError("You should pass either `scala_version` or `spark_version`")

        return [f"io.pivotal:greenplum-spark_{scala_ver.format('{0}.{1}')}:{package_ver}"]

    @classproperty
    def package_spark_3_2(cls) -> str:
        """Get package name to be downloaded by Spark 3.2."""
        msg = "`Greenplum.package_3_2` will be removed in 1.0.0, use `Greenplum.get_packages(spark_version='3.2')` instead"
        warnings.warn(msg, UserWarning, stacklevel=3)
        return "io.pivotal:greenplum-spark_2.12:2.2.0"

    @property
    def instance_url(self) -> str:
        return f"{self.__class__.__name__.lower()}://{self.host}:{self.port}/{self.database}"

    def __str__(self):
        return f"{self.__class__.__name__}[{self.host}:{self.port}/{self.database}]"

    @property
    def jdbc_url(self) -> str:
        return f"jdbc:postgresql://{self.host}:{self.port}/{self.database}"

    @property
    def jdbc_custom_params(self) -> dict:
        result = {
            key: value
            for key, value in self.extra.dict(by_alias=True).items()
            if not (key.startswith("server.") or key.startswith("pool."))
        }
        # https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME
        result["ApplicationName"] = result.get("ApplicationName", get_client_info(self.spark, limit=64))
        return result

    @property
    def jdbc_params(self) -> dict:
        result = super().jdbc_params
        result.update(self.jdbc_custom_params)
        return result

    @slot
    def check(self):
        log.info("|%s| Checking connection availability...", self.__class__.__name__)
        self._log_parameters()  # type: ignore

        log.debug("|%s| Executing SQL query:", self.__class__.__name__)
        log_lines(log, self._CHECK_QUERY, level=logging.DEBUG)

        try:
            with override_job_description(self.spark, f"{self}.check()"):
                self._query_optional_on_driver(self._CHECK_QUERY, self.FetchOptions(fetchsize=1))

                read_options = self._get_connector_params(self._CHECK_DUMMY_TABLE)
                # do not require gp_segment_id column in table
                read_options["partitions"] = 1
                read_options["partitionColumn"] = self._CHECK_DUMMY_PARTITION_COLUMN
                df = self.spark.read.format("greenplum").options(**read_options).load()
                df.take(1)

            log.info("|%s| Connection is available.", self.__class__.__name__)
        except Exception as e:
            log.exception("|%s| Connection is unavailable", self.__class__.__name__)
            raise RuntimeError("Connection is unavailable") from e

        return self

    @slot
    def read_source_as_df(
        self,
        source: str,
        columns: list[str] | None = None,
        hint: str | None = None,
        where: str | None = None,
        df_schema: StructType | None = None,
        window: Window | None = None,
        limit: int | None = None,
        options: GreenplumReadOptions | None = None,
    ) -> DataFrame:
        read_options = self.ReadOptions.parse(options).dict(by_alias=True, exclude_none=True)
        log.info("|%s| Executing SQL query (on executor):", self.__class__.__name__)
        where = self.dialect.apply_window(where, window)
        fake_query_for_log = self.dialect.get_sql_query(table=source, columns=columns, where=where, limit=limit)
        log_lines(log, fake_query_for_log)

        df = self.spark.read.format("greenplum").options(**self._get_connector_params(source), **read_options).load()
        self._check_expected_jobs_number(df, action="read")

        if where:
            for item in where:
                df = df.filter(item)

        if columns:
            df = df.selectExpr(*columns)

        if limit is not None:
            df = df.limit(limit)

        log.info("|Spark| DataFrame successfully created from SQL statement ")
        return df

    @slot
    def write_df_to_target(
        self,
        df: DataFrame,
        target: str,
        options: GreenplumWriteOptions | None = None,
    ) -> None:
        write_options = self.WriteOptions.parse(options)
        options_dict = write_options.dict(by_alias=True, exclude_none=True, exclude={"if_exists"})

        self._check_expected_jobs_number(df, action="write")

        log.info("|%s| Saving data to a table %r", self.__class__.__name__, target)
        mode = (
            "overwrite"
            if write_options.if_exists == GreenplumTableExistBehavior.REPLACE_ENTIRE_TABLE
            else write_options.if_exists.value
        )
        df.write.format("greenplum").options(
            **self._get_connector_params(target),
            **options_dict,
        ).mode(mode).save()

        log.info("|%s| Table %r is successfully written", self.__class__.__name__, target)

    @slot
    def get_df_schema(
        self,
        source: str,
        columns: list[str] | None = None,
        options: JDBCReadOptions | None = None,
    ) -> StructType:
        log.info("|%s| Detected dialect: '%s'", self.__class__.__name__, self._get_spark_dialect_class_name())
        log.info("|%s| Fetching schema of table %r ...", self.__class__.__name__, source)

        query = self.dialect.get_sql_query(source, columns=columns, limit=0, compact=True)
        jdbc_options = self.ReadOptions.parse(options).copy(update={"fetchsize": 0})

        log.debug("|%s| Executing SQL query (on driver):", self.__class__.__name__)
        log_lines(log, query, level=logging.DEBUG)

        df = self._query_on_driver(query, self.FetchOptions.parse(jdbc_options.dict()))
        log.info("|%s| Schema fetched.", self.__class__.__name__)

        return df.schema

    @slot
    def get_min_max_values(
        self,
        source: str,
        window: Window,
        hint: Any | None = None,
        where: Any | None = None,
        options: JDBCReadOptions | None = None,
    ) -> tuple[Any, Any]:
        log.info("|%s| Getting min and max values for %r ...", self.__class__.__name__, window.expression)
        jdbc_options = self.ReadOptions.parse(options).copy(update={"fetchsize": 1})

        query = self.dialect.get_sql_query(
            table=source,
            columns=[
                self.dialect.aliased(
                    self.dialect.get_min_value(window.expression),
                    self.dialect.escape_column("min"),
                ),
                self.dialect.aliased(
                    self.dialect.get_max_value(window.expression),
                    self.dialect.escape_column("max"),
                ),
            ],
            where=self.dialect.apply_window(where, window),
        )

        log.info("|%s| Executing SQL query (on driver):", self.__class__.__name__)
        log_lines(log, query)

        df = self._query_on_driver(query, self.FetchOptions.parse(jdbc_options.dict()))
        row = df.collect()[0]
        min_value = row["min"]
        max_value = row["max"]

        log.info("|%s| Received values:", self.__class__.__name__)
        log_with_indent(log, "MIN(%s) = %r", window.expression, min_value)
        log_with_indent(log, "MAX(%s) = %r", window.expression, max_value)

        return min_value, max_value

    @validator("spark")
    def _check_java_class_imported(cls, spark):
        java_class = "io.pivotal.greenplum.spark.GreenplumRelationProvider"

        try:
            try_import_java_class(spark, java_class)
        except Exception as e:
            spark_version = get_spark_version(spark).format("{0}.{1}")
            msg = MISSING_JVM_CLASS_MSG.format(
                java_class=java_class,
                package_source=cls.__name__,
                args=f"spark_version='{spark_version}'",
            )
            raise ValueError(msg) from e
        return spark

    def _get_connector_params(
        self,
        table: str,
    ) -> dict:
        schema, table_name = table.split(".")  # noqa: WPS414
        extra = self.extra.dict(by_alias=True, exclude_none=True)
        greenplum_connector_options = {
            key: value for key, value in extra.items() if key.startswith("server.") or key.startswith("pool.")
        }

        # Greenplum connector requires all JDBC params to be passed via JDBC URL:
        # https://docs.vmware.com/en/VMware-Greenplum-Connector-for-Apache-Spark/2.3/greenplum-connector-spark/using_the_connector.html#specifying-session-parameters
        parsed_jdbc_url = urlparse(self.jdbc_url)
        sorted_jdbc_params = [(k, v) for k, v in sorted(self.jdbc_custom_params.items(), key=lambda x: x[0].lower())]
        jdbc_url_query = urlencode(sorted_jdbc_params, quote_via=quote)
        jdbc_url = urlunparse(parsed_jdbc_url._replace(query=jdbc_url_query))

        return {
            "driver": self.DRIVER,
            "url": jdbc_url,
            "user": self.user,
            "password": self.password.get_secret_value(),
            "dbschema": schema,
            "dbtable": table_name,
            **greenplum_connector_options,
        }

    def _get_jdbc_connection(self, options: JDBCFetchOptions | JDBCExecuteOptions, read_only: bool):
        if read_only:
            # To properly support pgbouncer, we have to create connection with readOnly option set.
            # See https://github.com/pgjdbc/pgjdbc/issues/848
            options = options.copy(update={"readOnly": True})

        connection_properties = self._options_to_connection_properties(options)
        driver_manager = self.spark._jvm.java.sql.DriverManager  # type: ignore
        # avoid calling .setReadOnly(True) here
        return driver_manager.getConnection(self.jdbc_url, connection_properties)

    def _get_server_setting(self, name: str) -> Any:
        query = f"""
                SELECT setting
                FROM   pg_settings
                WHERE  name = '{name}'
                """
        log.debug("|%s| Executing SQL query (on driver):", self.__class__.__name__)
        log_lines(log, query, level=logging.DEBUG)

        df = self._query_on_driver(query, self.FetchOptions())
        result = df.collect()

        log.debug(
            "|%s| Query succeeded, resulting in-memory dataframe contains %d rows",
            self.__class__.__name__,
            len(result),
        )
        if result:
            return result[0][0]

        return None

    def _get_occupied_connections_count(self) -> int:
        # https://stackoverflow.com/a/5270806
        query = """
                SELECT SUM(numbackends)
                FROM pg_stat_database
                """
        log.debug("|%s| Executing SQL query (on driver):", self.__class__.__name__)
        log_lines(log, query, level=logging.DEBUG)

        df = self._query_on_driver(query, self.FetchOptions())
        result = df.collect()

        log.debug(
            "|%s| Query succeeded, resulting in-memory dataframe contains %d rows",
            self.__class__.__name__,
            len(result),
        )
        return int(result[0][0])

    def _get_connections_limits(self) -> GreenplumConnectionLimit:
        max_connections = int(self._get_server_setting("max_connections"))
        reserved_connections = int(self._get_server_setting("superuser_reserved_connections"))
        occupied_connections = self._get_occupied_connections_count()
        return GreenplumConnectionLimit(
            maximum=max_connections,
            reserved=reserved_connections,
            occupied=occupied_connections,
        )

    def _check_expected_jobs_number(self, df: DataFrame, action: str) -> None:
        # Parallel reading or writing to Greenplum can open a lot of connections.
        # Connection number is limited on server side, so we should prevent creating too much of them because reaching
        # the limit may prevent all other users from connecting the cluster

        # We cannot use `ReadOptions.partitions` because its default value
        # is calculated dynamically, based on number of segments in the Greenplum instance.
        # Also number of partitions in a writing process is determined by dataframe, not connector options
        partitions = df.rdd.getNumPartitions()
        if partitions < self.CONNECTIONS_WARNING_LIMIT:
            return

        expected_cores, config = get_executor_total_cores(self.spark)
        if expected_cores < self.CONNECTIONS_WARNING_LIMIT:
            return

        # each partition goes to its own core.
        # if there are no enough cores, the excess of partitions will wait for a free core
        max_jobs = min(expected_cores, partitions)

        limits = self._get_connections_limits()
        connections_message = (
            textwrap.dedent(
                f"""
                Each parallel job of {max_jobs} opens a separate connection.
                This can lead to reaching out the connection limit and disrupting other users:
                """,
            ).strip()
            + os.linesep
            + textwrap.indent(limits.summary, " " * 4)
        )

        session_options_recommendation = "reduce number of resources used by your Spark session:"
        session_options_recommendation += os.linesep
        session_options_recommendation += os.linesep.join(" " * 4 + f"{key}: {value}" for key, value in config.items())
        session_options_recommendation += os.linesep

        dataframe_options_recommendation = f"reduce number of partitions ({partitions}) in the dataframe using:"
        dataframe_options_recommendation += os.linesep + " " * 4
        if action == "read":
            dataframe_options_recommendation += (
                f'{self.__class__.__name__}.ReadOptions(partitions=N, partitionColumn="...")'
            )
        else:
            dataframe_options_recommendation += "df.repartition(N)"
        dataframe_options_recommendation += os.linesep

        if expected_cores > partitions or action == "read":
            # default partitioning method is more effective than custom partitioning,
            # so it's better to change session options
            recommendations = "Please " + session_options_recommendation + "or " + dataframe_options_recommendation
        else:
            # 50 executors, 200 partitions means that partitions will be written sequentially.
            # Greenplum is not designed for that, it's better to repartition dataframe.
            # Yes, it will take more RAM on each executor
            recommendations = "Please " + dataframe_options_recommendation + "or " + session_options_recommendation

        message = (connections_message + os.linesep * 2 + recommendations).strip()

        if max_jobs >= self.CONNECTIONS_EXCEPTION_LIMIT:
            raise TooManyParallelJobsError(message)

        log_lines(log, message, level=logging.WARNING)

    def _log_parameters(self):
        super()._log_parameters()
        log_with_indent(log, "jdbc_url = %r", self.jdbc_url)

get_packages(*, scala_version=None, spark_version=None, package_version=None) classmethod

Get package names to be downloaded by Spark. |support_hooks|

.. warning::

You should pass either ``scala_version`` or ``spark_version``.

.. versionadded:: 0.9.0

Parameters

scala_version : str, optional
    Scala version in format major.minor.

    If ``None``, ``spark_version`` is used to determine Scala version.

spark_version : str, optional
    Spark version in format major.minor.

    Used only if scala_version=None.

package_version : str, optional, default 2.2.0
    Package version in format major.minor.patch

    .. versionadded:: 0.10.1

Examples

.. code:: python

    from onetl.connection import Greenplum

    Greenplum.get_packages(scala_version="2.12")
    Greenplum.get_packages(spark_version="3.2", package_version="2.3.0")
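
Judging by the defaults in the source below (package version 2.2.0, Scala 2.12 inferred for Spark 3.2), the calls above should return something like:

.. code:: python

    Greenplum.get_packages(scala_version="2.12")
    # ['io.pivotal:greenplum-spark_2.12:2.2.0']

    Greenplum.get_packages(spark_version="3.2", package_version="2.3.0")
    # ['io.pivotal:greenplum-spark_2.12:2.3.0']
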
Source code in onetl/connection/db_connection/greenplum/connection.py
@slot
@classmethod
def get_packages(
    cls,
    *,
    scala_version: str | None = None,
    spark_version: str | None = None,
    package_version: str | None = None,
) -> list[str]:
    """
    Get package names to be downloaded by Spark. |support_hooks|

    .. warning::

        You should pass either ``scala_version`` or ``spark_version``.

    .. versionadded:: 0.9.0

    Parameters
    ----------
    scala_version : str, optional
        Scala version in format ``major.minor``.

        If ``None``, ``spark_version`` is used to determine Scala version.

    spark_version : str, optional
        Spark version in format ``major.minor``.

        Used only if ``scala_version=None``.

    package_version : str, optional, default ``2.2.0``
        Package version in format ``major.minor.patch``

        .. versionadded:: 0.10.1

    Examples
    --------

    .. code:: python

        from onetl.connection import Greenplum

        Greenplum.get_packages(scala_version="2.12")
        Greenplum.get_packages(spark_version="3.2", package_version="2.3.0")

    """

    # Connector version is fixed, so we can perform checks for Scala/Spark version
    if package_version:
        package_ver = Version(package_version)
    else:
        package_ver = Version("2.2.0")

    if scala_version:
        scala_ver = Version(scala_version).min_digits(2)
    elif spark_version:
        spark_ver = Version(spark_version).min_digits(2)
        if spark_ver >= Version("3.3"):
            raise ValueError(f"Spark version must be 3.2.x or less, got {spark_ver}")
        scala_ver = get_default_scala_version(spark_ver)
    else:
        raise ValueError("You should pass either `scala_version` or `spark_version`")

    return [f"io.pivotal:greenplum-spark_{scala_ver.format('{0}.{1}')}:{package_ver}"]

check()

Source code in onetl/connection/db_connection/greenplum/connection.py
@slot
def check(self):
    log.info("|%s| Checking connection availability...", self.__class__.__name__)
    self._log_parameters()  # type: ignore

    log.debug("|%s| Executing SQL query:", self.__class__.__name__)
    log_lines(log, self._CHECK_QUERY, level=logging.DEBUG)

    try:
        with override_job_description(self.spark, f"{self}.check()"):
            self._query_optional_on_driver(self._CHECK_QUERY, self.FetchOptions(fetchsize=1))

            read_options = self._get_connector_params(self._CHECK_DUMMY_TABLE)
            # do not require gp_segment_id column in table
            read_options["partitions"] = 1
            read_options["partitionColumn"] = self._CHECK_DUMMY_PARTITION_COLUMN
            df = self.spark.read.format("greenplum").options(**read_options).load()
            df.take(1)

        log.info("|%s| Connection is available.", self.__class__.__name__)
    except Exception as e:
        log.exception("|%s| Connection is unavailable", self.__class__.__name__)
        raise RuntimeError("Connection is unavailable") from e

    return self
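
check() carries no rendered docstring, but the source above shows what it does: it runs SELECT 1 on the driver, then reads a single row from the pg_catalog.gp_id dummy table through the connector, and returns the connection object on success. A minimal usage sketch:

.. code:: python

    # `greenplum` is a connection constructed as in the Examples section above
    greenplum.check()  # returns the connection itself, or raises RuntimeError if unavailable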