#!/usr/bin/env python
# coding: utf-8

# # PyCaret Fugue Integration
#
# [Fugue](https://github.com/fugue-project/fugue) is a low-code unified interface for different computing frameworks such as Spark, Dask and Pandas. PyCaret uses Fugue to support distributed computing scenarios.

# ## Hello World

# ### Classification
#
# Let's start with the most standard example. The code is exactly the same as the local version; there is no magic.

# In[1]:


from pycaret.datasets import get_data
from pycaret.classification import *

setup(data=get_data("juice", verbose=False), target="Purchase", n_jobs=1)
test_models = models().index.tolist()[:5]


# `compare_models` is also exactly the same if you don't want to use a distributed system.

# In[2]:


compare_models(include=test_models, n_select=2)


# Now let's make it distributed, as a toy case, on Dask. The only change is the additional `parallel` parameter.

# In[4]:


from pycaret.parallel import FugueBackend

compare_models(include=test_models, n_select=2, parallel=FugueBackend("dask"))


# In order to use Spark as the execution engine, you must have access to a Spark cluster and a `SparkSession`. Let's initialize a local Spark session.

# In[5]:


from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()


# Now just pass this session object to `FugueBackend` to make the comparison run on Spark. Keep in mind this is a toy case: in a real situation, you need a `SparkSession` pointing to a real Spark cluster to enjoy the power of Spark.

# In[6]:


compare_models(include=test_models, n_select=2, parallel=FugueBackend(spark))


# In the end, you can `pull` to get the metrics table.

# In[7]:


pull()


# ### Regression
#
# It follows the same pattern as classification.

# In[7]:


from pycaret.datasets import get_data
from pycaret.regression import *

setup(data=get_data("insurance", verbose=False), target="charges", n_jobs=1)
test_models = models().index.tolist()[:5]


# `compare_models` is also exactly the same if you don't want to use a distributed system.

# In[8]:


compare_models(include=test_models, n_select=2, sort="MAE")


# Now let's make it distributed, as a toy case, on Dask. The only change is the additional `parallel` parameter.

# In[9]:


from pycaret.parallel import FugueBackend

compare_models(include=test_models, n_select=2, sort="MAE", parallel=FugueBackend("dask"))


# In order to use Spark as the execution engine, you must have access to a Spark cluster and a `SparkSession`. Let's initialize a local Spark session.

# In[10]:


from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()


# Now just pass this session object to `FugueBackend` to make the comparison run on Spark. Keep in mind this is a toy case: in a real situation, you need a `SparkSession` pointing to a real Spark cluster to enjoy the power of Spark.

# In[12]:


compare_models(include=test_models, n_select=2, sort="MAE", parallel=FugueBackend(spark))


# In the end, you can `pull` to get the metrics table.

# In[13]:


pull()


# As you can see, the results of the distributed runs can differ from your local results. In the later sections, we will show how to make them identical.
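# So far, the string `"dask"` creates an ephemeral local Dask execution engine, which is convenient for testing. The following is a minimal sketch of pointing at a real Dask cluster instead; it assumes Fugue can infer the execution engine from a `dask.distributed.Client`, and the scheduler address is hypothetical:
#
# ```python
# # A sketch, not exercised in this tutorial: connect to a running Dask cluster
# from dask.distributed import Client
# from pycaret.parallel import FugueBackend
#
# client = Client("tcp://scheduler-host:8786")  # hypothetical scheduler address
# compare_models(include=test_models, n_select=2, parallel=FugueBackend(client))
# ```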
# ### Time Series
#
# It follows the same pattern as classification.

# In[14]:


from pycaret.datasets import get_data
from pycaret.time_series import *

exp = TSForecastingExperiment()
exp.setup(data=get_data("airline", verbose=False), fh=12, fold=3, fig_kwargs={"renderer": "notebook"}, session_id=42)
test_models = exp.models().index.tolist()[:5]


# In[15]:


best_baseline_models = exp.compare_models(include=test_models, n_select=3)
best_baseline_models


# In[16]:


from pycaret.parallel import FugueBackend

best_baseline_models = exp.compare_models(include=test_models, n_select=3, parallel=FugueBackend("dask"))
best_baseline_models


# In[17]:


from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()


# In[18]:


from pycaret.parallel import FugueBackend

best_baseline_models = exp.compare_models(include=test_models[:2], n_select=3, parallel=FugueBackend(spark))
best_baseline_models


# In[19]:


exp.pull()


# ## A more practical case
#
# The above examples are pure toys. To make things work well in a distributed system, you must be careful about a few things.

# ### Use a lambda instead of a dataframe in setup
#
# If you directly provide a dataframe in `setup`, this dataset will need to be sent to all worker nodes. If the dataframe is 1 GB and you have 100 workers, your driver machine may need to send out up to 100 GB of data (depending on the specific framework's implementation), and that transfer becomes a bottleneck itself. If you instead provide a lambda function, the local compute scenario does not change, but the driver only sends the function reference to the workers, and each worker becomes responsible for loading the data itself, so there is no heavy traffic on the driver side.

# ### Be deterministic
#
# You should always use `session_id` to make the distributed compute deterministic.

# ### Set n_jobs
#
# It is important to be explicit about `n_jobs` when you want to run something in a distributed way, so it will not overuse the local/remote resources. This also avoids resource contention and makes the compute faster.

# In[1]:


from pycaret.datasets import get_data
from pycaret.classification import *

setup(data_func=lambda: get_data("juice", verbose=False, profile=False), target="Purchase", session_id=0, n_jobs=1);


# ### Set the appropriate batch_size
#
# The `batch_size` parameter trades load balance against overhead. For each batch, `setup` is called only once. So:
#
# | Choice | Load Balance | Overhead | Best Scenario |
# |---|---|---|---|
# | Smaller batch size | Better | Worse | `training time >> data loading time` or `models ~= workers` |
# | Larger batch size | Worse | Better | `training time << data loading time` or `models >> workers` |
#
# The default value is `1`, meaning we want the best load balance.
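# For example, when the models are cheap to train relative to data loading, a larger batch amortizes the per-batch `setup` cost. A minimal sketch, reusing `test_models` from the earlier cells (`batch_size` is a `FugueBackend` parameter, as the next section also shows):
#
# ```python
# from pycaret.parallel import FugueBackend
#
# # setup runs once per batch of 5 models on each worker instead of once per model
# compare_models(include=test_models, n_select=2, parallel=FugueBackend("dask", batch_size=5))
# ```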
# ### Display progress
#
# In development, you can enable the visual effect with `display_remote=True`, but you must then also enable a [Fugue Callback](https://fugue-tutorials.readthedocs.io/tutorials/advanced/rpc.html) so that the driver can monitor worker progress. It is recommended to turn off the display in production.

# In[9]:


from pycaret.parallel import FugueBackend

fconf = {
    "fugue.rpc.server": "fugue.rpc.flask.FlaskRPCServer",  # keep this value
    "fugue.rpc.flask_server.host": "0.0.0.0",  # the driver IP address workers can access
    "fugue.rpc.flask_server.port": "3333",  # the open port on the driver
    "fugue.rpc.flask_server.timeout": "2 sec",  # the timeout for workers to talk to the driver
}

be = FugueBackend("dask", fconf, display_remote=True, batch_size=3, top_only=False)
compare_models(n_select=2, parallel=be)


# ## Custom Metrics
#
# You can add custom metrics as before. But in order to make the scorer distributable, it must be serializable. A plain function should be fine, but if it uses global variables that are not serializable (for example an `RLock` object), it can cause issues. So try to make the custom function independent of global variables.

# In[3]:


def score_dummy(y_true, y_pred, axis=0):
    return 0.0

add_metric(
    id="mydummy",
    name="DUMMY",
    score_func=score_dummy,
    target="pred",
    greater_is_better=False,
)


# Adding a method of a class instance is also fine, but make sure all member variables of the class are serializable.

# In[4]:


test_models = models().index.tolist()[:5]
compare_models(include=test_models, n_select=2, sort="DUMMY", parallel=FugueBackend("dask"))


# In[5]:


pull()


# In[6]:


class Scores:
    def score_dummy2(self, y_true, y_prob, axis=0):
        return 1.0

scores = Scores()

add_metric(
    id="mydummy2",
    name="DUMMY2",
    score_func=scores.score_dummy2,
    target="pred_proba",
    greater_is_better=True,
)


# In[7]:


compare_models(include=test_models, n_select=2, sort="DUMMY2", parallel=FugueBackend("dask"))


# In[8]:


pull()


# ## Notes

# ### Spark settings
#
# It is highly recommended to run only one worker on each Spark executor, so the worker can fully utilize all CPUs (set `spark.task.cpus`). When you do this, you should also explicitly set `n_jobs` in `setup` to the number of CPUs of each executor.
#
# ```python
# executor_cores = 4
#
# spark = SparkSession.builder.config("spark.task.cpus", executor_cores).config("spark.executor.cores", executor_cores).getOrCreate()
#
# setup(data=get_data("juice", verbose=False, profile=False), target="Purchase", session_id=0, n_jobs=executor_cores)
#
# compare_models(n_select=2, parallel=FugueBackend(spark))
# ```

# ### Databricks
#
# On Databricks, `spark` is the magic variable representing a `SparkSession`, and usage is no different. You do exactly the same thing as before:
#
# ```python
# compare_models(parallel=FugueBackend(spark))
# ```
#
# But on Databricks, visualization is difficult, so it may be a good idea to do two things (see the sketch after this list):
#
# * Set `verbose` to False in `setup`
# * Set `display_remote` to False in `FugueBackend`
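# A minimal sketch combining the two settings, reusing the toy dataset from the earlier cells:
#
# ```python
# setup(data_func=lambda: get_data("juice", verbose=False, profile=False), target="Purchase", session_id=0, n_jobs=1, verbose=False)
#
# compare_models(parallel=FugueBackend(spark, display_remote=False))
# ```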
# ### Dask
#
# Dask has fake distributed modes, such as the default (multi-thread) mode and the multi-process mode. The default mode works fine (although it actually runs sequentially), while the multi-process mode currently does not work for PyCaret because it messes up PyCaret's global variables. On the other hand, any Spark execution mode works fine.

# ### Local Parallelization
#
# For practical use, where you try non-trivial data and models, local parallelization (the easiest way is to use local Dask as the backend, as shown above) normally has no performance advantage, because it is very easy to overload the CPUs during training and increase resource contention. The value of local parallelization is to verify the code and to give you confidence that the distributed environment will produce the expected result in much less time.

# ### How to develop
#
# Distributed systems are powerful, but you must follow some good practices to use them:
#
# 1. **From small to large:** initially, you should start with a small set of data. For example, in `compare_models`, limit the models you want to try to a small number of cheap models; when you have verified that they work, you can switch to a larger model collection.
# 2. **From local to distributed:** follow this sequence: verify small data locally, then verify small data on a distributed backend, and then verify large data on a distributed backend. The current design makes the transition seamless (see the sketch below). You can do these steps sequentially: `parallel=None` -> `parallel=FugueBackend()` -> `parallel=FugueBackend(spark)`. In the second step, you can use a local SparkSession or local Dask.
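# A minimal sketch of that progression, reusing names from the classification example (with local Dask as the intermediate step):
#
# ```python
# compare_models(include=test_models, n_select=2)                                 # local
# compare_models(include=test_models, n_select=2, parallel=FugueBackend("dask"))  # small data, local engine
# compare_models(include=test_models, n_select=2, parallel=FugueBackend(spark))   # large data, real cluster
# ```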