Merge
lihuahua123 committed Dec 17, 2024 · 1 parent 872a765 · commit 475668a
Showing 6 changed files with 4 additions and 15 deletions.
1 change: 0 additions & 1 deletion examples/sd_run.sh
@@ -61,7 +61,6 @@ $OUTPUT_ARGS \
--warmup_steps 1 \
--prompt "brown dog laying on the ground with a metal bowl in front of him." \
--world_size 2 \
- --rank 0 \
$CFG_ARGS \
$PARALLLEL_VAE \
$COMPILE_FLAG \
2 changes: 1 addition & 1 deletion setup.py
@@ -39,7 +39,7 @@ def get_cuda_version():
"imageio",
"imageio-ffmpeg",
"optimum-quanto",
"flash_attn>=2.6.3",
"flash_attn>=2.7.0", # flash_attn>=2.7.0 with torch>=2.4.0 wraps ops with torch.ops
"ray"
],
extras_require={
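A note on the pin (context, not part of the commit): per the inline comment above, flash_attn>=2.7.0 built against torch>=2.4.0 wraps its kernels with torch.ops; the top-level Python entry point used in the sketch below is available under both the old and new pins. Shapes, dtypes, and the choice of flash_attn_func are illustrative and not taken from this repository's call sites.

# Minimal import/run check for the pinned flash_attn version.
# Requires a CUDA GPU; tensor shapes here are purely illustrative.
import torch
from flash_attn import flash_attn_func

q = torch.randn(1, 128, 8, 64, dtype=torch.float16, device="cuda")  # (batch, seqlen, heads, head_dim)
k = torch.randn_like(q)
v = torch.randn_like(q)

out = flash_attn_func(q, k, v, causal=False)  # same top-level call under both pins
print(out.shape)  # torch.Size([1, 128, 8, 64])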
8 changes: 0 additions & 8 deletions xfuser/config/args.py
@@ -79,7 +79,6 @@ class xFuserArgs:
# tensor parallel
tensor_parallel_degree: int = 1
split_scheme: Optional[str] = "row"
- rank: int = 0
world_size: int = 1
# pipefusion parallel
pipefusion_parallel_degree: int = 1
@@ -153,12 +152,6 @@ def add_cli_args(parser: FlexibleArgumentParser):

# Parallel arguments
parallel_group = parser.add_argument_group("Parallel Processing Options")
- parallel_group.add_argument(
-     "--rank",
-     type=int,
-     default=0,
-     help="Rank of the process.",
- )
parallel_group.add_argument(
"--world_size",
type=int,
@@ -384,7 +377,6 @@ def create_config(
world_size=self.world_size,
),
world_size=self.world_size,
- rank=self.rank,
)

fast_attn_config = FastAttnConfig(
1 change: 0 additions & 1 deletion xfuser/config/config.py
@@ -194,7 +194,6 @@ class ParallelConfig:
tp_config: TensorParallelConfig
distributed_executor_backend: Optional[str] = None
world_size: int = 1 # FIXME: remove this
- rank: int = 0
worker_cls: str = "xfuser.worker.worker.Worker"

def __post_init__(self):
3 changes: 1 addition & 2 deletions xfuser/executor/gpu_executor.py
@@ -23,8 +23,7 @@ def _init_executor(self):
self._init_ray_workers()

def _init_ray_workers(self):
- placement_group = initialize_ray_cluster(
-     self.engine_config.parallel_config,ray_address="0.0.0.0:6379")
+ placement_group = initialize_ray_cluster(self.engine_config.parallel_config)

# create placement group and worker wrapper instance for lazy load worker
self.workers = []
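Context for the simplified call (not part of the commit): dropping the hard-coded ray_address means the head address is resolved by Ray itself. The sketch below is a hypothetical stand-in for initialize_ray_cluster, whose real body is not shown in this diff; the bundle layout, resource counts, and PACK strategy are assumptions used only to illustrate the default ray.init() behavior the call now relies on.

# Hypothetical sketch, not the repository's implementation. With address=None,
# ray.init() connects to the cluster named by RAY_ADDRESS if that variable is
# set, and otherwise starts a local Ray instance.
import ray
from ray.util.placement_group import placement_group

def initialize_ray_cluster(parallel_config, ray_address=None):
    ray.init(address=ray_address, ignore_reinit_error=True)
    # One bundle per worker process; GPU/CPU counts here are illustrative.
    bundles = [{"GPU": 1, "CPU": 1}] * parallel_config.world_size
    pg = placement_group(bundles, strategy="PACK")
    ray.get(pg.ready())  # block until the bundles are reserved
    return pg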
4 changes: 2 additions & 2 deletions xfuser/worker/worker_wrappers.py
@@ -34,9 +34,9 @@ def update_environs(environs: Dict[str, str]):


class RayWorkerWrapper(BaseWorkerWrapper):
- def __init__(self, engine_config: EngineConfig, bundle_id: int) -> None:
+ def __init__(self, engine_config: EngineConfig, rank: int) -> None:
super().__init__(engine_config.parallel_config.worker_cls)
- self.init_worker(engine_config.parallel_config, bundle_id)
+ self.init_worker(engine_config.parallel_config, rank)

def get_node_and_gpu_ids(
self,
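With the rename, each wrapper is handed its global rank directly instead of a placement-group bundle id. A hypothetical usage sketch follows; the executor's worker-creation loop is only partially visible in the gpu_executor.py hunk above, and the helper name and num_gpus actor option are assumptions.

# Hypothetical sketch of how an executor might spawn one wrapper per rank.
# num_gpus=1 and the helper name are assumptions, not taken from this diff.
import ray
from xfuser.worker.worker_wrappers import RayWorkerWrapper

def spawn_ray_workers(engine_config):
    workers = []
    for rank in range(engine_config.parallel_config.world_size):
        actor_cls = ray.remote(num_gpus=1)(RayWorkerWrapper)
        workers.append(actor_cls.remote(engine_config, rank))
    return workers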
