vllm/v1/attention/backends: 1 file changed, +1 −5 lines

@@ -5,6 +5,6 @@
 import ast
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Any, Optional
+from typing import TYPE_CHECKING, Optional
 
 import torch
 
@@ -313,15 +313,11 @@ def __init__(
         alibi_slopes: Optional[list[float]],
         sliding_window: Optional[int],
         kv_cache_dtype: str,
-        blocksparse_params: Optional[dict[str, Any]] = None,
         logits_soft_cap: Optional[float] = None,
         attn_type: AttentionType = AttentionType.DECODER,
         kv_sharing_target_layer_name: Optional[str] = None,
         use_irope: bool = False,
     ) -> None:
-        if blocksparse_params is not None:
-            raise ValueError(
-                "TreeAttention does not support block-sparse attention.")
         self.num_heads = num_heads
         self.head_size = head_size
         self.scale = float(scale)
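For context, here is a minimal sketch of what this change means for callers. It is a hypothetical stand-in, not the real vllm backend class: the class name, the parameters before `alibi_slopes`, and the defaults below are assumptions (the `num_heads`/`head_size`/`scale` parameters are inferred from the assignments in the constructor body, and parameters after `logits_soft_cap` are trimmed to keep the sketch self-contained). The observable effect of the diff is that a caller still passing `blocksparse_params` now fails with a `TypeError` at the call site, instead of hitting the explicit `ValueError` guard that this diff deletes.

```python
from typing import Optional


class TreeAttentionImplSketch:
    """Hypothetical stand-in mirroring the post-change signature."""

    def __init__(
        self,
        num_heads: int,    # inferred from `self.num_heads = num_heads`
        head_size: int,    # inferred from `self.head_size = head_size`
        scale: float,      # inferred from `self.scale = float(scale)`
        alibi_slopes: Optional[list[float]] = None,
        sliding_window: Optional[int] = None,
        kv_cache_dtype: str = "auto",
        # `blocksparse_params` is gone: TreeAttention never supported
        # block-sparse attention, so the dead parameter and its
        # ValueError guard were removed together.
        logits_soft_cap: Optional[float] = None,
    ) -> None:
        self.num_heads = num_heads
        self.head_size = head_size
        self.scale = float(scale)


# Normal construction is unaffected by the change.
impl = TreeAttentionImplSketch(num_heads=8, head_size=64, scale=0.125)

# A caller that still passes the removed keyword now fails at the call
# site with a TypeError rather than the old, explicit ValueError.
try:
    TreeAttentionImplSketch(
        num_heads=8,
        head_size=64,
        scale=0.125,
        blocksparse_params={"block_size": 64},
    )
except TypeError as exc:
    print(exc)  # unexpected keyword argument 'blocksparse_params'
```

Dropping the parameter entirely, rather than keeping it and raising, also let the diff remove `Any` from the `typing` import, which is the +1/−5 line delta summarized above.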