
Commit c8107b7

Properly save
isamu-isozaki committed Jul 23, 2023
1 parent e88bc1f commit c8107b7
Showing 2 changed files with 1 addition and 8 deletions.
.gitignore: 3 changes (0 additions, 3 deletions)
@@ -4,13 +4,10 @@ real
 generated
 real
 output.jpg
-<<<<<<< HEAD
 wandb
 imagenet-text2image
-=======
 segmented.jpg
 
->>>>>>> 21b5600a636f39569160876cd6ea291220e8cbcb
 __pycache__/
 *.py[cod]
 *$py.class
muse/modeling_transformer.py: 6 changes (1 addition, 5 deletions)
@@ -571,13 +571,9 @@ def forward(self, hidden_states, encoder_hidden_states=None, encoder_attention_m
         value = value.view(batch, kv_seq_len, self.num_heads, self.head_dim)  # (B, T, nh, hs)
 
         if self.use_memory_efficient_attention_xformers:
-<<<<<<< HEAD
-            attn_output = xops.memory_efficient_attention(query, key, value, op=self.xformers_attention_op, attn_bias=bias)
-=======
             attn_output = xops.memory_efficient_attention(
-                query, key, value, op=self.xformers_attention_op, p=self.attention_dropout if self.training else 0.0
+                query, key, value, op=self.xformers_attention_op, p=self.attention_dropout if self.training else 0.0, attn_bias=bias
             )
->>>>>>> 21b5600a636f39569160876cd6ea291220e8cbcb
             attn_output = attn_output.view(batch, q_seq_len, self.hidden_size)
         else:
             attention_mask = None
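
For context, the resolution above keeps both sides of the conflicted call: the attn_bias argument from one branch and the dropout probability p from the other. Here is a minimal standalone sketch of the resolved call, assuming xformers is installed and a CUDA device is available; the shapes, dropout probability, and bias are illustrative, not taken from the repository:

    import torch
    import xformers.ops as xops

    # Illustrative shapes: (batch, seq_len, num_heads, head_dim), the layout
    # memory_efficient_attention expects.
    batch, seq_len, num_heads, head_dim = 2, 16, 4, 32
    query = torch.randn(batch, seq_len, num_heads, head_dim,
                        device="cuda", dtype=torch.float16)
    key = torch.randn_like(query)
    value = torch.randn_like(query)

    # The merge resolution passes both keyword arguments at once: attn_bias
    # from one branch and the dropout probability p from the other (the
    # repository code enables p only while self.training is True).
    attn_output = xops.memory_efficient_attention(
        query, key, value,
        p=0.1,           # illustrative attention-dropout probability
        attn_bias=None,  # e.g. xops.LowerTriangularMask() for causal attention
    )
    print(attn_output.shape)  # torch.Size([2, 16, 4, 32])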
