arch/tile: clarify flush_buffer()/finv_buffer() function names

They are only applicable for locally-homecached memory ranges, so
change their names to {flush,finv}_buffer_local().  Change inv_buffer()
to just do an mf instead of any kind of fancier barrier, since you're
obviously not going to be waiting for anything once the local homecache
is invalidated.

Fix tilepro.c network driver not to bother calling finv_buffer when
stopping the EPP, but just mf after memset to ensure that it will not
see any packet data after we finish stopping; use finv_buffer_remote()
when doing exit-time cleanup.

This also fixes a (not very interesting) generic Linux build failure
where drivers/scsi/st.c declares its own flush_buffer().

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Chris Metcalf 2011-05-02 16:36:48 -04:00
parent 5386e73589
commit d07bd86d82
2 changed files with 15 additions and 11 deletions

arch/tile/include/asm/cacheflush.h

@@ -116,22 +116,28 @@ static inline void __finv_buffer(void *buffer, size_t size)
 }
 
-/* Invalidate a VA range, then memory fence. */
+/* Invalidate a VA range and wait for it to be complete. */
 static inline void inv_buffer(void *buffer, size_t size)
 {
 	__inv_buffer(buffer, size);
-	mb_incoherent();
+	mb();
 }
 
-/* Flush a VA range, then memory fence. */
-static inline void flush_buffer(void *buffer, size_t size)
+/*
+ * Flush a locally-homecached VA range and wait for the evicted
+ * cachelines to hit memory.
+ */
+static inline void flush_buffer_local(void *buffer, size_t size)
 {
 	__flush_buffer(buffer, size);
 	mb_incoherent();
 }
 
-/* Flush & invalidate a VA range, then memory fence. */
-static inline void finv_buffer(void *buffer, size_t size)
+/*
+ * Flush and invalidate a locally-homecached VA range and wait for the
+ * evicted cachelines to hit memory.
+ */
+static inline void finv_buffer_local(void *buffer, size_t size)
 {
 	__finv_buffer(buffer, size);
 	mb_incoherent();
 }
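
As a rough illustration of how callers are expected to choose between the renamed helpers (a sketch, not part of the commit: the function, buffer, and flag names below are hypothetical, the include list is assumed, and the third argument to finv_buffer_remote() is assumed to be the hash-for-home flag, matching the tilepro.c call further down):

	/* Hypothetical arch/tile driver fragment, for illustration only. */
	#include <linux/string.h>
	#include <asm/cacheflush.h>

	static void hand_buffer_to_device(void *buf, size_t len, int homed_locally)
	{
		/* Fill the buffer with whatever the device should see. */
		memset(buf, 0, len);

		if (homed_locally) {
			/*
			 * The cachelines are homed on this cpu, so evicting
			 * the local cache is enough; finv_buffer_local()
			 * waits for the evicted lines to reach memory.
			 */
			finv_buffer_local(buf, len);
		} else {
			/*
			 * The buffer is homed on another tile, so the remote
			 * variant is needed; 0 is assumed to mean "not
			 * hash-for-home", as in the tilepro.c call below.
			 */
			finv_buffer_remote(buf, len, 0);
		}
	}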

drivers/net/tile/tilepro.c

@@ -1658,11 +1658,9 @@ static int tile_net_stop(struct net_device *dev)
 	while (tile_net_lepp_free_comps(dev, true))
 		/* loop */;
 
-	/* Wipe the EPP queue. */
+	/* Wipe the EPP queue, and wait till the stores hit the EPP. */
 	memset(priv->eq, 0, sizeof(lepp_queue_t));
-
-	/* Evict the EPP queue. */
-	finv_buffer(priv->eq, EQ_SIZE);
+	mb();
 
 	return 0;
 }
@@ -2398,7 +2396,7 @@ static void tile_net_cleanup(void)
 		struct net_device *dev = tile_net_devs[i];
 		struct tile_net_priv *priv = netdev_priv(dev);
 		unregister_netdev(dev);
-		finv_buffer(priv->eq, EQ_SIZE);
+		finv_buffer_remote(priv->eq, EQ_SIZE, 0);
 		__free_pages(priv->eq_pages, EQ_ORDER);
 		free_netdev(dev);
 	}