A fix for a NULL dereference that turns out to be easily triggerable
by fsync (marked for stable) and a false positive WARN and snap_rwsem
locking fixups.
-----BEGIN PGP SIGNATURE-----

iQFHBAABCAAxFiEEydHwtzie9C7TfviiSn/eOAIR84sFAmJsELYTHGlkcnlvbW92
QGdtYWlsLmNvbQAKCRBKf944AhHziwW9CACcunarIMNtKWRRoQjOh/2RUbqEqZaA
amz5mb6BIkGiZ092UggQ+5SKRJ0eIWayCatMZ5PKpvAMUGpOBgPjQsG1WvqzFzd5
m84FQ16CsywcD1AYAUlArq9Y59VFQyBXh3kovwDCEywh9F9FPgpDC0MrjeHsBQ0z
MtsuhzBoLxyVwANV7WFOH2/+U+EPfkK8pNDKluJDy2P6QavsJAI8lk4oEMFgVTPl
avLdeSC6EIJ8ZwFs//PgGsmjHPLdgA8cEMJEWxa7Sw0zy7+CZpOTuUn95KERIDrc
7XKc6QdvNdcGSs2boQSFUrfpNV6NHjB7xb0b9fMAqFan9Vb9TFdv2B6x
=OEJo
-----END PGP SIGNATURE-----

Merge tag 'ceph-for-5.18-rc5' of https://github.com/ceph/ceph-client

Pull ceph client fixes from Ilya Dryomov:
 "A fix for a NULL dereference that turns out to be easily triggerable
  by fsync (marked for stable) and a false positive WARN and snap_rwsem
  locking fixups"

* tag 'ceph-for-5.18-rc5' of https://github.com/ceph/ceph-client:
  ceph: fix possible NULL pointer dereference for req->r_session
  ceph: remove incorrect session state check
  ceph: get snap_rwsem read lock in handle_cap_export for ceph_add_cap
  libceph: disambiguate cluster/pool full log message
commit bd383b8e32
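The first two hunks below are the req->r_session fix: fsync walks the inode's
unsafe-request lists, and a request on those lists may have no session attached,
so the session pointer has to be checked before it is dereferenced. As a minimal
userspace sketch of that guard pattern (struct request, struct session,
count_oversized() and the sample data are hypothetical, not Ceph code; only the
field names mirror the diff):

/*
 * Illustrative sketch of the guard added by the fix: when walking a list
 * of requests, a request may carry a NULL session, so skip it instead of
 * dereferencing it.  Hypothetical names, not kernel code.
 */
#include <stdio.h>
#include <stddef.h>

struct session { int s_mds; };
struct request { struct session *r_session; };

static int count_oversized(const struct request *reqs, size_t n, int max_sessions)
{
        int hits = 0;

        for (size_t i = 0; i < n; i++) {
                struct session *s = reqs[i].r_session;

                if (!s)                 /* the added guard: skip sessionless requests */
                        continue;
                if (s->s_mds >= max_sessions)
                        hits++;
        }
        return hits;
}

int main(void)
{
        struct session a = { .s_mds = 3 };
        struct request reqs[] = {
                { .r_session = &a },
                { .r_session = NULL },  /* would crash without the guard */
        };

        printf("%d\n", count_oversized(reqs, 2, 2));
        return 0;
}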
@@ -2274,6 +2274,8 @@ retry:
                 list_for_each_entry(req, &ci->i_unsafe_dirops,
                                     r_unsafe_dir_item) {
                         s = req->r_session;
+                        if (!s)
+                                continue;
                         if (unlikely(s->s_mds >= max_sessions)) {
                                 spin_unlock(&ci->i_unsafe_lock);
                                 for (i = 0; i < max_sessions; i++) {
@@ -2294,6 +2296,8 @@ retry:
                 list_for_each_entry(req, &ci->i_unsafe_iops,
                                     r_unsafe_target_item) {
                         s = req->r_session;
+                        if (!s)
+                                continue;
                         if (unlikely(s->s_mds >= max_sessions)) {
                                 spin_unlock(&ci->i_unsafe_lock);
                                 for (i = 0; i < max_sessions; i++) {
@@ -3870,6 +3874,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
         dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
              inode, ci, mds, mseq, target);
 retry:
+        down_read(&mdsc->snap_rwsem);
         spin_lock(&ci->i_ceph_lock);
         cap = __get_cap_for_mds(ci, mds);
         if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
@@ -3933,6 +3938,7 @@ retry:
         }

         spin_unlock(&ci->i_ceph_lock);
+        up_read(&mdsc->snap_rwsem);
         mutex_unlock(&session->s_mutex);

         /* open target session */
@@ -3958,6 +3964,7 @@ retry:

 out_unlock:
         spin_unlock(&ci->i_ceph_lock);
+        up_read(&mdsc->snap_rwsem);
         mutex_unlock(&session->s_mutex);
         if (tsession) {
                 mutex_unlock(&tsession->s_mutex);
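The three handle_cap_export hunks above take mdsc->snap_rwsem for read before
grabbing i_ceph_lock and drop it again on both exit paths, so that the
ceph_add_cap() call further down runs with snap_rwsem held (per the
"get snap_rwsem read lock in handle_cap_export for ceph_add_cap" shortlog
entry). A userspace sketch of that acquire/release ordering, using pthread
locks purely as an analogy: handle_export(), the mutex stand-in for the
spinlock, and the return values are invented; only the lock names are borrowed
from the diff:

/*
 * Analogy for the locking order added in handle_cap_export(): take the
 * rwsem (reader side) before the per-inode lock, and release in reverse
 * order on every exit path.  Pure illustration, not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t snap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t i_ceph_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the spinlock */

static int handle_export(int have_cap)
{
        pthread_rwlock_rdlock(&snap_rwsem);     /* what the fix adds, before the inode lock */
        pthread_mutex_lock(&i_ceph_lock);

        if (!have_cap)
                goto out_unlock;                /* early exit still unwinds both locks */

        /* ... the cap would be added/moved here, with snap_rwsem held ... */

out_unlock:
        pthread_mutex_unlock(&i_ceph_lock);
        pthread_rwlock_unlock(&snap_rwsem);     /* reverse order on the way out */
        return have_cap ? 0 : -1;
}

int main(void)
{
        printf("%d %d\n", handle_export(1), handle_export(0));
        return 0;
}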
@@ -4434,8 +4434,6 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)

 bool check_session_state(struct ceph_mds_session *s)
 {
-        struct ceph_fs_client *fsc = s->s_mdsc->fsc;
-
         switch (s->s_state) {
         case CEPH_MDS_SESSION_OPEN:
                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
@@ -4444,10 +4442,6 @@ bool check_session_state(struct ceph_mds_session *s)
                 }
                 break;
         case CEPH_MDS_SESSION_CLOSING:
-                /* Should never reach this when not force unmounting */
-                WARN_ON_ONCE(s->s_ttl &&
-                             READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
-                fallthrough;
         case CEPH_MDS_SESSION_NEW:
         case CEPH_MDS_SESSION_RESTARTING:
         case CEPH_MDS_SESSION_CLOSED:
@@ -2385,7 +2385,11 @@ again:
                 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
                         err = -ENOSPC;
                 } else {
-                        pr_warn_ratelimited("FULL or reached pool quota\n");
+                        if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
+                                pr_warn_ratelimited("cluster is full (osdmap FULL)\n");
+                        else
+                                pr_warn_ratelimited("pool %lld is full or reached quota\n",
+                                                    req->r_t.base_oloc.pool);
                         req->r_t.paused = true;
                         maybe_request_map(osdc);
                 }
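The final hunk replaces the single ambiguous "FULL or reached pool quota"
warning with two distinct messages, depending on whether the osdmap FULL flag
(whole cluster) or only the target pool triggered the condition. A standalone
sketch of the same message selection follows; warn_full() and its arguments are
invented for illustration, and only the log strings come from the diff:

/*
 * Sketch of the disambiguated warning: choose the message based on
 * whether the whole cluster or just one pool is full.  Hypothetical
 * wrapper, not the libceph code itself.
 */
#include <stdio.h>
#include <stdbool.h>
#include <inttypes.h>

static void warn_full(bool cluster_full, int64_t pool)
{
        if (cluster_full)
                fprintf(stderr, "cluster is full (osdmap FULL)\n");
        else
                fprintf(stderr, "pool %" PRId64 " is full or reached quota\n", pool);
}

int main(void)
{
        warn_full(true, 1);     /* cluster-wide FULL flag set */
        warn_full(false, 42);   /* only pool 42 is full or over quota */
        return 0;
}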