Skip to content

BUG: Fix typo-related bug to resolve #9266 #10576

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 3 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions doc/source/whatsnew/v0.17.0.txt
Original file line number Diff line number Diff line change
Expand Up @@ -135,8 +135,13 @@ Bug Fixes
- Bug in ``DatetimeIndex`` and ``PeriodIndex.value_counts`` resets name from its result, but retains in result's ``Index``. (:issue:`10150`)

- Bug in `pandas.concat` with ``axis=0`` when column is of dtype ``category`` (:issue:`10177`)

- Bug in ``read_msgpack`` where input type is not always checked (:issue:`10369`)

- Bug in `pandas.read_csv` with ``index_col=False`` or with ``index_col=['a', 'b']`` (:issue:`10413`, :issue:`10467`)

- Bug in `Series.from_csv` with ``header`` kwarg not setting the ``Series.name`` or the ``Series.index.name`` (:issue:`10483`)

- Bug in `groupby.var` which caused variance to be inaccurate for small float values (:issue:`10448`)

- Bug in `_convert_to_ndarrays` which cause an `AttributeError` in some cases (:issue:`9266`)
2 changes: 1 addition & 1 deletion pandas/core/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -1055,7 +1055,7 @@ def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)

def __setitem__(self, key, value):
raise TypeError("Indexes do not support mutable operations")
raise TypeError("Index does not support mutable operations")

def __getitem__(self, key):
"""
Expand Down
2 changes: 1 addition & 1 deletion pandas/io/parsers.py
Original file line number Diff line number Diff line change
Expand Up @@ -997,7 +997,7 @@ def _convert_to_ndarrays(self, dct, na_values, na_fvalues, verbose=False,
try:
values = lib.map_infer(values, conv_f)
except ValueError:
mask = lib.ismember(values, na_values).view(np.uin8)
mask = lib.ismember(values, na_values).view(np.uint8)
values = lib.map_infer_mask(values, conv_f, mask)
coerce_type = False

Expand Down
21 changes: 21 additions & 0 deletions pandas/io/tests/test_parsers.py
Original file line number Diff line number Diff line change
Expand Up @@ -2679,6 +2679,27 @@ def test_fwf_compression(self):
compression=comp_name)
tm.assert_frame_equal(result, expected)

def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71"""
df = read_fwf(StringIO(data),
colspecs=[(0,17),(25,26),(33,37),(49,51),(58,62),(63,1000)],
names=['time','pri','pgn','dst','src','data'],
converters={
'pgn':lambda x: int(x,16),
'src':lambda x: int(x,16),
'dst':lambda x: int(x,16),
'data':lambda x: len(x.split(' '))})

expected = DataFrame([[1421302965.213420,3,61184,23,40,8],
[1421302964.226776,6,61442,None, 71,8]],
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can you specify: dtype = { 'pgn' : 'uint8' } as well (and fix the expected to do this too). I think it should work.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@jreback — I'm still partway through figuring this out, but adding the dtype argument to read_fwf returns an "engine does not support" error even when I set the engine to the C engine.

I'm not entirely certain what effect adding the dtype will have on the tests, as it appears that the error is thrown when parsing the dictionary in the converters parameter. Specifically for this case, the except clause is triggered when parsing the values associated with DST, since only one of them is provided in the test data and the conversion from hex to decimal on line 996 fails.

Let me know if I am not understanding something.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

sorry. that actually would be nice, but that is a separate issue #7141. so don't worry about this for now.

columns = ["time", "pri", "pgn", "dst", "src","data"])

# Hacky fix for dst column dtype
expected["dst"] = expected["dst"].astype(object)

tm.assert_frame_equal(df, expected)

def test_BytesIO_input(self):
if not compat.PY3:
raise nose.SkipTest("Bytes-related test - only needs to work on Python 3")
Expand Down
53 changes: 19 additions & 34 deletions pandas/src/generate_code.py
Original file line number Diff line number Diff line change
Expand Up @@ -1147,67 +1147,52 @@ def group_prod_bin_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,

group_var_template = """@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
def group_var_%(name)s(ndarray[%(dest_type2)s, ndim=2] out,
ndarray[int64_t] counts,
ndarray[%(dest_type2)s, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
%(dest_type2)s val, ct
ndarray[%(dest_type2)s, ndim=2] nobs, sumx, sumxx
%(dest_type2)s val, ct, oldmean
ndarray[%(dest_type2)s, ndim=2] nobs, mean

if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
sumxx = np.zeros_like(out)
mean = np.zeros_like(out)

N, K = (<object> values).shape

with nogil:
if K > 1:
for i in range(N):

lab = labels[i]
if lab < 0:
continue
out[:, :] = 0.0

counts[lab] += 1

for j in range(K):
val = values[i, j]

# not nan
if val == val:
nobs[lab, j] += 1
sumx[lab, j] += val
sumxx[lab, j] += val * val
else:
for i in range(N):
with nogil:
for i in range(N):
lab = labels[i]
if lab < 0:
continue

lab = labels[i]
if lab < 0:
continue
counts[lab] += 1

counts[lab] += 1
val = values[i, 0]
for j in range(K):
val = values[i, j]

# not nan
if val == val:
nobs[lab, 0] += 1
sumx[lab, 0] += val
sumxx[lab, 0] += val * val

nobs[lab, j] += 1
oldmean = mean[lab, j]
mean[lab, j] += (val - oldmean) / nobs[lab, j]
out[lab, j] += (val - mean[lab, j]) * (val - oldmean)

for i in range(ncounts):
for j in range(K):
ct = nobs[i, j]
if ct < 2:
out[i, j] = NAN
else:
out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
(ct * ct - ct))
out[i, j] /= (ct - 1)

"""

group_var_bin_template = """@cython.wraparound(False)
Expand Down
106 changes: 38 additions & 68 deletions pandas/src/generated.pyx
Original file line number Diff line number Diff line change
Expand Up @@ -7232,131 +7232,101 @@ def group_prod_bin_float32(ndarray[float32_t, ndim=2] out,

@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
def group_var_float64(ndarray[float64_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float64_t, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
float64_t val, ct
ndarray[float64_t, ndim=2] nobs, sumx, sumxx
float64_t val, ct, oldmean
ndarray[float64_t, ndim=2] nobs, mean

if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
sumxx = np.zeros_like(out)
mean = np.zeros_like(out)

N, K = (<object> values).shape

with nogil:
if K > 1:
for i in range(N):

lab = labels[i]
if lab < 0:
continue
out[:, :] = 0.0

counts[lab] += 1

for j in range(K):
val = values[i, j]

# not nan
if val == val:
nobs[lab, j] += 1
sumx[lab, j] += val
sumxx[lab, j] += val * val
else:
for i in range(N):
with nogil:
for i in range(N):
lab = labels[i]
if lab < 0:
continue

lab = labels[i]
if lab < 0:
continue
counts[lab] += 1

counts[lab] += 1
val = values[i, 0]
for j in range(K):
val = values[i, j]

# not nan
if val == val:
nobs[lab, 0] += 1
sumx[lab, 0] += val
sumxx[lab, 0] += val * val

nobs[lab, j] += 1
oldmean = mean[lab, j]
mean[lab, j] += (val - oldmean) / nobs[lab, j]
out[lab, j] += (val - mean[lab, j]) * (val - oldmean)

for i in range(ncounts):
for j in range(K):
ct = nobs[i, j]
if ct < 2:
out[i, j] = NAN
else:
out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
(ct * ct - ct))
out[i, j] /= (ct - 1)


@cython.wraparound(False)
@cython.boundscheck(False)
@cython.cdivision(True)
def group_var_float32(ndarray[float32_t, ndim=2] out,
ndarray[int64_t] counts,
ndarray[float32_t, ndim=2] values,
ndarray[int64_t] labels):
cdef:
Py_ssize_t i, j, N, K, lab, ncounts = len(counts)
float32_t val, ct
ndarray[float32_t, ndim=2] nobs, sumx, sumxx
float32_t val, ct, oldmean
ndarray[float32_t, ndim=2] nobs, mean

if not len(values) == len(labels):
raise AssertionError("len(index) != len(labels)")

nobs = np.zeros_like(out)
sumx = np.zeros_like(out)
sumxx = np.zeros_like(out)
mean = np.zeros_like(out)

N, K = (<object> values).shape

with nogil:
if K > 1:
for i in range(N):

lab = labels[i]
if lab < 0:
continue
out[:, :] = 0.0

counts[lab] += 1

for j in range(K):
val = values[i, j]

# not nan
if val == val:
nobs[lab, j] += 1
sumx[lab, j] += val
sumxx[lab, j] += val * val
else:
for i in range(N):
with nogil:
for i in range(N):
lab = labels[i]
if lab < 0:
continue

lab = labels[i]
if lab < 0:
continue
counts[lab] += 1

counts[lab] += 1
val = values[i, 0]
for j in range(K):
val = values[i, j]

# not nan
if val == val:
nobs[lab, 0] += 1
sumx[lab, 0] += val
sumxx[lab, 0] += val * val

nobs[lab, j] += 1
oldmean = mean[lab, j]
mean[lab, j] += (val - oldmean) / nobs[lab, j]
out[lab, j] += (val - mean[lab, j]) * (val - oldmean)

for i in range(ncounts):
for j in range(K):
ct = nobs[i, j]
if ct < 2:
out[i, j] = NAN
else:
out[i, j] = ((ct * sumxx[i, j] - sumx[i, j] * sumx[i, j]) /
(ct * ct - ct))
out[i, j] /= (ct - 1)



@cython.wraparound(False)
Expand Down
Loading