-
-
Save roberto257/be857a74400c574587de2c5884fd7ae3 to your computer and use it in GitHub Desktop.
Tennis |
<?xml version="1.0" encoding="UTF-8"?> | |
<project version="4"> | |
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7" project-jdk-type="Python SDK" /> | |
<component name="PyPackaging"> | |
<option name="earlyReleasesAsUpgrades" value="true" /> | |
</component> | |
</project> |
<?xml version="1.0" encoding="UTF-8"?> | |
<project version="4"> | |
<component name="ProjectModuleManager"> | |
<modules> | |
<module fileurl="file://$PROJECT_DIR$/.idea/Tennis.iml" filepath="$PROJECT_DIR$/.idea/Tennis.iml" /> | |
</modules> | |
</component> | |
</project> |
<?xml version="1.0" encoding="UTF-8"?> | |
<module type="PYTHON_MODULE" version="4"> | |
<component name="NewModuleRootManager"> | |
<content url="file://$MODULE_DIR$"> | |
<excludeFolder url="file://$MODULE_DIR$/venv" /> | |
</content> | |
<orderEntry type="jdk" jdkName="Python 3.7" jdkType="Python SDK" /> | |
<orderEntry type="sourceFolder" forTests="false" /> | |
</component> | |
<component name="TestRunnerService"> | |
<option name="PROJECT_TEST_RUNNER" value="Unittests" /> | |
</component> | |
</module> |
import pygame

pygame.init()

# Define some colors
BLACK = (0, 0, 0)
OUT = (193, 58, 34)      # out-of-bounds surround colour
COURT = (69, 150, 81)    # in-bounds playing surface colour
WHITE = (255, 255, 255)
GREEN = (0, 255, 0)
SKIN = (232, 214, 162)

# Create the screen
windowSize = (700, 650)
screen = pygame.display.set_mode(windowSize)
pygame.display.set_caption('Tennis')

# Start screen: wait until either Shift key is pressed.
# The font and the rendered label never change, so they are created once
# here instead of being rebuilt on every frame as in the original.
startGame = False
startFont = pygame.font.Font('freesansbold.ttf', 60)
startLabel = startFont.render('Clinton Tennis Tour', 1, WHITE)
while not startGame:
    screen.fill(BLACK)
    # NOTE(review): the key poll only runs when at least one event is
    # pending; this matches the original behaviour, so it is kept.
    for event in pygame.event.get():
        keyState = pygame.key.get_pressed()
        if keyState[pygame.K_RSHIFT] or keyState[pygame.K_LSHIFT]:
            startGame = True
    screen.blit(startLabel, (65, 225))
    pygame.display.flip()
# Player Sprites | |
class Robert(pygame.sprite.Sprite):
    """Bottom-court player sprite, driven by the arrow keys."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("robert_imgs/Robert_tennis.png")
        self.rect = self.image.get_rect()
        self.rect.center = (400, 575)  # start on the bottom baseline
        self.speedx = 0
        self.speedy = 0

    def update(self):
        """Move per the arrow keys, clamped to the bottom half of the court."""
        self.speedx = 0
        self.speedy = 0
        keyState = pygame.key.get_pressed()
        if keyState[pygame.K_LEFT]:
            self.speedx = -3
        if keyState[pygame.K_RIGHT]:
            self.speedx = 3
        self.rect.x += self.speedx
        # Keep the sprite inside the window horizontally.
        if self.rect.right > 700:
            self.rect.right = 700
        # BUG FIX: the original tested `self.rect.right < 0`, which only
        # becomes true after the sprite has drifted entirely off screen;
        # the LEFT edge is what must not go negative.
        if self.rect.left < 0:
            self.rect.left = 0
        if keyState[pygame.K_UP]:
            self.speedy = -4.65
        if keyState[pygame.K_DOWN]:
            self.speedy = 2.7
        # NOTE(review): Rect coordinates are integers, so the fractional
        # speeds are truncated on assignment — confirm that is intended.
        self.rect.y += self.speedy
        # Robert may not cross the net (y == 325): clamp to the bottom half.
        # (The original also checked `y < 0` afterwards, which is
        # unreachable once y has been clamped to >= 325; removed.)
        if self.rect.y < 325:
            self.rect.y = 325
class Camden(pygame.sprite.Sprite):
    """Top-court player sprite, driven by the W/A/S/D keys."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("camden_imgs/camden_tennis_front.png")
        self.rect = self.image.get_rect()
        self.rect.center = (260, 80)  # start near the top baseline
        self.speedx = 0
        self.speedy = 0

    def update(self):
        """Move per the W/A/S/D keys, clamped to the top half of the court."""
        self.speedx = 0
        self.speedy = 0
        keyState = pygame.key.get_pressed()
        if keyState[pygame.K_a]:
            self.speedx = -4.2
        if keyState[pygame.K_d]:
            self.speedx = 4.2
        # NOTE(review): Rect coordinates are integers, so the fractional
        # speeds are truncated on assignment — confirm that is intended.
        self.rect.x += self.speedx
        # Keep the sprite inside the window horizontally.
        if self.rect.right > 700:
            self.rect.right = 700
        # BUG FIX: the original tested `self.rect.right < 0`, which only
        # becomes true after the sprite has drifted entirely off screen;
        # the LEFT edge is what must not go negative.
        if self.rect.left < 0:
            self.rect.left = 0
        if keyState[pygame.K_w]:
            self.speedy = -5
        if keyState[pygame.K_s]:
            self.speedy = 3.75
        self.rect.y += self.speedy
        # Camden may not cross the net: clamp to the top half of the court,
        # and keep him from walking off the top of the window.
        if self.rect.y > 250:
            self.rect.y = 250
        if self.rect.y < 0:
            self.rect.y = 0
class Ball(pygame.sprite.Sprite):
    """The tennis ball: handles racket collisions, serves, and friction."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        self.image = pygame.image.load("tennisBall.png")
        self.rect = self.image.get_rect()
        self.rect.center = (420, 450)
        self.speedx = 0
        self.speedy = 0
        # Load the swing sound once here instead of re-reading the .wav
        # file from disk on every collision frame, as the original did.
        self.hit_sound = pygame.mixer.Sound('tennisserve.wav')

    def _strike(self, player, image_path, speedx, speedy):
        """Register a racket hit: swap the hitter's image, play the swing
        sound, nudge the hitter upward, and relaunch the ball."""
        player.image = pygame.image.load(image_path)
        self.hit_sound.play(0)
        # BUG FIX: the original line read `player.rect.y -5`, an expression
        # statement with no effect; `-= 5` (a small hop on contact) was the
        # clear intent.
        player.rect.y -= 5
        self.speedx = speedx
        self.speedy = speedy

    def update(self):
        # Racket collisions.  colliderect() accepts the sprite directly:
        # pygame rect-style arguments may be any object with a `rect`
        # attribute.  The two sides of each racket are mutually exclusive
        # (x more than 10px right vs. 10px left of the player's origin).
        if self.rect.colliderect(robert):
            if self.rect.x > robert.rect.x + 10:
                # Robert's forehand
                self._strike(robert, "robert_imgs/Robert_tennis2 (1).png", 3, -8)
            elif self.rect.x < robert.rect.x - 10:
                # Robert's backhand
                self._strike(robert, "robert_imgs/Robert_tennis2_backhand.png", -2, -7)
        if self.rect.colliderect(camden):
            if self.rect.x < camden.rect.x - 10:
                # Camden's forehand
                self._strike(camden, "camden_imgs/camden_front_forehand.png", 2, 9)
            elif self.rect.x > camden.rect.x + 10:
                # Camden's backhand (the original comment mislabeled this
                # branch as a second "forehand")
                self._strike(camden, "camden_imgs/camden_front_backhand-1.png.png", 2, 8)

        # Serves: "." serves for Robert, Tab for Camden, allowed only from
        # behind the baseline (the y checks); the x range selects the deuce
        # or ad court, which sets the serve's horizontal direction.
        keyState = pygame.key.get_pressed()
        if keyState[pygame.K_PERIOD] and robert.rect.y > 449:
            if 350 < robert.rect.x < 575:
                # Robert's deuce-side serve
                robert.image = pygame.image.load("robert_imgs/Robert_tennisserve-1.png.png")
                self.rect.center = (robert.rect.x + 15, robert.rect.y)
                self.speedx = -7
                self.speedy = -10
            elif 175 < robert.rect.x < 350:
                # Robert's ad-side serve
                robert.image = pygame.image.load("robert_imgs/Robert_tennisserve-1.png.png")
                self.rect.center = (robert.rect.x + 15, robert.rect.y)
                self.speedx = 7
                self.speedy = -10
        if keyState[pygame.K_TAB] and camden.rect.y < 78:
            if 175 < camden.rect.x < 350:
                # Camden's deuce-side serve
                camden.image = pygame.image.load("camden_imgs/camden_tennis_serve-1.png.png")
                self.rect.center = (camden.rect.x, camden.rect.y + 40)
                self.speedx = 7
                self.speedy = 14
            elif 350 < camden.rect.x < 575:
                # Camden's ad-side serve
                camden.image = pygame.image.load("camden_imgs/camden_tennis_serve-1.png.png")
                self.rect.center = (camden.rect.x, camden.rect.y + 40)
                self.speedx = -7
                self.speedy = 14

        # Friction: decay both components so the ball eventually stops
        # (the scoring loop treats |speed| < 0.2 as "at rest").
        self.speedy = self.speedy * .98
        self.speedx = self.speedx * .98
        self.rect = self.rect.move(self.speedx, self.speedy)
# Create the sprites and register them in one group for drawing/updating.
all_sprites = pygame.sprite.Group()
robert = Robert()
camden = Camden()
tennisBall = Ball()
all_sprites.add(robert)
all_sprites.add(tennisBall)
all_sprites.add(camden)

carryOn = True
clock = pygame.time.Clock()

# Scoring state.  (The original prefixed these with `global` statements;
# `global` is a no-op at module level, so those lines were removed.)
score = 0       # game score on the top scoreboard (0/15/30/40)
score2 = 0      # game score on the bottom scoreboard
setScore = 0    # sets won, top scoreboard
setScore2 = 0   # sets won, bottom scoreboard
stops = 0       # rests of the ball that credit the top scoreboard
ball_is_stopped = False    # edge detector so one rest counts only once
stops2 = 0      # rests of the ball that credit the bottom scoreboard
ball_is_stopped2 = False
#Main game loop

def _tennis_score(points):
    """Translate a cumulative point count into scoreboard values.

    Every 4 points completes a game (shown as a "set" on the original
    scoreboard).  Returns ``(game_score, sets_completed)`` where the game
    score cycles 15 -> 30 -> 40 -> 0.  This replaces the original ~300-line
    if-ladder that enumerated every stop count from 1 to 25 by hand.
    """
    sets_done, in_game = divmod(points, 4)
    return (0, 15, 30, 40)[in_game], sets_done

# Hoisted out of the loop: the font and the at-rest threshold never change.
font = pygame.font.Font('freesansbold.ttf', 32)
epsilonComp = .2

while carryOn:
    screen.fill(OUT)
    # NOTE(review): these explicit update() calls are followed by
    # all_sprites.update() later in the frame, so every sprite updates
    # twice per frame.  The movement speeds were evidently tuned around
    # that, so the double update is kept as-is.
    camden.update()
    robert.update()
    tennisBall.update()

    ball_moving = (abs(tennisBall.speedx) > epsilonComp
                   and abs(tennisBall.speedy) > epsilonComp)
    ball_at_rest = (abs(tennisBall.speedx) < epsilonComp
                    and abs(tennisBall.speedy) < epsilonComp)
    # NOTE(review): "in bounds" tests only x, and against 575 even though
    # the right sideline is drawn at x == 525 — possibly to allow for the
    # ball sprite's width; confirm against the court layout.
    in_bounds = 175 < tennisBall.rect.x < 575

    # Decide which scoreboard a resting ball would credit:
    #   ball in the bottom half and in bounds  -> top scoreboard
    #   ball in the bottom half but out        -> bottom scoreboard
    #   ball in the top half and in bounds     -> bottom scoreboard
    #   ball in the top half but out           -> top scoreboard
    if tennisBall.rect.y > 325:
        top_scores = in_bounds
    elif tennisBall.rect.y < 325:
        top_scores = not in_bounds
    else:
        top_scores = None  # exactly on the net line: no scoring this frame

    if top_scores is True:
        if ball_moving:
            ball_is_stopped = False
        elif ball_at_rest and not ball_is_stopped:
            stops += 1
            ball_is_stopped = True
            # The first recorded stop is ignored: the original ladder only
            # started awarding points at stops == 2.
            points = stops - 1
            if 0 < points <= 24:  # the original ladder capped at 6 sets
                score, setScore = _tennis_score(points)
                if score == 0:
                    # A game was just completed: both game scores reset.
                    score2 = 0
    elif top_scores is False:
        if ball_moving:
            ball_is_stopped2 = False
        elif ball_at_rest and not ball_is_stopped2:
            stops2 += 1
            ball_is_stopped2 = True
            if stops2 <= 24:  # the original ladder capped at 6 sets
                score2, setScore2 = _tennis_score(stops2)
                if score2 == 0:
                    # A game was just completed: both game scores reset.
                    score = 0

    # Render both scoreboards (game score and set score for each side).
    for value, center in ((score, (625, 50)),
                          (score2, (625, 600)),
                          (setScore, (625, 175)),
                          (setScore2, (625, 475))):
        box = font.render(str(value), True, WHITE, BLACK)
        boxRect = box.get_rect()
        boxRect.center = center
        screen.blit(box, boxRect)

    # Window close button or the X key ends the game.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            carryOn = False
        elif event.type == pygame.KEYDOWN and event.key == pygame.K_x:
            carryOn = False

    all_sprites.update()

    # Draw the court and its lines.
    pygame.draw.rect(screen, COURT, [175, 75, 350, 500])
    pygame.draw.line(screen, WHITE, (175, 574), (175, 75), 7)    # outer left line
    pygame.draw.line(screen, WHITE, (525, 574), (525, 75), 7)    # outer right line
    pygame.draw.line(screen, WHITE, (175, 200), (525, 200), 7)   # top center line
    pygame.draw.line(screen, WHITE, (175, 78), (525, 78), 7)     # top outer line
    pygame.draw.line(screen, WHITE, (175, 571), (525, 571), 7)   # bottom outer line
    pygame.draw.line(screen, WHITE, (175, 450), (525, 450), 7)   # bottom center line
    pygame.draw.line(screen, WHITE, (350, 200), (350, 450), 7)   # center white line
    pygame.draw.line(screen, BLACK, (175, 325), (525, 325), 10)  # net
    pygame.draw.line(screen, WHITE, (350, 574), (350, 584), 7)   # bottom serve line
    pygame.draw.line(screen, WHITE, (350, 65), (350, 75), 7)     # top serve line

    # Update
    all_sprites.draw(screen)
    pygame.display.update()
    clock.tick(60)

pygame.quit()
# This file must be used with "source bin/activate" *from bash* | |
# you cannot run it directly | |
deactivate () { | |
# reset old environment variables | |
if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then | |
PATH="${_OLD_VIRTUAL_PATH:-}" | |
export PATH | |
unset _OLD_VIRTUAL_PATH | |
fi | |
if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then | |
PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}" | |
export PYTHONHOME | |
unset _OLD_VIRTUAL_PYTHONHOME | |
fi | |
# This should detect bash and zsh, which have a hash command that must | |
# be called to get it to forget past commands. Without forgetting | |
# past commands the $PATH changes we made may not be respected | |
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then | |
hash -r | |
fi | |
if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then | |
PS1="${_OLD_VIRTUAL_PS1:-}" | |
export PS1 | |
unset _OLD_VIRTUAL_PS1 | |
fi | |
unset VIRTUAL_ENV | |
if [ ! "$1" = "nondestructive" ] ; then | |
# Self destruct! | |
unset -f deactivate | |
fi | |
} | |
# unset irrelevant variables | |
deactivate nondestructive | |
VIRTUAL_ENV="/Users/roberto257/PycharmProjects/Pygame/venv" | |
export VIRTUAL_ENV | |
_OLD_VIRTUAL_PATH="$PATH" | |
PATH="$VIRTUAL_ENV/bin:$PATH" | |
export PATH | |
# unset PYTHONHOME if set | |
# this will fail if PYTHONHOME is set to the empty string (which is bad anyway) | |
# could use `if (set -u; : $PYTHONHOME) ;` in bash | |
if [ -n "${PYTHONHOME:-}" ] ; then | |
_OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}" | |
unset PYTHONHOME | |
fi | |
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then | |
_OLD_VIRTUAL_PS1="${PS1:-}" | |
if [ "x(venv) " != x ] ; then | |
PS1="(venv) ${PS1:-}" | |
else | |
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then | |
# special case for Aspen magic directories | |
# see http://www.zetadev.com/software/aspen/ | |
PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1" | |
else | |
PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1" | |
fi | |
fi | |
export PS1 | |
fi | |
# This should detect bash and zsh, which have a hash command that must | |
# be called to get it to forget past commands. Without forgetting | |
# past commands the $PATH changes we made may not be respected | |
if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then | |
hash -r | |
fi |
# This file must be used with "source bin/activate.csh" *from csh*. | |
# You cannot run it directly. | |
# Created by Davide Di Blasi <davidedb@gmail.com>. | |
# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com> | |
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate' | |
# Unset irrelevant variables. | |
deactivate nondestructive | |
setenv VIRTUAL_ENV "/Users/roberto257/PycharmProjects/Pygame/venv" | |
set _OLD_VIRTUAL_PATH="$PATH" | |
setenv PATH "$VIRTUAL_ENV/bin:$PATH" | |
set _OLD_VIRTUAL_PROMPT="$prompt" | |
if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then | |
if ("venv" != "") then | |
set env_name = "venv" | |
else | |
if (`basename "VIRTUAL_ENV"` == "__") then | |
# special case for Aspen magic directories | |
# see http://www.zetadev.com/software/aspen/ | |
set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` | |
else | |
set env_name = `basename "$VIRTUAL_ENV"` | |
endif | |
endif | |
set prompt = "[$env_name] $prompt" | |
unset env_name | |
endif | |
alias pydoc python -m pydoc | |
rehash |
# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org) | |
# you cannot run it directly | |
function deactivate -d "Exit virtualenv and return to normal shell environment" | |
# reset old environment variables | |
if test -n "$_OLD_VIRTUAL_PATH" | |
set -gx PATH $_OLD_VIRTUAL_PATH | |
set -e _OLD_VIRTUAL_PATH | |
end | |
if test -n "$_OLD_VIRTUAL_PYTHONHOME" | |
set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME | |
set -e _OLD_VIRTUAL_PYTHONHOME | |
end | |
if test -n "$_OLD_FISH_PROMPT_OVERRIDE" | |
functions -e fish_prompt | |
set -e _OLD_FISH_PROMPT_OVERRIDE | |
functions -c _old_fish_prompt fish_prompt | |
functions -e _old_fish_prompt | |
end | |
set -e VIRTUAL_ENV | |
if test "$argv[1]" != "nondestructive" | |
# Self destruct! | |
functions -e deactivate | |
end | |
end | |
# unset irrelevant variables | |
deactivate nondestructive | |
set -gx VIRTUAL_ENV "/Users/roberto257/PycharmProjects/Pygame/venv" | |
set -gx _OLD_VIRTUAL_PATH $PATH | |
set -gx PATH "$VIRTUAL_ENV/bin" $PATH | |
# unset PYTHONHOME if set | |
if set -q PYTHONHOME | |
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME | |
set -e PYTHONHOME | |
end | |
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" | |
# fish uses a function instead of an env var to generate the prompt. | |
# save the current fish_prompt function as the function _old_fish_prompt | |
functions -c fish_prompt _old_fish_prompt | |
# with the original prompt function renamed, we can override with our own. | |
function fish_prompt | |
# Save the return status of the last command | |
set -l old_status $status | |
# Prompt override? | |
if test -n "(venv) " | |
printf "%s%s" "(venv) " (set_color normal) | |
else | |
# ...Otherwise, prepend env | |
set -l _checkbase (basename "$VIRTUAL_ENV") | |
if test $_checkbase = "__" | |
# special case for Aspen magic directories | |
# see http://www.zetadev.com/software/aspen/ | |
printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) | |
else | |
printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) | |
end | |
end | |
# Restore the return status of the previous command. | |
echo "exit $old_status" | . | |
_old_fish_prompt | |
end | |
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV" | |
end |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install' | |
__requires__ = 'setuptools==40.8.0' | |
import re | |
import sys | |
from pkg_resources import load_entry_point | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit( | |
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')() | |
) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7' | |
__requires__ = 'setuptools==40.8.0' | |
import re | |
import sys | |
from pkg_resources import load_entry_point | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit( | |
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')() | |
) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# -*- coding: utf-8 -*- | |
import re | |
import sys | |
from numpy.f2py.f2py2e import main | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit(main()) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# -*- coding: utf-8 -*- | |
import re | |
import sys | |
from numpy.f2py.f2py2e import main | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit(main()) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# -*- coding: utf-8 -*- | |
import re | |
import sys | |
from numpy.f2py.f2py2e import main | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit(main()) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip' | |
__requires__ = 'pip==19.0.3' | |
import re | |
import sys | |
from pkg_resources import load_entry_point | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit( | |
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')() | |
) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3' | |
__requires__ = 'pip==19.0.3' | |
import re | |
import sys | |
from pkg_resources import load_entry_point | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit( | |
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')() | |
) |
#!/Users/roberto257/PycharmProjects/Pygame/venv/bin/python | |
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7' | |
__requires__ = 'pip==19.0.3' | |
import re | |
import sys | |
from pkg_resources import load_entry_point | |
if __name__ == '__main__': | |
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) | |
sys.exit( | |
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')() | |
) |
/* | |
pygame - Python Game Library | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free | |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
*/ | |
#ifndef _CAMERA_H | |
#define _CAMERA_H | |
#include "_pygame.h" | |
#include "camera.h" | |
#endif | |
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free | |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
Pete Shinners | |
pete@shinners.org | |
*/ | |
#ifndef _PYGAME_H | |
#define _PYGAME_H | |
/** This header file includes all the definitions for the | |
** base pygame extensions. This header only requires | |
** SDL and Python includes. The reason for functions | |
** prototyped with #define's is to allow for maximum | |
** python portability. It also uses python as the | |
** runtime linker, which allows for late binding. For more | |
** information on this style of development, read the Python | |
** docs on this subject. | |
** http://www.python.org/doc/current/ext/using-cobjects.html | |
** | |
** If using this to build your own derived extensions, | |
** you'll see that the functions available here are mainly | |
** used to help convert between python objects and SDL objects. | |
** Since this library doesn't add a lot of functionality to | |
** the SDL libarary, it doesn't need to offer a lot either. | |
** | |
** When initializing your extension module, you must manually | |
** import the modules you want to use. (this is the part about | |
** using python as the runtime linker). Each module has its | |
** own import_xxx() routine. You need to perform this import | |
** after you have initialized your own module, and before | |
** you call any routines from that module. Since every module | |
** in pygame does this, there are plenty of examples. | |
** | |
** The base module does include some useful conversion routines | |
** that you are free to use in your own extension. | |
** | |
** When making changes, it is very important to keep the | |
** FIRSTSLOT and NUMSLOT constants up to date for each | |
** section. Also be sure not to overlap any of the slots. | |
** When you do make a mistake with this, it will result | |
** is a dereferenced NULL pointer that is easier to diagnose | |
** than it could be :] | |
**/ | |
#if defined(HAVE_SNPRINTF) /* defined in python.h (pyerrors.h) and SDL.h \ | |
(SDL_config.h) */ | |
#undef HAVE_SNPRINTF /* remove GCC redefine warning */ | |
#endif | |
// This must be before all else | |
#if defined(__SYMBIAN32__) && defined(OPENC) | |
#include <sys/types.h> | |
#if defined(__WINS__) | |
void * | |
_alloca(size_t size); | |
#define alloca _alloca | |
#endif | |
#endif | |
#define PG_STRINGIZE_HELPER(x) #x | |
#define PG_STRINGIZE(x) PG_STRINGIZE_HELPER(x) | |
#define PG_WARN(desc) message(__FILE__ "(" PG_STRINGIZE(__LINE__) "): WARNING: " #desc) | |
/* This is unconditionally defined in Python.h */ | |
#if defined(_POSIX_C_SOURCE) | |
#undef _POSIX_C_SOURCE | |
#endif | |
#include <Python.h> | |
/* the version macros are defined since version 1.9.5 */ | |
#define PG_MAJOR_VERSION 1 | |
#define PG_MINOR_VERSION 9 | |
#define PG_PATCH_VERSION 6 | |
#define PG_VERSIONNUM(MAJOR, MINOR, PATCH) (1000*(MAJOR) + 100*(MINOR) + (PATCH)) | |
#define PG_VERSION_ATLEAST(MAJOR, MINOR, PATCH) \ | |
(PG_VERSIONNUM(PG_MAJOR_VERSION, PG_MINOR_VERSION, PG_PATCH_VERSION) >= \ | |
PG_VERSIONNUM(MAJOR, MINOR, PATCH)) | |
/* Cobjects vanish in Python 3.2; so we will code as though we use capsules */ | |
#if defined(Py_CAPSULE_H) | |
#define PG_HAVE_CAPSULE 1 | |
#else | |
#define PG_HAVE_CAPSULE 0 | |
#endif | |
#if defined(Py_COBJECT_H) | |
#define PG_HAVE_COBJECT 1 | |
#else | |
#define PG_HAVE_COBJECT 0 | |
#endif | |
#if !PG_HAVE_CAPSULE | |
#define PyCapsule_New(ptr, n, dfn) PyCObject_FromVoidPtr(ptr, dfn) | |
#define PyCapsule_GetPointer(obj, n) PyCObject_AsVoidPtr(obj) | |
#define PyCapsule_CheckExact(obj) PyCObject_Check(obj) | |
#endif | |
/* Pygame uses Py_buffer (PEP 3118) to exchange array information internally; | |
* define here as needed. | |
*/ | |
#if !defined(PyBUF_SIMPLE) | |
typedef struct bufferinfo { | |
void *buf; | |
PyObject *obj; | |
Py_ssize_t len; | |
Py_ssize_t itemsize; | |
int readonly; | |
int ndim; | |
char *format; | |
Py_ssize_t *shape; | |
Py_ssize_t *strides; | |
Py_ssize_t *suboffsets; | |
void *internal; | |
} Py_buffer; | |
/* Flags for getting buffers */ | |
#define PyBUF_SIMPLE 0 | |
#define PyBUF_WRITABLE 0x0001 | |
/* we used to include an E, backwards compatible alias */ | |
#define PyBUF_WRITEABLE PyBUF_WRITABLE | |
#define PyBUF_FORMAT 0x0004 | |
#define PyBUF_ND 0x0008 | |
#define PyBUF_STRIDES (0x0010 | PyBUF_ND) | |
#define PyBUF_C_CONTIGUOUS (0x0020 | PyBUF_STRIDES) | |
#define PyBUF_F_CONTIGUOUS (0x0040 | PyBUF_STRIDES) | |
#define PyBUF_ANY_CONTIGUOUS (0x0080 | PyBUF_STRIDES) | |
#define PyBUF_INDIRECT (0x0100 | PyBUF_STRIDES) | |
#define PyBUF_CONTIG (PyBUF_ND | PyBUF_WRITABLE) | |
#define PyBUF_CONTIG_RO (PyBUF_ND) | |
#define PyBUF_STRIDED (PyBUF_STRIDES | PyBUF_WRITABLE) | |
#define PyBUF_STRIDED_RO (PyBUF_STRIDES) | |
#define PyBUF_RECORDS (PyBUF_STRIDES | PyBUF_WRITABLE | PyBUF_FORMAT) | |
#define PyBUF_RECORDS_RO (PyBUF_STRIDES | PyBUF_FORMAT) | |
#define PyBUF_FULL (PyBUF_INDIRECT | PyBUF_WRITABLE | PyBUF_FORMAT) | |
#define PyBUF_FULL_RO (PyBUF_INDIRECT | PyBUF_FORMAT) | |
#define PyBUF_READ 0x100 | |
#define PyBUF_WRITE 0x200 | |
#define PyBUF_SHADOW 0x400 | |
typedef int (*getbufferproc)(PyObject *, Py_buffer *, int); | |
typedef void (*releasebufferproc)(Py_buffer *); | |
#endif /* #if !defined(PyBUF_SIMPLE) */ | |
/* Flag indicating a pg_buffer; used for assertions within callbacks */ | |
#ifndef NDEBUG | |
#define PyBUF_PYGAME 0x4000 | |
#endif | |
#define PyBUF_HAS_FLAG(f, F) (((f) & (F)) == (F)) | |
/* Array information exchange struct C type; inherits from Py_buffer | |
* | |
* Pygame uses its own Py_buffer derived C struct as an internal representation | |
* of an imported array buffer. The extended Py_buffer allows for a | |
* per-instance release callback, | |
*/ | |
typedef void (*pybuffer_releaseproc)(Py_buffer *); | |
typedef struct pg_bufferinfo_s { | |
Py_buffer view; | |
PyObject *consumer; /* Input: Borrowed reference */ | |
pybuffer_releaseproc release_buffer; | |
} pg_buffer; | |
/* Operating system specific adjustments | |
*/ | |
// No signal() | |
#if defined(__SYMBIAN32__) && defined(HAVE_SIGNAL_H) | |
#undef HAVE_SIGNAL_H | |
#endif | |
#if defined(HAVE_SNPRINTF) | |
#undef HAVE_SNPRINTF | |
#endif | |
#ifdef MS_WIN32 /*Python gives us MS_WIN32, SDL needs just WIN32*/ | |
#ifndef WIN32 | |
#define WIN32 | |
#endif | |
#endif | |
/// Prefix when initializing module | |
#define MODPREFIX "" | |
/// Prefix when importing module | |
#define IMPPREFIX "pygame." | |
#ifdef __SYMBIAN32__ | |
#undef MODPREFIX | |
#undef IMPPREFIX | |
// On Symbian there is no pygame package. The extensions are built-in or in | |
// sys\bin. | |
#define MODPREFIX "pygame_" | |
#define IMPPREFIX "pygame_" | |
#endif | |
#include <SDL.h> | |
/* Pygame's SDL version macros: | |
* IS_SDLv1 is 1 if SDL 1.x.x, 0 otherwise | |
* IS_SDLv2 is 1 if at least SDL 2.0.0, 0 otherwise | |
*/ | |
#if (SDL_VERSION_ATLEAST(2, 0, 0)) | |
#define IS_SDLv1 0 | |
#define IS_SDLv2 1 | |
#else | |
#define IS_SDLv1 1 | |
#define IS_SDLv2 0 | |
#endif | |
/*#if IS_SDLv1 && PG_MAJOR_VERSION >= 2 | |
#error pygame 2 requires SDL 2 | |
#endif*/ | |
#if IS_SDLv2 | |
/* SDL 1.2 constants removed from SDL 2.
 * Flags mapped onto their SDL 2 window-flag equivalents where one exists;
 * flags with no SDL 2 counterpart are defined as 0 so OR-ing them in is a
 * harmless no-op. */
typedef enum {
    SDL_HWSURFACE = 0,
    SDL_RESIZABLE = SDL_WINDOW_RESIZABLE,
    SDL_ASYNCBLIT = 0,
    SDL_OPENGL = SDL_WINDOW_OPENGL,
    SDL_OPENGLBLIT = 0,
    SDL_ANYFORMAT = 0,
    SDL_HWPALETTE = 0,
    SDL_DOUBLEBUF = 0,
    SDL_FULLSCREEN = SDL_WINDOW_FULLSCREEN,
    SDL_HWACCEL = 0,
    SDL_SRCCOLORKEY = 0,
    SDL_RLEACCELOK = 0,
    SDL_SRCALPHA = 0,
    SDL_NOFRAME = SDL_WINDOW_BORDERLESS,
    SDL_GL_SWAP_CONTROL = 0,
    TIMER_RESOLUTION = 0
} PygameVideoFlags;
/* the wheel button constants were removed from SDL 2 */
typedef enum {
    PGM_BUTTON_LEFT = SDL_BUTTON_LEFT,
    PGM_BUTTON_RIGHT = SDL_BUTTON_RIGHT,
    PGM_BUTTON_MIDDLE = SDL_BUTTON_MIDDLE,
    PGM_BUTTON_WHEELUP = 4,             /* SDL 1.2 wheel-up button number */
    PGM_BUTTON_WHEELDOWN = 5,           /* SDL 1.2 wheel-down button number */
    PGM_BUTTON_X1 = SDL_BUTTON_X1 + 2,  /* shifted by 2 to make room for the wheel buttons */
    PGM_BUTTON_X2 = SDL_BUTTON_X2 + 2,
    PGM_BUTTON_KEEP = 0x80
} PygameMouseFlags;
/* SDL 1.2 event codes re-created on top of the SDL 2 user-event range. */
typedef enum {
    SDL_NOEVENT = 0,
    /* SDL 1.2 allowed for 8 user defined events. */
    SDL_NUMEVENTS = SDL_USEREVENT + 8,
    SDL_ACTIVEEVENT = SDL_NUMEVENTS, /* NOTE: deliberately shares its value with PGE_EVENTBEGIN */
    PGE_EVENTBEGIN = SDL_NUMEVENTS,  /* first pygame-defined event code */
    SDL_VIDEORESIZE,
    SDL_VIDEOEXPOSE,
    PGE_KEYREPEAT,
    PGE_EVENTEND                     /* one past the last pygame-defined event code */
} PygameEventCode;
#define PGE_NUMEVENTS (PGE_EVENTEND - PGE_EVENTBEGIN)
/* App activation state codes (replacing SDL 1.2's SDL_ACTIVEEVENT states).
 * NOTE(review): values here are sequential (0,1,2); whether callers expect
 * SDL 1.2's original bit-flag values should be confirmed at the use sites. */
typedef enum {
    SDL_APPFOCUSMOUSE,
    SDL_APPINPUTFOCUS,
    SDL_APPACTIVE
} PygameAppCode;
/* Surface flags: based on SDL 1.2 flags.
 * PGS_* values are pygame's own encoding; PGS_SHOWN/PGS_HIDDEN have no
 * SDL 1.2 equivalent and were added for SDL 2. */
typedef enum {
    PGS_SWSURFACE = 0x00000000,
    PGS_HWSURFACE = 0x00000001,
    PGS_ASYNCBLIT = 0x00000004,
    PGS_ANYFORMAT = 0x10000000,
    PGS_HWPALETTE = 0x20000000,
    PGS_DOUBLEBUF = 0x40000000,
    PGS_FULLSCREEN = 0x80000000,
    PGS_OPENGL = 0x00000002,
    PGS_OPENGLBLIT = 0x0000000A,
    PGS_RESIZABLE = 0x00000010,
    PGS_NOFRAME = 0x00000020,
    PGS_SHOWN = 0x00000040, /* Added from SDL 2 */
    PGS_HIDDEN = 0x00000080, /* Added from SDL 2 */
    PGS_HWACCEL = 0x00000100,
    PGS_SRCCOLORKEY = 0x00001000,
    PGS_RLEACCELOK = 0x00002000,
    PGS_RLEACCEL = 0x00004000,
    PGS_SRCALPHA = 0x00010000,
    PGS_PREALLOC = 0x01000000
} PygameSurfaceFlags;
/* SDL 1.2-style video info reconstructed for SDL 2 builds; mirrors the
 * field layout of SDL 1.2's SDL_VideoInfo (used in the SDLv1 branch of
 * pgVidInfoObject below), plus a vfmt_data copy for the pixel format. */
typedef struct {
    Uint32 hw_available:1;
    Uint32 wm_available:1;
    Uint32 blit_hw:1;
    Uint32 blit_hw_CC:1;
    Uint32 blit_hw_A:1;
    Uint32 blit_sw:1;
    Uint32 blit_sw_CC:1;
    Uint32 blit_sw_A:1;
    Uint32 blit_fill:1;
    Uint32 video_mem;
    SDL_PixelFormat *vfmt;     /* points at vfmt_data -- TODO confirm at the fill site */
    SDL_PixelFormat vfmt_data; /* in-struct storage for the pixel format */
    int current_w;
    int current_h;
} pg_VideoInfo;
#endif /* IS_SDLv2 */ | |
/* macros used throughout the source */ | |
#define RAISE(x, y) (PyErr_SetString((x), (y)), (PyObject *)NULL) | |
#ifdef WITH_THREAD | |
#define PG_CHECK_THREADS() (1) | |
#else /* ~WITH_THREAD */ | |
#define PG_CHECK_THREADS() \ | |
(RAISE(PyExc_NotImplementedError, \ | |
"Python built without thread support")) | |
#endif /* ~WITH_THREAD */ | |
#define PyType_Init(x) (((x).ob_type) = &PyType_Type) | |
#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API" | |
#ifndef MIN | |
#define MIN(a, b) ((a) < (b) ? (a) : (b)) | |
#endif | |
#ifndef MAX | |
#define MAX(a, b) ((a) > (b) ? (a) : (b)) | |
#endif | |
#ifndef ABS | |
#define ABS(a) (((a) < 0) ? -(a) : (a)) | |
#endif | |
/* test sdl initializations */ | |
#define VIDEO_INIT_CHECK() \ | |
if (!SDL_WasInit(SDL_INIT_VIDEO)) \ | |
return RAISE(pgExc_SDLError, "video system not initialized") | |
#define CDROM_INIT_CHECK() \ | |
if (!SDL_WasInit(SDL_INIT_CDROM)) \ | |
return RAISE(pgExc_SDLError, "cdrom system not initialized") | |
#define JOYSTICK_INIT_CHECK() \ | |
if (!SDL_WasInit(SDL_INIT_JOYSTICK)) \ | |
return RAISE(pgExc_SDLError, "joystick system not initialized") | |
/* BASE */ | |
#define VIEW_CONTIGUOUS 1 | |
#define VIEW_C_ORDER 2 | |
#define VIEW_F_ORDER 4 | |
#define PYGAMEAPI_BASE_FIRSTSLOT 0 | |
#if IS_SDLv1 | |
#define PYGAMEAPI_BASE_NUMSLOTS 19 | |
#else /* IS_SDLv2 */ | |
#define PYGAMEAPI_BASE_NUMSLOTS 23 | |
#endif /* IS_SDLv2 */ | |
#ifndef PYGAMEAPI_BASE_INTERNAL | |
#define pgExc_SDLError ((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT]) | |
#define pg_RegisterQuit \ | |
(*(void (*)(void (*)(void)))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 1]) | |
#define pg_IntFromObj \ | |
(*(int (*)(PyObject *, int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 2]) | |
#define pg_IntFromObjIndex \ | |
(*(int (*)(PyObject *, int, \ | |
int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 3]) | |
#define pg_TwoIntsFromObj \ | |
(*(int (*)(PyObject *, int *, \ | |
int *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 4]) | |
#define pg_FloatFromObj \ | |
(*(int (*)(PyObject *, float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 5]) | |
#define pg_FloatFromObjIndex \ | |
(*(int (*)(PyObject *, int, \ | |
float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 6]) | |
#define pg_TwoFloatsFromObj \ | |
(*(int (*)(PyObject *, float *, \ | |
float *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 7]) | |
#define pg_UintFromObj \ | |
(*(int (*)(PyObject *, \ | |
Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 8]) | |
#define pg_UintFromObjIndex \ | |
(*(int (*)(PyObject *, int, \ | |
Uint32 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 9]) | |
#define pgVideo_AutoQuit \ | |
(*(void (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 10]) | |
#define pgVideo_AutoInit \ | |
(*(int (*)(void))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 11]) | |
#define pg_RGBAFromObj \ | |
(*(int (*)(PyObject *, \ | |
Uint8 *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 12]) | |
#define pgBuffer_AsArrayInterface \ | |
(*(PyObject * (*)(Py_buffer *)) \ | |
PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 13]) | |
#define pgBuffer_AsArrayStruct \ | |
(*(PyObject * (*)(Py_buffer *)) \ | |
PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 14]) | |
#define pgObject_GetBuffer \ | |
(*(int (*)(PyObject *, pg_buffer *, \ | |
int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 15]) | |
#define pgBuffer_Release \ | |
(*(void (*)(pg_buffer *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 16]) | |
#define pgDict_AsBuffer \ | |
(*(int (*)(pg_buffer *, PyObject *, \ | |
int))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 17]) | |
#define pgExc_BufferError \ | |
((PyObject *)PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 18]) | |
#if IS_SDLv2 | |
#define pg_GetDefaultWindow \ | |
(*(SDL_Window * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 19]) | |
#define pg_SetDefaultWindow \ | |
(*(void (*)(SDL_Window *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 20]) | |
#define pg_GetDefaultWindowSurface \ | |
(*(PyObject * (*)(void)) PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 21]) | |
#define pg_SetDefaultWindowSurface \ | |
(*(void (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_BASE_FIRSTSLOT + 22]) | |
#endif /* IS_SDLv2 */ | |
#define import_pygame_base() IMPORT_PYGAME_MODULE(base, BASE) | |
#endif | |
/* RECT */ | |
#define PYGAMEAPI_RECT_FIRSTSLOT \ | |
(PYGAMEAPI_BASE_FIRSTSLOT + PYGAMEAPI_BASE_NUMSLOTS) | |
#define PYGAMEAPI_RECT_NUMSLOTS 4 | |
/* GAME_Rect: pygame's internal rectangle type.  Under SDL 2 it is simply
 * SDL_Rect (plain int members); under SDL 1 a compatible int-based struct
 * is defined instead. */
#if IS_SDLv1
typedef struct {
    int x, y;
    int w, h;
} GAME_Rect;
#else
typedef SDL_Rect GAME_Rect;
#endif
/* Python Rect object: a GAME_Rect plus weak-reference support. */
typedef struct {
    PyObject_HEAD GAME_Rect r;
    PyObject *weakreflist; /* list head for weak references to this object */
} pgRectObject;
#define pgRect_AsRect(x) (((pgRectObject *)x)->r) | |
#ifndef PYGAMEAPI_RECT_INTERNAL | |
#define pgRect_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0]) | |
#define pgRect_Type \ | |
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 0]) | |
#define pgRect_New \ | |
(*(PyObject * (*)(SDL_Rect *)) PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 1]) | |
#define pgRect_New4 \ | |
(*(PyObject * (*)(int, int, int, int)) \ | |
PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 2]) | |
#define pgRect_FromObject \ | |
(*(GAME_Rect * (*)(PyObject *, GAME_Rect *)) \ | |
PyGAME_C_API[PYGAMEAPI_RECT_FIRSTSLOT + 3]) | |
#define import_pygame_rect() IMPORT_PYGAME_MODULE(rect, RECT) | |
#endif | |
/* CDROM */ | |
#define PYGAMEAPI_CDROM_FIRSTSLOT \ | |
(PYGAMEAPI_RECT_FIRSTSLOT + PYGAMEAPI_RECT_NUMSLOTS) | |
#define PYGAMEAPI_CDROM_NUMSLOTS 2 | |
/* Python CD object: wraps the integer drive id used by the cdrom module. */
typedef struct {
    PyObject_HEAD int id; /* drive id; see pgCD_AsID / pgCD_New */
} pgCDObject;
#define pgCD_AsID(x) (((pgCDObject *)x)->id) | |
#ifndef PYGAMEAPI_CDROM_INTERNAL | |
#define pgCD_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0]) | |
#define pgCD_Type \ | |
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 0]) | |
#define pgCD_New \ | |
(*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_CDROM_FIRSTSLOT + 1]) | |
#define import_pygame_cd() IMPORT_PYGAME_MODULE(cdrom, CDROM) | |
#endif | |
/* JOYSTICK */ | |
#define PYGAMEAPI_JOYSTICK_FIRSTSLOT \ | |
(PYGAMEAPI_CDROM_FIRSTSLOT + PYGAMEAPI_CDROM_NUMSLOTS) | |
#define PYGAMEAPI_JOYSTICK_NUMSLOTS 2 | |
/* Python Joystick object: wraps the integer device id used by the joystick module. */
typedef struct {
    PyObject_HEAD int id; /* device id; see pgJoystick_AsID / pgJoystick_New */
} pgJoystickObject;
#define pgJoystick_AsID(x) (((pgJoystickObject *)x)->id) | |
#ifndef PYGAMEAPI_JOYSTICK_INTERNAL | |
#define pgJoystick_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0]) | |
#define pgJoystick_Type \ | |
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 0]) | |
#define pgJoystick_New \ | |
(*(PyObject * (*)(int)) PyGAME_C_API[PYGAMEAPI_JOYSTICK_FIRSTSLOT + 1]) | |
#define import_pygame_joystick() IMPORT_PYGAME_MODULE(joystick, JOYSTICK) | |
#endif | |
/* DISPLAY */ | |
#define PYGAMEAPI_DISPLAY_FIRSTSLOT \ | |
(PYGAMEAPI_JOYSTICK_FIRSTSLOT + PYGAMEAPI_JOYSTICK_NUMSLOTS) | |
#define PYGAMEAPI_DISPLAY_NUMSLOTS 2 | |
/* Python VidInfo object: display capability information.  Wraps SDL's own
 * SDL_VideoInfo under SDL 1, pygame's reconstructed pg_VideoInfo under SDL 2. */
typedef struct {
#if IS_SDLv1
    PyObject_HEAD SDL_VideoInfo info;
#else
    PyObject_HEAD pg_VideoInfo info;
#endif
} pgVidInfoObject;
#define pgVidInfo_AsVidInfo(x) (((pgVidInfoObject *)x)->info) | |
#ifndef PYGAMEAPI_DISPLAY_INTERNAL | |
#define pgVidInfo_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0]) | |
#define pgVidInfo_Type \ | |
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 0]) | |
#if IS_SDLv1 | |
#define pgVidInfo_New \ | |
(*(PyObject * (*)(SDL_VideoInfo *)) \ | |
PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1]) | |
#else | |
#define pgVidInfo_New \ | |
(*(PyObject * (*)(pg_VideoInfo *)) \ | |
PyGAME_C_API[PYGAMEAPI_DISPLAY_FIRSTSLOT + 1]) | |
#endif | |
#define import_pygame_display() IMPORT_PYGAME_MODULE(display, DISPLAY) | |
#endif | |
/* SURFACE */ | |
#define PYGAMEAPI_SURFACE_FIRSTSLOT \ | |
(PYGAMEAPI_DISPLAY_FIRSTSLOT + PYGAMEAPI_DISPLAY_NUMSLOTS) | |
#define PYGAMEAPI_SURFACE_NUMSLOTS 3 | |
/* Python Surface object: wraps an SDL_Surface plus pygame bookkeeping. */
typedef struct {
    PyObject_HEAD SDL_Surface *surf; /* the wrapped SDL surface */
#if IS_SDLv2
    int owner; /* nonzero when this object owns surf; see the
                * pgSurface_New / pgSurface_NewNoOwn macros below */
#endif /* IS_SDLv2 */
    struct pgSubSurface_Data *subsurface; /*ptr to subsurface data (if a
                                           * subsurface)*/
    PyObject *weakreflist; /* weak-reference support */
    PyObject *locklist;    /* objects currently holding locks on this surface */
    PyObject *dependency;  /* object this surface depends on -- presumably kept
                            * alive for the surface's lifetime; TODO confirm */
} pgSurfaceObject;
#define pgSurface_AsSurface(x) (((pgSurfaceObject *)x)->surf) | |
#ifndef PYGAMEAPI_SURFACE_INTERNAL | |
#define pgSurface_Check(x) \ | |
(PyObject_IsInstance((x), \ | |
(PyObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0])) | |
#define pgSurface_Type \ | |
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 0]) | |
#if IS_SDLv1 | |
#define pgSurface_New \ | |
(*(PyObject * (*)(SDL_Surface *)) \ | |
PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1]) | |
#else /* IS_SDLv2 */ | |
#define pgSurface_New2 \ | |
(*(PyObject * (*)(SDL_Surface *, int)) \ | |
PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 1]) | |
#endif /* IS_SDLv2 */ | |
#define pgSurface_Blit \ | |
(*(int (*)(PyObject *, PyObject *, SDL_Rect *, SDL_Rect *, \ | |
int))PyGAME_C_API[PYGAMEAPI_SURFACE_FIRSTSLOT + 2]) | |
#define import_pygame_surface() \ | |
do { \ | |
IMPORT_PYGAME_MODULE(surface, SURFACE); \ | |
if (PyErr_Occurred() != NULL) \ | |
break; \ | |
IMPORT_PYGAME_MODULE(surflock, SURFLOCK); \ | |
} while (0) | |
#if IS_SDLv2 | |
#define pgSurface_New(surface) pgSurface_New2((surface), 1) | |
#define pgSurface_NewNoOwn(surface) pgSurface_New2((surface), 0) | |
#endif /* IS_SDLv2 */ | |
#endif | |
/* SURFLOCK */ /*auto import/init by surface*/ | |
#define PYGAMEAPI_SURFLOCK_FIRSTSLOT \ | |
(PYGAMEAPI_SURFACE_FIRSTSLOT + PYGAMEAPI_SURFACE_NUMSLOTS) | |
#define PYGAMEAPI_SURFLOCK_NUMSLOTS 8 | |
/* Bookkeeping attached to a surface that was created as a subsurface. */
struct pgSubSurface_Data {
    PyObject *owner;      /* the parent surface object */
    int pixeloffset;      /* offset of the subsurface's first pixel within the
                           * parent's pixel buffer -- units (bytes vs pixels)
                           * to be confirmed at the fill site */
    int offsetx, offsety; /* position of the subsurface within the parent */
};
/* Lock object that keeps a surface locked for the lifetime of another object. */
typedef struct {
    PyObject_HEAD PyObject *surface; /* the surface being kept locked */
    PyObject *lockobj;               /* object on whose behalf the lock is held */
    PyObject *weakrefs;              /* weak-reference support */
} pgLifetimeLockObject;
#ifndef PYGAMEAPI_SURFLOCK_INTERNAL | |
#define pgLifetimeLock_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 0]) | |
#define pgSurface_Prep(x) \ | |
if (((pgSurfaceObject *)x)->subsurface) \ | |
(*(*(void (*)( \ | |
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 1]))(x) | |
#define pgSurface_Unprep(x) \ | |
if (((pgSurfaceObject *)x)->subsurface) \ | |
(*(*(void (*)( \ | |
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 2]))(x) | |
#define pgSurface_Lock \ | |
(*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 3]) | |
#define pgSurface_Unlock \ | |
(*(int (*)(PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 4]) | |
#define pgSurface_LockBy \ | |
(*(int (*)(PyObject *, \ | |
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 5]) | |
#define pgSurface_UnlockBy \ | |
(*(int (*)(PyObject *, \ | |
PyObject *))PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 6]) | |
#define pgSurface_LockLifetime \ | |
(*(PyObject * (*)(PyObject *, PyObject *)) \ | |
PyGAME_C_API[PYGAMEAPI_SURFLOCK_FIRSTSLOT + 7]) | |
#endif | |
/* EVENT */ | |
#define PYGAMEAPI_EVENT_FIRSTSLOT \ | |
(PYGAMEAPI_SURFLOCK_FIRSTSLOT + PYGAMEAPI_SURFLOCK_NUMSLOTS) | |
#if IS_SDLv1 | |
#define PYGAMEAPI_EVENT_NUMSLOTS 4 | |
#else /* IS_SDLv2 */ | |
#define PYGAMEAPI_EVENT_NUMSLOTS 6 | |
#endif /* IS_SDLv2 */ | |
/* Python event object: an event type code plus its attribute dictionary. */
typedef struct {
    PyObject_HEAD int type; /* event type code (SDL_* or pygame PGE_* value) */
    PyObject *dict;         /* event attributes exposed to Python */
} pgEventObject;
#ifndef PYGAMEAPI_EVENT_INTERNAL | |
#define pgEvent_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0]) | |
#define pgEvent_Type \ | |
(*(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 0]) | |
#define pgEvent_New \ | |
(*(PyObject * (*)(SDL_Event *)) \ | |
PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 1]) | |
#define pgEvent_New2 \ | |
(*(PyObject * (*)(int, PyObject *)) \ | |
PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 2]) | |
#define pgEvent_FillUserEvent \ | |
(*(int (*)(pgEventObject *, \ | |
SDL_Event *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 3]) | |
#if IS_SDLv2 | |
#define pg_EnableKeyRepeat \ | |
(*(int (*)(int, int))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 4]) | |
#define pg_GetKeyRepeat \ | |
(*(void (*)(int *, int *))PyGAME_C_API[PYGAMEAPI_EVENT_FIRSTSLOT + 5]) | |
#endif /* IS_SDLv2 */ | |
#define import_pygame_event() IMPORT_PYGAME_MODULE(event, EVENT) | |
#endif | |
/* RWOBJECT */ | |
/* The rwobject functions are only needed for C-side work; they are not accessible from Python. */
#define PYGAMEAPI_RWOBJECT_FIRSTSLOT \ | |
(PYGAMEAPI_EVENT_FIRSTSLOT + PYGAMEAPI_EVENT_NUMSLOTS) | |
#define PYGAMEAPI_RWOBJECT_NUMSLOTS 6 | |
#ifndef PYGAMEAPI_RWOBJECT_INTERNAL | |
#define pgRWops_FromObject \ | |
(*(SDL_RWops * (*)(PyObject *)) \ | |
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 0]) | |
#define pgRWops_IsFileObject \ | |
(*(int (*)(SDL_RWops *))PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 1]) | |
#define pg_EncodeFilePath \ | |
(*(PyObject * (*)(PyObject *, PyObject *)) \ | |
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 2]) | |
#define pg_EncodeString \ | |
(*(PyObject * (*)(PyObject *, const char *, const char *, PyObject *)) \ | |
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 3]) | |
#define pgRWops_FromFileObject \ | |
(*(SDL_RWops * (*)(PyObject *)) \ | |
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 4]) | |
#define pgRWops_ReleaseObject \ | |
(*(int (*)(SDL_RWops *)) \ | |
PyGAME_C_API[PYGAMEAPI_RWOBJECT_FIRSTSLOT + 5]) | |
#define import_pygame_rwobject() IMPORT_PYGAME_MODULE(rwobject, RWOBJECT) | |
#endif | |
/* PixelArray */ | |
#define PYGAMEAPI_PIXELARRAY_FIRSTSLOT \ | |
(PYGAMEAPI_RWOBJECT_FIRSTSLOT + PYGAMEAPI_RWOBJECT_NUMSLOTS) | |
#define PYGAMEAPI_PIXELARRAY_NUMSLOTS 2 | |
#ifndef PYGAMEAPI_PIXELARRAY_INTERNAL | |
#define PyPixelArray_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 0]) | |
#define PyPixelArray_New \ | |
(*(PyObject * (*)) PyGAME_C_API[PYGAMEAPI_PIXELARRAY_FIRSTSLOT + 1]) | |
#define import_pygame_pixelarray() IMPORT_PYGAME_MODULE(pixelarray, PIXELARRAY) | |
#endif /* PYGAMEAPI_PIXELARRAY_INTERNAL */ | |
/* Color */ | |
#define PYGAMEAPI_COLOR_FIRSTSLOT \ | |
(PYGAMEAPI_PIXELARRAY_FIRSTSLOT + PYGAMEAPI_PIXELARRAY_NUMSLOTS) | |
#define PYGAMEAPI_COLOR_NUMSLOTS 4 | |
#ifndef PYGAMEAPI_COLOR_INTERNAL | |
#define pgColor_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 0]) | |
#define pgColor_Type (*(PyObject *)PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT]) | |
#define pgColor_New \ | |
(*(PyObject * (*)(Uint8 *)) PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 1]) | |
#define pgColor_NewLength \ | |
(*(PyObject * (*)(Uint8 *, Uint8)) \ | |
PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 3]) | |
#define pg_RGBAFromColorObj \ | |
(*(int (*)(PyObject *, \ | |
Uint8 *))PyGAME_C_API[PYGAMEAPI_COLOR_FIRSTSLOT + 2]) | |
#define import_pygame_color() IMPORT_PYGAME_MODULE(color, COLOR) | |
#endif /* PYGAMEAPI_COLOR_INTERNAL */ | |
/* Math */ | |
#define PYGAMEAPI_MATH_FIRSTSLOT \ | |
(PYGAMEAPI_COLOR_FIRSTSLOT + PYGAMEAPI_COLOR_NUMSLOTS) | |
#define PYGAMEAPI_MATH_NUMSLOTS 2 | |
#ifndef PYGAMEAPI_MATH_INTERNAL | |
#define pgVector2_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 0]) | |
#define pgVector3_Check(x) \ | |
((x)->ob_type == \ | |
(PyTypeObject *)PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1]) | |
/* | |
#define pgVector2_New \ | |
(*(PyObject*(*)) PyGAME_C_API[PYGAMEAPI_MATH_FIRSTSLOT + 1]) | |
*/ | |
#define import_pygame_math() IMPORT_PYGAME_MODULE(math, MATH) | |
#endif /* PYGAMEAPI_MATH_INTERNAL */ | |
#define PG_CAPSULE_NAME(m) (IMPPREFIX m "." PYGAMEAPI_LOCAL_ENTRY) | |
#define _IMPORT_PYGAME_MODULE(module, MODULE, api_root) \ | |
{ \ | |
PyObject *_module = PyImport_ImportModule(IMPPREFIX #module); \ | |
\ | |
if (_module != NULL) { \ | |
PyObject *_c_api = \ | |
PyObject_GetAttrString(_module, PYGAMEAPI_LOCAL_ENTRY); \ | |
\ | |
Py_DECREF(_module); \ | |
if (_c_api != NULL && PyCapsule_CheckExact(_c_api)) { \ | |
void **localptr = (void **)PyCapsule_GetPointer( \ | |
_c_api, PG_CAPSULE_NAME(#module)); \ | |
\ | |
if (localptr != NULL) { \ | |
memcpy(api_root + PYGAMEAPI_##MODULE##_FIRSTSLOT, \ | |
localptr, \ | |
sizeof(void **) * PYGAMEAPI_##MODULE##_NUMSLOTS); \ | |
} \ | |
} \ | |
Py_XDECREF(_c_api); \ | |
} \ | |
} | |
#ifndef NO_PYGAME_C_API | |
#define IMPORT_PYGAME_MODULE(module, MODULE) \ | |
_IMPORT_PYGAME_MODULE(module, MODULE, PyGAME_C_API) | |
#define PYGAMEAPI_TOTALSLOTS \ | |
(PYGAMEAPI_MATH_FIRSTSLOT + PYGAMEAPI_MATH_NUMSLOTS) | |
#ifdef PYGAME_H | |
void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS] = {NULL}; | |
#else | |
extern void *PyGAME_C_API[PYGAMEAPI_TOTALSLOTS]; | |
#endif | |
#endif | |
#if PG_HAVE_CAPSULE | |
#define encapsulate_api(ptr, module) \ | |
PyCapsule_New(ptr, PG_CAPSULE_NAME(module), NULL) | |
#else | |
#define encapsulate_api(ptr, module) PyCObject_FromVoidPtr(ptr, NULL) | |
#endif | |
#ifndef PG_INLINE | |
#if defined(__clang__) | |
#define PG_INLINE __inline__ __attribute__((__unused__)) | |
#elif defined(__GNUC__) | |
#define PG_INLINE __inline__ | |
#elif defined(_MSC_VER) | |
#define PG_INLINE __inline | |
#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L | |
#define PG_INLINE inline | |
#else | |
#define PG_INLINE | |
#endif | |
#endif | |
/*last platform compiler stuff*/ | |
#if defined(macintosh) && defined(__MWERKS__) || defined(__SYMBIAN32__) | |
#define PYGAME_EXPORT __declspec(export) | |
#else | |
#define PYGAME_EXPORT | |
#endif | |
#endif /* PYGAME_H */ |
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
Copyright (C) 2007 Marcus von Appen | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free | |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
Pete Shinners | |
pete@shinners.org | |
*/ | |
#ifndef _SURFACE_H | |
#define _SURFACE_H | |
#include "_pygame.h" | |
#include "surface.h" | |
#endif | |
/* | |
Bitmask 1.7 - A pixel-perfect collision detection library. | |
Copyright (C) 2002-2005 Ulf Ekstrom except for the bitcount | |
function which is copyright (C) Donald W. Gillies, 1992. | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free | |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
*/ | |
#ifndef BITMASK_H | |
#define BITMASK_H | |
#ifdef __cplusplus | |
extern "C" { | |
#endif | |
#include <limits.h> | |
/* Define INLINE for different compilers. If your compiler does not | |
support inlining then there might be a performance hit in | |
bitmask_overlap_area(). | |
*/ | |
#ifndef INLINE | |
# ifdef __GNUC__ | |
# define INLINE inline | |
# else | |
# ifdef _MSC_VER | |
# define INLINE __inline | |
# else | |
# define INLINE | |
# endif | |
# endif | |
#endif | |
#define BITMASK_W unsigned long int | |
#define BITMASK_W_LEN (sizeof(BITMASK_W)*CHAR_BIT) | |
#define BITMASK_W_MASK (BITMASK_W_LEN - 1) | |
#define BITMASK_N(n) ((BITMASK_W)1 << (n)) | |
/* A w-by-h 2D bit array.  `bits` is a one-element array used in the
 * flexible-array style: bitmask_create() allocates extra storage after the
 * struct.  The accessors below address it in column strips -- the word for
 * bit (x, y) is bits[(x / BITMASK_W_LEN) * h + y]. */
typedef struct bitmask
{
    int w,h;           /* dimensions in bits */
    BITMASK_W bits[1]; /* bit storage; real length depends on w and h */
} bitmask_t;
/* Creates a bitmask of width w and height h, where | |
w and h must both be greater than or equal to 0. | |
The mask is automatically cleared when created. | |
*/ | |
bitmask_t *bitmask_create(int w, int h); | |
/* Frees all the memory allocated by bitmask_create for m. */ | |
void bitmask_free(bitmask_t *m); | |
/* Clears all bits in the mask */ | |
void bitmask_clear(bitmask_t *m); | |
/* Sets all bits in the mask */ | |
void bitmask_fill(bitmask_t *m); | |
/* Flips all bits in the mask */ | |
void bitmask_invert(bitmask_t *m); | |
/* Counts the bits in the mask */ | |
unsigned int bitmask_count(bitmask_t *m); | |
/* Returns nonzero if the bit at (x,y) is set. Coordinates start at | |
(0,0) */ | |
static INLINE int bitmask_getbit(const bitmask_t *m, int x, int y) | |
{ | |
return (m->bits[x/BITMASK_W_LEN*m->h + y] & BITMASK_N(x & BITMASK_W_MASK)) != 0; | |
} | |
/* Sets the bit at (x,y) */ | |
/* Sets the bit at (x,y).  Same word/bit addressing as bitmask_getbit. */
static INLINE void bitmask_setbit(bitmask_t *m, int x, int y)
{
    BITMASK_W *word = &m->bits[x/BITMASK_W_LEN*m->h + y];
    *word |= BITMASK_N(x & BITMASK_W_MASK);
}
/* Clears the bit at (x,y) */ | |
/* Clears the bit at (x,y).  Same word/bit addressing as bitmask_getbit. */
static INLINE void bitmask_clearbit(bitmask_t *m, int x, int y)
{
    BITMASK_W *word = &m->bits[x/BITMASK_W_LEN*m->h + y];
    *word &= ~BITMASK_N(x & BITMASK_W_MASK);
}
/* Returns nonzero if the masks overlap with the given offset. | |
The overlap tests uses the following offsets (which may be negative): | |
+----+----------.. | |
|A | yoffset | |
| +-+----------.. | |
+--|B | |
|xoffset | |
| | | |
: : | |
*/ | |
int bitmask_overlap(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); | |
/* Like bitmask_overlap(), but will also give a point of intersection. | |
x and y are given in the coordinates of mask a, and are untouched | |
if there is no overlap. */ | |
int bitmask_overlap_pos(const bitmask_t *a, const bitmask_t *b, | |
int xoffset, int yoffset, int *x, int *y); | |
/* Returns the number of overlapping 'pixels' */ | |
int bitmask_overlap_area(const bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); | |
/* Fills a mask with the overlap of two other masks. A bitwise AND. */ | |
void bitmask_overlap_mask (const bitmask_t *a, const bitmask_t *b, bitmask_t *c, int xoffset, int yoffset); | |
/* Draws mask b onto mask a (bitwise OR). Can be used to compose large | |
(game background?) mask from several submasks, which may speed up | |
the testing. */ | |
void bitmask_draw(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); | |
void bitmask_erase(bitmask_t *a, const bitmask_t *b, int xoffset, int yoffset); | |
/* Return a new scaled bitmask, with dimensions w*h. The quality of the | |
scaling may not be perfect for all circumstances, but it should | |
be reasonable. If either w or h is 0 a clear 1x1 mask is returned. */ | |
bitmask_t *bitmask_scale(const bitmask_t *m, int w, int h); | |
/* Convolve b into a, drawing the output into o, shifted by offset. If offset | |
* is 0, then the (x,y) bit will be set if and only if | |
* bitmask_overlap(a, b, x - b->w - 1, y - b->h - 1) returns true. | |
* | |
* Modifies bits o[xoffset ... xoffset + a->w + b->w - 1) | |
* [yoffset ... yoffset + a->h + b->h - 1). */ | |
void bitmask_convolve(const bitmask_t *a, const bitmask_t *b, bitmask_t *o, int xoffset, int yoffset); | |
#ifdef __cplusplus | |
} /* End of extern "C" { */ | |
#endif | |
#endif |
/* | |
pygame - Python Game Library | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free | |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
*/ | |
#include "pygame.h" | |
#include "doc/camera_doc.h" | |
#if defined(__unix__) | |
#include <structmember.h> | |
#include <stdio.h> | |
#include <stdlib.h> | |
#include <string.h> | |
#include <assert.h> | |
#include <fcntl.h> /* low-level i/o */ | |
#include <unistd.h> | |
#include <errno.h> | |
#include <sys/stat.h> | |
#include <sys/types.h> | |
#include <sys/time.h> | |
#include <sys/mman.h> | |
#include <sys/ioctl.h> | |
/* on freebsd there is no asm/types */ | |
#ifdef linux | |
#include <asm/types.h> /* for videodev2.h */ | |
#endif | |
#include <linux/videodev2.h> | |
#elif defined(__APPLE__) | |
#include <AvailabilityMacros.h> | |
/* We support OSX 10.6 and below. */ | |
#if __MAC_OS_X_VERSION_MAX_ALLOWED <= 1060 | |
#define PYGAME_MAC_CAMERA_OLD 1 | |
#endif | |
#endif | |
#if defined(PYGAME_MAC_CAMERA_OLD) | |
#include <QuickTime/QuickTime.h> | |
#include <QuickTime/Movies.h> | |
#include <QuickTime/ImageCompression.h> | |
#endif | |
/* some constants used which are not defined on non-v4l machines. */ | |
#ifndef V4L2_PIX_FMT_RGB24 | |
#define V4L2_PIX_FMT_RGB24 'RGB3' | |
#endif | |
#ifndef V4L2_PIX_FMT_RGB444 | |
#define V4L2_PIX_FMT_RGB444 'R444' | |
#endif | |
#ifndef V4L2_PIX_FMT_YUYV | |
#define V4L2_PIX_FMT_YUYV 'YUYV' | |
#endif | |
#define CLEAR(x) memset (&(x), 0, sizeof (x)) | |
#define SAT(c) if (c & (~255)) { if (c < 0) c = 0; else c = 255; } | |
#define SAT2(c) ((c) & (~255) ? ((c) < 0 ? 0 : 255) : (c)) | |
#define DEFAULT_WIDTH 640 | |
#define DEFAULT_HEIGHT 480 | |
#define RGB_OUT 1 | |
#define YUV_OUT 2 | |
#define HSV_OUT 4 | |
#define CAM_V4L 1 /* deprecated. the incomplete support in pygame was removed */ | |
#define CAM_V4L2 2 | |
/* A capture buffer: starting address and length in bytes. */
struct buffer {
    void * start;  /* start of the buffer memory */
    size_t length; /* buffer length in bytes */
};
#if defined(__unix__) | |
/* Camera state for the v4l2 (Linux/Unix) backend. */
typedef struct pgCameraObject {
    PyObject_HEAD
    char* device_name;          /* device path/name passed at construction -- TODO confirm exact form */
    int camera_type;            /* CAM_V4L2 (CAM_V4L is deprecated, see above) */
    unsigned long pixelformat;  /* V4L2 pixel format of the capture stream */
    unsigned int color_out;     /* requested output colorspace: RGB_OUT / YUV_OUT / HSV_OUT */
    struct buffer* buffers;     /* array of capture buffers (filled by v4l2_init_mmap) */
    unsigned int n_buffers;     /* number of entries in buffers */
    int width;                  /* frame width in pixels */
    int height;                 /* frame height in pixels */
    int size;                   /* frame size -- units (bytes vs pixels) to be confirmed at fill site */
    int hflip;                  /* nonzero: mirror frames horizontally */
    int vflip;                  /* nonzero: mirror frames vertically */
    int brightness;
    int fd;                     /* file descriptor of the open video device */
} pgCameraObject;
#elif defined(PYGAME_MAC_CAMERA_OLD)
/* Camera state for the legacy QuickTime backend (OSX 10.6 and below). */
typedef struct pgCameraObject {
    PyObject_HEAD
    char* device_name;          /* unique name of the device */
    OSType pixelformat;
    unsigned int color_out;     /* requested output colorspace: RGB_OUT / YUV_OUT / HSV_OUT */
    SeqGrabComponent component; /* A type used by the Sequence Grabber API */
    SGChannel channel;          /* Channel of the Sequence Grabber */
    GWorldPtr gworld;           /* Pointer to the struct that holds the data of the captured image */
    Rect boundsRect;            /* bounds of the image frame */
    long size;                  /* size of the image in our buffer to draw */
    int hflip;                  /* nonzero: mirror frames horizontally */
    int vflip;                  /* nonzero: mirror frames vertically */
    short depth;
    struct buffer pixels;
    /* struct buffer tmp_pixels; -- place where the flipped image is temporarily
     * stored when hflip or vflip is true (currently commented out / unused). */
} pgCameraObject;
#else
/* Generic fallback definition for platforms with no camera backend;
 * mirrors the v4l2 layout above. */
typedef struct pgCameraObject {
    PyObject_HEAD
    char* device_name;
    int camera_type;
    unsigned long pixelformat;
    unsigned int color_out;
    struct buffer* buffers;
    unsigned int n_buffers;
    int width;
    int height;
    int size;
    int hflip;
    int vflip;
    int brightness;
    int fd;
} pgCameraObject;
#endif | |
/* internal functions for colorspace conversion */ | |
void colorspace (SDL_Surface *src, SDL_Surface *dst, int cspace); | |
void rgb24_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); | |
void rgb444_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); | |
void rgb_to_yuv (const void* src, void* dst, int length, | |
unsigned long source, SDL_PixelFormat* format); | |
void rgb_to_hsv (const void* src, void* dst, int length, | |
unsigned long source, SDL_PixelFormat* format); | |
void yuyv_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); | |
void yuyv_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format); | |
void uyvy_to_rgb (const void* src, void* dst, int length, SDL_PixelFormat* format); | |
void uyvy_to_yuv (const void* src, void* dst, int length, SDL_PixelFormat* format); | |
void sbggr8_to_rgb (const void* src, void* dst, int width, int height, | |
SDL_PixelFormat* format); | |
void yuv420_to_rgb (const void* src, void* dst, int width, int height, | |
SDL_PixelFormat* format); | |
void yuv420_to_yuv (const void* src, void* dst, int width, int height, | |
SDL_PixelFormat* format); | |
#if defined(__unix__)
/* internal functions specific to v4l2 */
/* Returns a newly allocated list of device-name strings; the count is
 * stored through num_devices. */
char** v4l2_list_cameras (int* num_devices);
/* Control get/set wrappers around the V4L2 ioctl interface. */
int v4l2_get_control (int fd, int id, int *value);
int v4l2_set_control (int fd, int id, int value);
/* Returns the raw frame bytes as a Python object. */
PyObject* v4l2_read_raw (pgCameraObject* self);
/* ioctl wrapper (presumably retries on EINTR -- confirm in the .c file). */
int v4l2_xioctl (int fd, int request, void *arg);
/* Converts one captured image into the destination surface. */
int v4l2_process_image (pgCameraObject* self, const void *image,
                        unsigned int buffer_size, SDL_Surface* surf);
int v4l2_query_buffer (pgCameraObject* self);
int v4l2_read_frame (pgCameraObject* self, SDL_Surface* surf);
/* Device life-cycle: open -> init -> init_mmap -> start_capturing, then
 * stop_capturing -> uninit -> close on shutdown. */
int v4l2_stop_capturing (pgCameraObject* self);
int v4l2_start_capturing (pgCameraObject* self);
int v4l2_uninit_device (pgCameraObject* self);
int v4l2_init_mmap (pgCameraObject* self);
int v4l2_init_device (pgCameraObject* self);
int v4l2_close_device (pgCameraObject* self);
int v4l2_open_device (pgCameraObject* self);
#elif defined(PYGAME_MAC_CAMERA_OLD)
/* internal functions specific to mac */
char** mac_list_cameras(int* num_devices);
int mac_open_device (pgCameraObject* self);
int mac_init_device(pgCameraObject* self);
int mac_close_device (pgCameraObject* self);
int mac_start_capturing(pgCameraObject* self);
int mac_stop_capturing (pgCameraObject* self);
int mac_get_control(pgCameraObject* self, int id, int* value);
int mac_set_control(pgCameraObject* self, int id, int value);
PyObject* mac_read_raw(pgCameraObject *self);
int mac_read_frame(pgCameraObject* self, SDL_Surface* surf);
int mac_camera_idle(pgCameraObject* self);
int mac_copy_gworld_to_surface(pgCameraObject* self, SDL_Surface* surf);
/* Mirrors the image horizontally and/or vertically into flipped_image. */
void flip_image(const void* image, void* flipped_image, int width, int height,
                short depth, int hflip, int vflip);
#endif
#ifndef _FASTEVENTS_H_
#define _FASTEVENTS_H_
/*
  NET2 is a threaded, event based, network IO library for SDL.
  Copyright (C) 2002 Bob Pendleton

  This library is free software; you can redistribute it and/or
  modify it under the terms of the GNU Lesser General Public License
  as published by the Free Software Foundation; either version 2.1
  of the License, or (at your option) any later version.

  This library is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free
  Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307 USA

  If you do not wish to comply with the terms of the LGPL please
  contact the author as other terms are available for a fee.

  Bob Pendleton
  Bob@Pendleton.com
*/
#include "SDL.h"
#ifdef __cplusplus
extern "C" {
#endif
/* FastEvents: drop-in replacements for the SDL event-queue calls, intended
 * for use from multiple threads. */
int  FE_Init(void);                   // Initialize FE
void FE_Quit(void);                   // shutdown FE
void FE_PumpEvents(void);             // replacement for SDL_PumpEvents
int  FE_PollEvent(SDL_Event *event);  // replacement for SDL_PollEvent
int  FE_WaitEvent(SDL_Event *event);  // replacement for SDL_WaitEvent
int  FE_PushEvent(SDL_Event *event);  // replacement for SDL_PushEvent
char *FE_GetError(void);              // get the last error
#ifdef __cplusplus
}
#endif
#endif /* _FASTEVENTS_H_ */
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Pete Shinners | |
pete@shinners.org | |
*/ | |
#include <Python.h>
#if defined(HAVE_SNPRINTF) /* also defined in SDL_ttf (SDL.h) */
#undef HAVE_SNPRINTF       /* remove GCC macro redefine warning */
#endif
#include <SDL_ttf.h>
/* test font initialization */
/* Raises pgExc_SDLError unless the int flag stored in C-API slot 2 says
 * the font system has been initialized. */
#define FONT_INIT_CHECK() \
    if(!(*(int*)PyFONT_C_API[2])) \
        return RAISE(pgExc_SDLError, "font system not initialized")
#define PYGAMEAPI_FONT_FIRSTSLOT 0
#define PYGAMEAPI_FONT_NUMSLOTS 3
/* Python wrapper around an SDL_ttf font handle. */
typedef struct {
    PyObject_HEAD
    TTF_Font* font;          /* owned SDL_ttf font handle */
    PyObject* weakreflist;   /* weak-reference support */
} PyFontObject;
#define PyFont_AsFont(x) (((PyFontObject*)x)->font)
#ifndef PYGAMEAPI_FONT_INTERNAL
/* Exported C API: slot 0 = PyFont type, slot 1 = PyFont_New,
 * slot 2 = initialized flag (used by FONT_INIT_CHECK above). */
#define PyFont_Check(x) ((x)->ob_type == (PyTypeObject*)PyFONT_C_API[0])
#define PyFont_Type (*(PyTypeObject*)PyFONT_C_API[0])
#define PyFont_New (*(PyObject*(*)(TTF_Font*))PyFONT_C_API[1])
/*slot 2 taken by FONT_INIT_CHECK*/
#define import_pygame_font() \
    _IMPORT_PYGAME_MODULE(font, FONT, PyFONT_C_API)
static void* PyFONT_C_API[PYGAMEAPI_FONT_NUMSLOTS] = {NULL};
#endif
/* | |
pygame - Python Game Library | |
Copyright (C) 2009 Vicent Marti | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ | |
#ifndef _PYGAME_FREETYPE_H_
#define _PYGAME_FREETYPE_H_
#define PGFT_PYGAME1_COMPAT
#define HAVE_PYGAME_SDL_VIDEO
#define HAVE_PYGAME_SDL_RWOPS
#include "pygame.h"
#include "pgcompat.h"
#if PY3
#   define IS_PYTHON_3
#endif
#include <ft2build.h>
#include FT_FREETYPE_H
#include FT_CACHE_H
#include FT_XFREE86_H
#include FT_TRIGONOMETRY_H
/**********************************************************
 * Global module constants
 **********************************************************/
/* Render styles (bit flags; DEFAULT means "use the font's own style"). */
#define FT_STYLE_NORMAL     0x00
#define FT_STYLE_STRONG     0x01
#define FT_STYLE_OBLIQUE    0x02
#define FT_STYLE_UNDERLINE  0x04
#define FT_STYLE_WIDE       0x08
#define FT_STYLE_DEFAULT    0xFF
/* Bounding box modes (aliases of the FreeType glyph bbox modes). */
#define FT_BBOX_EXACT           FT_GLYPH_BBOX_SUBPIXELS
#define FT_BBOX_EXACT_GRIDFIT   FT_GLYPH_BBOX_GRIDFIT
#define FT_BBOX_PIXEL           FT_GLYPH_BBOX_TRUNCATE
#define FT_BBOX_PIXEL_GRIDFIT   FT_GLYPH_BBOX_PIXELS
/* Rendering flags (bit flags combined into pgFontObject.render_flags). */
#define FT_RFLAG_NONE                   (0)
#define FT_RFLAG_ANTIALIAS              (1 << 0)
#define FT_RFLAG_AUTOHINT               (1 << 1)
#define FT_RFLAG_VERTICAL               (1 << 2)
#define FT_RFLAG_HINTED                 (1 << 3)
#define FT_RFLAG_KERNING                (1 << 4)
#define FT_RFLAG_TRANSFORM              (1 << 5)
#define FT_RFLAG_PAD                    (1 << 6)
#define FT_RFLAG_ORIGIN                 (1 << 7)
#define FT_RFLAG_UCS4                   (1 << 8)
#define FT_RFLAG_USE_BITMAP_STRIKES     (1 << 9)
#define FT_RFLAG_DEFAULTS               (FT_RFLAG_HINTED | \
                                         FT_RFLAG_USE_BITMAP_STRIKES | \
                                         FT_RFLAG_ANTIALIAS)
/* Render-target selectors. */
#define FT_RENDER_NEWBYTEARRAY      0x0
#define FT_RENDER_NEWSURFACE        0x1
#define FT_RENDER_EXISTINGSURFACE   0x2
/**********************************************************
 * Global module types
 **********************************************************/
/* Per-axis font scale in FreeType units. */
typedef struct _scale_s {
    FT_UInt x, y;
} Scale_t;
typedef FT_Angle Angle_t;
struct fontinternals_;
struct freetypeinstance_;
/* Identifies a font face: index within the file plus FT_Open_Args. */
typedef struct {
    FT_Long font_index;
    FT_Open_Args open_args;
} pgFontId;
/* Python-level freetype Font object. */
typedef struct {
    PyObject_HEAD
    pgFontId id;                 /* which face of which file */
    PyObject *path;              /* file path the font was opened from */
    int is_scalable;
    Scale_t face_size;           /* current face size */
    FT_Int16 style;              /* FT_STYLE_* bits */
    FT_Int16 render_flags;       /* FT_RFLAG_* bits */
    double strength;             /* emboldening for FT_STYLE_STRONG */
    double underline_adjustment;
    FT_UInt resolution;          /* output resolution in dpi */
    Angle_t rotation;
    FT_Matrix transform;         /* used when FT_RFLAG_TRANSFORM is set */
    FT_Byte fgcolor[4];          /* foreground color, RGBA bytes */
    struct freetypeinstance_ *freetype;  /* Personal reference */
    struct fontinternals_ *_internals;   /* NULL once the font is closed */
} pgFontObject;
/* A font is usable while its internals pointer is still set. */
#define pgFont_IS_ALIVE(o) \
    (((pgFontObject *)(o))->_internals != 0)
#define PYGAMEAPI_FREETYPE_FIRSTSLOT 0
#define PYGAMEAPI_FREETYPE_NUMSLOTS 2
#ifndef PYGAME_FREETYPE_INTERNAL
/* Exported C API: slot 0 holds the pgFont type object, slot 1 the
 * pgFont_New constructor -- the same layout the font module uses.
 * BUG FIX: pgFont_Type previously dereferenced slot 1, disagreeing with
 * pgFont_Check (slot 0) and aliasing pgFont_New; both type macros now
 * consistently use slot 0. */
#define pgFont_Check(x) ((x)->ob_type == (PyTypeObject*)PgFREETYPE_C_API[0])
#define pgFont_Type (*(PyTypeObject*)PgFREETYPE_C_API[0])
#define pgFont_New (*(PyObject*(*)(const char*, long))PgFREETYPE_C_API[1])
#define import_pygame_freetype() \
    _IMPORT_PYGAME_MODULE(freetype, FREETYPE, PgFREETYPE_C_API)
static void *PgFREETYPE_C_API[PYGAMEAPI_FREETYPE_NUMSLOTS] = {0};
#endif /* PYGAME_FREETYPE_INTERNAL */
#endif /* _PYGAME_FREETYPE_H_ */
#include <Python.h>
#include "bitmask.h"
#define PYGAMEAPI_MASK_FIRSTSLOT 0
#define PYGAMEAPI_MASK_NUMSLOTS 1
/* Name of the capsule attribute that carries a pygame module's C API. */
#define PYGAMEAPI_LOCAL_ENTRY "_PYGAME_C_API"
/* Python wrapper around a bitmask_t collision mask. */
typedef struct {
    PyObject_HEAD
    bitmask_t *mask;   /* owned bitmask */
} pgMaskObject;
#define pgMask_AsBitmap(x) (((pgMaskObject*)x)->mask)
#ifndef PYGAMEAPI_MASK_INTERNAL
/* Exported C API: slot 0 = pgMask type object. */
#define pgMask_Type (*(PyTypeObject*)PyMASK_C_API[0])
#define pgMask_Check(x) ((x)->ob_type == &pgMask_Type)
#define import_pygame_mask() \
    _IMPORT_PYGAME_MODULE(mask, MASK, PyMASK_C_API)
static void* PyMASK_C_API[PYGAMEAPI_MASK_NUMSLOTS] = {NULL};
#endif /* #ifndef PYGAMEAPI_MASK_INTERNAL */
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Pete Shinners | |
pete@shinners.org | |
*/ | |
#include <Python.h>
#include <SDL_mixer.h>
#include <structmember.h>
/* test mixer initializations */
/* Raises pgExc_SDLError unless the SDL audio subsystem is initialized. */
#define MIXER_INIT_CHECK() \
    if(!SDL_WasInit(SDL_INIT_AUDIO)) \
        return RAISE(pgExc_SDLError, "mixer not initialized")
#define PYGAMEAPI_MIXER_FIRSTSLOT 0
#define PYGAMEAPI_MIXER_NUMSLOTS 7
/* Python wrapper around an SDL_mixer sample. */
typedef struct {
    PyObject_HEAD
    Mix_Chunk *chunk;        /* SDL_mixer sample handle */
    Uint8 *mem;              /* sample buffer backing the chunk */
    PyObject *weakreflist;   /* weak-reference support */
} pgSoundObject;
/* Python wrapper around a mixer channel number. */
typedef struct {
    PyObject_HEAD
    int chan;                /* SDL_mixer channel index */
} pgChannelObject;
#define pgSound_AsChunk(x) (((pgSoundObject*)x)->chunk)
#define pgChannel_AsInt(x) (((pgChannelObject*)x)->chan)
#ifndef PYGAMEAPI_MIXER_INTERNAL
/* Exported C API: slots 0-2 = Sound type/new/play,
 * slots 3-4 = Channel type/new, slots 5-6 = auto init/quit. */
#define pgSound_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[0])
#define pgSound_Type (*(PyTypeObject*)pgMIXER_C_API[0])
#define pgSound_New (*(PyObject*(*)(Mix_Chunk*))pgMIXER_C_API[1])
#define pgSound_Play (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[2])
#define pgChannel_Check(x) ((x)->ob_type == (PyTypeObject*)pgMIXER_C_API[3])
#define pgChannel_Type (*(PyTypeObject*)pgMIXER_C_API[3])
#define pgChannel_New (*(PyObject*(*)(int))pgMIXER_C_API[4])
#define pgMixer_AutoInit (*(PyObject*(*)(PyObject*, PyObject*))pgMIXER_C_API[5])
#define pgMixer_AutoQuit (*(void(*)(void))pgMIXER_C_API[6])
#define import_pygame_mixer() \
    _IMPORT_PYGAME_MODULE(mixer, MIXER, pgMIXER_C_API)
static void* pgMIXER_C_API[PYGAMEAPI_MIXER_NUMSLOTS] = {NULL};
#endif
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Pete Shinners | |
pete@shinners.org | |
*/ | |
#ifndef PALETTE_H
#define PALETTE_H
#include <SDL.h>
/* SDL 2 does not assign a default palette color scheme to a new 8 bit
 * surface. Instead, the palette is set all white. This defines the SDL 1.2
 * default palette.
 */
/* 256 fully-opaque RGBA entries.  The values sweep blue fastest, then
 * green, then red; the 128-entry sweep appears twice to fill all 256
 * slots. */
static const SDL_Color default_palette_colors[] = {
    {0, 0, 0, 255},       {0, 0, 85, 255},      {0, 0, 170, 255},
    {0, 0, 255, 255},     {0, 36, 0, 255},      {0, 36, 85, 255},
    {0, 36, 170, 255},    {0, 36, 255, 255},    {0, 73, 0, 255},
    {0, 73, 85, 255},     {0, 73, 170, 255},    {0, 73, 255, 255},
    {0, 109, 0, 255},     {0, 109, 85, 255},    {0, 109, 170, 255},
    {0, 109, 255, 255},   {0, 146, 0, 255},     {0, 146, 85, 255},
    {0, 146, 170, 255},   {0, 146, 255, 255},   {0, 182, 0, 255},
    {0, 182, 85, 255},    {0, 182, 170, 255},   {0, 182, 255, 255},
    {0, 219, 0, 255},     {0, 219, 85, 255},    {0, 219, 170, 255},
    {0, 219, 255, 255},   {0, 255, 0, 255},     {0, 255, 85, 255},
    {0, 255, 170, 255},   {0, 255, 255, 255},   {85, 0, 0, 255},
    {85, 0, 85, 255},     {85, 0, 170, 255},    {85, 0, 255, 255},
    {85, 36, 0, 255},     {85, 36, 85, 255},    {85, 36, 170, 255},
    {85, 36, 255, 255},   {85, 73, 0, 255},     {85, 73, 85, 255},
    {85, 73, 170, 255},   {85, 73, 255, 255},   {85, 109, 0, 255},
    {85, 109, 85, 255},   {85, 109, 170, 255},  {85, 109, 255, 255},
    {85, 146, 0, 255},    {85, 146, 85, 255},   {85, 146, 170, 255},
    {85, 146, 255, 255},  {85, 182, 0, 255},    {85, 182, 85, 255},
    {85, 182, 170, 255},  {85, 182, 255, 255},  {85, 219, 0, 255},
    {85, 219, 85, 255},   {85, 219, 170, 255},  {85, 219, 255, 255},
    {85, 255, 0, 255},    {85, 255, 85, 255},   {85, 255, 170, 255},
    {85, 255, 255, 255},  {170, 0, 0, 255},     {170, 0, 85, 255},
    {170, 0, 170, 255},   {170, 0, 255, 255},   {170, 36, 0, 255},
    {170, 36, 85, 255},   {170, 36, 170, 255},  {170, 36, 255, 255},
    {170, 73, 0, 255},    {170, 73, 85, 255},   {170, 73, 170, 255},
    {170, 73, 255, 255},  {170, 109, 0, 255},   {170, 109, 85, 255},
    {170, 109, 170, 255}, {170, 109, 255, 255}, {170, 146, 0, 255},
    {170, 146, 85, 255},  {170, 146, 170, 255}, {170, 146, 255, 255},
    {170, 182, 0, 255},   {170, 182, 85, 255},  {170, 182, 170, 255},
    {170, 182, 255, 255}, {170, 219, 0, 255},   {170, 219, 85, 255},
    {170, 219, 170, 255}, {170, 219, 255, 255}, {170, 255, 0, 255},
    {170, 255, 85, 255},  {170, 255, 170, 255}, {170, 255, 255, 255},
    {255, 0, 0, 255},     {255, 0, 85, 255},    {255, 0, 170, 255},
    {255, 0, 255, 255},   {255, 36, 0, 255},    {255, 36, 85, 255},
    {255, 36, 170, 255},  {255, 36, 255, 255},  {255, 73, 0, 255},
    {255, 73, 85, 255},   {255, 73, 170, 255},  {255, 73, 255, 255},
    {255, 109, 0, 255},   {255, 109, 85, 255},  {255, 109, 170, 255},
    {255, 109, 255, 255}, {255, 146, 0, 255},   {255, 146, 85, 255},
    {255, 146, 170, 255}, {255, 146, 255, 255}, {255, 182, 0, 255},
    {255, 182, 85, 255},  {255, 182, 170, 255}, {255, 182, 255, 255},
    {255, 219, 0, 255},   {255, 219, 85, 255},  {255, 219, 170, 255},
    {255, 219, 255, 255}, {255, 255, 0, 255},   {255, 255, 85, 255},
    {255, 255, 170, 255}, {255, 255, 255, 255}, {0, 0, 0, 255},
    {0, 0, 85, 255},      {0, 0, 170, 255},     {0, 0, 255, 255},
    {0, 36, 0, 255},      {0, 36, 85, 255},     {0, 36, 170, 255},
    {0, 36, 255, 255},    {0, 73, 0, 255},      {0, 73, 85, 255},
    {0, 73, 170, 255},    {0, 73, 255, 255},    {0, 109, 0, 255},
    {0, 109, 85, 255},    {0, 109, 170, 255},   {0, 109, 255, 255},
    {0, 146, 0, 255},     {0, 146, 85, 255},    {0, 146, 170, 255},
    {0, 146, 255, 255},   {0, 182, 0, 255},     {0, 182, 85, 255},
    {0, 182, 170, 255},   {0, 182, 255, 255},   {0, 219, 0, 255},
    {0, 219, 85, 255},    {0, 219, 170, 255},   {0, 219, 255, 255},
    {0, 255, 0, 255},     {0, 255, 85, 255},    {0, 255, 170, 255},
    {0, 255, 255, 255},   {85, 0, 0, 255},      {85, 0, 85, 255},
    {85, 0, 170, 255},    {85, 0, 255, 255},    {85, 36, 0, 255},
    {85, 36, 85, 255},    {85, 36, 170, 255},   {85, 36, 255, 255},
    {85, 73, 0, 255},     {85, 73, 85, 255},    {85, 73, 170, 255},
    {85, 73, 255, 255},   {85, 109, 0, 255},    {85, 109, 85, 255},
    {85, 109, 170, 255},  {85, 109, 255, 255},  {85, 146, 0, 255},
    {85, 146, 85, 255},   {85, 146, 170, 255},  {85, 146, 255, 255},
    {85, 182, 0, 255},    {85, 182, 85, 255},   {85, 182, 170, 255},
    {85, 182, 255, 255},  {85, 219, 0, 255},    {85, 219, 85, 255},
    {85, 219, 170, 255},  {85, 219, 255, 255},  {85, 255, 0, 255},
    {85, 255, 85, 255},   {85, 255, 170, 255},  {85, 255, 255, 255},
    {170, 0, 0, 255},     {170, 0, 85, 255},    {170, 0, 170, 255},
    {170, 0, 255, 255},   {170, 36, 0, 255},    {170, 36, 85, 255},
    {170, 36, 170, 255},  {170, 36, 255, 255},  {170, 73, 0, 255},
    {170, 73, 85, 255},   {170, 73, 170, 255},  {170, 73, 255, 255},
    {170, 109, 0, 255},   {170, 109, 85, 255},  {170, 109, 170, 255},
    {170, 109, 255, 255}, {170, 146, 0, 255},   {170, 146, 85, 255},
    {170, 146, 170, 255}, {170, 146, 255, 255}, {170, 182, 0, 255},
    {170, 182, 85, 255},  {170, 182, 170, 255}, {170, 182, 255, 255},
    {170, 219, 0, 255},   {170, 219, 85, 255},  {170, 219, 170, 255},
    {170, 219, 255, 255}, {170, 255, 0, 255},   {170, 255, 85, 255},
    {170, 255, 170, 255}, {170, 255, 255, 255}, {255, 0, 0, 255},
    {255, 0, 85, 255},    {255, 0, 170, 255},   {255, 0, 255, 255},
    {255, 36, 0, 255},    {255, 36, 85, 255},   {255, 36, 170, 255},
    {255, 36, 255, 255},  {255, 73, 0, 255},    {255, 73, 85, 255},
    {255, 73, 170, 255},  {255, 73, 255, 255},  {255, 109, 0, 255},
    {255, 109, 85, 255},  {255, 109, 170, 255}, {255, 109, 255, 255},
    {255, 146, 0, 255},   {255, 146, 85, 255},  {255, 146, 170, 255},
    {255, 146, 255, 255}, {255, 182, 0, 255},   {255, 182, 85, 255},
    {255, 182, 170, 255}, {255, 182, 255, 255}, {255, 219, 0, 255},
    {255, 219, 85, 255},  {255, 219, 170, 255}, {255, 219, 255, 255},
    {255, 255, 0, 255},   {255, 255, 85, 255},  {255, 255, 170, 255},
    {255, 255, 255, 255}};
/* Number of entries above (256). */
static const int default_palette_size =
    (int)(sizeof(default_palette_colors) / sizeof(SDL_Color));
#endif
/* array structure interface version 3 declarations */
/* NOTE(review): this mirrors the NumPy C array interface
 * (__array_struct__); keep field layout in sync with that spec. */
#if !defined(PG_ARRAYINTER_HEADER)
#define PG_ARRAYINTER_HEADER
/* Bit values for PyArrayInterface.flags. */
static const int PAI_CONTIGUOUS = 0x01;
static const int PAI_FORTRAN = 0x02;
static const int PAI_ALIGNED = 0x100;
static const int PAI_NOTSWAPPED = 0x200;
static const int PAI_WRITEABLE = 0x400;
static const int PAI_ARR_HAS_DESCR = 0x800;
typedef struct {
    int two;              /* contains the integer 2 -- simple sanity check */
    int nd;               /* number of dimensions */
    char typekind;        /* kind in array -- character code of typestr */
    int itemsize;         /* size of each element */
    int flags;            /* flags indicating how the data should be */
                          /* interpreted */
    Py_intptr_t *shape;   /* A length-nd array of shape information */
    Py_intptr_t *strides; /* A length-nd array of stride information */
    void *data;           /* A pointer to the first element of the array */
    PyObject *descr;      /* NULL or a data-description */
} PyArrayInterface;
#endif
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
Copyright (C) 2007 Rene Dudfield, Richard Goedeken | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Pete Shinners | |
pete@shinners.org | |
*/ | |
/* Bufferproxy module C api. | |
Depends on pygame.h being included first. | |
*/ | |
#if !defined(PG_BUFPROXY_HEADER) | |
#define PYGAMEAPI_BUFPROXY_NUMSLOTS 4 | |
#define PYGAMEAPI_BUFPROXY_FIRSTSLOT 0 | |
#if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || defined(NO_PYGAME_C_API)) | |
static void *PgBUFPROXY_C_API[PYGAMEAPI_BUFPROXY_NUMSLOTS]; | |
typedef PyObject *(*_pgbufproxy_new_t)(PyObject *, getbufferproc); | |
typedef PyObject *(*_pgbufproxy_get_obj_t)(PyObject *); | |
typedef int (*_pgbufproxy_trip_t)(PyObject *); | |
#define pgBufproxy_Type (*(PyTypeObject*)PgBUFPROXY_C_API[0]) | |
#define pgBufproxy_New (*(_pgbufproxy_new_t)PgBUFPROXY_C_API[1]) | |
#define pgBufproxy_GetParent \ | |
(*(_pgbufproxy_get_obj_t)PgBUFPROXY_C_API[2]) | |
#define pgBufproxy_Trip (*(_pgbufproxy_trip_t)PgBUFPROXY_C_API[3]) | |
#define pgBufproxy_Check(x) ((x)->ob_type == (pgBufproxy_Type)) | |
#define import_pygame_bufferproxy() \ | |
_IMPORT_PYGAME_MODULE(bufferproxy, BUFPROXY, PgBUFPROXY_C_API) | |
#endif /* #if !(defined(PYGAMEAPI_BUFPROXY_INTERNAL) || ... */ | |
#define PG_BUFPROXY_HEADER | |
#endif /* #if !defined(PG_BUFPROXY_HEADER) */ |
/* Python 2.x/3.x compatibility tools
 */
#if !defined(PGCOMPAT_H)
#define PGCOMPAT_H
#if PY_MAJOR_VERSION >= 3
#define PY3 1
/* Define some aliases for the removed PyInt_* functions */
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyNumber_Int PyNumber_Long
/* Weakrefs flags changed in 3.x */
#define Py_TPFLAGS_HAVE_WEAKREFS 0
/* Module init function returns new module instance. */
#define MODINIT_RETURN(x) return x
#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC PyInit_##mod_name (void)
#define DECREF_MOD(mod) Py_DECREF (mod)
/* Type header differs. */
#define TYPE_HEAD(x,y) PyVarObject_HEAD_INIT(x,y)
/* Text interface. Use unicode strings. */
#define Text_Type PyUnicode_Type
#define Text_Check PyUnicode_Check
#ifndef PYPY_VERSION
#define Text_FromLocale(s) PyUnicode_DecodeLocale((s), "strict")
#else /* PYPY_VERSION */
/* workaround: missing function for pypy */
#define Text_FromLocale PyUnicode_FromString
#endif /* PYPY_VERSION */
#define Text_FromUTF8 PyUnicode_FromString
#define Text_FromUTF8AndSize PyUnicode_FromStringAndSize
#define Text_FromFormat PyUnicode_FromFormat
#define Text_GetSize PyUnicode_GetSize
#define Text_GET_SIZE PyUnicode_GET_SIZE
/* Binary interface. Use bytes. */
#define Bytes_Type PyBytes_Type
#define Bytes_Check PyBytes_Check
#define Bytes_Size PyBytes_Size
#define Bytes_AsString PyBytes_AsString
#define Bytes_AsStringAndSize PyBytes_AsStringAndSize
#define Bytes_FromStringAndSize PyBytes_FromStringAndSize
#define Bytes_FromFormat PyBytes_FromFormat
#define Bytes_AS_STRING PyBytes_AS_STRING
#define Bytes_GET_SIZE PyBytes_GET_SIZE
/* NOTE(review): the Py2 branch below spells this Bytes_AsDecodedObject;
 * one of the two names is likely a typo -- confirm which spelling the
 * callers actually use before renaming. */
#define Bytes_AsDecodeObject PyBytes_AsDecodedObject
#define Object_Unicode PyObject_Str
#define IsTextObj(x) (PyUnicode_Check(x) || PyBytes_Check(x))
/* Renamed builtins */
#define BUILTINS_MODULE "builtins"
#define BUILTINS_UNICODE "str"
#define BUILTINS_UNICHR "chr"
/* Defaults for unicode file path encoding */
#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding
#if defined(MS_WIN32)
#define UNICODE_DEF_FS_ERROR "replace"
#else
#define UNICODE_DEF_FS_ERROR "surrogateescape"
#endif
#else /* #if PY_MAJOR_VERSION >= 3 */
#define PY3 0
/* Module init function returns nothing. */
#define MODINIT_RETURN(x) return
#define MODINIT_DEFINE(mod_name) PyMODINIT_FUNC init##mod_name (void)
#define DECREF_MOD(mod)
/* Type header differs. */
#define TYPE_HEAD(x,y) \
    PyObject_HEAD_INIT(x) \
    0,
/* Text interface. Use ascii strings. */
#define Text_Type PyString_Type
#define Text_Check PyString_Check
#define Text_FromLocale PyString_FromString
#define Text_FromUTF8 PyString_FromString
#define Text_FromUTF8AndSize PyString_FromStringAndSize
#define Text_FromFormat PyString_FromFormat
#define Text_GetSize PyString_GetSize
#define Text_GET_SIZE PyString_GET_SIZE
/* Binary interface. Use ascii strings. */
#define Bytes_Type PyString_Type
#define Bytes_Check PyString_Check
#define Bytes_Size PyString_Size
#define Bytes_AsString PyString_AsString
#define Bytes_AsStringAndSize PyString_AsStringAndSize
#define Bytes_FromStringAndSize PyString_FromStringAndSize
#define Bytes_FromFormat PyString_FromFormat
#define Bytes_AS_STRING PyString_AS_STRING
#define Bytes_GET_SIZE PyString_GET_SIZE
#define Bytes_AsDecodedObject PyString_AsDecodedObject
#define Object_Unicode PyObject_Unicode
/* Renamed builtins */
#define BUILTINS_MODULE "__builtin__"
#define BUILTINS_UNICODE "unicode"
#define BUILTINS_UNICHR "unichr"
/* Defaults for unicode file path encoding */
#define UNICODE_DEF_FS_CODEC Py_FileSystemDefaultEncoding
#define UNICODE_DEF_FS_ERROR "strict"
#endif /* #if PY_MAJOR_VERSION >= 3 */
#define PY2 (!PY3)
#define MODINIT_ERROR MODINIT_RETURN (NULL)
/* Module state. These macros are used to define per-module macros.
 * v - global state variable (Python 2.x)
 * s - global state structure (Python 3.x)
 */
#define PY2_GETSTATE(v) (&(v))
#define PY3_GETSTATE(s, m) ((struct s *) PyModule_GetState (m))
/* Pep 3123: Making PyObject_HEAD conform to standard C */
#if !defined(Py_TYPE)
#define Py_TYPE(o)    (((PyObject *)(o))->ob_type)
#define Py_REFCNT(o)  (((PyObject *)(o))->ob_refcnt)
#define Py_SIZE(o)    (((PyVarObject *)(o))->ob_size)
#endif
/* Encode a unicode file path */
#define Unicode_AsEncodedPath(u) \
    PyUnicode_AsEncodedString ((u), UNICODE_DEF_FS_CODEC, UNICODE_DEF_FS_ERROR)
#define RELATIVE_MODULE(m) ("." m)
/* The old (pre-new-buffer) buffer protocol only exists on Python 2. */
#define HAVE_OLD_BUFPROTO PY2
#if !defined(PG_ENABLE_OLDBUF)  /* allow for command line override */
#if HAVE_OLD_BUFPROTO
#define PG_ENABLE_OLDBUF 1
#else
#define PG_ENABLE_OLDBUF 0
#endif
#endif
/* Type flags that no longer exist on newer Pythons default to 0 so they
 * can be OR'ed into tp_flags unconditionally. */
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_CLASS
#define Py_TPFLAGS_HAVE_CLASS 0
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
/* PySlice_GetIndicesEx stopped taking a PySliceObject* cast in 3.2. */
#if PY_VERSION_HEX >= 0x03020000
#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \
    PySlice_GetIndicesEx(slice, length, start, stop, step, slicelength)
#else
#define Slice_GET_INDICES_EX(slice, length, start, stop, step, slicelength) \
    PySlice_GetIndicesEx((PySliceObject *)(slice), length, \
                         start, stop, step, slicelength)
#endif
/* Support new buffer protocol? */
#if !defined(PG_ENABLE_NEWBUF)  /* allow for command line override */
#if !defined(PYPY_VERSION)
#define PG_ENABLE_NEWBUF 1
#else
#define PG_ENABLE_NEWBUF 0
#endif
#endif
#endif /* #if !defined(PGCOMPAT_H) */
#if !defined(PGOPENGL_H)
#define PGOPENGL_H
/** This header includes definitions of Opengl functions as pointer types for
 ** use with the SDL function SDL_GL_GetProcAddress.
 **/
/* On Windows, GL entry points use the stdcall convention. */
#if defined(_WIN32)
#define GL_APIENTRY __stdcall
#else
#define GL_APIENTRY
#endif
/* Pointer type matching glReadPixels(x, y, width, height, format, type,
 * pixels). */
typedef void (GL_APIENTRY *GL_glReadPixels_Func)(int, int, int, int, unsigned int, unsigned int, void*);
#endif
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
Pete Shinners | |
pete@shinners.org | |
*/ | |
/* To allow the Pygame C api to be globally shared by all code within an
 * extension module built from multiple C files, only include the pygame.h
 * header within the top level C file, the one which calls the
 * 'import_pygame_*' macros. All other C source files of the module should
 * include _pygame.h instead.
 */
/* This header is deliberately a thin wrapper: it only guards and forwards
 * to _pygame.h. */
#ifndef PYGAME_H
#define PYGAME_H
#include "_pygame.h"
#endif
/* | |
pygame - Python Game Library | |
Copyright (C) 2006, 2007 Rene Dudfield, Marcus von Appen | |
Originally put in the public domain by Sam Lantinga. | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/ | |
/* This is unconditionally defined in Python.h */ | |
#if defined(_POSIX_C_SOURCE) | |
#undef _POSIX_C_SOURCE | |
#endif | |
#include <Python.h> | |
/* Handle clipboard text and data in arbitrary formats */
/**
 * Predefined supported pygame scrap types (MIME-style identifiers).
 */
#define PYGAME_SCRAP_TEXT "text/plain"
#define PYGAME_SCRAP_BMP "image/bmp"
#define PYGAME_SCRAP_PPM "image/ppm"
#define PYGAME_SCRAP_PBM "image/pbm"

/**
 * The supported scrap clipboard types.
 *
 * This is only relevant in a X11 environment, which supports mouse
 * selections as well. For Win32 and MacOS environments the default
 * clipboard is used, no matter what value is passed.
 */
typedef enum
{
    SCRAP_CLIPBOARD,
    SCRAP_SELECTION /* only supported in X11 environments. */
} ScrapClipType;
/**
 * Macro for initialization checks: bail out of the calling function,
 * returning NULL with an SDLError set, if pygame_scrap_init() has not
 * been run yet.
 */
#define PYGAME_SCRAP_INIT_CHECK() \
    if(!pygame_scrap_initialized()) \
        return (PyErr_SetString (pgExc_SDLError, \
                                 "scrap system not initialized."), NULL)

/**
 * \brief Checks, whether the pygame scrap module was initialized.
 *
 * \return 1 if the modules was initialized, 0 otherwise.
 */
extern int
pygame_scrap_initialized (void);

/**
 * \brief Initializes the pygame scrap module internals. Call this before any
 *        other method.
 *
 * \return 1 on successful initialization, 0 otherwise.
 */
extern int
pygame_scrap_init (void);

/**
 * \brief Checks, whether the pygame window lost the clipboard focus or not.
 *
 * \return 1 if the window lost the focus, 0 otherwise.
 */
extern int
pygame_scrap_lost (void);

/**
 * \brief Places content of a specific type into the clipboard.
 *
 * \note For X11 the following notes are important: The following types
 *       are reserved for internal usage and thus will throw an error on
 *       setting them: "TIMESTAMP", "TARGETS", "SDL_SELECTION".
 *       Setting PYGAME_SCRAP_TEXT ("text/plain") will also automatically
 *       set the X11 types "STRING" (XA_STRING), "TEXT" and "UTF8_STRING".
 *
 *       For Win32 the following notes are important: Setting
 *       PYGAME_SCRAP_TEXT ("text/plain") will also automatically set
 *       the Win32 type "TEXT" (CF_TEXT).
 *
 *       For QNX the following notes are important: Setting
 *       PYGAME_SCRAP_TEXT ("text/plain") will also automatically set
 *       the QNX type "TEXT" (Ph_CL_TEXT).
 *
 * \param type The type of the content.
 * \param srclen The length of the content.
 * \param src The NULL terminated content.
 * \return 1, if the content could be successfully pasted into the clipboard,
 *         0 otherwise.
 */
extern int
pygame_scrap_put (char *type, int srclen, char *src);

/**
 * \brief Gets the current content from the clipboard.
 *
 * \note The received content does not need to be the content previously
 *       placed in the clipboard using pygame_put_scrap(). See the
 *       pygame_put_scrap() notes for more details.
 *       (Ownership of the returned buffer is not stated here —
 *       presumably the caller frees it; confirm against the implementation.)
 *
 * \param type The type of the content to receive.
 * \param count The size of the returned content.
 * \return The content or NULL in case of an error or if no content of the
 *         specified type was available.
 */
extern char*
pygame_scrap_get (char *type, unsigned long *count);

/**
 * \brief Gets the currently available content types from the clipboard.
 *
 * \return The different available content types or NULL in case of an
 *         error or if no content type is available.
 */
extern char**
pygame_scrap_get_types (void);

/**
 * \brief Checks whether content for the specified scrap type is currently
 *        available in the clipboard.
 *
 * \param type The type to check for.
 * \return 1, if there is content and 0 otherwise.
 */
extern int
pygame_scrap_contains (char *type);
/* | |
pygame - Python Game Library | |
Copyright (C) 2000-2001 Pete Shinners | |
Copyright (C) 2007 Marcus von Appen | |
This library is free software; you can redistribute it and/or | |
modify it under the terms of the GNU Library General Public | |
License as published by the Free Software Foundation; either | |
version 2 of the License, or (at your option) any later version. | |
This library is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
Library General Public License for more details. | |
You should have received a copy of the GNU Library General Public | |
License along with this library; if not, write to the Free | |
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | |
Pete Shinners | |
pete@shinners.org | |
*/ | |
#ifndef SURFACE_H | |
#define SURFACE_H | |
/* This is defined in SDL.h */ | |
#if defined(_POSIX_C_SOURCE) | |
#undef _POSIX_C_SOURCE | |
#endif | |
#include <SDL.h> | |
#include "pygame.h" | |
/* Blend modes. The RGB_* names alias the legacy unsuffixed names. */
#define PYGAME_BLEND_ADD  0x1
#define PYGAME_BLEND_SUB  0x2
#define PYGAME_BLEND_MULT 0x3
#define PYGAME_BLEND_MIN  0x4
#define PYGAME_BLEND_MAX  0x5

#define PYGAME_BLEND_RGB_ADD  0x1
#define PYGAME_BLEND_RGB_SUB  0x2
#define PYGAME_BLEND_RGB_MULT 0x3
#define PYGAME_BLEND_RGB_MIN  0x4
#define PYGAME_BLEND_RGB_MAX  0x5

#define PYGAME_BLEND_RGBA_ADD  0x6
#define PYGAME_BLEND_RGBA_SUB  0x7
#define PYGAME_BLEND_RGBA_MULT 0x8
#define PYGAME_BLEND_RGBA_MIN  0x9
/* NOTE(review): the jump from 0x9 to 0x10 (decimal 16) skips 0xA-0xF and
 * looks like decimal values written with a hex prefix. These constants are
 * public API, so they must stay as-is. */
#define PYGAME_BLEND_RGBA_MAX  0x10
#define PYGAME_BLEND_PREMULTIPLIED 0x11

/* Assemble a 24-bit pixel value from 3 bytes, honoring host endianness. */
#if SDL_BYTEORDER == SDL_LIL_ENDIAN
#define GET_PIXEL_24(b) (b[0] + (b[1] << 8) + (b[2] << 16))
#else
#define GET_PIXEL_24(b) (b[2] + (b[1] << 8) + (b[0] << 16))
#endif

/* Load a pixel of `bpp` bytes from `source` into `pxl`; the default case
 * handles 3-byte (24-bit) pixels via GET_PIXEL_24. */
#define GET_PIXEL(pxl, bpp, source) \
    switch (bpp) \
    { \
    case 2: \
        pxl = *((Uint16 *) (source)); \
        break; \
    case 4: \
        pxl = *((Uint32 *) (source)); \
        break; \
    default: \
    { \
        Uint8 *b = (Uint8 *) source; \
        pxl = GET_PIXEL_24(b); \
    } \
    break; \
    }
#if IS_SDLv1
/* Extract 8-bit R/G/B/A channel values from pixel `px` using the format's
 * mask/shift/loss fields; when `ppa` (per-pixel alpha) is false the alpha is
 * forced fully opaque. The `(v << loss) + (v >> (8 - (loss << 1)))` term
 * re-expands a reduced-precision channel back to the full 8-bit range. */
#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \
    _sR = ((px & fmt->Rmask) >> fmt->Rshift); \
    _sR = (_sR << fmt->Rloss) + (_sR >> (8 - (fmt->Rloss << 1))); \
    _sG = ((px & fmt->Gmask) >> fmt->Gshift); \
    _sG = (_sG << fmt->Gloss) + (_sG >> (8 - (fmt->Gloss << 1))); \
    _sB = ((px & fmt->Bmask) >> fmt->Bshift); \
    _sB = (_sB << fmt->Bloss) + (_sB >> (8 - (fmt->Bloss << 1))); \
    if (ppa) \
    { \
        _sA = ((px & fmt->Amask) >> fmt->Ashift); \
        _sA = (_sA << fmt->Aloss) + (_sA >> (8 - (fmt->Aloss << 1))); \
    } \
    else \
    { \
        _sA = 255; \
    }

/* Channel lookup for 8-bit palettized pixels; palette colors carry no alpha,
 * so alpha is forced to 255. */
#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \
    sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \
    sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \
    sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \
    sa = 255;

/* For 1 byte palette pixels */
#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \
    *(px) = (Uint8) SDL_MapRGB(fmt, _dR, _dG, _dB)

#else /* IS_SDLv2 */
/* SDL2 path: SDL_GetRGBA does the unpacking; force opaque alpha when the
 * surface has no per-pixel alpha. */
#define GET_PIXELVALS(_sR, _sG, _sB, _sA, px, fmt, ppa) \
    SDL_GetRGBA(px, fmt, &(_sR), &(_sG), &(_sB), &(_sA)); \
    if (!ppa) { \
        _sA = 255; \
    }

/* Channel lookup for 8-bit palettized pixels; palette colors carry no alpha,
 * so alpha is forced to 255. */
#define GET_PIXELVALS_1(sr, sg, sb, sa, _src, _fmt) \
    sr = _fmt->palette->colors[*((Uint8 *) (_src))].r; \
    sg = _fmt->palette->colors[*((Uint8 *) (_src))].g; \
    sb = _fmt->palette->colors[*((Uint8 *) (_src))].b; \
    sa = 255;

/* For 1 byte palette pixels */
#define SET_PIXELVAL(px, fmt, _dR, _dG, _dB, _dA) \
    *(px) = (Uint8) SDL_MapRGBA(fmt, _dR, _dG, _dB, _dA)
#endif /* IS_SDLv2 */
/* Compute the byte offsets (within one pixel's bytes) of the R, G and B
 * channels from the format's bit shifts, for 24-bit and 32-bit pixels.
 * The two definitions mirror each other for little-/big-endian hosts.
 * NOTE(review): the parameter name `or` collides with the C++ <iso646.h>
 * alternative operator token; fine in C, but confirm these macros are never
 * compiled as C++. */
#if SDL_BYTEORDER == SDL_LIL_ENDIAN
#define SET_OFFSETS_24(or, og, ob, fmt) \
    { \
        or = (fmt->Rshift == 0 ? 0 : \
              fmt->Rshift == 8 ? 1 : \
              2 ); \
        og = (fmt->Gshift == 0 ? 0 : \
              fmt->Gshift == 8 ? 1 : \
              2 ); \
        ob = (fmt->Bshift == 0 ? 0 : \
              fmt->Bshift == 8 ? 1 : \
              2 ); \
    }

#define SET_OFFSETS_32(or, og, ob, fmt) \
    { \
        or = (fmt->Rshift == 0 ? 0 : \
              fmt->Rshift == 8 ? 1 : \
              fmt->Rshift == 16 ? 2 : \
              3 ); \
        og = (fmt->Gshift == 0 ? 0 : \
              fmt->Gshift == 8 ? 1 : \
              fmt->Gshift == 16 ? 2 : \
              3 ); \
        ob = (fmt->Bshift == 0 ? 0 : \
              fmt->Bshift == 8 ? 1 : \
              fmt->Bshift == 16 ? 2 : \
              3 ); \
    }
#else
#define SET_OFFSETS_24(or, og, ob, fmt) \
    { \
        or = (fmt->Rshift == 0 ? 2 : \
              fmt->Rshift == 8 ? 1 : \
              0 ); \
        og = (fmt->Gshift == 0 ? 2 : \
              fmt->Gshift == 8 ? 1 : \
              0 ); \
        ob = (fmt->Bshift == 0 ? 2 : \
              fmt->Bshift == 8 ? 1 : \
              0 ); \
    }

#define SET_OFFSETS_32(or, og, ob, fmt) \
    { \
        or = (fmt->Rshift == 0 ? 3 : \
              fmt->Rshift == 8 ? 2 : \
              fmt->Rshift == 16 ? 1 : \
              0 ); \
        og = (fmt->Gshift == 0 ? 3 : \
              fmt->Gshift == 8 ? 2 : \
              fmt->Gshift == 16 ? 1 : \
              0 ); \
        ob = (fmt->Bshift == 0 ? 3 : \
              fmt->Bshift == 8 ? 2 : \
              fmt->Bshift == 16 ? 1 : \
              0 ); \
    }
#endif
/* Pack 8-bit r/g/b/a channels into the pixel pointed to by `buf` for 2- or
 * 4-byte-per-pixel formats.
 * NOTE(review): there is no case for bp == 3 — 24-bit destinations are
 * silently left untouched here; confirm callers never reach this with
 * a 3-byte format. */
#define CREATE_PIXEL(buf, r, g, b, a, bp, ft) \
    switch (bp) \
    { \
    case 2: \
        *((Uint16 *) (buf)) = \
            ((r >> ft->Rloss) << ft->Rshift) | \
            ((g >> ft->Gloss) << ft->Gshift) | \
            ((b >> ft->Bloss) << ft->Bshift) | \
            ((a >> ft->Aloss) << ft->Ashift); \
        break; \
    case 4: \
        *((Uint32 *) (buf)) = \
            ((r >> ft->Rloss) << ft->Rshift) | \
            ((g >> ft->Gloss) << ft->Gshift) | \
            ((b >> ft->Bloss) << ft->Bshift) | \
            ((a >> ft->Aloss) << ft->Ashift); \
        break; \
    }

/* Pretty good idea from Tom Duff :-).
 * Duff's-device loop unrolling: executes `code` `width` times, four copies
 * per do-while iteration. `n` is a caller-provided counter variable.
 * NOTE(review): with width == 0 the `case 0` entry still runs `code` four
 * times — callers must guarantee width >= 1. */
#define LOOP_UNROLLED4(code, n, width) \
    n = (width + 3) / 4; \
    switch (width & 3) \
    { \
    case 0: do { code; \
    case 3: code; \
    case 2: code; \
    case 1: code; \
        } while (--n > 0); \
    }

/* Used in the srcbpp == dstbpp == 1 blend functions */
#define REPEAT_3(code) \
    code; \
    code; \
    code;

#define REPEAT_4(code) \
    code; \
    code; \
    code; \
    code;
/* Saturating per-channel RGB add (alpha untouched); `tmp` is caller scratch. */
#define BLEND_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
    tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \
    tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \
    tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255);

/* Clamped per-channel RGB subtract (alpha untouched). */
#define BLEND_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
    tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \
    tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \
    tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0);

/* Per-channel RGB multiply, approximated with >>8 instead of /255
 * (so 255*255 maps to 254, not 255). */
#define BLEND_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \
    dR = (dR && sR) ? (dR * sR) >> 8 : 0; \
    dG = (dG && sG) ? (dG * sG) >> 8 : 0; \
    dB = (dB && sB) ? (dB * sB) >> 8 : 0;

/* Per-channel RGB minimum / maximum (alpha untouched). */
#define BLEND_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \
    if(sR < dR) { dR = sR; } \
    if(sG < dG) { dG = sG; } \
    if(sB < dB) { dB = sB; }

#define BLEND_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \
    if(sR > dR) { dR = sR; } \
    if(sG > dG) { dG = sG; } \
    if(sB > dB) { dB = sB; }

/* RGBA variants of the blends above: identical math, also applied to alpha. */
#define BLEND_RGBA_ADD(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
    tmp = dR + sR; dR = (tmp <= 255 ? tmp : 255); \
    tmp = dG + sG; dG = (tmp <= 255 ? tmp : 255); \
    tmp = dB + sB; dB = (tmp <= 255 ? tmp : 255); \
    tmp = dA + sA; dA = (tmp <= 255 ? tmp : 255);

#define BLEND_RGBA_SUB(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
    tmp = dR - sR; dR = (tmp >= 0 ? tmp : 0); \
    tmp = dG - sG; dG = (tmp >= 0 ? tmp : 0); \
    tmp = dB - sB; dB = (tmp >= 0 ? tmp : 0); \
    tmp = dA - sA; dA = (tmp >= 0 ? tmp : 0);

#define BLEND_RGBA_MULT(sR, sG, sB, sA, dR, dG, dB, dA) \
    dR = (dR && sR) ? (dR * sR) >> 8 : 0; \
    dG = (dG && sG) ? (dG * sG) >> 8 : 0; \
    dB = (dB && sB) ? (dB * sB) >> 8 : 0; \
    dA = (dA && sA) ? (dA * sA) >> 8 : 0;

#define BLEND_RGBA_MIN(sR, sG, sB, sA, dR, dG, dB, dA) \
    if(sR < dR) { dR = sR; } \
    if(sG < dG) { dG = sG; } \
    if(sB < dB) { dB = sB; } \
    if(sA < dA) { dA = sA; }

#define BLEND_RGBA_MAX(sR, sG, sB, sA, dR, dG, dB, dA) \
    if(sR > dR) { dR = sR; } \
    if(sG > dG) { dG = sG; } \
    if(sB > dB) { dB = sB; } \
    if(sA > dA) { dA = sA; }
#if 1
/* Choose an alpha blend equation. If the sign is preserved on a right shift
 * then use a specialized, faster, equation. Otherwise a more general form,
 * where all additions are done before the shift, is needed.
 */
#if (-1 >> 1) < 0
#define ALPHA_BLEND_COMP(sC, dC, sA) ((((sC - dC) * sA + sC) >> 8) + dC)
#else
#define ALPHA_BLEND_COMP(sC, dC, sA) (((dC << 8) + (sC - dC) * sA + sC) >> 8)
#endif

/* Straight (non-premultiplied) source-over blend of src onto dst.
 * A fully transparent destination (dA == 0) is simply overwritten. */
#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \
    do { \
        if (dA) \
        { \
            dR = ALPHA_BLEND_COMP(sR, dR, sA); \
            dG = ALPHA_BLEND_COMP(sG, dG, sA); \
            dB = ALPHA_BLEND_COMP(sB, dB, sA); \
            dA = sA + dA - ((sA * dA) / 255); \
        } \
        else \
        { \
            dR = sR; \
            dG = sG; \
            dB = sB; \
            dA = sA; \
        } \
    } while(0)

/* Source-over blend for premultiplied-alpha sources; per-channel results
 * are clamped to 255 through `tmp`. */
#define ALPHA_BLEND_PREMULTIPLIED_COMP(sC, dC, sA) (sC + dC - ((dC * sA) >> 8))
#define ALPHA_BLEND_PREMULTIPLIED(tmp, sR, sG, sB, sA, dR, dG, dB, dA) \
    do { \
        tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sR, dR, sA); dR = (tmp > 255 ? 255 : tmp); \
        tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sG, dG, sA); dG = (tmp > 255 ? 255 : tmp); \
        tmp = ALPHA_BLEND_PREMULTIPLIED_COMP(sB, dB, sA); dB = (tmp > 255 ? 255 : tmp); \
        dA = sA + dA - ((sA * dA) / 255); \
    } while(0)
#elif 0
/* Alternative (disabled) formulation kept for reference. */
#define ALPHA_BLEND(sR, sG, sB, sA, dR, dG, dB, dA) \
    do { \
        if(sA){ \
            if(dA && sA < 255){ \
                int dContrib = dA*(255 - sA)/255; \
                dA = sA+dA - ((sA*dA)/255); \
                dR = (dR*dContrib + sR*sA)/dA; \
                dG = (dG*dContrib + sG*sA)/dA; \
                dB = (dB*dContrib + sB*sA)/dA; \
            }else{ \
                dR = sR; \
                dG = sG; \
                dB = sB; \
                dA = sA; \
            } \
        } \
    } while(0)
#endif
/* Fill `rect` of `surface` with `color`; `blendargs` presumably selects one
 * of the PYGAME_BLEND_* modes above — confirm in the implementation. */
int
surface_fill_blend (SDL_Surface *surface, SDL_Rect *rect, Uint32 color,
                    int blendargs);

/* Adjust `rect` against the surface's clip rect (presumably clamping it in
 * place — confirm in the implementation). */
void
surface_respect_clip_rect (SDL_Surface *surface, SDL_Rect *rect);

/* Blit entry points; `the_args` presumably selects the blend mode. */
int
pygame_AlphaBlit (SDL_Surface * src, SDL_Rect * srcrect,
                  SDL_Surface * dst, SDL_Rect * dstrect, int the_args);
int
pygame_Blit (SDL_Surface * src, SDL_Rect * srcrect,
             SDL_Surface * dst, SDL_Rect * dstrect, int the_args);
#endif /* SURFACE_H */
./setuptools-40.8.0-py3.7.egg | |
./pip-19.0.3-py3.7.egg |
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
from game.scripts.constants import Constants | |
from pygame.math import Vector2 | |
class CirclePlayerInitialAnimation(Animation):
    """Intro animation: slides the circle player up from just below the
    screen to the circle center, overshooting by 10% of the travel distance
    before settling. Plays once (no loop)."""

    def __init__(self, game_obj):
        start_y = Constants.screen_height + 15
        overshoot = abs(Constants.circCenter_y - start_y) * 0.1
        x = game_obj.transform.position.x
        frames = [
            KeyFrame(0.0, position=Vector2(x, start_y), interpolation="out_cubic"),
            KeyFrame(0.5, position=Vector2(x, Constants.circCenter_y - overshoot), interpolation="in_out_quint"),
            KeyFrame(0.7, position=Vector2(x, Constants.circCenter_y)),
        ]
        super().__init__(game_obj, frames, should_loop=False)
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
from pygame.math import Vector2 | |
import random | |
class LitterBounce(Animation):
    """Looping jitter animation: nudges a litter object to random offsets
    around its resting point and returns it to the origin every 3 seconds."""

    def __init__(self, game_object):
        """
        :param game_object: the object to animate
        """
        inter = "in_out_quint"
        gap = 10
        frames = [KeyFrame(0.0, position=Vector2(0, 0), interpolation=inter)]
        # Random offsets at half-second intervals; the x offset flips sign
        # for the second half of the cycle.
        for t in (0.5, 1, 1.5):
            frames.append(KeyFrame(t, position=Vector2(self.rand() * gap, self.rand() * gap), interpolation=inter))
        for t in (2, 2.5):
            frames.append(KeyFrame(t, position=Vector2(-self.rand() * gap, self.rand() * gap), interpolation=inter))
        frames.append(KeyFrame(3, position=Vector2(0, 0), interpolation=inter))
        super().__init__(game_object, frames)

    def rand(self):
        """Return a random offset factor in [-2, 8] (biased upward)."""
        return random.randint(-2, 8)
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
from pygame.math import Vector2 | |
import random | |
class ObstaclePulsingAnimation(Animation):
    """Endless "breathing" effect: scales the obstacle up by 7% and back
    over a 0.7-second cycle."""

    def __init__(self, game_obj):
        frames = [
            KeyFrame(0.00, scale=Vector2(1.0, 1.0), interpolation="out_cubic"),
            KeyFrame(0.35, scale=Vector2(1.07, 1.07), interpolation="out_cubic"),
            KeyFrame(0.70, scale=Vector2(1.0, 1.0)),
        ]
        super().__init__(game_obj, frames, should_loop=True)

    def rand(self):
        # NOTE(review): unused in this class — looks copied from LitterBounce.
        # Kept to preserve the public interface.
        return random.randint(-2, 8)
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
class ParticleFadeAnimation(Animation):
    """One-shot fade-out: drops a particle's alpha from 255 to 0 over
    `duration` seconds, on unscaled time so slow-motion does not stall it."""

    def __init__(self, game_obj, duration):
        start = KeyFrame(0.0, alpha=255, interpolation="in_cubic")
        end = KeyFrame(duration, alpha=0)
        super().__init__(game_obj, [start, end], should_loop=False, unscaled=True)
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
from pygame.math import Vector2 | |
import random | |
class PlayerBounce(Animation):
    """Small looping hop: offsets the player by a fixed amount at the
    midpoint of a 0.5-second cycle, then returns to rest."""

    def __init__(self, game_object):
        """
        :param game_object: the object to animate
        """
        inter = "in_out_quint"
        gap = 0.5
        frames = [
            KeyFrame(0.0, position=Vector2(0, 0), interpolation=inter),
            KeyFrame(0.25, position=Vector2(self.rand() * gap, self.rand() * gap), interpolation=inter),
            KeyFrame(0.5, position=Vector2(0, 0), interpolation=inter),
        ]
        super().__init__(game_object, frames)

    def rand(self):
        # NOTE(review): despite the name this is a fixed amplitude, not random.
        return 3
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
class PowerUpFadeOut(Animation):
    """One-shot fade: takes a power-up's alpha from 255 to 0 in 0.3 s."""

    def __init__(self, game_obj):
        start = KeyFrame(0.00, alpha=255, interpolation="in_cubic")
        end = KeyFrame(0.30, alpha=0)
        super().__init__(game_obj, [start, end], should_loop=False)
from game_engine.components.animation import Animation | |
from game_engine.key_frame import KeyFrame | |
from game.scripts.constants import Constants | |
from pygame.math import Vector2 | |
class TextUpFadeOutAnimation(Animation):
    """Floats a text object upward (to 95% of its y) while fading it out
    over 0.8 seconds; plays once (no loop)."""

    def __init__(self, game_obj):
        # Bug fix: copy the position instead of storing a reference to the
        # live, mutable transform.position Vector2 — the original aliased it
        # in the first keyframe, so the animation's start point drifted as
        # the object moved (the second keyframe already built a fresh vector).
        start_pos = Vector2(game_obj.transform.position)
        key_frame_list = [
            KeyFrame(0.00, position=start_pos, alpha=255,
                     interpolation="in_out_quint"),
            KeyFrame(0.8, position=Vector2(start_pos.x, 0.95 * start_pos.y),
                     alpha=0),
        ]
        super().__init__(game_obj, key_frame_list, should_loop=False)
from game_engine.components.circle_mesh import CircleMesh | |
from game_engine.game_object import GameObject | |
from game_engine.material import Material | |
from game_engine.color import Color | |
from pygame.math import Vector2 | |
class BasicCircle(GameObject):
    """A circle-shaped game object rendered through a CircleMesh component."""

    def __init__(self, position=None, radius=2, material=None, layer=0):
        """
        Add the circle mesh component and initialise the base game object.

        :param position: initial position of the circle (defaults to (0, 0))
        :param radius: initial radius of the circle
        :param material: render material (defaults to a white Material)
        :param layer: render layer
        """
        # Bug fix: the old signature used `position=Vector2(0, 0)` and
        # `material=Material(Color.white)` — mutable default arguments shared
        # by every instance, so change_color() on one default-constructed
        # circle recolored all of them. Build fresh defaults per call.
        if position is None:
            position = Vector2(0, 0)
        if material is None:
            material = Material(Color.white)
        super(BasicCircle, self).__init__(position, 0, Vector2(1, 1), layer)
        self.material = material
        self.radius = radius
        self.circle_mesh = CircleMesh(self, radius)

    def start(self):
        pass

    def update(self):
        pass

    def change_color(self, color):
        """Recolor this circle's material in place."""
        self.material.color = color
from game_engine.basic_objects.basic_circle import BasicCircle | |
from game_engine.time import Time | |
class BasicParticleCirc(BasicCircle):
    """A short-lived radius-1 particle circle that destroys itself after a
    fixed lifetime."""

    def __init__(self, position, destroy_time=1.0):
        self.destroy_time = destroy_time  # lifetime in seconds
        self.creation_time = Time.now()   # birth timestamp
        self.creator_obj = None           # set later via set_creator_object()
        super().__init__(position=position, radius=1)

    def set_creator_object(self, creator_obj):
        """
        Remember the game_object whose particle system spawned this particle
        :param creator_obj: the game_object reference
        """
        self.creator_obj = creator_obj

    def start(self):
        pass

    def update(self):
        # Self-destruct once the particle has outlived its allotted time.
        age = Time.now() - self.creation_time
        if age > self.destroy_time:
            self.destroy_me()
from game_engine.components.polygon_mesh import PolygonMesh | |
from game_engine.material import Material | |
from game_engine.game_object import GameObject | |
from game_engine.geometry import Geometry | |
from pygame.math import Vector2 | |
class BasicRectangle(GameObject):
    """An axis-aligned rectangle game object rendered through a PolygonMesh;
    _get_points() yields its corner points rotated about the center."""

    def __init__(self, position=None, dimension=None, material=None, layer=0, scale=None):
        """
        Add the rectangle mesh component and initialise the base game object.

        :param position: top-left position (defaults to (0, 0))
        :param dimension: width/height of the rectangle (defaults to (10, 10))
        :param material: render material; a per-instance copy is stored
        :param layer: render layer
        :param scale: accepted for signature compatibility (see NOTE below)
        """
        # Bug fix: the old signature used mutable Vector2/Material default
        # arguments shared across every call — e.g. mutating the `dimension`
        # of one default-constructed rectangle resized all of them.
        if position is None:
            position = Vector2(0, 0)
        if dimension is None:
            dimension = Vector2(10, 10)
        if material is None:
            material = Material()
        # NOTE(review): `scale` was ignored by the original too — the base
        # class always receives Vector2(1, 1). Kept as-is so rendering for
        # existing callers does not change; confirm whether it should be
        # forwarded instead.
        super(BasicRectangle, self).__init__(position, 0, Vector2(1, 1), layer)
        self.material = Material(material.color, material.alpha)  # defensive copy
        self.dimension = dimension
        self.polygon_mesh = PolygonMesh(self)

    def _get_points(self):
        """Return the rectangle's four corners, rotated about its center by
        the current transform rotation."""
        x, y = self.transform.position.x, self.transform.position.y
        w, h = self.dimension.x, self.dimension.y
        corners = [Vector2(x, y), Vector2(x, y + h),
                   Vector2(x + w, y + h), Vector2(x + w, y)]
        center = Vector2(x + w / 2, y + h / 2)
        return [Geometry.rotate_point(center, point, self.transform.rotation)
                for point in corners]
from game_engine.components.text_mesh import TextMesh | |
from game_engine.game_object import GameObject | |
from pygame.math import Vector2 | |
import pygame | |
class Text(GameObject):
    """A game object that renders a string through a TextMesh component."""

    def __init__(self, position, message, material, size, font_path, layer=10):
        super(Text, self).__init__(position, 0, Vector2(1, 1), layer=layer)
        self.material = material
        loaded_font = pygame.font.Font(font_path, size)
        self.text_mesh = TextMesh(self, message, size, loaded_font)
from game_engine.component import Component | |
class Collider(Component):
    """Base collision component; keeps a global registry of live colliders."""

    collider_list = []  # every collider currently active in the scene

    @classmethod
    def add_collider(cls, collider):
        """
        Register a new collider in the global collider list
        :param collider: the collider to be added
        """
        cls.collider_list.append(collider)

    @classmethod
    def remove(cls, game_object):
        """
        Unregister a game_object's collider (box, circle or polygon —
        whichever it carries first) from the global collider list
        :param game_object: the game_object that contains the collider
        """
        for attr in ("box_collider", "circle_collider", "polygon_collider"):
            candidate = getattr(game_object, attr)
            if candidate is not None:
                cls.collider_list.remove(candidate)
                break

    def on_collision(self):
        """
        Check if a collision occurred between two colliders by looping over
        the collider list (not yet implemented)
        :return: True if collided
        """
        pass

    def __box_collision(self, box):
        """
        Check a collision between this collider and a box_collider
        (not yet implemented)
        :param box: the box collider reference
        :return: True if collided
        """
        pass

    def __circle_collision(self, circle):
        """
        Check a collision between this collider and a circle_collider
        (not yet implemented)
        :param circle: the circle collider reference
        :return: True if collided
        """
        pass
import random | |
class Color:
    """Named RGB color constants plus a random-color helper."""

    white = (255, 255, 255)
    black = (0, 0, 0)
    red = (255, 0, 0)
    yellow = (247, 251, 0)
    blue = (36, 127, 244)
    blue_0 = (102, 102, 255)
    green = (86, 244, 85)
    silver = (192, 192, 192)
    gray = (112, 112, 112)
    orange = (253, 102, 0)
    purple = (244, 113, 244)
    mask = (122, 116, 116)

    @classmethod
    def random_color(cls):
        """
        :return: a random (r, g, b) tuple with each channel in 0..255
        """
        return tuple(random.randint(0, 255) for _ in range(3))
class Component:
    """Base class for every component: stores back-references to the owning
    game_object and its transform."""

    def __init__(self, game_object):
        self.game_object = game_object          # owner of this component
        self.transform = game_object.transform  # shortcut to the owner's transform
from game_engine.component import Component | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
class Animation(Component):
    """Keyframe-driven animation component.

    Each frame, interpolates position, rotation, scale and alpha between the
    current pair of KeyFrames and applies the per-frame delta to the owning
    game object's transform/material. An Animator drives it by calling
    update() while it is the currently playing animation.
    """

    def __init__(self, game_object, key_frames, should_loop=True, num_of_loops="inf", unscaled=False):
        """
        Set every thing up for the animation
        :param game_object: the game object the called this
        :param key_frames: a list of keyframes that defines the animation
        :param should_loop: if it should loop
        :param num_of_loops: the number of times it should loop ("inf" for forever)
        :param unscaled: if the time of this animation is unscaled
        """
        self.animator = None  # back-reference, filled in by set_animator()
        self.unscaled = unscaled
        self.is_playing = False
        self.key_frames = key_frames
        self.interpolation = None  # easing function, chosen per keyframe
        self.current_animation_time = 0.0
        self.total_parameter = dict()  # delta applied so far, per parameter name
        self.max_parameter = dict()  # full span to cover, per parameter name
        self.current_kf_idx = 0
        self.current_loop = 0
        self.new_frame = True  # True right after entering a new keyframe
        self.loop = should_loop
        self.num_of_loops = num_of_loops
        super().__init__(game_object)

    def update(self):
        """
        Method that will run every frame while this animation is running
        """
        self.current_animation_time += Time.delta_time(self.unscaled, self.game_object.time_scale)
        if self.should_change_key_frame():
            # todo remove this set interpolation
            self.__set_interpolation(self.key_frames[self.current_kf_idx].interpolation)
            self.__play_on_each_parameter()
        else:
            # Time has left the current keyframe interval: advance and replay.
            self.__next_key()
            self.new_frame = True
            self.__set_interpolation(self.key_frames[self.current_kf_idx].interpolation)
            self.__play_on_each_parameter()

    def should_change_key_frame(self):
        # NOTE(review): the name reads inverted — this returns True while the
        # animation time is still INSIDE the current keyframe interval. Also,
        # the `current_kf_idx + 1 < len(...)` guard is evaluated AFTER the
        # `key_frames[current_kf_idx + 1]` access in the first operand, so it
        # cannot prevent an IndexError; the index appears to be kept in range
        # by __next_key()/__next_loop() instead — confirm.
        return (self.key_frames[self.current_kf_idx].time <= self.current_animation_time < self.key_frames[
            self.current_kf_idx + 1].time) and (self.current_kf_idx + 1 < len(self.key_frames))

    def not_out_of_key_frame(self):
        # True while there is a "next" keyframe to interpolate toward.
        return self.current_kf_idx + 1 < len(self.key_frames)

    def set_animator(self, animator_obj):
        """
        set the reference of the animator
        :param animator_obj: The animator's reference
        """
        self.animator = animator_obj

    def reset(self):
        """
        reset the animation to its initial state (time, keyframe, loop count)
        """
        self.current_animation_time = 0.0
        self.current_kf_idx = 0
        self.current_loop = 0
        self.new_frame = True

    def __play_on_each_parameter(self):
        """
        Do the modifications of that frame on each parameter
        If it is defined a change in a keyframe,
        the parameter will not be None and will occur a change
        """
        if self.animator.current_playing_animation is not None and self.not_out_of_key_frame():
            if self.key_frames[self.current_kf_idx].position is not None:
                max_x = self.key_frames[self.current_kf_idx + 1].position.x
                min_x = self.key_frames[self.current_kf_idx].position.x
                max_y = self.key_frames[self.current_kf_idx + 1].position.y
                min_y = self.key_frames[self.current_kf_idx].position.y
                distance_x = self.__play(min_x, max_x, "position_x")
                distance_y = self.__play(min_y, max_y, "position_y")
                self.transform.translate(Vector2(self.transform.position.x + distance_x,
                                                 self.transform.position.y + distance_y))
            if self.key_frames[self.current_kf_idx].rotation is not None:
                max_rotation = self.key_frames[self.current_kf_idx + 1].rotation
                min_rotation = self.key_frames[self.current_kf_idx].rotation
                dist_rotation = self.__play(min_rotation, max_rotation, "rotation")
                self.transform.rotation += dist_rotation
            if self.key_frames[self.current_kf_idx].scale is not None:
                max_x = self.key_frames[self.current_kf_idx + 1].scale.x
                min_x = self.key_frames[self.current_kf_idx].scale.x
                max_y = self.key_frames[self.current_kf_idx + 1].scale.y
                min_y = self.key_frames[self.current_kf_idx].scale.y
                dist_scale_x = self.__play(min_x, max_x, "scale_x")
                dist_scale_y = self.__play(min_y, max_y, "scale_y")
                self.transform.scale.x = self.transform.scale.x + dist_scale_x
                self.transform.scale.y = self.transform.scale.y + dist_scale_y
            if self.key_frames[self.current_kf_idx].alpha is not None:
                max_alpha = self.key_frames[self.current_kf_idx + 1].alpha
                min_alpha = self.key_frames[self.current_kf_idx].alpha
                dist_alpha = self.__play(min_alpha, max_alpha, "alpha")
                self.game_object.material.alpha += dist_alpha
            if self.new_frame:
                self.new_frame = False

    def __play(self, value_min, value_max, value_name):
        """
        Calculate the difference that must be add at this frame to a parameter in this frame
        :param value_min: the value in the keyframe before this time
        :param value_max: the value in the keyframe after this time
        :param value_name: a string that specify the name of value
        """
        if self.new_frame:
            # Entering a new keyframe: record the full span and zero the
            # accumulated progress for this parameter.
            self.max_parameter[value_name] = value_max - value_min
            self.total_parameter[value_name] = 0.0
        interpolated_value = self.interpolation(self.current_animation_time, value_min, value_max,
                                                self.key_frames[self.current_kf_idx].time,
                                                self.key_frames[self.current_kf_idx + 1].time)
        dist_value = interpolated_value - self.total_parameter[value_name]
        if self.__is_end_of_key_frame():
            # Snap to the keyframe target so rounding error cannot accumulate.
            dist_value = self.max_parameter[value_name] - self.total_parameter[value_name]
            self.total_parameter[value_name] = self.max_parameter[value_name]
        else:
            self.total_parameter[value_name] += dist_value
        return dist_value

    def __next_key(self):
        """
        Change to next key frame in keyframe list
        """
        self.current_kf_idx += 1
        if self.current_kf_idx >= len(self.key_frames)-1:
            self.__next_loop()

    def __next_loop(self):
        # Either restart from the first keyframe or finish the animation.
        if self.__should_loop():
            self.current_loop += 1
            self.current_kf_idx = 0
            self.current_animation_time = 0.0
        else:
            self.__end_animation()

    def __should_loop(self):
        """
        Verify whether it should loop or not
        """
        if self.loop:
            if self.num_of_loops == "inf":
                return True
            else:
                # NOTE(review): this comparison looks inverted — it permits
                # another loop only once current_loop has already reached
                # num_of_loops; `current_loop < self.num_of_loops` was
                # probably intended. Left unchanged pending confirmation.
                return self.current_loop >= self.num_of_loops
        else:
            return False

    def __end_animation(self):
        """
        end this animation and hand control back to the animator
        """
        self.animator.play_next_animation()

    def __is_end_of_key_frame(self):
        """
        Verify if it is the end of a keyframe
        (i.e. within 1.5 frame-times of the next keyframe's timestamp)
        """
        return abs(self.current_animation_time - self.key_frames[self.current_kf_idx + 1].time)\
            < Time.delta_time(self.unscaled, self.game_object.time_scale) * (3 / 2)

    def __set_interpolation(self, kind):
        """
        Will define which interpolation will be made between the two points
        linear function: just a linear interpolation between the two points
        ease functions: in_cubic, out_cubic, in_out_quint
        You can see demos of how these ease interpolation happens:
        http://easings.net/
        :param kind: the string specifying the interpolation
        """
        if kind == "linear":
            self.interpolation = self.__linear
        elif kind == "in_cubic":
            self.interpolation = self.__in_cubic
        elif kind == "out_cubic":
            self.interpolation = self.__out_cubic
        elif kind == "in_out_quint":
            self.interpolation = self.__in_out_quint

    def __linear(self, t, value1, value2, t1, t2):
        """
        Linear interpolation
        Constant variation in time
        :param t: current time
        :param value1: function upper bounder result value
        :param value2: function lower bounder result value
        :param t1: function upper bounder time value
        :param t2: function lower bounder time value
        :return: the result value for that given time
        """
        if t1 != t2 and abs(t - t1) > 0.01:
            tn = (t - t1) / (t2 - t1)
            fn = tn
            f = (value2 - value1) * fn
        else:
            f = 0.0
        return f

    def __in_cubic(self, t, value1, value2, t1, t2):
        """
        Cubic with time
        slow at the beginning and fast at end
        :param t: current time
        :param value1: function upper bounder result value
        :param value2: function lower bounder result value
        :param t1: function upper bounder time value
        :param t2: function lower bounder time value
        :return: the result value for that given time
        """
        if t1 != t2 and abs(t - t1) > 0.01:
            tn = (t - t1) / (t2 - t1)
            fn = (tn**3)
            f = (value2 - value1) * fn
        else:
            f = 0.0
        return f

    def __out_cubic(self, t, value1, value2, t1, t2):
        """
        Cubic with time
        fast at the beginning and slow at end
        :param t: current time
        :param value1: function upper bounder result value
        :param value2: function lower bounder result value
        :param t1: function upper bounder time value
        :param t2: function lower bounder time value
        :return: the result value for that given time
        """
        if t1 != t2 and abs(t - t1) > 0.01:
            tn = (t - t1) / (t2 - t1)
            fn = (1-(1-tn)**3)
            f = (value2 - value1) * fn
        else:
            f = 0.0
        return f

    def __in_out_quint(self, t, value1, value2, t1, t2):
        """
        5th power with time (smootherstep curve)
        slow at the beginning, fast at the middle and slow at end
        :param t: current time
        :param value1: function upper bounder result value
        :param value2: function lower bounder result value
        :param t1: function upper bounder time value
        :param t2: function lower bounder time value
        :return: the result value for that given time
        """
        if t1 != t2 and abs(t - t1) > 0.01:
            tn = (t - t1) / (t2 - t1)
            fn = (tn*tn*tn*(tn*(6*tn-15)+10))
            f = (value2 - value1) * fn
        else:
            f = 0.0
        return f
from game_engine.component import Component | |
class Animator(Component):
    """
    Component that plays a list of Animation objects in sequence, with
    optional looping over the whole list.
    """
    def __init__(self, game_object, animation_list):
        """
        Initiate Animator with the animation list
        :param game_object: the game_object this component belongs to
        :param animation_list: the list of animations for this animator
        """
        self.animation_list = animation_list
        self.current_playing_animation = None
        self.animation_idx = 0
        self.is_paused = False
        # Loop state; set for real by play(). Initialized here so that
        # play_next_animation() never hits a missing attribute when it is
        # reached before play() was ever called.
        self.should_loop = False
        self.loops = "inf"
        self.current_loop = 0
        for animation in self.animation_list:
            animation.set_animator(self)
        super().__init__(game_object)
    def play(self, animation_idx=0, should_loop=False, loops="inf"):
        """
        Start playing the animation list in sequence
        :param animation_idx: index of the animation to start from
        :param should_loop: whether the whole list should loop when done
        :param loops: how many loops to run, or "inf" for endless looping
        """
        self.should_loop = should_loop
        self.loops = loops
        self.current_loop = 0
        self.animation_idx = animation_idx
        self.current_playing_animation = self.animation_list[self.animation_idx]
        self.animation_list[self.animation_idx].reset()
    def play_next_animation(self):
        """
        Play the next animation on animation list
        It checks before if it can play and if it should loop
        """
        if self.animation_idx < len(self.animation_list)-1:
            self.__next()
        elif self.should_loop:
            self.__loop()
        else:
            self.stop()
    def pause(self):
        """
        pause animation
        """
        self.is_paused = True
    def resume(self):
        """
        Resume animation from where it stopped
        """
        self.is_paused = False
    def stop(self):
        """
        stop playing animation
        """
        self.current_playing_animation = None
    def __next(self):
        """
        play next animation
        """
        self.animation_idx += 1
        self.current_playing_animation = self.animation_list[self.animation_idx]
        self.animation_list[self.animation_idx].reset()
    def __loop(self):
        """
        Restart the animation list when it should loop
        """
        # BUG FIX: `self.loops is "inf"` compared string identity, which
        # is implementation dependent; use equality instead.
        if self.loops == "inf":
            # BUG FIX: play() was called with default arguments, which
            # reset should_loop to False and stopped "infinite" looping
            # after a single restart; re-pass the current loop settings.
            self.play(should_loop=self.should_loop, loops=self.loops)
        else:
            if self.loops > self.current_loop:
                # BUG FIX: play() resets current_loop to 0, so the loop
                # counter must be restored after restarting -- the old
                # code never advanced the counter past 0.
                completed_loops = self.current_loop + 1
                self.play(should_loop=self.should_loop, loops=self.loops)
                self.current_loop = completed_loops
            else:
                self.stop()
    def __update(self):
        """
        This will run every frame
        Will update the current animation
        """
        if (self.current_playing_animation is not None) and (not self.is_paused):
            self.current_playing_animation.update()
from game_engine.collider import Collider | |
from game_engine.components.polygon_collider import PolygonCollider | |
from pygame.math import Vector2 | |
from game_engine.geometry import Geometry | |
class CircleCollider(Collider):
    """
    Circle-shaped collider; tests collisions against every circle and
    polygon collider registered on the global collider list.
    """
    def __init__(self, game_object):
        """
        initiate collider and register it on the global collider list
        :param game_object: The reference to the object that contains the collider
        """
        super(CircleCollider, self).__init__(game_object)
        Collider.add_collider(self)
    def is_vertex_inside(self, point):
        """
        Verify if a point is inside of the circle
        :param point: the point to verify
        :return: True if it is inside
        """
        return Geometry.circle_point_intersection(self.get_center(), self.get_radius(), point)
    def get_center(self):
        """
        :return: the center point of the circle (the transform position)
        """
        return self.transform.position
    def get_radius(self):
        """
        :return: the (scaled) radius of the game_object's circle mesh
        """
        return self.game_object.circle_mesh.get_radius()
    def get_main_points(self):
        """
        The main points are the left, bottom, right and top extremities
        of the circle's border
        :return: a list that contains each of the four points
        """
        return [Vector2(self.game_object.transform.position.x - self.get_radius(), self.get_center().y),
                Vector2(self.get_center().x, self.game_object.transform.position.y + self.get_radius()),
                Vector2(self.game_object.transform.position.x + self.get_radius(), self.get_center().y),
                Vector2(self.get_center().x, self.game_object.transform.position.y - self.get_radius())]
    def on_collision(self):
        """
        Check whether this collider collided with any collider on the
        global collider list
        :return: (True, other_game_object) on collision, (False, None) otherwise
        """
        for collider in Collider.collider_list:
            # BUG FIX: `collided` was unbound whenever a collider of an
            # unhandled type appeared on the list; default it to False.
            collided = False
            if isinstance(collider, CircleCollider):
                collided = self.__circle_collision(collider)
            elif isinstance(collider, PolygonCollider):
                collided = self.__polygon_collision(collider)
            if collided:
                return True, collider.game_object
        return False, None
    def __circle_collision(self, circle):
        """
        Check a collision between this collider and a circle collider
        :param circle: the circle collider reference
        :return: True if collided
        """
        if circle == self:
            # A collider never collides with itself
            return False
        # Two circles overlap when the distance between their centers
        # does not exceed the sum of their radii
        return circle.get_center().distance_to(self.get_center()) <= circle.get_radius() + self.get_radius()
    def __polygon_collision(self, polygon):
        """
        Check a collision between this collider and a polygon collider
        :param polygon: the polygon collider reference
        :return: True if collided
        """
        # A polygon vertex inside the circle counts as a hit...
        for vertex in polygon.get_point_list():
            if self.is_vertex_inside(vertex):
                return True
        # ...and so does one of the circle's four extremity points being
        # inside the polygon (approximate test; may miss edge-only overlaps)
        for point in self.get_main_points():
            if polygon.is_vertex_inside(point):
                return True
        return False
from game_engine.mesh import Mesh | |
class CircleMesh(Mesh):
    """
    Mesh for circular game objects: a single radius that scales with the
    owning transform.
    """
    def __init__(self, game_object, radius):
        """
        :param game_object: owner of this mesh
        :param radius: the unscaled circle radius
        """
        super(CircleMesh, self).__init__(game_object)
        self.__radius = radius
    def get_radius(self):
        """
        :return: the radius scaled by the largest transform scale axis
        """
        scale = self.transform.scale
        return self.__radius * max(scale.x, scale.y)
    def get_unscaled_radius(self):
        """
        :return: the raw radius, ignoring the transform scale
        """
        return self.__radius
    def set_radius(self, radius):
        """
        :param radius: the new unscaled radius
        """
        self.__radius = radius
from game_engine.components.physics import Physics | |
from game_engine.component import Component | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
import random | |
import math | |
class ParticleSystem(Component):
    """
    Spawns short-lived particle game objects along a line segment or on a
    circle arc, each with a randomized velocity, and destroys them after
    `duration` seconds.
    """
    def __init__(self,
                 game_object,
                 spawn_game_obj_class,
                 layer=0,
                 quant=1,
                 quant_proport_to_len=False,
                 period=0.05,
                 vel_min=80,
                 vel_max=160,
                 duration=1.0,
                 gravity=0,
                 inherit_vel=False,
                 inherit_vel_mult=1,
                 spawn_prob="lin",
                 vel_prob="lin",
                 unscaled=False,
                 num_of_periods="inf"
                 ):
        """
        :param game_object: owner of this particle system
        :param spawn_game_obj_class: class instantiated for each particle
        :param layer: draw layer assigned to spawned particles
        :param quant: particles per spawn wave (or density per unit of
                      segment length when quant_proport_to_len is True)
        :param quant_proport_to_len: scale quant by the spawn segment length
        :param period: seconds between spawn waves
        :param vel_min: minimum particle speed
        :param vel_max: maximum particle speed
        :param duration: particle lifetime in seconds
        :param gravity: gravity applied to each particle's physics
        :param inherit_vel: add the emitter's instantaneous velocity to particles
        :param inherit_vel_mult: multiplier for the inherited velocity
        :param spawn_prob: spawn-position distribution ("lin" or "parab")
        :param vel_prob: velocity-multiplier distribution ("lin" or "parab")
        :param unscaled: propagate unscaled time to spawned particles
        :param num_of_periods: number of spawn periods, or "inf"
        """
        super().__init__(game_object)
        self.duration = duration
        self.gravity = gravity
        self.vel_min = vel_min
        self.layer = layer
        self.vel_max = vel_max
        self.inherit_vel = inherit_vel
        self.quant_proport_to_len = quant_proport_to_len
        self.inherit_vel_mult = inherit_vel_mult
        self.quant = quant
        self.turned_on = False
        self.period = period
        self.spawn_game_obj_class = spawn_game_obj_class
        self.last_time = Time.now()
        self.ini_point_method = None
        self.fin_point_method = None
        self.generation_mode = None
        self.obj_list = list()
        self.spawn_prob = None
        self.vel_prob = None
        self.define_vel_prob(vel_prob)
        self.define_spawn_prob(spawn_prob)
        self.unscaled = unscaled
        self.num_of_periods = num_of_periods
        self.actual_period = 0
        # Inheriting velocity requires the emitter itself to have physics
        if self.inherit_vel:
            if self.game_object.physics is None:
                self.game_object.physics = Physics(self.game_object)
    def set_line_gen(self, ini_point_method, fin_point_method):
        """
        Spawn particles on the segment between the two points returned by
        the given callables (re-evaluated at every spawn).
        :param ini_point_method: callable returning the segment start (Vector2)
        :param fin_point_method: callable returning the segment end (Vector2)
        """
        self.ini_point_method = ini_point_method
        self.fin_point_method = fin_point_method
        self.generation_mode = self.set_line_gen
    def set_circ_gen(self, center_point, radius, mode="radial", ini_angle_met=None, fin_angle_met=None, direct_met=None):
        """
        Spawn particles on a circle arc.
        :param center_point: center of the circle
        :param radius: circle radius
        :param mode: "radial" (velocity along the outward normal) or
                     "directional" (velocity direction from direct_met)
        :param ini_angle_met: callable returning the arc start angle in degrees
        :param fin_angle_met: callable returning the arc end angle in degrees
        :param direct_met: callable returning the velocity direction;
                           required when mode is "directional"
        """
        if ini_angle_met is None:
            self.ini_angle_met = self.default_ini_ang_met
        else:
            self.ini_angle_met = ini_angle_met
        if fin_angle_met is None:
            self.fin_angle_met = self.default_fin_ang_met
        else:
            self.fin_angle_met = fin_angle_met
        self.direct_met = direct_met
        self.center_point = center_point
        self.radius = radius
        self.mode = mode
        self.generation_mode = self.set_circ_gen
    def default_ini_ang_met(self):
        """Default arc start angle: 0 degrees (full circle with the default end)."""
        return 0
    def default_fin_ang_met(self):
        """Default arc end angle: 360 degrees."""
        return 360
    def play(self):
        """Turn the particle system on and reset its timers."""
        self.turned_on = True
        self.destroy_timer = Time.now()
        self.last_time = Time.now()
    def stop(self):
        """Turn the particle system off (already-spawned particles live on)."""
        self.turned_on = False
    def define_spawn_prob(self, spawn_prob):
        """
        Select the spawn-position distribution by name.
        :param spawn_prob: "lin" for uniform, "parab" for edge-weighted
        """
        if spawn_prob == "lin":
            self.spawn_prob = self.__linear_prob_func
        elif spawn_prob == "parab":
            self.spawn_prob = self.__parabolic_spawn_prob_func
    def define_vel_prob(self, vel_prob):
        """
        Select the velocity-multiplier distribution by name.
        :param vel_prob: "lin" for constant, "parab" for edge-weighted
        """
        if vel_prob == "lin":
            self.vel_prob = self.__lin_vel_prob_func
        elif vel_prob == "parab":
            self.vel_prob = self.__parabolic_vel_prob_func
    def __update(self):
        """
        Runs every frame: spawns new particle waves and expires old ones.
        """
        if self.turned_on:
            if self.should_spawn():
                if self.quant_proport_to_len:
                    # Density mode: particles proportional to segment length
                    quant = math.ceil(self.fin_point_method().distance_to(self.ini_point_method()) * self.quant)
                else:
                    quant = int(self.quant)
                for i in range(quant):
                    self.spawn_particle()
            self.destroy_first()
    def spawn_particle(self):
        """
        Create one particle at a randomized location with a randomized
        velocity, wire up its physics and register it for destruction.
        """
        spawn_location = None
        obj_velocity_vect = None
        spawn_prob = self.spawn_prob()
        vel_prob = self.vel_prob(spawn_prob)
        if self.generation_mode == self.set_line_gen:
            spawn_location = (self.fin_point_method() - self.ini_point_method()) * spawn_prob + self.ini_point_method()
            # randint works on ints; x1000 keeps 3 decimals of precision
            velocity = random.randint(int(self.vel_min * 1000), int(self.vel_max * 1000)) / 1000
            # Particles leave perpendicular to the spawn segment
            obj_velocity_vect = (self.fin_point_method() - self.ini_point_method()).normalize().rotate(90) * velocity
        elif self.generation_mode == self.set_circ_gen:
            normal = Vector2(1, 0).rotate(
                (self.fin_angle_met() - self.ini_angle_met()) * spawn_prob + self.ini_angle_met())
            spawn_location = self.center_point + normal * self.radius
            velocity = random.randint(int(self.vel_min * 1000), int(self.vel_max * 1000)) / 1000
            if self.mode == "radial":
                obj_velocity_vect = normal * velocity * vel_prob
            elif self.mode == "directional":
                obj_velocity_vect = self.direct_met() * velocity * vel_prob
            else:
                raise Exception("Unknown mode {0}".format(str(self.mode)))
        obj = self.spawn_game_obj_class(spawn_location)
        if self.inherit_vel:
            obj.physics = Physics(obj, velocity=(
                obj_velocity_vect + self.game_object.physics.inst_velocity * self.inherit_vel_mult))
        else:
            obj.physics = Physics(obj, velocity=(obj_velocity_vect))
        obj.physics.gravity = self.gravity
        obj.transform.layer = self.layer
        obj.set_creator_object(self.game_object)
        obj.destroy_time = self.duration
        if self.unscaled:
            # Propagate unscaled time to the particle's animations/physics
            if obj.animator is not None:
                for animation in obj.animator.animation_list:
                    animation.unscaled = self.unscaled
            if obj.physics is not None:
                obj.physics.unscaled = self.unscaled
        self.obj_list.append(obj)
    def destroy_first(self):
        """
        Once the system has run longer than `duration`, destroy the
        oldest spawned particle (at most one per frame).
        """
        # BUG FIX: guard against an empty list -- this raised IndexError
        # once every spawned particle had already been destroyed
        if self.obj_list and Time.now() - self.destroy_timer > self.duration:
            self.obj_list[0].destroy_me()
            del self.obj_list[0]
    def __circular_prob_func(self):
        """
        Cosine-weighted value in [0, 1] (both branches of the original
        if/else were identical, so the branch was removed; behavior unchanged).
        """
        angle = random.randint(0, 180)
        rad = angle * math.pi / 180
        return (1 - math.cos(rad)) / 2
    def __parabolic_spawn_prob_func(self):
        """Distribution concentrated towards the extremities of [0, 1]."""
        x = random.randint(0, 100)
        odd = random.randint(0, 100)
        if odd < 50:
            return ((-1)*math.sqrt(x/100)+1)/2
        else:
            return (math.sqrt(x/100)+1)/2
    def __gauss_prob_func(self):
        """Gaussian value centered at 0.5, clamped into [0, 1]."""
        return max(min(random.gauss(0.5, 0.25), 1.0), 0.0)
    def __lin_vel_prob_func(self, x):
        """Constant velocity multiplier: every spawn position gets full speed."""
        return 1
    def __parabolic_vel_prob_func(self, x):
        """Velocity multiplier largest at the spawn-range extremities (0 at 0.5)."""
        return 4 * (x - 0.5) * (x - 0.5)
    def __linear_prob_func(self):
        """Uniform value in [0, 1] with 3-decimal resolution."""
        return random.randint(0, 1000)/1000.0
    def should_spawn(self):
        """
        Decide whether enough time has passed to spawn the next wave.
        :return: True when `period` seconds elapsed since the last spawn
        """
        if self.num_of_periods != "inf":
            self.actual_period += 1
            # NOTE(review): this condition looks inverted -- it suppresses
            # spawning *until* actual_period reaches num_of_periods rather
            # than stopping *after* it. Preserved as-is; confirm intent.
            if self.num_of_periods > self.actual_period:
                return False
        if (Time.now() - self.last_time) > self.period:
            self.last_time = Time.now()
            return True
        else:
            return False
from game_engine.component import Component | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
class Physics(Component):
    """
    Simple Euler integrator: every frame it updates velocity from
    acceleration (plus gravity), position from velocity, and rotation
    from angular velocity. It also tracks the instantaneous velocity
    measured from actual position changes over the last three frames.
    """
    def __init__(self, game_object, mass=None, gravity=0, velocity=None, acceleration=None,
                 angular_velocity=0, angular_acceleration=0, unscaled=False):
        """
        :param game_object: owner of this physics component
        :param mass: mass of the object (stored; not used by the integrator here)
        :param gravity: gravity strength; multiplied by 10 into engine units
        :param velocity: initial velocity (Vector2); defaults to (0, 0)
        :param acceleration: initial acceleration (Vector2); defaults to (0, 0)
        :param angular_velocity: initial angular velocity (degrees/second)
        :param angular_acceleration: angular acceleration (degrees/second^2)
        :param unscaled: when True, ignore the game object's time scale
        """
        super(Physics, self).__init__(game_object)
        # BUG FIX: velocity/acceleration used Vector2(0, 0) as default
        # arguments. Mutable defaults are shared between calls, so every
        # Physics created without explicit vectors mutated the very same
        # Vector2 objects each frame.
        if velocity is None:
            velocity = Vector2(0, 0)
        if acceleration is None:
            acceleration = Vector2(0, 0)
        gravity *= 10  # scale into engine units -- presumably hand-tuned; TODO confirm
        self.mass = mass
        self.velocity = velocity
        self.acceleration = acceleration
        self.angular_velocity = angular_velocity
        self.angular_acceleration = angular_acceleration
        self.unscaled = unscaled
        self.gravity = gravity
        # Aliases `velocity` only until the first __update recomputes it
        self.inst_velocity = velocity
        p = self.transform.position
        t = Time.delta_time(self.unscaled, self.game_object.time_scale)
        # Rolling window of the last three positions/frame deltas, used
        # to measure the instantaneous velocity
        self.position_vect = [Vector2(p.x, p.y), Vector2(p.x, p.y), Vector2(p.x, p.y)]
        self.time_vect = [t, t, t]
    def get_inst_velocity(self):
        """
        :return: the velocity measured from actual movement (Vector2)
        """
        return self.inst_velocity
    def __update(self):
        """Integrate one frame (driven every frame by GameObject)."""
        self.__update_velocity()
        self.__update_position()
        self.__update_angular_velocity()
        self.__update_rotation()
        self.__update_inst_velocity()
    def __update_inst_velocity(self):
        """Measure velocity as displacement over the last three frames."""
        del self.time_vect[0]
        self.time_vect.append(Time.delta_time(self.unscaled, self.game_object.time_scale))
        del self.position_vect[0]
        self.position_vect.append(Vector2(self.transform.position.x, self.transform.position.y))
        # Renamed from `dir`, which shadowed the builtin
        displacement = self.position_vect[2] - self.position_vect[0]
        t = self.time_vect[0] + self.time_vect[1] + self.time_vect[2]
        if t == 0:
            # No time elapsed yet: avoid a division by zero
            self.inst_velocity = Vector2(0, 0)
        else:
            self.inst_velocity = displacement / t
    def __update_position(self):
        """Move the transform by velocity * delta_time."""
        delta = Time.delta_time(self.unscaled, self.game_object.time_scale)
        new_position = Vector2(self.transform.position.x + (self.velocity.x * delta),
                               self.transform.position.y + (self.velocity.y * delta))
        self.transform.translate(new_position)
    def __update_velocity(self):
        """Apply acceleration (plus gravity on the y axis) to the velocity."""
        delta = Time.delta_time(self.unscaled, self.game_object.time_scale)
        self.velocity.x = self.velocity.x + (self.acceleration.x * delta)
        self.velocity.y = self.velocity.y + ((self.acceleration.y + self.gravity) * delta)
    def __update_angular_velocity(self):
        """Apply angular acceleration to the angular velocity."""
        self.angular_velocity = self.angular_velocity + (self.angular_acceleration * Time.delta_time(self.unscaled, self.game_object.time_scale))
    def __update_rotation(self):
        """Rotate the transform by angular_velocity * delta_time."""
        self.transform.rotate(self.angular_velocity * Time.delta_time(self.unscaled, self.game_object.time_scale))
from game_engine.collider import Collider | |
from game_engine.geometry import Geometry | |
class PolygonCollider(Collider):
    """
    Polygon-shaped collider. Point-in-polygon queries work; collision
    *initiation* from the polygon side is not implemented yet -- circle
    colliders perform the circle/polygon test instead.
    """
    def __init__(self, game_object):
        """
        initiate collider and register it on the global collider list
        :param game_object: The reference to the object that contains the collider
        """
        super(PolygonCollider, self).__init__(game_object)
        Collider.add_collider(self)
    def is_vertex_inside(self, point):
        """
        Verify if a point is inside of the polygon
        :param point: the point to verify
        :return: True if it is inside
        """
        return Geometry.polygon_point_intersection(self.get_point_list(), point)
    def get_point_list(self):
        """
        :return: the (scaled) point list of the polygon mesh
        """
        return self.game_object.polygon_mesh.get_points()
    def on_collision(self):
        """
        Check if a collision occurred between this collider and any other.
        Not implemented for polygon-initiated checks; kept for interface
        compatibility with CircleCollider.on_collision.
        :return: (False, None), matching the (collided, game_object)
                 tuple shape of CircleCollider.on_collision
        """
        # BUG FIX: this previously returned None implicitly, while
        # CircleCollider.on_collision returns a 2-tuple; callers that
        # unpack the result would crash. Also removed a leftover debug
        # print snippet from the docstring.
        return False, None
    def __circle_collision(self, circle):
        """
        Check a collision between this collider and a circle collider
        :param circle: the circle collider reference
        :return: True if collided
        """
        raise Exception('--- This methods have not been implemented yet! Use circle_collider instead ---')
    def __polygon_collision(self, polygon):
        """
        Check a collision between this collider and a polygon collider
        :param polygon: the polygon collider reference
        :return: True if collided
        """
        raise Exception('--- This methods have not been implemented yet! Use circle_collider instead ---')
from game_engine.mesh import Mesh | |
from pygame.math import Vector2 | |
class PolygonMesh(Mesh):
    """
    Mesh defined by the owning game_object's _get_points() vertex list.
    Keeps the geometric center and a scale-adjusted copy of the points
    up to date every frame.
    """
    def __init__(self, game_object):
        """
        :param game_object: owner; must provide a _get_points() override
        """
        super(PolygonMesh, self).__init__(game_object)
        self.__point_list = None
        self.__set_points_up()
        self.__geometric_center = None
        self.__update_geometric_center()
        self.__scaled_point_list = None
        self.__update_scaled_point_list()
    def get_points(self):
        """
        Get the list of points that defines the polygon
        :return: the scaled points list (a list of Vector2)
        """
        return self.__scaled_point_list
    def get_unscaled_points(self):
        """
        Get the unscaled points list
        :return: the unscaled point_list
        """
        return self.__point_list
    def __set_points_up(self):
        """
        Set the point list from the game_object's _get_points() result.
        Raises when it returns None, because having a polygon mesh
        requires the game_object to supply its vertices.
        """
        # Call _get_points() once instead of twice
        points = self.game_object._get_points()
        if points is not None:
            self.__point_list = points
        else:
            raise Exception("GameObject {0} has a polygon_mesh, but has not a _get_points method!"
                            .format(type(self.game_object).__name__))
    def __start(self):
        """
        Start the mesh parameters (called once via GameObject.protected_start)
        """
        self.__point_list = self.game_object._get_points()
        self.__update_geometric_center()
        self.__update_scaled_point_list()
    def __update(self):
        """
        Update the mesh parameters (called every frame via GameObject)
        """
        self.__point_list = self.game_object._get_points()
        self.__update_geometric_center()
        self.__update_scaled_point_list()
    def __update_geometric_center(self):
        """
        Update the geometric center (vertex average) from the current points
        """
        self.__geometric_center = Vector2(0, 0)
        for point in self.__point_list:
            self.__geometric_center += point
        self.__geometric_center /= len(self.__point_list)
    def __update_scaled_point_list(self):
        """
        Rebuild the scaled points list: each point is scaled away from
        the geometric center by the transform's scale
        """
        self.__scaled_point_list = list()
        for point in self.__point_list:
            point_x = ((point.x - self.__geometric_center.x) * self.transform.scale.x) + self.__geometric_center.x
            point_y = ((point.y - self.__geometric_center.y) * self.transform.scale.y) + self.__geometric_center.y
            self.__scaled_point_list.append(Vector2(point_x, point_y))
from game_engine.mesh import Mesh | |
class TextMesh(Mesh):
    """
    Mesh that renders a text label with a pygame font.
    """
    def __init__(self, game_object, message, size, font):
        """
        :param game_object: owner of this mesh
        :param message: the string to render
        :param size: the font size (stored for reference)
        :param font: the pygame font object used to render the label
        """
        super(TextMesh, self).__init__(game_object)
        self.message = message
        self.size = size
        self.font = font
        self.label = self.__render()
    def __render(self):
        """Render the current message with the material's current color."""
        return self.font.render(self.message, 1, self.get_material().color)
    def __update(self):
        """
        Re-render the label every frame so message/color changes show up
        """
        self.label = self.__render()
from game_engine.component import Component | |
class Transform(Component):
    """
    Component holding the spatial state of a game_object: position,
    rotation (degrees), scale and draw layer.
    """
    def __init__(self, game_object, position, rotation, scale, layer):
        """
        Store the initial spatial parameters.
        :param game_object: owner of this transform
        :param position: game_object's initial position (Vector2)
        :param rotation: game_object's initial rotation in degrees
        :param scale: game_object's initial scale (Vector2)
        :param layer: the layer in the draw order of the screen
        """
        super(Transform, self).__init__(game_object)
        self.position = position
        self.rotation = rotation
        self.scale = scale
        self.layer = layer
    def translate(self, new_position):
        """
        Move to new_position, mutating the stored vector in place so any
        external references keep observing the same object.
        :param new_position: where the game_object will go to (Vector2)
        """
        self.position.x, self.position.y = new_position.x, new_position.y
    def rotate(self, rotation):
        """
        Add a rotation delta in degrees. Only meaningful for polygons;
        a circle looks the same at any rotation.
        :param rotation: the rotation delta in degrees
        """
        self.rotation = self.rotation + rotation
import pygame | |
from pygame import gfxdraw | |
from .color import Color | |
class Draw:
    """
    Static helper that draws primitives on the pygame display surface.
    """
    # The pygame display surface; set once via set_game_display
    game_display = None
    screen_width = 0
    screen_height = 0
    @classmethod
    def set_game_display(cls, screen, screen_width, screen_height):
        """
        set the screen reference
        :param screen: the pygame's screen object
        :param screen_width: screen width in pixels
        :param screen_height: screen height in pixels
        """
        cls.game_display = screen
        cls.screen_width = screen_width
        cls.screen_height = screen_height
    @classmethod
    def update_background(cls):
        """
        fill all screen with black at the beginning of each frame
        """
        cls.game_display.fill(Color.black)
    @staticmethod
    def __clamp_alpha(alpha):
        """
        Clamp alpha into the [0, 255] range accepted by pygame.
        BUG FIX: only negative values were clamped before; values above
        255 made gfxdraw / Surface.fill raise.
        """
        return max(0, min(255, alpha))
    @classmethod
    def circle(cls, position, radius, color, alpha):
        """
        Draw a circle
        :param position: circle's position (has .x/.y)
        :param radius: circle's radius
        :param color: circle's color as an (r, g, b) sequence
        :param alpha: the opacity of the draw, clamped into [0, 255]
        """
        alpha = cls.__clamp_alpha(alpha)
        pygame.gfxdraw.filled_circle(cls.game_display, int(position.x), int(position.y), int(radius), (color[0], color[1], color[2], alpha))
    @classmethod
    def polygon(cls, color, point_list, alpha):
        """
        Draw a polygon
        :param color: the color of the polygon as an (r, g, b) sequence
        :param point_list: the list of points that defines the polygon
        :param alpha: the opacity of the draw, clamped into [0, 255]
        """
        alpha = cls.__clamp_alpha(alpha)
        pygame.gfxdraw.filled_polygon(cls.game_display, point_list, (color[0], color[1], color[2], alpha))
    @classmethod
    def text(cls, position_x, position_y, label, alpha=255):
        """
        Draws text
        :param position_x: text's x position
        :param position_y: text's y position
        :param label: the pre-rendered pygame label surface to draw
        :param alpha: the opacity of the draw, clamped into [0, 255]
        """
        alpha = cls.__clamp_alpha(alpha)
        if alpha != 255:
            # Multiply the label by a translucent white surface to fade it
            alpha_img = pygame.Surface(label.get_rect().size, pygame.SRCALPHA)
            alpha_img.fill((255, 255, 255, alpha))
            label.blit(alpha_img, (0, 0), special_flags=pygame.BLEND_RGBA_MULT)
        cls.game_display.blit(label, (position_x, position_y))
import pygame | |
from .draw import Draw | |
from game_engine.scene import Scene | |
from game_engine.time import Time | |
from game_engine.input import Input | |
class Engine:
    """
    Entry point of the engine: applies the game settings, initializes
    pygame, and wires the Draw/Input/Scene singletons into the Time
    coroutine loop.
    """
    # Default window size; overridden by game_settings when provided
    screen_width = 240
    screen_height = 426
    # Window caption; overridden by game_settings when provided
    game_name = "Untitled"
    # The pygame display surface, created inside game()
    game_display = None
    # Scene list taken from game_settings.scenes_list
    scenes = None
    @classmethod
    def start_game(cls, game_settings):
        """
        Start the game coroutine with pygame
        :param game_settings: settings of the game
        """
        cls.set_game_settings(game_settings)
        # Register the async game() entry point, then start the main loop
        Time.start_coroutine(cls.game)
        Time.start_game()
    @classmethod
    def set_game_settings(cls, game_settings):
        """
        set up some game settings on engine
        :param game_settings: settings of the game
        :raises Exception: when game_settings has no scenes_list attribute
        """
        if hasattr(game_settings, "scenes_list"):
            cls.scenes = game_settings.scenes_list
        else:
            raise Exception("No scenes_list in game_settings file!")
        # The remaining settings are optional; class defaults apply otherwise
        if hasattr(game_settings, "game_name"):
            cls.game_name = game_settings.game_name
        if hasattr(game_settings, "screen_width"):
            cls.screen_width = game_settings.screen_width
        if hasattr(game_settings, "screen_height"):
            cls.screen_height = game_settings.screen_height
    @classmethod
    async def game(cls):
        """
        Async method that will be the coroutine where the game will run in
        """
        # Smaller mixer buffer (512) presumably chosen to reduce sound
        # latency -- must be configured before pygame.init(); TODO confirm
        pygame.mixer.pre_init(44100, -16, 1, 512)
        pygame.init()
        cls.game_display = pygame.display.set_mode((cls.screen_width, cls.screen_height))
        pygame.display.set_caption(cls.game_name)
        Scene.scenes_list = cls.scenes
        Draw.set_game_display(cls.game_display, cls.screen_width, cls.screen_height)
        Input.set_engine_reference(cls)
        Scene.start_first_scene()
    @classmethod
    def end_game(cls):
        """
        Quits the game
        """
        pygame.quit()
        quit()
from .draw import Draw | |
from .components.transform import Transform | |
from .scene import Scene | |
from pygame.math import Vector2 | |
class GameObject:
    """
    Base class for everything placed on a scene. Holds the transform plus
    optional components (meshes, physics, animator, colliders, particle
    system) and registers itself on the current running scene on creation.
    """
    current_running_scene = 0
    def __init__(self, position=None, rotation=0, scale=None, layer=0):
        """
        set basic game_object parameters
        :param position: game_object's initial position (Vector2); defaults to (0, 0)
        :param rotation: game_object's initial rotation in degrees
        :param scale: game_object's initial scale (Vector2); defaults to (1, 1)
        :param layer: the layer in the draw order of the screen
        """
        # BUG FIX: position/scale used Vector2(...) as default arguments.
        # Mutable defaults are shared between calls, and Transform.translate
        # mutates position in place -- so every game object created with
        # the defaults shared (and moved) the very same vector.
        if position is None:
            position = Vector2(0, 0)
        if scale is None:
            scale = Vector2(1, 1)
        # Placeholder needed because Transform's own Component.__init__
        # reads game_object.transform before the real Transform exists
        self.transform = 0
        self.transform = Transform(self, position, rotation, scale, layer)
        self.transform.transform = self.transform
        self.tag = None
        self.animator = None
        self.animation = None
        self.material = None
        self.physics = None
        self.polygon_mesh = None
        self.particle_system = None
        self.circle_mesh = None
        self.text_mesh = None
        self.collidable = True
        self.box_collider = None
        self.circle_collider = None
        self.polygon_collider = None
        self.time_scale = 1
        self.__instantiate(self)
    def awake(self):
        """
        Called just once when the GameObject is instantiated on the scene,
        before start()
        """
        pass
    def start(self):
        """
        Called just once when the GameObject is instantiated on the scene
        """
        pass
    def update(self):
        """
        Called every frame
        """
        pass
    def protected_start(self):
        """Engine-side start hook: initializes components that need it."""
        if self.polygon_mesh is not None:
            self.polygon_mesh._PolygonMesh__start()
    def protected_update(self):
        """
        Runs every frame, but is not intended to be overridden inside a
        game_object: it drives the private update of every component
        """
        if self.animator is not None:
            self.animator._Animator__update()
        if self.physics is not None:
            self.physics._Physics__update()
        if self.polygon_mesh is not None:
            self.polygon_mesh._PolygonMesh__update()
        if self.text_mesh is not None:
            self.text_mesh._TextMesh__update()
        if self.particle_system is not None:
            self.particle_system._ParticleSystem__update()
    def draw_game_object(self):
        """
        Draw the game_object on screen using whichever mesh it has
        (polygon, circle or text, checked in that order)
        """
        if self.polygon_mesh is not None:
            Draw.polygon(self.material.color, self.polygon_mesh.get_points(), self.material.alpha)
        elif self.circle_mesh is not None:
            Draw.circle(self.transform.position, self.circle_mesh.get_radius(), self.material.color, self.material.alpha)
        elif self.text_mesh is not None:
            Draw.text(self.transform.position.x, self.transform.position.y, self.text_mesh.label, self.material.alpha)
    def _get_points(self):
        """Subclasses with a polygon mesh override this to supply vertices."""
        return None
    def destroy_me(self):
        """Remove this game_object from the scene."""
        GameObject.destroy(self)
    @classmethod
    def find_by_type(cls, game_object_type_string):
        """
        Find all the game objects of that type in the current running scene
        :param game_object_type_string: a string with the game_object type (class name)
        :return: a list with all the game objects of that type
        """
        return Scene.current_running_scene.find_game_object_by_type(game_object_type_string)
    @classmethod
    def find_by_tag(cls, game_object_tag_string):
        """
        Find all the game objects with that tag in the current running scene
        :param game_object_tag_string: the tag name
        :return: a list with all game_object in the scene with that tag
        """
        return Scene.current_running_scene.find_game_object_by_tag(game_object_tag_string)
    @classmethod
    def __instantiate(cls, game_object):
        """
        Instantiate a new game_object on scene
        :param game_object: game_object to be instantiated
        """
        Scene.current_running_scene.add_game_object(game_object)
    @classmethod
    def destroy(cls, game_object):
        """
        Destroy the game_object, remove it from scene
        :param game_object: the game_object to be removed (can be a list or tuple)
        """
        if isinstance(game_object, (list, tuple)):
            for game_obj in game_object:
                Scene.current_running_scene.remove_game_object(game_obj)
        else:
            Scene.current_running_scene.remove_game_object(game_object)
import math | |
from pygame.math import Vector2 | |
class Geometry:
    """
    Stateless 2D geometry helpers used by the colliders.
    """
    @classmethod
    def polygon_point_intersection(cls, point_list, point):
        """
        Ray-casting point-in-polygon test: cast a horizontal ray from the
        point and count edge crossings (odd count = inside).
        :param point_list: polygon vertices in order (each unpackable as x, y)
        :param point: object with .x and .y attributes
        :return: True if point is inside the polygon
        """
        n = len(point_list)
        inside = False
        x, y = point.x, point.y
        p1x, p1y = point_list[0]
        for i in range(n + 1):
            p2x, p2y = point_list[i % n]
            if y > min(p1y, p2y):
                if y <= max(p1y, p2y):
                    if x <= max(p1x, p2x):
                        if p1y != p2y:
                            # x coordinate where this edge crosses the ray
                            xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
                        if p1x == p2x or x <= xints:
                            inside = not inside
            p1x, p1y = p2x, p2y
        return inside
    @classmethod
    def circle_point_intersection(cls, circle_center, circle_radius, point):
        """
        :return: True if point is inside the circle (border inclusive)
        """
        return point.distance_to(circle_center) <= circle_radius
    @classmethod
    def line_point_intersection(cls, segment, point):
        """
        Side-of-segment test combined with the segment's y range.
        :param segment: two endpoints, each unpackable as (x, y)
        :param point: object with .x and .y attributes
        :return: True when the point passes the sign/y-range test
        """
        p1x, p1y = segment[0]
        p2x, p2y = segment[1]
        # BUG FIX: the original referenced undefined names p1/p2 and
        # raised NameError on every call; use the unpacked coordinates.
        valor = (p2y - p1y) * point.x + (p1x - p2x) * point.y - p1x * (p2y - p1y) - p1y * (p1x - p2x)
        return (p2y - p1y) * valor < 0 and ((p1y <= point.y < p2y) or (p2y <= point.y < p1y))
    @classmethod
    def inside_bounding_box(cls, point_list, point):
        """
        :param point_list: points (with .x/.y) whose bounding box is used
        :param point: the point to test (with .x/.y)
        :return: True when point lies inside the axis-aligned bounding box
                 (max edges exclusive, as in the original)
        """
        # Replaced the arbitrary +/-100000 sentinels with infinities so
        # the test works for coordinates of any magnitude
        xmax = -math.inf
        xmin = math.inf
        ymax = -math.inf
        ymin = math.inf
        for p in point_list:
            xmax = max(p.x, xmax)
            xmin = min(p.x, xmin)
            ymax = max(p.y, ymax)
            ymin = min(p.y, ymin)
        return xmin <= point.x < xmax and ymin <= point.y < ymax
    @classmethod
    def rotate_point(cls, pivot, point, angle):
        """
        Rotate point around pivot.
        :param pivot: center of rotation (with .x/.y)
        :param point: point to rotate (with .x/.y)
        :param angle: rotation angle -- in radians (math.cos/sin), unlike
                      the degree-based transform rotation
        :return: the rotated point as a Vector2
        """
        cx, cy = pivot.x, pivot.y
        px, py = point.x, point.y
        px -= cx
        py -= cy
        pxnew = px * math.cos(angle) - py * math.sin(angle)
        pynew = px * math.sin(angle) + py * math.cos(angle)
        px = pxnew + cx
        py = pynew + cy
        return Vector2(px, py)
import pygame | |
class Input:
    """
    Static keyboard-state tracker fed from the pygame event queue.
    `is_pressing_*` stays True while a key is held; `press_*_down` is
    True only for the frame the key went down.
    """
    engine = None
    is_pressing_left = False
    is_pressing_right = False
    is_pressing_space = False
    press_left_down = False
    press_right_down = False
    press_space_down = False
    @classmethod
    def update_input(cls, events):
        """
        Scan the pygame event list and refresh the key state.
        :param events: events list from the pygame queue
        """
        cls.reset_keys()
        for event in events:
            if event.type == pygame.KEYDOWN:
                cls.__key_down(event)
            elif event.type == pygame.KEYUP:
                cls.__key_up(event)
            elif event.type == pygame.QUIT:
                cls.__quit_game()
    @classmethod
    def set_engine_reference(cls, class_ref):
        """
        Store the engine class so quitting can be delegated to it.
        :param class_ref: The reference to engine class
        """
        cls.engine = class_ref
    @classmethod
    def reset_keys(cls):
        """
        Clear the one-frame "pressed this frame" flags.
        """
        cls.press_left_down = False
        cls.press_right_down = False
        cls.press_space_down = False
    @classmethod
    def __key_down(cls, event):
        """
        A key press started: latch both the held state and the
        one-frame pressed state.
        :param event: keydown event
        """
        if event.key == pygame.K_LEFT:
            cls.is_pressing_left = True
            cls.press_left_down = True
        elif event.key == pygame.K_RIGHT:
            cls.is_pressing_right = True
            cls.press_right_down = True
        elif event.key == pygame.K_SPACE:
            cls.is_pressing_space = True
            cls.press_space_down = True
    @classmethod
    def __key_up(cls, event):
        """
        A key was released: clear its held state.
        :param event: keyup event
        """
        if event.key == pygame.K_LEFT:
            cls.is_pressing_left = False
        elif event.key == pygame.K_RIGHT:
            cls.is_pressing_right = False
        elif event.key == pygame.K_SPACE:
            cls.is_pressing_space = False
    @classmethod
    def __quit_game(cls):
        """
        The window close event arrived: ask the engine to quit.
        """
        cls.engine.end_game()
class KeyFrame:
    def __init__(self, time, position=None, rotation=None, scale=None, layer=None, alpha=None, interpolation="in_cubic"):
        """
        One key frame of an animation; a list of these is handed to an Animator.
        A channel left as None is not animated by this frame.
        :param time: moment this key frame applies
        :param position: target position, or None
        :param rotation: target rotation, or None
        :param scale: target scale, or None
        :param layer: target draw layer, or None
        :param alpha: target opacity, or None
        :param interpolation: name of the easing curve used to reach this frame
        """
        self.time = time
        self.interpolation = interpolation
        self.position = position
        self.rotation = rotation
        self.scale = scale
        self.layer = layer
        self.alpha = alpha
from game_engine.color import Color | |
class Material:
    """Visual surface description shared by meshes: a color plus an opacity."""
    def __init__(self, color=Color.white, alpha=255):
        """
        :param color: material color (defaults to white)
        :param alpha: opacity, 0 (transparent) to 255 (opaque)
        """
        self.color, self.alpha = color, alpha
from game_engine.component import Component | |
class Mesh(Component):
    """Component that makes a GameObject drawable; requires a material."""
    def __init__(self, game_object):
        super(Mesh, self).__init__(game_object)
        self.check_material()
    def check_material(self):
        """
        check whether the game_object material was defined before create the mesh
        :raises Exception: when the owning game_object has no material
        """
        if self.game_object.material is None:
            # BUGFIX: the original used type(...).name, which raises
            # AttributeError while building the message; the class name
            # lives in __name__.
            raise Exception("GameObject {0} must have a material defined in order to have a Mesh"
                            .format(type(self.game_object).__name__))
    def get_material(self):
        """
        get the material of this mesh
        :return: the game_object material
        """
        return self.game_object.material
import pygame | |
from .collider import Collider | |
from pygame.math import Vector2 | |
from .time import Time | |
from .input import Input | |
from .draw import Draw | |
class Scene:
    """
    One game scene: owns the scene's game_objects list, runs the per-frame
    loop, and carries the class-level registry used to switch between scenes.
    """
    current_running_scene_index = 0
    # NOTE(review): 0 doubles as a "no scene yet" sentinel instead of None.
    current_running_scene = 0
    changing_scene = True
    scenes_list = []
    def __init__(self, init_game_objects_controllers_reference_list):
        """
        Set object's variables to start a new scene
        :param init_game_objects_controllers_reference_list: list of all mesh_objects of the scene
        """
        Scene.changing_scene = True
        init_game_objects_list = []
        self.init_game_objects_list = init_game_objects_list
        self.game_objects = []
        self.frame_events = []
        if Scene.current_running_scene == 0:
            # The very first scene constructed becomes the running scene.
            Scene.current_running_scene = self
        for reference in init_game_objects_controllers_reference_list:
            # Controllers are instantiated with a zeroed transform
            # (position, rotation, scale, layer).
            init_game_objects_list.append(reference(Vector2(0, 0), 0, Vector2(0, 0), 0))
        self.run_on_next_frame_list = list()
        self.should_end_scene = False
        Scene.changing_scene = False
    def start(self):
        """
        Run methods to set the scene up
        """
        Draw.update_background()
        self.should_end_scene = False
        self.game_objects = list()
        for game_object in self.init_game_objects_list:
            self.game_objects.append(game_object)
        self.run_events()
        self.run_all_awake()
        self.run_all_starts()
        pygame.display.flip()
        Time.end_of_start()
    def run_all_awake(self):
        """
        Run the awake method of each game_object
        """
        for game_object in self.init_game_objects_list:
            game_object.awake()
    def run_all_starts(self):
        """
        Run the start method of each game_object
        """
        for game_object in self.init_game_objects_list:
            game_object.start()
    def run_all_updates(self):
        """
        Runs the update of each game_object of the scene
        """
        for game_object in self.game_objects:
            game_object.protected_update()
            game_object.update()
    def run_next_frame_list(self):
        # Run the callbacks queued for the start of this frame (usually the
        # awake/start of objects added mid-frame), then clear the queue.
        for method in self.run_on_next_frame_list:
            method()
        self.run_on_next_frame_list = list()
    def draw_all_game_objects(self):
        """
        Sort the mesh_objects list based on layer and then
        run draw method of each game_object of the scene
        """
        self.game_objects.sort(key=lambda game_object: game_object.transform.layer)
        for game_object in self.game_objects:
            game_object.draw_game_object()
    def scene_loop(self):
        """
        Defines the main loop of the scene
        The scene occurs while in the loop
        """
        while not self.should_end_scene:
            Draw.update_background()
            self.run_next_frame_list()
            self.run_events()
            self.run_all_updates()
            self.draw_all_game_objects()
            pygame.display.flip()
            self.run_debugs()
            Time.end_of_loop()
        self.exit_scene()
    def add_game_object(self, game_object):
        """
        Add a new game object to the scene's mesh_objects list
        :param game_object: new game_object to add to scene
        """
        # Prepended so it draws before same-layer objects added earlier.
        self.game_objects = [game_object] + self.game_objects
        if not Scene.changing_scene:
            # Defer awake/start to the beginning of the next frame.
            self.run_on_next_frame_list.append(game_object.awake)
            self.run_on_next_frame_list.append(game_object.start)
    def remove_game_object(self, game_object):
        """
        Remove a game_object if it is on game_object list
        :param game_object: the game_object to be removed
        """
        if game_object in self.game_objects:
            self.game_objects.remove(game_object)
            Collider.remove(game_object)
    def find_game_object_by_type(self, type_of_game_obj):
        """
        Return a list with all game object in the current scene that
        is instance of the class type_of_game_obj
        :param type_of_game_obj: the name of the class of the game object that you wat to find
        :return: a list of mesh_objects of that type
        """
        return_list = []
        for game_object in self.game_objects:
            if self.get_type_str(game_object) == type_of_game_obj:
                return_list.append(game_object)
        return return_list
    def get_type_str(self, object):
        """
        Extract the bare class name of *object* (no module path) from its
        repr, e.g. "<class 'pkg.mod.Player'>" -> "Player".
        :param object: any instance
        :return: the class name as a string
        """
        strings = str(type(object))[::-1].split(".")[0][::-1]
        type_string = strings.split("'")[0]
        return type_string
    def find_game_object_by_tag(self, tag):
        """
        Return a list with all game object in the current scene that
        has a tag string equals to the tag you want
        :param tag: the tag of the mesh_objects you want
        :return: a list with the game object with that tag
        """
        return_list = []
        for game_object in self.game_objects:
            if game_object.tag == tag:
                return_list.append(game_object)
        return return_list
    def run_events(self):
        """
        get the events in pygame queue
        and run the methods related to them
        """
        self.frame_events = pygame.event.get()
        Input.update_input(self.frame_events)
    def debug_event(self):
        """
        DEBUG print all the events of each frame
        """
        for event in self.frame_events:
            print(event)
    def debug_fps(self):
        """
        DEBUG: print the game fps each frame
        """
        print(Time.clock.get_fps())
    def debug_objs_list(self):
        """
        DEBUG print the game_object list each frame
        """
        object_list = []
        for game_object in self.game_objects:
            object_list.append(self.get_type_str(game_object))
        print(object_list)
    def debug_objs_len(self):
        """
        DEBUG print the number of game_object each frame
        """
        print(len(self.game_objects))
    def end_scene(self):
        """
        Set the variable to stop scene loop
        """
        self.should_end_scene = True
    def exit_scene(self):
        """
        empty the mesh_objects and the collider list and start next scene
        """
        self.game_objects = []
        Collider.collider_list = []
        # Instantiate the next scene (index set by change_scene) and run it.
        Scene.current_running_scene = Scene.scenes_list[Scene.current_running_scene_index]()
        Scene.start_next_scene()
    @classmethod
    def start_first_scene(cls):
        """
        Start the first scene
        """
        cls.current_running_scene = cls.scenes_list[0]()
        cls.current_running_scene_index = 0
        cls.current_running_scene.start()
        cls.current_running_scene.scene_loop()
    @classmethod
    def change_scene(cls, scene_index):
        """
        End the current scene to start the next scene
        :param scene_index: the index on scene_list of the next scene
        """
        cls.current_running_scene.end_scene()
        cls.current_running_scene_index = scene_index
    @classmethod
    def start_next_scene(cls):
        """
        Start next scene
        """
        cls.current_running_scene.start()
        cls.current_running_scene.scene_loop()
    def run_debugs(self):
        """
        DEBUG: Run debugs in scene
        They Are commented by default
        Only uncomment them to debug
        """
        # self.debug_objs_len()
        # self.debug_objs_list()
        # self.debug_event()
        # self.debug_fps()
import pygame | |
import asyncio | |
class Time:
    """
    Class-level time utilities: the frame clock, scaled/unscaled delta time,
    and a small asyncio-based coroutine runner.
    """
    # Event loop used to drive coroutines registered via start_coroutine.
    ioloop = asyncio.get_event_loop()
    # Tick count (ms since pygame.init) captured at the end of the last frame.
    last_frame_tick = pygame.time.get_ticks()
    clock = pygame.time.Clock()
    tasks = []
    # Global multiplier applied by delta_time; 0.0 effectively pauses the game.
    time_scale = 1.0
    @classmethod
    def start_game(cls):
        """
        Run the coroutine tasks list
        """
        # Blocks until every registered coroutine finishes, then closes the loop.
        cls.ioloop.run_until_complete(asyncio.wait(cls.tasks))
        cls.ioloop.close()
    @classmethod
    def start_coroutine(cls, method):
        """
        Add a method to be run simultaneously along the game
        :param method: The async method that will be added to the task list
        """
        cls.tasks.append(cls.ioloop.create_task(method()))
    @classmethod
    def end_of_start(cls):
        """
        Set the time at the moment to get_ticks_last_frame
        """
        cls.last_frame_tick = pygame.time.get_ticks()
        # Caps the frame rate at 144 fps.
        cls.clock.tick(144)
    @classmethod
    def end_of_loop(cls):
        """
        Set the time at the moment to get_ticks_last_frame
        """
        cls.last_frame_tick = pygame.time.get_ticks()
        # Caps the frame rate at 144 fps.
        cls.clock.tick(144)
    @classmethod
    def delta_time(cls, unscaled=False, time_scale=1):
        """
        :param unscaled: when True, ignore the global Time.time_scale and use
                         the local *time_scale* argument instead (used by
                         callers that must keep timers running while paused)
        :param time_scale: multiplier applied only on the unscaled path
        :return: the duration of that frame in seconds (0 before enough
                 frames have elapsed for get_fps() to be non-zero)
        """
        if cls.clock.get_fps() != 0:
            if unscaled:
                return 1 / cls.clock.get_fps()*time_scale
            else:
                return (1/cls.clock.get_fps())*cls.time_scale
        else:
            return 0
    @classmethod
    def now(cls):
        """
        :return: the time right now in seconds, based on how long the game is running
        """
        return pygame.time.get_ticks()/1000
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.color import Color | |
from game_engine.material import Material | |
from random import randint as rand | |
class BackgroundParticlesController(GameObject):
    """Spawns and recycles two parallax layers of small falling particles."""
    def start(self):
        # Fast (near) layer and slow (far) layer, in pixels per second.
        self.first_layer_velocity = 200
        self.second_layer_velocity = 100
        self.first_layer = []
        self.second_layer = []
        self.generate_particles()
    def update(self):
        # Recycle particles that left the screen; let the others keep falling.
        for layer, velocity in ((self.first_layer, self.first_layer_velocity),
                                (self.second_layer, self.second_layer_velocity)):
            for particle in layer:
                if particle.transform.position.y > Constants.screen_height:
                    # Respawn at a random x along the top edge.
                    particle.transform.position = Vector2(rand(0, Constants.screen_width), 0)
                else:
                    self.fall(particle, velocity)
    def fall(self, obstacle, fall_velocity):
        """Move one particle down by its layer velocity, frame-rate independent."""
        new_y = obstacle.transform.position.y + fall_velocity * Time.delta_time()
        obstacle.transform.position = Vector2(obstacle.transform.position.x, new_y)
    def generate_particles(self):
        """Create 5 silver (fast layer) and 5 gray (slow layer) particles."""
        for color, layer in ((Color.silver, self.first_layer),
                             (Color.gray, self.second_layer)):
            for _ in range(5):
                particle = Rectangle(Vector2(rand(0, Constants.screen_width), rand(0, Constants.screen_height)),
                                     Vector2(0.007 * Constants.screen_width, 0.007 * Constants.screen_width),
                                     Material(color), -3)
                # Purely decorative: no collider, never collidable.
                particle.polygon_collider = []
                particle.collidable = False
                layer.append(particle)
from pygame.math import Vector2 | |
from pygame import mixer | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from random import uniform as randfloat | |
from game.game_objects.mesh_objects.invencible_circle import InvencibleCircle | |
from game_engine.material import Material | |
from game_engine.basic_objects.text import Text | |
from game_engine.color import Color | |
from game.scripts.constants import Constants | |
from game.animations.text_up_fade_out_animation import TextUpFadeOutAnimation | |
from game_engine.components.animator import Animator | |
class InvenciblePowerUpController(GameObject):
    """
    Spawns invincibility power-ups, applies/clears the invincibility state on
    the two player objects and drives the blinking color warning animation.
    """
    def start(self):
        self.fall_velocity = 150
        self.radius = Constants.screen_width * 0.025
        self.game_object_list = []
        self.sound_collect = mixer.Sound('game/assets/soundtrack/powerup_collect_01.ogg')
        # Far in the past so the player does not start the game invincible.
        self.time_of_last_invencibily = -1000
        # Duration of the invincibility effect, in seconds.
        self.invecible_time = 3.5
        self.current_color = "normal"
        # Normalized (0..1) progress marks at which the player color blinks;
        # the final 1.10 mark is never reached and acts as a stopper.
        self.animation_ticks_times = [0.4, 0.5, 0.6, 0.7, 0.75, 0.80, 0.85, 0.90, 0.95, 1.00, 1.10]
        self.current_animation_tick_index = 0
        self.should_delete_power_up_text = False
        self.power_up_text_gen_time = 0.0
    def awake(self):
        self.player_controller = GameObject.find_by_type("PlayerController")[0]
    def update(self):
        if Time.time_scale == 0.0:
            # Paused game. Adjust timers so invincibility is not consumed while paused.
            self.time_of_last_invencibily += Time.delta_time(True)
        difference_time = Time.now() - self.time_of_last_invencibily
        if difference_time > self.invecible_time:
            # Effect expired (or never active): clear the flag, restore colors.
            for i in range(2):
                self.player_controller.game_object_list[i].is_invencible = False
            self.get_back_to_original_colors()
            self.current_animation_tick_index = 0
        else:
            value = min(difference_time / self.invecible_time, 1)  # Just to convert between 0 and 1
            diff = abs(value - self.animation_ticks_times[self.current_animation_tick_index])
            if diff < 0.01:
                self.current_animation_tick_index += 1
                self.tick_colors()
        # BUGFIX: iterate over a snapshot -- the original removed items from
        # self.game_object_list while iterating it, which skips the element
        # following each removed one.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)
        self.delete_power_up_text()
    def fall(self, obstacle):
        """Move one power-up down the screen, frame-rate independent."""
        obstacle.transform.position.y = obstacle.transform.position.y + (self.fall_velocity * Time.delta_time())
    def get_power_up(self):
        """Consume the oldest power-up: sound, floating text, invincibility on."""
        self.sound_collect.play()
        power_up = self.game_object_list[0]
        # Power up text effect
        font_path = "game/assets/fonts/neuropolxrg.ttf"
        text_size = 15
        power_up_text = Text(power_up.transform.position, "INVENCIBLE!", Material(Color.purple, alpha=255), text_size, font_path)
        power_up_text.transform.position.x -= power_up_text.text_mesh.size
        power_up_text.animation = TextUpFadeOutAnimation(power_up_text)
        power_up_text.animator = Animator(power_up_text, [power_up_text.animation])
        power_up_text.animator.play()
        for i in range(2):
            self.player_controller.game_object_list[i].is_invencible = True
        self.change_colors_to_green()
        self.time_of_last_invencibily = Time.now()
        self.power_up_text = power_up_text
        self.should_delete_power_up_text = True
    def delete_power_up_text(self):
        """Remove the floating 'INVENCIBLE!' text one second after pickup."""
        if self.should_delete_power_up_text:
            if Time.now() - self.time_of_last_invencibily > 1.0:
                self.should_delete_power_up_text = False
                self.power_up_text.destroy_me()
    def generate_obstacle(self):
        """Spawn a new power-up circle at a random x just above the screen."""
        random_pos = int(randfloat(self.radius + Constants.circCenter_x - Constants.circRadius,
                                   Constants.screen_width -
                                   (self.radius + Constants.circCenter_x - Constants.circRadius)))
        circle = InvencibleCircle(Vector2(random_pos, -2 * self.radius), self.radius,
                                  Material(Color.purple))
        self.game_object_list.append(circle)
    def tick_colors(self):
        """Alternate the player colors between normal and the power-up color."""
        if self.current_color == "normal":
            self.current_color = "green"
            self.change_colors_to_green()
        else:
            self.current_color = "normal"
            self.get_back_to_original_colors()
    def get_back_to_original_colors(self):
        # NOTE(review): despite the "green" naming, the effect color used
        # throughout is purple; the originals are orange and blue.
        self.player_controller.game_object_list[0].change_color(Color.orange)
        self.player_controller.game_object_list[1].change_color(Color.blue)
    def change_colors_to_green(self):
        for i in range(2):
            self.player_controller.game_object_list[i].change_color(Color.purple)
from pygame.math import Vector2 | |
from game.game_objects.mesh_objects.star import Star | |
from pygame import mixer | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from random import uniform as randfloat | |
from game_engine.basic_objects.text import Text | |
from game_engine.material import Material | |
from game_engine.color import Color | |
from game.scripts.constants import Constants | |
from game.animations.text_up_fade_out_animation import TextUpFadeOutAnimation | |
from game_engine.components.animator import Animator | |
class StarScoreController(GameObject):
    """Spawns collectible stars, awards points and shows the '+50' text effect."""
    def start(self):
        self.fall_velocity = 150
        self.angular_speed = 0
        self.game_object_list = []
        self.size = Constants.screen_width * 0.025
        self.points_per_star = 50
        self.sound_collect = mixer.Sound('game/assets/soundtrack/star_collect_01.ogg')
        self.should_delete_plus_score_text = False
        self.plus_score_text_gen_time = 0.0
    def awake(self):
        self.score_controller = GameObject.find_by_type("ScoreController")[0]
    def update(self):
        # BUGFIX: iterate over a snapshot -- the original removed items from
        # self.game_object_list while iterating it, which skips the element
        # following each removed one.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)
        self.delete_plus_score_text()
    def fall(self, obstacle):
        """Advance one star: move it down and spin it, frame-rate independent."""
        obstacle.fall(self.fall_velocity * Time.delta_time(), self.angular_speed * Time.delta_time())
    def get_star(self):
        """Consume the oldest star: sound, '+50' text effect, add the points."""
        self.sound_collect.play()
        obstacle = self.game_object_list[0]
        # plus score effect
        font_path = "game/assets/fonts/neuropolxrg.ttf"
        plus_score = Text(obstacle.transform.position, "+50", Material(Color.white, alpha=255), 15, font_path)
        plus_score.transform.position.x -= plus_score.text_mesh.size
        plus_score.animation = TextUpFadeOutAnimation(plus_score)
        plus_score.animator = Animator(plus_score, [plus_score.animation])
        plus_score.animator.play()
        self.time_of_last_plus_score = Time.now()
        self.plus_score = plus_score
        self.should_delete_plus_score_text = True
        self.score_controller.score += self.points_per_star
    def delete_plus_score_text(self):
        """Remove the floating '+50' text one second after it was created."""
        if self.should_delete_plus_score_text:
            if Time.now() - self.time_of_last_plus_score > 1.0:
                self.should_delete_plus_score_text = False
                self.plus_score.destroy_me()
    def generate_obstacle(self):
        """Spawn a new star at a random x just above the screen."""
        random_pos = int(randfloat(self.size / 2 + Constants.circCenter_x - Constants.circRadius,
                                   Constants.screen_width -
                                   (self.size / 2 + Constants.circCenter_x - Constants.circRadius)))
        star = Star(Vector2(random_pos, -self.size), self.size,
                    Material(Color.yellow))
        self.game_object_list.append(star)
from game_engine.time import Time | |
from random import randint as rand | |
from pygame.math import Vector2 | |
#Controllers | |
from game.game_objects.controllers.items_controller.star_score_controller import StarScoreController | |
from game.game_objects.controllers.items_controller.invencible_power_up_controller import InvenciblePowerUpController | |
from game_engine.game_object import GameObject | |
class ItemsControllerWrapper(GameObject):
    """Owns the power-up generators and periodically triggers one at random."""
    def start(self):
        self.power_up_generators = [StarScoreController(Vector2(0, 0), 0, Vector2(0, 0), 0),
                                    InvenciblePowerUpController(Vector2(0, 0), 0, Vector2(0, 0), 0)]
        # Milliseconds between power-up spawns.
        self.power_up_generation_delta = 6500
        # Millisecond timestamp of the last spawn.
        self.last_power_up_time = 1000 * Time.now()
        self.generation_obstacle_difficult = 1
        for generator in self.power_up_generators:
            generator.start()
    def update(self):
        if Time.time_scale == 0.0:
            # Adjust timer when paused so no spawn burst happens on resume.
            self.last_power_up_time += 1000 * Time.delta_time(True)
        elapsed_ms = 1000 * Time.now() - self.last_power_up_time
        if elapsed_ms > self.power_up_generation_delta * self.generation_obstacle_difficult:
            self.generate_random_power_up()
    def generate_random_power_up(self):
        """Reset the spawn timer and ask a randomly chosen generator for an item."""
        self.last_power_up_time = 1000 * Time.now()
        chosen_generator = self.power_up_generators[rand(0, 1)]
        chosen_generator.generate_obstacle()
from game_engine.scene import Scene | |
from game_engine.game_object import GameObject | |
from game_engine.time import Time | |
from game_engine.input import Input | |
from game.game_objects.mesh_objects.screen_fader import ScreenFader | |
from game.game_objects.controllers.player_controller import PlayerController | |
from game.game_objects.controllers.score_controller import ScoreController | |
from game.game_objects.controllers.background_particles_controller import BackgroundParticlesController | |
from game.game_objects.controllers.obstacle_controller_wrapper import ObstacleControllerWrapper | |
from game.game_objects.controllers.items_controller_wrapper import ItemsControllerWrapper | |
from game.game_objects.controllers.pause_controller import PauseController | |
class MainSceneController(GameObject):
    """Boots the gameplay scene, handles pause toggling and the game-over fade."""
    def start(self):
        """
        setup initial scene variables
        """
        self.setup_initializer()
        self.setup_fader()
        self.fade_out_duration = 1.2
    def setup_initializer(self):
        # Scene content is created slightly after the fade-in begins.
        self.initial_time = Time.now()
        self.should_initialize = True
    def setup_fader(self):
        """
        Start fade in and set variables to fade out
        """
        ScreenFader(fade="in")
        self.should_change_scene = False
        self.should_fade_out = False
        self.change_scene_timer = 0.0
    def update(self):
        """
        Per frame: handle pause toggling, deferred init and scene change.
        """
        if Input.press_space_down:
            # Flip time_scale between 1 and 0 (0 means paused).
            Time.time_scale = (Time.time_scale + 1) % 2
            if Time.time_scale == 0.0:
                self.pause_controller = PauseController()
            else:
                self.pause_controller.destroy_all_text()
                self.pause_controller.destroy_me()
        self.initialize_scene()
        self.change_scene()
    def initialize_scene(self):
        """
        Create all gameplay controllers once, shortly after the scene starts.
        """
        if self.should_initialize and Time.now() - self.initial_time > 0.45:
            self.should_initialize = False
            self.background_particle_controller = BackgroundParticlesController()
            self.player_controller = PlayerController()
            self.obstacle_controller_wrapper = ObstacleControllerWrapper()
            self.items_controller = ItemsControllerWrapper()
            self.score_controller = ScoreController()
    def change_scene(self):
        """
        Fade the screen out, then switch to the next scene.
        """
        if self.should_fade_out:
            ScreenFader(fade="out", fade_duration=self.fade_out_duration)
            self.should_fade_out = False
            self.should_change_scene = True
            self.change_scene_timer = Time.now()
            # Freeze the game while the fade-out plays.
            Time.time_scale = 0
        if self.should_change_scene and Time.now() - self.change_scene_timer > self.fade_out_duration + 0.2:
            Time.time_scale = 1.0
            Scene.change_scene(2)
    def game_over(self):
        """
        Arm the fade-out exactly once when the player dies.
        """
        if not self.should_change_scene:
            self.should_fade_out = True
from game_engine.time import Time | |
from random import randint as rand | |
from pygame.math import Vector2 | |
#Controllers | |
from game.game_objects.controllers.obstacles_controllers.simple_obstacle_controller import SimpleObstacleController | |
from game.game_objects.controllers.obstacles_controllers.middle_rect_obstacle_controller import MiddleRectObstacleController | |
from game.game_objects.controllers.obstacles_controllers.random_x_final_obstacle_controller import RandomXFinalObstacleController | |
from game.game_objects.controllers.obstacles_controllers.rect_translate_x_obstacle_cotroller import RectTranslateXObstacleController | |
from game.game_objects.controllers.obstacles_controllers.two_in_one_simple_obstacle_controller import TwoInOneSimpleObstacleController | |
from game.game_objects.controllers.obstacles_controllers.two_side_by_side_obstacle_controller import TwoSideBySideSimpleObstacleController | |
from game.game_objects.controllers.obstacles_controllers.spinning_middle_rect_obstacle_controller import SpinningMiddleRectObstacleController | |
from game.game_objects.controllers.obstacles_controllers.half_moon_spinning_rect_obstacle_controller import HalfMoonSpinningRectObstacleController | |
from game.game_objects.controllers.obstacles_controllers.invisible_middle_obstacle_controller import InvisibleMiddleObstacleController | |
from game.game_objects.controllers.obstacles_controllers.invisible_simple_obstacle_controller import InvisibleSimpleObstacleController | |
from game_engine.game_object import GameObject | |
from game_engine.basic_objects.text import Text | |
from game_engine.color import Color | |
from game.animations.text_up_fade_out_animation import TextUpFadeOutAnimation | |
from game_engine.components.animator import Animator | |
from game_engine.material import Material | |
from game.scripts.constants import Constants | |
class ObstacleControllerWrapper(GameObject):
    """
    Owns every obstacle generator, spawns obstacles on a timer and ramps the
    game difficulty over time by adding/removing generator types.
    """
    def start(self):
        self.obstacle_generators = [
            SimpleObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
        ]
        self.rect_x_controller = RandomXFinalObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
        # Base interval between obstacle spawns, in milliseconds.
        self.obstacle_geneation_delta = 1500
        # Millisecond timestamp of the last spawn.
        self.last_generation_time = 1000 * Time.now()
        self.game_object_list = []
        self.last_increases_dificculty_time = Time.now()
        self.game_difficuty = 1
        # Seconds between difficulty bumps (grows 3% per level).
        self.time_to_increase_difficult = 6.2
        # Spawn-interval multiplier; shrinks below 1 as difficulty rises.
        self.generation_obstacle_difficult = 1
        self.max_difficult = 10
        self.should_delete_difficulty_text = False
        self.diff_text_gen_time = 0.0
        for obstacle_generator in self.obstacle_generators:
            obstacle_generator.start()
    def update(self):
        if Time.time_scale < 0.5:
            # Adjust timers to new delta
            self.last_generation_time += 1000 * Time.delta_time(True)
            self.last_increases_dificculty_time += Time.delta_time(True)
        self.increase_difficult()
        self.delete_difficulty_text()
        if (1000 * Time.now() - self.last_generation_time) * Time.time_scale > self.obstacle_geneation_delta * \
                self.generation_obstacle_difficult:
            self.generate_random_obstacle()
        # NOTE(review): this extends game_object_list with every generator's
        # current objects each frame, so entries accumulate duplicates over
        # time -- confirm the consumer tolerates that before changing it.
        for obstacle_generator in self.obstacle_generators:
            game_objs = obstacle_generator.game_object_list
            self.game_object_list.extend(game_objs)
    def increase_difficult(self):
        # Every time_to_increase_difficult seconds (up to max_difficult):
        # shorten the spawn interval and swap obstacle generator types in/out.
        if Time.now() - self.last_increases_dificculty_time > self.time_to_increase_difficult \
                and self.game_difficuty < self.max_difficult:
            self.game_difficuty += 1
            self.last_increases_dificculty_time = Time.now()
            self.time_to_increase_difficult *= 1.03
            self.generation_obstacle_difficult = (1 - (self.game_difficuty - 1) * 0.2 / self.max_difficult)
            self.generate_difficulty_text()
            if self.game_difficuty == 2:
                obstacle = MiddleRectObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
            if self.game_difficuty == 3:
                obstacle = TwoInOneSimpleObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
            if self.game_difficuty == 4:
                obstacle = TwoSideBySideSimpleObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
            if self.game_difficuty == 5:
                obstacle = HalfMoonSpinningRectObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
            if self.game_difficuty == 6:
                obstacle = InvisibleMiddleObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
                # Replaces the visible middle-rect generator added at level 2.
                self.delete_object_with_specific_type(MiddleRectObstacleController)
            if self.game_difficuty == 7:
                obstacle = InvisibleSimpleObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
            if self.game_difficuty == 8 and len(self.obstacle_generators) > 3:
                obstacle = RectTranslateXObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
                self.delete_object_with_specific_type(SimpleObstacleController)
                self.delete_object_with_specific_type(TwoInOneSimpleObstacleController)
            if self.game_difficuty == 9:
                obstacle = SpinningMiddleRectObstacleController(Vector2(0, 0), 0, Vector2(0, 0), 0)
                obstacle.start()
                self.obstacle_generators.append(obstacle)
            if self.game_difficuty == 10:
                self.delete_object_with_specific_type(TwoSideBySideSimpleObstacleController)
                self.delete_object_with_specific_type(InvisibleSimpleObstacleController)
                self.delete_object_with_specific_type(HalfMoonSpinningRectObstacleController)
    def generate_difficulty_text(self):
        # Show "HARDER!" (or "MAX DIFFICULTY!" on the last level) as a
        # floating text that rises and fades out.
        title_x = 0.35 * Constants.screen_width
        title_y = 0.3 * Constants.screen_height
        title_size = 50
        text = "HARDER!"
        if self.game_difficuty == self.max_difficult:
            text = "MAX DIFFICULTY!"
            title_size = 28
            title_x = 0.20 * Constants.screen_width
        font_path = "game/assets/fonts/neuropolxrg.ttf"
        diff_text = Text(Vector2(title_x - title_size, title_y), text, Material(Color.red, alpha=255), title_size,
                         font_path)
        diff_text.transform.position.x -= diff_text.text_mesh.size
        diff_text.animation = TextUpFadeOutAnimation(diff_text)
        diff_text.animator = Animator(diff_text, [diff_text.animation])
        diff_text.animator.play()
        self.diff_text = diff_text
        self.diff_text_gen_time = Time.now()
        self.should_delete_difficulty_text = True
    def delete_difficulty_text(self):
        # Remove the floating difficulty text one second after creation.
        if Time.now() - self.diff_text_gen_time > 1.0 and self.should_delete_difficulty_text:
            self.should_delete_difficulty_text = False
            self.diff_text.destroy_me()
    def generate_random_obstacle(self):
        # Pick one of the first game_difficuty generators (capped by how many
        # exist) and ask it for a new obstacle.
        self.last_generation_time = 1000 * Time.now()
        number_of_obstacles = int(min(self.game_difficuty, len(self.obstacle_generators)))
        random_ind = rand(0, number_of_obstacles-1)
        random_obstacle_generator = self.obstacle_generators[random_ind]
        if type(random_obstacle_generator) == RectTranslateXObstacleController:
            # This obstacle type needs the next spawn to come sooner.
            self.last_generation_time -= 300
        if self.game_difficuty == self.max_difficult:
            self.rect_x_controller.generate_obstacle()
        random_obstacle_generator.generate_obstacle()
    def delete_object_with_specific_type(self, obstacle_type):
        # Remove the first generator whose exact type matches obstacle_type.
        for i in range(len(self.obstacle_generators)):
            if type(self.obstacle_generators[i]) == obstacle_type:
                self.obstacle_generators.pop(i)
                break
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from random import randint | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
import math | |
class HalfMoonSpinningRectObstacleController(GameObject):
    """Spawns extra-wide bars anchored to a screen side that spin while falling."""

    def start(self):
        self.fall_velocity = 300  # px/s downward
        self.obstacle_width = 1.3 * Constants.screen_width
        self.obstacle_height = 0.06 * Constants.screen_height
        # Angular speed tuned so a bar completes ~90% of a quarter turn while
        # it crosses the (slightly extended) screen height.
        self.angular_speed = (self.fall_velocity / (1.2 * Constants.screen_height + self.obstacle_height)) * \
            0.9 * math.pi / 2
        self.game_object_list = []

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > 1.2 * Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)

    def fall(self, obstacle):
        """Move the obstacle down one frame step and rotate it."""
        obstacle.transform.position = Vector2(obstacle.transform.position.x,
                                              obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())
        obstacle.transform.rotate(self.angular_speed * Time.delta_time() * obstacle.side)

    def generate_obstacle(self):
        """Create one spinning bar hanging off a random screen side."""
        side = randint(0, 1)
        rect = Rectangle(Vector2(-self.obstacle_width / 2 + Constants.screen_width * side,
                                 -self.obstacle_height),
                         Vector2(self.obstacle_width, self.obstacle_height),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        # Spin direction depends on which side the bar hangs from.
        rect.side = -1 if side == 1 else 1
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game.animations.power_up_fade_out import PowerUpFadeOut | |
from game_engine.components.animator import Animator | |
class InvisibleMiddleObstacleController(GameObject):
    """Spawns a centered bar that fades to invisible while falling."""

    def start(self):
        self.fall_velocity = 400  # px/s downward
        self.game_object_list = []

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)
                # Start the fade once the bar has fallen 15% of the screen.
                if obstacle.visible and obstacle.transform.position.y > 0.15 * Constants.screen_height:
                    self.turn_invisible(obstacle)
                    obstacle.visible = False

    def fall(self, obstacle):
        """Move the obstacle down by one frame step."""
        obstacle.transform.position = Vector2(obstacle.transform.position.x,
                                              obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())

    def turn_invisible(self, game_obj):
        """Start the fade-out animation that hides the obstacle."""
        game_obj.animation = PowerUpFadeOut(game_obj)
        game_obj.animator = Animator(game_obj, [game_obj.animation])
        game_obj.animator.play()

    def generate_obstacle(self):
        """Create one horizontally centered bar just above the screen."""
        self.obstacle_width = 0.3 * Constants.screen_width
        self.obstacle_height = 0.06 * Constants.screen_height
        rect = Rectangle(Vector2(0.5 * Constants.screen_width - 0.5 * self.obstacle_width,
                                 -3 * self.obstacle_height),
                         Vector2(self.obstacle_width, self.obstacle_height),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        rect.visible = True  # becomes False once the fade starts
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from random import randint as rand | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game.animations.power_up_fade_out import PowerUpFadeOut | |
from game_engine.components.animator import Animator | |
class InvisibleSimpleObstacleController(GameObject):
    """Spawns a half-width bar on a random side that fades out while falling."""

    def start(self):
        self.fall_velocity = 300  # px/s downward
        self.game_object_list = []

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)
                # Start the fade once the bar has fallen 15% of the screen.
                if obstacle.visible and obstacle.transform.position.y > 0.15 * Constants.screen_height:
                    self.turn_invisible(obstacle)
                    obstacle.visible = False

    def fall(self, obstacle):
        """Move the obstacle down by one frame step."""
        obstacle.transform.position = Vector2(obstacle.transform.position.x,
                                              obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())

    def turn_invisible(self, game_obj):
        """Start the fade-out animation that hides the obstacle."""
        game_obj.animation = PowerUpFadeOut(game_obj)
        game_obj.animator = Animator(game_obj, [game_obj.animation])
        game_obj.animator.play()

    def generate_obstacle(self):
        """Create one bar on the left or right half of the screen."""
        # rand(0, 1) < 0.5 is True only when rand() == 0: a 50/50 coin flip.
        direction = rand(0, 1) < 0.5
        rect = Rectangle(Vector2(direction * 0.5 * Constants.screen_width + 12,
                                 -0.06 * Constants.screen_height),
                         Vector2(0.45 * Constants.screen_width, 0.06 * Constants.screen_height),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        rect.visible = True  # becomes False once the fade starts
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class MiddleRectObstacleController(GameObject):
    """Spawns a fast, horizontally centered bar that falls straight down."""

    def start(self):
        self.fall_velocity = 600  # px/s downward (fastest controller)
        self.game_object_list = []

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)

    def fall(self, obstacle):
        """Move the obstacle down by one frame step."""
        obstacle.transform.position = Vector2(obstacle.transform.position.x,
                                              obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())

    def generate_obstacle(self):
        """Create one horizontally centered bar just above the screen."""
        self.obstacle_width = 0.3 * Constants.screen_width
        self.obstacle_height = 0.06 * Constants.screen_height
        rect = Rectangle(Vector2(0.5 * Constants.screen_width - 0.5 * self.obstacle_width,
                                 -3 * self.obstacle_height),
                         Vector2(self.obstacle_width, self.obstacle_height),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from random import uniform as randfloat | |
from random import randint | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class RandomXFinalObstacleController(GameObject):
    """Spawns a small square at a random x position that falls straight down."""

    def start(self):
        self.fall_velocity = 400  # px/s downward
        # NOTE: horizontal speed is zero (original source had the literal `00`),
        # so these squares only fall; the wall-bounce logic below is inert.
        self.translate_velocity = 0
        self.game_object_list = []
        self.size = 0.017 * Constants.screen_height

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)

    def fall(self, obstacle):
        """Advance the obstacle one frame, reversing direction at side walls."""
        new_x = obstacle.transform.position.x + self.translate_velocity \
            * Time.delta_time() * obstacle.vel
        if new_x > Constants.screen_width - self.size / 2 \
                or new_x < -self.size / 2:
            obstacle.vel *= -1
        obstacle.transform.position = Vector2(new_x, obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())

    def generate_obstacle(self):
        """Create one square at a random x inside the playable circle span."""
        margin = self.size / 2 + Constants.circCenter_x - Constants.circRadius
        random_pos = int(randfloat(margin, Constants.screen_width - margin))
        rect = Rectangle(Vector2(random_pos, -self.size),
                         Vector2(self.size, self.size),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        # 1 = right, -1 = left (only meaningful if translate_velocity != 0).
        rect.vel = 1 if randint(0, 1) == 1 else -1
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from random import uniform as randfloat | |
from random import randint | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class RectTranslateXObstacleController(GameObject):
    """Spawns a square that falls while sliding sideways, bouncing off walls."""

    def start(self):
        self.fall_velocity = 400       # px/s downward
        self.translate_velocity = 600  # px/s sideways
        self.game_object_list = []
        self.obstacle_size = 0.05 * Constants.screen_height

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)

    def fall(self, obstacle):
        """Advance the obstacle one frame, reversing direction at side walls."""
        new_x = obstacle.transform.position.x + self.translate_velocity \
            * Time.delta_time() * obstacle.vel
        if new_x > Constants.screen_width - self.obstacle_size / 2 \
                or new_x < -self.obstacle_size / 2:
            obstacle.vel *= -1
        obstacle.transform.position = Vector2(new_x, obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())

    def generate_obstacle(self):
        """Create one square at a random x across the whole screen width."""
        # random.uniform accepts its bounds in either order; pass them
        # low-to-high for readability (the original had them reversed).
        random_pos = int(randfloat(-self.obstacle_size / 2 + 1,
                                   Constants.screen_width - self.obstacle_size / 2 - 1))
        rect = Rectangle(Vector2(random_pos, -self.obstacle_size),
                         Vector2(self.obstacle_size, self.obstacle_size),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        rect.vel = 1 if randint(0, 1) == 1 else -1  # 1 = right, -1 = left
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from random import randint as rand | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class SimpleObstacleController(GameObject):
    """Spawns a plain half-width bar on a random side that falls straight down."""

    def start(self):
        self.fall_velocity = 300  # px/s downward
        self.game_object_list = []

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)

    def fall(self, obstacle):
        """Move the obstacle down by one frame step."""
        obstacle.transform.position = Vector2(obstacle.transform.position.x,
                                              obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())

    def generate_obstacle(self):
        """Create one bar on the left or right half of the screen."""
        # rand(0, 1) < 0.5 is True only when rand() == 0: a 50/50 coin flip.
        direction = rand(0, 1) < 0.5
        rect = Rectangle(Vector2(direction * 0.5 * Constants.screen_width + 12,
                                 -0.06 * Constants.screen_height),
                         Vector2(0.45 * Constants.screen_width, 0.06 * Constants.screen_height),
                         Material((255, 255, 255)))
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from random import randint as rand | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game.scripts.constants import Constants | |
from game_engine.material import Material | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class SpinningMiddleRectObstacleController(GameObject):
    """Spawns a centered bar that spins in a random direction while falling."""

    def start(self):
        self.fall_velocity = 280  # px/s downward
        self.angular_speed = 5    # rotation speed multiplier
        self.game_object_list = []

    def update(self):
        # Fix: iterate over a copy — removing from the list while iterating it
        # skips the element that follows each removal.
        # Cull at 1.2x screen height: a rotating bar can still reach into view
        # while its pivot is below the nominal bottom edge.
        for obstacle in list(self.game_object_list):
            if obstacle.transform.position.y > 1.2 * Constants.screen_height:
                self.game_object_list.remove(obstacle)
                obstacle.destroy(obstacle)
                GameObject.destroy(obstacle)
            else:
                self.fall(obstacle)

    def fall(self, obstacle):
        """Move the obstacle down one frame step and rotate it."""
        obstacle.transform.position = Vector2(obstacle.transform.position.x,
                                              obstacle.transform.position.y
                                              + self.fall_velocity * Time.delta_time())
        obstacle.transform.rotate(self.angular_speed * Time.delta_time() * obstacle.direction)

    def generate_obstacle(self):
        """Create one centered bar with a random spin direction."""
        self.obstacle_width = 0.45 * Constants.screen_width
        self.obstacle_height = 0.06 * Constants.screen_height
        rect = Rectangle(Vector2(0.5 * Constants.screen_width - 0.5 * self.obstacle_width,
                                 -self.obstacle_height),
                         Vector2(self.obstacle_width, self.obstacle_height),
                         Material((255, 255, 255)))
        # 50/50 spin direction. (The original computed the bool `rand(0,1) < 0.5`
        # and remapped False to -1; this yields the same 1 / -1 distribution.)
        rect.direction = 1 if rand(0, 1) == 0 else -1
        rect.transform.rotate(0)  # initialize rotation state
        rect.animation = ObstaclePulsingAnimation(rect)
        rect.animator = Animator(rect, [rect.animation])
        rect.animator.play()
        self.game_object_list.append(rect)
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from random import randint as rand | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game_engine.game_object import GameObject | |
from game_engine.material import Material | |
from game.scripts.constants import Constants | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class TwoInOneSimpleObstacleController(GameObject):
    """Spawns two stacked bars; the hidden one bursts ahead mid-screen."""

    def start(self):
        self.fall_velocity = 300       # px/s downward
        self.game_object_list = []     # list of [rect1, rect2] pairs

    def update(self):
        # Fix: iterate over a copy — removing pairs from the list while
        # iterating it skips the pair that follows each removal.
        # (The original also guarded with `len(...) > 0`, which is redundant:
        # iterating an empty list is a no-op.)
        for obstacle_pair in list(self.game_object_list):
            if obstacle_pair[0].transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle_pair)
                for obstacle in obstacle_pair:
                    obstacle.destroy(obstacle)
                    GameObject.destroy(obstacle)
            else:
                self.fall(obstacle_pair)

    def fall(self, obstacle_pair):
        """Advance both bars one frame; the second bar bursts ahead mid-screen."""
        base, hidden = obstacle_pair
        dt = Time.delta_time()
        # Between 10% and 45% of the screen the hidden bar falls 4x faster,
        # revealing it as a separate obstacle.
        in_burst_zone = (0.1 * Constants.screen_height
                         < hidden.transform.position.y
                         < 0.45 * Constants.screen_height)
        hidden_speed = 4 * self.fall_velocity if in_burst_zone else self.fall_velocity
        hidden.transform.position = Vector2(hidden.transform.position.x,
                                            hidden.transform.position.y + hidden_speed * dt)
        base.transform.position = Vector2(base.transform.position.x,
                                          base.transform.position.y + self.fall_velocity * dt)

    def generate_obstacle(self):
        """Create two identical overlapping bars on a random side."""
        # rand(0, 1) < 0.5 is True only when rand() == 0: a 50/50 coin flip.
        direction = rand(0, 1) < 0.5
        obstacle_width = 0.45 * Constants.screen_width
        obstacle_height = 0.06 * Constants.screen_height
        rect1 = Rectangle(Vector2(direction * 0.5 * Constants.screen_width + 12, -obstacle_height),
                          Vector2(obstacle_width, obstacle_height),
                          Material((255, 255, 255)))
        # The second bar starts exactly on top of the first.
        rect2 = Rectangle(Vector2(rect1.transform.position.x, rect1.transform.position.y),
                          Vector2(obstacle_width, obstacle_height),
                          Material((255, 255, 255)))
        for rect in (rect1, rect2):
            rect.animation = ObstaclePulsingAnimation(rect)
            rect.animator = Animator(rect, [rect.animation])
            rect.animator.play()
        self.game_object_list.append([rect1, rect2])
from pygame.math import Vector2 | |
from game_engine.time import Time | |
from random import randint as rand | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.obstacle_rectangle import Rectangle | |
from game_engine.material import Material | |
from game.scripts.constants import Constants | |
from game.animations.obstacle_pulsing_animation import ObstaclePulsingAnimation | |
from game_engine.components.animator import Animator | |
class TwoSideBySideSimpleObstacleController(GameObject):
    """Spawns one bar per screen side; the second bursts ahead mid-screen."""

    def start(self):
        self.fall_velocity = 300       # px/s downward
        self.game_object_list = []     # list of [rect1, rect2] pairs

    def update(self):
        # Fix: iterate over a copy — removing pairs from the list while
        # iterating it skips the pair that follows each removal.
        # (The original also guarded with `len(...) > 0`, which is redundant:
        # iterating an empty list is a no-op.)
        for obstacle_pair in list(self.game_object_list):
            if obstacle_pair[0].transform.position.y > Constants.screen_height:
                self.game_object_list.remove(obstacle_pair)
                for obstacle in obstacle_pair:
                    obstacle.destroy(obstacle)
                    GameObject.destroy(obstacle)
            else:
                self.fall(obstacle_pair)

    def fall(self, obstacle_pair):
        """Advance both bars one frame; the second bar bursts ahead mid-screen."""
        base, partner = obstacle_pair
        dt = Time.delta_time()
        # Between 10% and 45% of the screen the partner bar falls 5x faster,
        # pulling ahead of its side-by-side sibling.
        in_burst_zone = (0.1 * Constants.screen_height
                         < partner.transform.position.y
                         < 0.45 * Constants.screen_height)
        partner_speed = 5.0 * self.fall_velocity if in_burst_zone else self.fall_velocity
        partner.transform.position = Vector2(partner.transform.position.x,
                                             partner.transform.position.y + partner_speed * dt)
        base.transform.position = Vector2(base.transform.position.x,
                                          base.transform.position.y + self.fall_velocity * dt)

    def generate_obstacle(self):
        """Create two bars, one on each side of the screen."""
        # rand(0, 1) < 0.5 is True only when rand() == 0: a 50/50 coin flip.
        direction = rand(0, 1) < 0.5
        obstacle_width = 0.45 * Constants.screen_width
        obstacle_height = 0.06 * Constants.screen_height
        rect1 = Rectangle(Vector2(direction * 0.5 * Constants.screen_width + 12, -obstacle_height),
                          Vector2(obstacle_width, obstacle_height),
                          Material((255, 255, 255)))
        # Place the second bar on the opposite half of the screen.
        rect2_x = rect1.transform.position.x - 12
        if rect2_x == 0:
            rect2_x = 0.5 * Constants.screen_width + 12
        else:
            rect2_x = 0.0 + 12
        rect2 = Rectangle(Vector2(rect2_x, rect1.transform.position.y),
                          Vector2(obstacle_width, obstacle_height),
                          Material((255, 255, 255)))
        for rect in (rect1, rect2):
            rect.animation = ObstaclePulsingAnimation(rect)
            rect.animator = Animator(rect, [rect.animation])
            rect.animator.play()
        self.game_object_list.append([rect1, rect2])
from random import randint as rand | |
from game.game_objects.mesh_objects.main_menu_rectangle import Rectangle | |
from game_engine.scene import Scene | |
from game_engine.game_object import GameObject | |
from game_engine.input import Input | |
from game_engine.color import Color | |
from game_engine.time import Time | |
from game_engine.basic_objects.text import Text | |
from game_engine.material import Material | |
from pygame.math import Vector2 | |
from game.scripts.constants import Constants | |
from game.game_objects.mesh_objects.screen_fader import ScreenFader | |
from game.game_objects.controllers.background_particles_controller import BackgroundParticlesController | |
class PauseController(GameObject):
    """Draws the pause overlay: title, hint text and a dimming mask."""

    def start(self):
        """Called when the object is instantiated on the scene."""
        self.time = Time.now()
        self.period = 1.5
        font_path = "game/assets/fonts/neuropolxrg.ttf"
        self.game_object_list = [
            Text(Vector2(20, 180), "PAUSED", Material(Color.red), 50, font_path),
            Text(Vector2(15, 300), "Press space to keep playing", Material(Color.white), 14, font_path),
            Rectangle(Vector2(0, 0), Vector2(Constants.screen_width, Constants.screen_height), Material(Color.mask))
        ]
        # The dimming mask must not participate in collisions.
        self.game_object_list[2].collidable = False

    def destroy_all_text(self):
        """Remove every overlay element from the scene."""
        for element in self.game_object_list:
            element.destroy_me()
from game_engine.input import Input | |
from game_engine.time import Time | |
from game.game_objects.mesh_objects.player_circle import PlayerCircle | |
from pygame.math import Vector2 | |
from game_engine.material import Material | |
from game_engine.game_object import GameObject | |
from game.scripts.constants import Constants | |
from game_engine.color import Color | |
import math | |
class PlayerController(GameObject):
    """Rotates the two player circles around the arena circle via arrow keys."""

    def start(self):
        self.angle = 0.0
        self.angularSpeed = 5.0  # radians per second
        self.game_object_list = [
            PlayerCircle(Vector2(Constants.circCenter_x + Constants.circRadius, Constants.screen_height + 15), 15, Material(Color.blue, alpha=240)),
            PlayerCircle(Vector2(Constants.circCenter_x - Constants.circRadius, Constants.screen_height + 15), 15, Material(Color.orange, alpha=240))
        ]
        self.in_initial_animation = True
        self.should_play = True
        self.initial_time = Time.now()

    def update(self):
        self.initial_animation()
        if self.in_initial_animation:
            return  # ignore input until the entrance animation finishes
        if Input.is_pressing_left:
            self.turn_left()
        if Input.is_pressing_right:
            self.turn_right()

    def initial_animation(self):
        """Play the entrance animation once, then release control after 1 s."""
        if not self.in_initial_animation:
            return
        if self.should_play:
            self.should_play = False
            for circle in self.game_object_list:
                circle.animator.play()
        if Time.now() - self.initial_time > 1.0:
            self.in_initial_animation = False

    def turn_right(self):
        """Rotate the player pair clockwise."""
        self._rotate(self.angularSpeed)

    def turn_left(self):
        """Rotate the player pair counter-clockwise."""
        self._rotate(-self.angularSpeed)

    def _rotate(self, speed):
        """Advance the angle by speed*dt, wrapped to [0, 2*pi), and reposition."""
        self.angle = (self.angle + speed * Time.delta_time()) % (2 * math.pi)
        self.update_circles()

    def update_circles(self):
        """Place the two circles diametrically opposite on the arena circle."""
        for index, circle in enumerate(self.game_object_list):
            theta = self.angle + index * math.pi  # second circle is offset by pi
            circle.transform.translate(
                Vector2(Constants.circCenter_x + Constants.circRadius * math.cos(theta),
                        Constants.circCenter_y + Constants.circRadius * math.sin(theta)))
from random import randint as rand | |
from game.game_objects.mesh_objects.main_menu_rectangle import Rectangle | |
from game_engine.scene import Scene | |
from game_engine.game_object import GameObject | |
from game_engine.input import Input | |
from game_engine.color import Color | |
from game_engine.time import Time | |
from game_engine.basic_objects.text import Text | |
from game_engine.material import Material | |
from pygame.math import Vector2 | |
from game.scripts.constants import Constants | |
from game.game_objects.mesh_objects.screen_fader import ScreenFader | |
from game.game_objects.controllers.background_particles_controller import BackgroundParticlesController | |
class RetryController(GameObject):
    """Game-over screen: shows the final score and restarts on a key press."""

    def start(self):
        """Called when the object is instantiated on the scene."""
        self.time = Time.now()
        self.period = 1.5
        font_path = "game/assets/fonts/neuropolxrg.ttf"
        score = str(int(Constants.current_score))
        self.game_object_list = [
            Text(Vector2(20, 180), "You died", Material(Color.red), 50, font_path),
            Text(Vector2(30, 240), "Score: " + score, Material(Color.white), 28, font_path),
            Text(Vector2(15, 300), "Press arrows keys to try again", Material(Color.white), 14, font_path)
        ]
        self.setup_fader()
        BackgroundParticlesController()

    def setup_fader(self):
        """Start the fade-in and reset the fade-out state flags."""
        ScreenFader(fade="in")
        self.should_timer = False
        self.should_change_scene = False
        self.can_press_button = True

    def update(self):
        """Called every frame: handles spawning and the fade-out/restart flow."""
        if self.should_spawn():
            self.spawn_block()
        if self.can_press_button and self.pressed_button():
            self.should_timer = True
            self.can_press_button = False  # accept the button only once
        if self.should_timer:
            ScreenFader(fade="out")
            self.timer = Time.now()
            self.should_timer = False
            self.should_change_scene = True
        if self.should_change_scene and Time.now() - self.timer > 0.68:
            Scene.change_scene(1)

    def spawn_block(self):
        """Decorative block spawning is disabled on this screen; kept as a hook."""

    def generate_random_parameters(self):
        """Return (x, y, width, height, color) for a random decorative block."""
        width = rand(20, 100)
        height = rand(10, 90)
        color = Color.random_color()
        position_x = rand(10, Constants.screen_width - width - 10)
        position_y = -height
        return position_x, position_y, width, height, color

    def pressed_button(self):
        """Return True when any restart key (arrows or space) is pressed."""
        return Input.is_pressing_right or Input.is_pressing_left or Input.is_pressing_space

    def should_spawn(self):
        """Return True once per period; resets the timer when it fires."""
        if Time.now() - self.time > self.period:
            self.time = Time.now()
            return True
        return False
from game.scripts.constants import Constants | |
from game_engine.basic_objects.text import Text | |
from game_engine.game_object import GameObject | |
from game_engine.color import Color | |
from pygame.math import Vector2 | |
from game_engine.material import Material | |
from game_engine.time import Time | |
class ScoreController(GameObject):
    """Keeps the running score and renders it in the top-left corner."""

    def start(self):
        self.time_to_update_score = 0.095  # scaled seconds between score ticks
        self.score_per_step = 1            # score added per tick
        self.last_update_time = Time.now()
        self.score = 0.0
        self.game_object_list = [
            Text(Vector2(10.0, 5.0), str(int(self.score)), Material(Color.white), 15,
                 "game/assets/fonts/neuropolxrg.ttf")
        ]
        self.game_object_list[0].text_mesh.message = str(int(self.score))

    def update(self):
        # Respect the global time scale so the score pauses with the game.
        elapsed = (Time.now() - self.last_update_time) * Time.time_scale
        if elapsed >= self.time_to_update_score:
            self.score += self.score_per_step
            self.last_update_time = Time.now()
            Constants.current_score = self.score
            self.game_object_list[0].text_mesh.message = str(int(self.score))
from random import randint as rand | |
from game_engine.game_object import GameObject | |
from game.game_objects.mesh_objects.main_menu_rectangle import Rectangle | |
from game_engine.color import Color | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
from game_engine.material import Material | |
class TestRectGenerator(GameObject):
    """Debug helper: spawns a random rectangle once per period."""

    def start(self):
        """Called when the object is instantiated on the scene."""
        self.time = Time.now()
        self.period = 1  # seconds between spawns

    def update(self):
        """Called every frame."""
        if self.should_spawn():
            self.spawn_block()

    def spawn_block(self):
        """Spawn one randomly sized and colored block above the screen."""
        x, y, width, height, color = self.generate_random_parameters()
        Rectangle(Vector2(x, y), Vector2(width, height), Material(color))

    def generate_random_parameters(self):
        """Return (x, y, width, height, color) for a random block."""
        # Fix: this module never imported Constants, so referencing it raised
        # NameError at runtime; import it locally here.
        from game.scripts.constants import Constants
        width = rand(20, 100)
        height = rand(10, 90)
        color = Color.random_color()
        position_x = rand(10, Constants.screen_width - width - 10)
        position_y = -height
        return position_x, position_y, width, height, color

    def should_spawn(self):
        """Return True once per period; resets the timer when it fires."""
        if Time.now() - self.time > self.period:
            self.time = Time.now()
            return True
        return False
from game.game_objects.mesh_objects.particle import Particle | |
from game_engine.components.particle_system import ParticleSystem | |
from game_engine.components.physics import Physics | |
from game_engine.game_object import GameObject | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
class DieEffect(GameObject):
    """Short-lived radial particle burst played where the player died."""

    def __init__(self, position, material, radius, inst_vel):
        """Create the emitter at `position` with the player's last velocity."""
        super().__init__(position, 0, Vector2(1, 1), 2)
        self.physics = Physics(self)
        self.inst_vel = inst_vel
        self.material = material
        self.radius = radius

    def start(self):
        """Configure and fire a one-shot circular particle emission."""
        self.physics.inst_velocity = self.inst_vel
        self.particle_system = ParticleSystem(
            self, Particle,
            quant=15, period=0.01, vel_min=30, vel_max=130,
            duration=0.9, gravity=130, layer=10,
            inherit_vel=True, inherit_vel_mult=0.5,
            unscaled=True, num_of_periods=1)
        self.particle_system.set_circ_gen(
            self.transform.position, radius=self.radius, mode="radial",
            direct_met=self.direct_met,
            ini_angle_met=self.ini_angle_met,
            fin_angle_met=self.fin_angle_met)
        self.particle_system.play()
        self.spawn_time = Time.now()

    def update(self):
        """Self-destruct shortly after spawning; particles outlive the emitter."""
        self.physics.inst_velocity = self.inst_vel
        if Time.now() - self.spawn_time > 0.01:
            self.destroy_me()

    def ini_angle_met(self):
        """Emission arc start angle in degrees."""
        return 0

    def fin_angle_met(self):
        """Emission arc end angle in degrees (full circle)."""
        return 360

    def direct_met(self):
        """Base emission direction (straight up)."""
        return Vector2(0, -1)
from game.game_objects.mesh_objects.particle import Particle | |
from game_engine.components.particle_system import ParticleSystem | |
from game_engine.game_object import GameObject | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
class GetPowerUpEffect(GameObject):
    """One-shot particle burst played when the player collects a power-up."""

    def __init__(self, position, material):
        """Create the emitter game object at `position`."""
        super().__init__(position, 0, Vector2(1, 1), 2)
        self.material = material

    def start(self):
        """Configure and fire a radial particle emission."""
        self.particle_system = ParticleSystem(
            self, Particle,
            quant=30, period=0.02, vel_min=40, vel_max=80,
            duration=2, gravity=98, layer=10)
        self.particle_system.set_circ_gen(
            self.transform.position, 1, mode="radial",
            direct_met=self.direct_met,
            ini_angle_met=self.ini_angle_met,
            fin_angle_met=self.fin_angle_met)
        self.particle_system.play()
        self.spawn_time = Time.now()

    def update(self):
        """Self-destruct shortly after firing; particles outlive the emitter."""
        if Time.now() - self.spawn_time > 0.03:
            self.destroy_me()

    def ini_angle_met(self):
        """Emission arc start angle in degrees."""
        return 0

    def fin_angle_met(self):
        """Emission arc end angle in degrees (full circle)."""
        return 360

    def direct_met(self):
        """Base emission direction (straight up)."""
        return Vector2(0, -1)
from game_engine.basic_objects.basic_circle import BasicCircle | |
from game_engine.components.circle_collider import CircleCollider | |
from game_engine.components.particle_system import ParticleSystem | |
from game.game_objects.mesh_objects.particle import Particle | |
from game.animations.power_up_fade_out import PowerUpFadeOut | |
from game_engine.components.animator import Animator | |
from game.animations.litter_bounce import LitterBounce | |
from game_engine.collider import Collider | |
from game_engine.time import Time | |
from pygame.math import Vector2 | |
class InvencibleCircle(BasicCircle):
    """Collectible circle power-up with an idle particle trail and fade-out death."""
    # Todo: create a power_up class that is superclass of invencible circle and star

    def __init__(self, position, radius, material):
        super(InvencibleCircle, self).__init__(position, radius, material, layer=-2)
        self.circle_collider = CircleCollider(self)
        # Flipped by die(); update() then waits for the fade-out to finish.
        self.should_die = False

    def start(self):
        """Attach the idle particle emitter plus bounce and fade-out animations."""
        self.particle_system = ParticleSystem(
            self, Particle,
            quant=1, period=0.15,
            vel_min=30, vel_max=60,
            duration=0.8, gravity=98,
            inherit_vel=True)
        self.particle_system.set_circ_gen(
            self.transform.position,
            self.circle_mesh.get_radius(),
            mode="radial",
            direct_met=self.direct_met,
            ini_angle_met=self.ini_angle_met,
            fin_angle_met=self.fin_angle_met)
        self.particle_system.play()
        self.animation = LitterBounce(self)
        self.animator = Animator(self, [self.animation, PowerUpFadeOut(self)])
        self.animator.play()

    def die(self):
        """Disable collisions, start the fade-out and schedule destruction."""
        # TODO: change how collider works: dont use the collider list
        Collider.remove(self)
        self.circle_collider = None
        self.animator.play_next_animation()
        self.should_die = True
        self.die_time = Time.now()

    def update(self):
        # Destroy 0.4s after die() so the fade-out animation can complete.
        if self.should_die and Time.now() - self.die_time > 0.4:
            self.destroy_me()

    def ini_angle_met(self):
        # Particles are emitted over the 150..390 degree arc.
        return 150

    def fin_angle_met(self):
        return 390

    def direct_met(self):
        # NOTE(review): presumably ignored in "radial" mode — confirm.
        return Vector2(0, -1)
from game_engine.basic_objects.basic_rectangle import BasicRectangle | |
from game_engine.components.polygon_collider import PolygonCollider | |
from game_engine.components.particle_system import ParticleSystem | |
from game.game_objects.mesh_objects.particle import Particle | |
class Rectangle(BasicRectangle):
    """Collidable rectangle that sheds particles along one of its edges."""

    def __init__(self, position, dimension, material, layer=0):
        super(Rectangle, self).__init__(position, dimension, material, layer=layer)
        self.dimension = dimension
        self.polygon_collider = PolygonCollider(self)
        # quant is per unit of edge length (quant_proport_to_len=True).
        self.particle_system = ParticleSystem(
            self, Particle,
            quant=0.004, quant_proport_to_len=True,
            period=0.04,
            vel_min=0, vel_max=100, duration=0.5,
            spawn_prob="parab", vel_prob="parab",
            inherit_vel=True, inherit_vel_mult=1)
        # Emit along the segment between polygon vertices 3 and 0.
        self.particle_system.set_line_gen(self.fin_point_met, self.ini_point_met)
        self.particle_system.play()

    def ini_point_met(self):
        # First vertex of the polygon mesh.
        return self.polygon_mesh.get_points()[0]

    def fin_point_met(self):
        # Fourth vertex of the polygon mesh.
        return self.polygon_mesh.get_points()[3]

    # Todo: make line directional on particle system:
    # def direct_met(self):
    #     return Vector2(0, -1)
from game_engine.basic_objects.basic_particle_circ import BasicParticleCirc | |
from game.animations.particle_fade_animation import ParticleFadeAnimation | |
from game_engine.material import Material | |
from game_engine.components.animator import Animator | |
from game_engine.time import Time | |
class Particle(BasicParticleCirc):
    """A single fading particle spawned by a ParticleSystem."""

    def __init__(self, position):
        # One-shot flag: the creator's material is adopted on the first update().
        self.change = True
        super().__init__(position)

    def start(self):
        """Start a fade animation matched to the owning system's duration."""
        self.animation = ParticleFadeAnimation(self, self.creator_obj.particle_system.duration)
        self.animator = Animator(self, [self.animation])
        self.animator.play()

    def update(self):
        if self.change:
            # Lazily copy the creator's color exactly once.
            self.change = False
            self.material = Material(self.creator_obj.material.color)
        if Time.now() - self.creation_time > self.destroy_time:
            self.destroy_me()
from game_engine.basic_objects.basic_circle import BasicCircle | |
from game_engine.basic_objects.basic_rectangle import BasicRectangle | |
from game.game_objects.mesh_objects.star import Star | |
from game.animations.circle_player_initial_animation import CirclePlayerInitialAnimation | |
from game_engine.components.particle_system import ParticleSystem | |
from game.game_objects.mesh_objects.particle import Particle | |
from game_engine.components.animator import Animator | |
from game_engine.game_object import GameObject | |
from game_engine.components.circle_collider import CircleCollider | |
from game.game_objects.mesh_objects.get_power_up_effect import GetPowerUpEffect | |
from game.game_objects.mesh_objects.die_effect import DieEffect | |
from game_engine.components.physics import Physics | |
from game.animations.player_bounce import PlayerBounce | |
from game_engine.color import Color | |
from pygame import mixer | |
from pygame.math import Vector2 | |
class PlayerCircle(BasicCircle):
    """The player ball: dispatches collisions to obstacles, stars and power-ups."""

    def __init__(self, position, radius, material):
        super(PlayerCircle, self).__init__(position, radius, material, layer=-2)
        self.circle_collider = CircleCollider(self)
        self.is_invencible = False
        # Guard so die() runs its effects only once.
        self.is_not_dying = True

    def start(self):
        """Cache controller references, load the death sound, start the trail."""
        self.physics = Physics(self)
        self.star_score_controller = GameObject.find_by_type("StarScoreController")[0]
        self.main_scene_controller = GameObject.find_by_type("MainSceneController")[0]
        self.invencible_power_up_controller = GameObject.find_by_type("InvenciblePowerUpController")[0]
        self.animation = CirclePlayerInitialAnimation(self)
        self.animator = Animator(self, [self.animation])
        self.death_sound = mixer.Sound('game/assets/soundtrack/ball_death_01.ogg')
        self.particle_system = ParticleSystem(
            self, Particle, quant=5, period=0.07,
            vel_min=30, vel_max=200, duration=0.5,
            inherit_vel=True, inherit_vel_mult=-0.7)
        self.particle_system.set_circ_gen(
            self.transform.position, self.circle_mesh.get_radius(), mode="directional",
            direct_met=self.direct_met, ini_angle_met=self.ini_angle_met,
            fin_angle_met=self.fin_angle_met)
        self.particle_system.play()

    def ini_angle_met(self):
        # The emission arc follows the current velocity direction.
        return 0 + Vector2(1, 0).angle_to(self.physics.inst_velocity)

    def fin_angle_met(self):
        return 180 + Vector2(1, 0).angle_to(self.physics.inst_velocity)

    def direct_met(self):
        return Vector2(0, 1)

    def update(self):
        self.check_collision()

    def check_collision(self):
        """Dispatch on what the player hit: obstacle, star, or power-up circle."""
        (collided, game_obj) = self.circle_collider.on_collision()
        if not collided:
            return
        if issubclass(type(game_obj), BasicRectangle) and not self.is_invencible and game_obj.collidable:
            # Solid obstacle while vulnerable: game over.
            self.main_scene_controller.game_over()
            self.die()
        elif issubclass(type(game_obj), Star):
            # Star collected: effect, remove the star, add score.
            GetPowerUpEffect(position=game_obj.transform.position, material=game_obj.material)
            game_obj.die()
            self.star_score_controller.get_star()
        elif issubclass(type(game_obj), BasicCircle):
            # Any other circle is the invincibility power-up.
            GetPowerUpEffect(position=game_obj.transform.position, material=game_obj.material)
            game_obj.die()
            self.invencible_power_up_controller.get_power_up()

    def die(self):
        """Play the death sound and particle burst once, then hide the player."""
        if not self.is_not_dying:
            return
        self.death_sound.play()
        self.is_not_dying = False
        self.particle_system.stop()
        inst_vel = self.physics.inst_velocity
        r = self.circle_mesh.get_radius()
        # Seven concentric bursts growing from radius 1 up to 1 + r.
        for i in range(7):
            DieEffect(self.transform.position, self.material, 1 + r * i / 6, inst_vel=inst_vel)
        self.material.alpha = 0
from game_engine.basic_objects.basic_rectangle import BasicRectangle | |
from game_engine.components.polygon_collider import PolygonCollider | |
class Rectangle(BasicRectangle):
    """Plain collidable rectangle with a polygon collider and no effects."""

    def __init__(self, position, dimension, material, layer=0):
        super(Rectangle, self).__init__(position, dimension, material, layer=layer)
        self.dimension = dimension
        self.polygon_collider = PolygonCollider(self)
from game_engine.basic_objects.basic_rectangle import BasicRectangle | |
from game_engine.components.animation import Animation | |
from game_engine.components.animator import Animator | |
from game_engine.key_frame import KeyFrame | |
from pygame.math import Vector2 | |
from game_engine.engine import Engine | |
from game_engine.material import Material | |
from game_engine.game_object import GameObject | |
from game_engine.color import Color | |
from game_engine.time import Time | |
class ScreenFader(BasicRectangle):
    """Full-screen black overlay that fades the whole screen in or out."""

    def __init__(self, fade="in", fade_duration=0.7):
        """
        Build the overlay, opaque for a fade-in, transparent for a fade-out.
        :param fade: "in" to fade from black; any other value fades to black
        :param fade_duration: length of the fade animation in seconds
        """
        self.fade = fade
        self.fade_duration = fade_duration
        # A fade-in starts fully opaque (255) and reveals the scene;
        # a fade-out starts fully transparent (0) and covers it.
        alp = 255 if fade == "in" else 0
        super().__init__(Vector2(0, 0), Vector2(Engine.screen_width, Engine.screen_height),
                         Material(Color.black, alpha=alp), 1000)

    def start(self):
        """
        Create an animation that fades the entire screen,
        pass it to an animator and play it.
        """
        key_frames = list()
        if self.fade == "in":
            key_frames.append(KeyFrame(0.0, alpha=255))
            key_frames.append(KeyFrame(self.fade_duration, alpha=0))
        else:
            key_frames.append(KeyFrame(0.0, alpha=0))
            key_frames.append(KeyFrame(self.fade_duration, alpha=255))
        # Bug fix: pass the boolean True rather than the string "True" so
        # the unscaled-time flag compares correctly inside the engine.
        self.animation = Animation(self, key_frames, should_loop=False, unscaled=True)
        self.animator = Animator(self, animation_list=[self.animation])
        self.animator.play()
        self.creation_time = Time.now()

    def update(self):
        """Destroy the fader once the animation has had ample time to finish."""
        if Time.now() - self.creation_time > self.fade_duration * 2:
            GameObject.destroy(self)
from game_engine.components.polygon_mesh import PolygonMesh | |
from game_engine.components.circle_mesh import CircleMesh | |
from game_engine.components.circle_collider import CircleCollider | |
from game_engine.components.particle_system import ParticleSystem | |
from game.game_objects.mesh_objects.particle import Particle | |
from game.animations.power_up_fade_out import PowerUpFadeOut | |
from game_engine.components.animator import Animator | |
from game.animations.litter_bounce import LitterBounce | |
from game_engine.collider import Collider | |
from game_engine.game_object import GameObject | |
from game_engine.time import Time | |
from game_engine.geometry import Geometry | |
from pygame.math import Vector2 | |
import math | |
class Star(GameObject):
    """Collectible star: a 10-vertex star polygon that falls, spins and fades out."""

    def __init__(self, center_position, radius, material):
        """
        Add the mesh and collider components and call the superclass constructor.
        :param center_position: Vector2 center of the star
        :param radius: radius of the circumscribing circle
        :param material: material used to draw and tint the star
        """
        super(Star, self).__init__(center_position, 0, Vector2(1, 1), 2)
        self.material = material
        self.circle_collider = CircleCollider(self)
        self.circle_mesh = CircleMesh(self, radius)
        self.polygon_mesh = PolygonMesh(self)
        # Flipped by die(); update() then waits for the fade-out to finish.
        self.should_die = False

    def _get_points(self):
        """
        Return the star's 10 vertices: 5 outer points at the full radius
        alternating with 5 inner points at half radius, every 36 degrees,
        rotated around the center by the current transform rotation.
        """
        point_list = list()
        angle = math.pi / 2 + math.pi  # start pointing straight up
        for i in range(5):
            # Outer vertex at full radius.
            point_list.append(Vector2(self.transform.position.x + self.circle_mesh.get_radius() * math.cos(angle),
                                      self.transform.position.y + self.circle_mesh.get_radius() * math.sin(angle)))
            angle = angle + 36 * math.pi / 180
            # Inner vertex at half radius.
            point_list.append(Vector2(self.transform.position.x + self.circle_mesh.get_radius()/2 * math.cos(angle),
                                      self.transform.position.y + self.circle_mesh.get_radius()/2 * math.sin(angle)))
            angle = angle + 36 * math.pi / 180
        # Bug fix: rotate ALL 10 vertices (the original rotated only the
        # first 5, so the star deformed instead of spinning as a whole).
        for i in range(len(point_list)):
            point = point_list[i]
            point_list[i] = Geometry.rotate_point(Vector2(self.transform.position.x, self.transform.position.y),
                                                  point, self.transform.rotation)
        return point_list

    def fall(self, distance, angular_distance):
        """Move the star down by *distance* pixels and spin it by *angular_distance*."""
        # NOTE(review): translate() receives the current position plus the
        # offset — this assumes translate() sets an absolute position; confirm.
        self.transform.translate(Vector2(self.transform.position.x, self.transform.position.y + distance))
        self.transform.rotate(angular_distance)

    def die(self):
        """Disable collisions, start the fade-out and schedule destruction."""
        # TODO: change how collider works: dont use the collider list
        Collider.remove(self)
        self.circle_collider = None  # duplicate assignment removed
        self.animator.play_next_animation()
        self.should_die = True
        self.die_time = Time.now()

    def start(self):
        """Attach the idle particle emitter plus bounce and fade-out animations."""
        self.particle_system = ParticleSystem(self, Particle, quant=1, period=0.15, vel_min=30, vel_max=60,
                                              duration=0.8, gravity=98, inherit_vel=True)
        self.particle_system.set_circ_gen(self.transform.position, self.circle_mesh.get_radius(), mode="radial",
                                          direct_met=self.direct_met, ini_angle_met=self.ini_angle_met,
                                          fin_angle_met=self.fin_angle_met)
        self.particle_system.play()
        self.animation = LitterBounce(self)
        self.animator = Animator(self, [self.animation, PowerUpFadeOut(self)])
        self.animator.play()

    def update(self):
        # Destroy 0.4s after die() so the fade-out animation can complete.
        if self.should_die:
            if Time.now() - self.die_time > 0.4:
                self.destroy_me()

    def ini_angle_met(self):
        # Particles are emitted over the 150..390 degree arc.
        return 150

    def fin_angle_met(self):
        return 390

    def direct_met(self):
        # NOTE(review): presumably ignored in "radial" mode — confirm.
        return Vector2(0, -1)
from game_engine.basic_objects.basic_circle import BasicCircle | |
from game_engine.components.circle_collider import CircleCollider | |
class StarCircle(BasicCircle):
    """Circle with a collider that performs no per-frame logic of its own."""

    def __init__(self, position, radius, material):
        super(StarCircle, self).__init__(position, radius, material, layer = -1)
        self.circle_collider = CircleCollider(self)

    def start(self):
        # No setup needed beyond the constructor.
        pass

    def update(self):
        # Nothing to do per frame.
        pass
from game_engine.scene import Scene | |
from game.game_objects.controllers.main_scene_controller import MainSceneController | |
class MainScene(Scene):
    """Gameplay scene; its only initial controller is MainSceneController."""

    def __init__(self):
        """Build the controller-class list and pass it to the Scene constructor."""
        controllers = [MainSceneController]
        self.init_game_objects_controllers_reference_list = controllers
        super(MainScene, self).__init__(self.init_game_objects_controllers_reference_list)
from game_engine.scene import Scene | |
from game.game_objects.controllers.retry_controller import RetryController | |
class RetryScene(Scene):
    """Retry/game-over scene; its only initial controller is RetryController."""

    def __init__(self):
        """Build the controller-class list and pass it to the Scene constructor."""
        controllers = [RetryController]
        self.init_game_objects_controllers_reference_list = controllers
        super(RetryScene, self).__init__(self.init_game_objects_controllers_reference_list)
class Constants:
    """Shared numeric constants used across the game."""
    # Window size in pixels.
    screen_width = 360
    screen_height = 640
    # Center (x, y) and radius of the main circle — presumably the
    # play-area circle the game is built around; confirm against usage.
    circCenter_x = 180
    circCenter_y = 520
    circRadius = 95
    # NOTE(review): a current_score also exists on Global — confirm which
    # one the controllers actually read.
    current_score = 0
from game.scripts.scenes_controller_script import ScenesControllerScript | |
from game.scripts.constants import Constants | |
class GameSettings:
    """Top-level engine configuration for this game."""
    # Window title / game name.
    game_name = "Balance"
    # Screen dimensions mirrored from the shared Constants class.
    screen_width = Constants.screen_width
    screen_height = Constants.screen_height
    # Evaluated once at import time: ordered list of scene class references.
    scenes_list = ScenesControllerScript.get_scenes()
class Global:
    """Mutable game-wide state shared across scenes."""
    # Presumably the running score for the current game — confirm against
    # the controllers that read/write it.
    current_score = 0
    # Difficulty level, starting at 0.
    difficulty = 0
# class Material: | |
# | |
# def __init__(self, color, alpha=None): | |
# self.color = color | |
# self.alpha = alpha |
from game.scenes.main_scene import MainScene | |
from game.scenes.main_menu import MainMenu | |
from game.scenes.retry_scene import RetryScene | |
class ScenesControllerScript:
    """Central registry of the game's scene classes."""

    @classmethod
    def get_scenes(cls):
        """
        :return: the scene list with the references to the scenes classes
        """
        scene_classes = [MainMenu, MainScene, RetryScene]
        return scene_classes
[console_scripts] | |
f2py = numpy.f2py.f2py2e:main | |
f2py3 = numpy.f2py.f2py2e:main | |
f2py3.7 = numpy.f2py.f2py2e:main | |
pip |
Copyright (c) 2005-2019, NumPy Developers. | |
All rights reserved. | |
Redistribution and use in source and binary forms, with or without | |
modification, are permitted provided that the following conditions are | |
met: | |
* Redistributions of source code must retain the above copyright | |
notice, this list of conditions and the following disclaimer. | |
* Redistributions in binary form must reproduce the above | |
copyright notice, this list of conditions and the following | |
disclaimer in the documentation and/or other materials provided | |
with the distribution. | |
* Neither the name of the NumPy Developers nor the names of any | |
contributors may be used to endorse or promote products derived | |
from this software without specific prior written permission. | |
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
The NumPy repository and source distributions bundle several libraries that are | |
compatibly licensed. We list these here. | |
Name: Numpydoc | |
Files: doc/sphinxext/numpydoc/* | |
License: 2-clause BSD | |
For details, see doc/sphinxext/LICENSE.txt | |
Name: scipy-sphinx-theme | |
Files: doc/scipy-sphinx-theme/* | |
License: 3-clause BSD, PSF and Apache 2.0 | |
For details, see doc/scipy-sphinx-theme/LICENSE.txt | |
Name: lapack-lite | |
Files: numpy/linalg/lapack_lite/* | |
License: 3-clause BSD | |
For details, see numpy/linalg/lapack_lite/LICENSE.txt | |
Name: tempita | |
Files: tools/npy_tempita/* | |
License: BSD derived | |
For details, see tools/npy_tempita/license.txt | |
Name: dragon4 | |
Files: numpy/core/src/multiarray/dragon4.c | |
License: One of a kind | |
For license text, see numpy/core/src/multiarray/dragon4.c | |
---- | |
This binary distribution of NumPy also bundles the following software: | |
Name: GCC runtime library | |
Files: .dylibs/* | |
Description: dynamically linked to files compiled with gcc | |
Availability: https://gcc.gnu.org/viewcvs/gcc/ | |
License: GPLv3 + runtime exception | |
Copyright (C) 2002-2017 Free Software Foundation, Inc. | |
Libgfortran is free software; you can redistribute it and/or modify | |
it under the terms of the GNU General Public License as published by | |
the Free Software Foundation; either version 3, or (at your option) | |
any later version. | |
Libgfortran is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
Under Section 7 of GPL version 3, you are granted additional | |
permissions described in the GCC Runtime Library Exception, version | |
3.1, as published by the Free Software Foundation. | |
You should have received a copy of the GNU General Public License and | |
a copy of the GCC Runtime Library Exception along with this program; | |
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
<http://www.gnu.org/licenses/>. | |
---- | |
Full text of license texts referred to above follows (that they are | |
listed below does not necessarily imply the conditions apply to the | |
present binary release): | |
---- | |
GCC RUNTIME LIBRARY EXCEPTION | |
Version 3.1, 31 March 2009 | |
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> | |
Everyone is permitted to copy and distribute verbatim copies of this | |
license document, but changing it is not allowed. | |
This GCC Runtime Library Exception ("Exception") is an additional | |
permission under section 7 of the GNU General Public License, version | |
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that | |
bears a notice placed by the copyright holder of the file stating that | |
the file is governed by GPLv3 along with this Exception. | |
When you use GCC to compile a program, GCC may combine portions of | |
certain GCC header files and runtime libraries with the compiled | |
program. The purpose of this Exception is to allow compilation of | |
non-GPL (including proprietary) programs to use, in this way, the | |
header files and runtime libraries covered by this Exception. | |
0. Definitions. | |
A file is an "Independent Module" if it either requires the Runtime | |
Library for execution after a Compilation Process, or makes use of an | |
interface provided by the Runtime Library, but is not otherwise based | |
on the Runtime Library. | |
"GCC" means a version of the GNU Compiler Collection, with or without | |
modifications, governed by version 3 (or a specified later version) of | |
the GNU General Public License (GPL) with the option of using any | |
subsequent versions published by the FSF. | |
"GPL-compatible Software" is software whose conditions of propagation, | |
modification and use would permit combination with GCC in accord with | |
the license of GCC. | |
"Target Code" refers to output from any compiler for a real or virtual | |
target processor architecture, in executable form or suitable for | |
input to an assembler, loader, linker and/or execution | |
phase. Notwithstanding that, Target Code does not include data in any | |
format that is used as a compiler intermediate representation, or used | |
for producing a compiler intermediate representation. | |
The "Compilation Process" transforms code entirely represented in | |
non-intermediate languages designed for human-written code, and/or in | |
Java Virtual Machine byte code, into Target Code. Thus, for example, | |
use of source code generators and preprocessors need not be considered | |
part of the Compilation Process, since the Compilation Process can be | |
understood as starting with the output of the generators or | |
preprocessors. | |
A Compilation Process is "Eligible" if it is done using GCC, alone or | |
with other GPL-compatible software, or if it is done without using any | |
work based on GCC. For example, using non-GPL-compatible Software to | |
optimize any GCC intermediate representations would not qualify as an | |
Eligible Compilation Process. | |
1. Grant of Additional Permission. | |
You have permission to propagate a work of Target Code formed by | |
combining the Runtime Library with Independent Modules, even if such | |
propagation would otherwise violate the terms of GPLv3, provided that | |
all Target Code was generated by Eligible Compilation Processes. You | |
may then convey such a combination under terms of your choice, | |
consistent with the licensing of the Independent Modules. | |
2. No Weakening of GCC Copyleft. | |
The availability of this Exception does not imply any general | |
presumption that third-party software is unaffected by the copyleft | |
requirements of the license of GCC. | |
---- | |
GNU GENERAL PUBLIC LICENSE | |
Version 3, 29 June 2007 | |
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | |
Everyone is permitted to copy and distribute verbatim copies | |
of this license document, but changing it is not allowed. | |
Preamble | |
The GNU General Public License is a free, copyleft license for | |
software and other kinds of works. | |
The licenses for most software and other practical works are designed | |
to take away your freedom to share and change the works. By contrast, | |
the GNU General Public License is intended to guarantee your freedom to | |
share and change all versions of a program--to make sure it remains free | |
software for all its users. We, the Free Software Foundation, use the | |
GNU General Public License for most of our software; it applies also to | |
any other work released this way by its authors. You can apply it to | |
your programs, too. | |
When we speak of free software, we are referring to freedom, not | |
price. Our General Public Licenses are designed to make sure that you | |
have the freedom to distribute copies of free software (and charge for | |
them if you wish), that you receive source code or can get it if you | |
want it, that you can change the software or use pieces of it in new | |
free programs, and that you know you can do these things. | |
To protect your rights, we need to prevent others from denying you | |
these rights or asking you to surrender the rights. Therefore, you have | |
certain responsibilities if you distribute copies of the software, or if | |
you modify it: responsibilities to respect the freedom of others. | |
For example, if you distribute copies of such a program, whether | |
gratis or for a fee, you must pass on to the recipients the same | |
freedoms that you received. You must make sure that they, too, receive | |
or can get the source code. And you must show them these terms so they | |
know their rights. | |
Developers that use the GNU GPL protect your rights with two steps: | |
(1) assert copyright on the software, and (2) offer you this License | |
giving you legal permission to copy, distribute and/or modify it. | |
For the developers' and authors' protection, the GPL clearly explains | |
that there is no warranty for this free software. For both users' and | |
authors' sake, the GPL requires that modified versions be marked as | |
changed, so that their problems will not be attributed erroneously to | |
authors of previous versions. | |
Some devices are designed to deny users access to install or run | |
modified versions of the software inside them, although the manufacturer | |
can do so. This is fundamentally incompatible with the aim of | |
protecting users' freedom to change the software. The systematic | |
pattern of such abuse occurs in the area of products for individuals to | |
use, which is precisely where it is most unacceptable. Therefore, we | |
have designed this version of the GPL to prohibit the practice for those | |
products. If such problems arise substantially in other domains, we | |
stand ready to extend this provision to those domains in future versions | |
of the GPL, as needed to protect the freedom of users. | |
Finally, every program is threatened constantly by software patents. | |
States should not allow patents to restrict development and use of | |
software on general-purpose computers, but in those that do, we wish to | |
avoid the special danger that patents applied to a free program could | |
make it effectively proprietary. To prevent this, the GPL assures that | |
patents cannot be used to render the program non-free. | |
The precise terms and conditions for copying, distribution and | |
modification follow. | |
TERMS AND CONDITIONS | |
0. Definitions. | |
"This License" refers to version 3 of the GNU General Public License. | |
"Copyright" also means copyright-like laws that apply to other kinds of | |
works, such as semiconductor masks. | |
"The Program" refers to any copyrightable work licensed under this | |
License. Each licensee is addressed as "you". "Licensees" and | |
"recipients" may be individuals or organizations. | |
To "modify" a work means to copy from or adapt all or part of the work | |
in a fashion requiring copyright permission, other than the making of an | |
exact copy. The resulting work is called a "modified version" of the | |
earlier work or a work "based on" the earlier work. | |
A "covered work" means either the unmodified Program or a work based | |
on the Program. | |
To "propagate" a work means to do anything with it that, without | |
permission, would make you directly or secondarily liable for | |
infringement under applicable copyright law, except executing it on a | |
computer or modifying a private copy. Propagation includes copying, | |
distribution (with or without modification), making available to the | |
public, and in some countries other activities as well. | |
To "convey" a work means any kind of propagation that enables other | |
parties to make or receive copies. Mere interaction with a user through | |
a computer network, with no transfer of a copy, is not conveying. | |
An interactive user interface displays "Appropriate Legal Notices" | |
to the extent that it includes a convenient and prominently visible | |
feature that (1) displays an appropriate copyright notice, and (2) | |
tells the user that there is no warranty for the work (except to the | |
extent that warranties are provided), that licensees may convey the | |
work under this License, and how to view a copy of this License. If | |
the interface presents a list of user commands or options, such as a | |
menu, a prominent item in the list meets this criterion. | |
1. Source Code. | |
The "source code" for a work means the preferred form of the work | |
for making modifications to it. "Object code" means any non-source | |
form of a work. | |
A "Standard Interface" means an interface that either is an official | |
standard defined by a recognized standards body, or, in the case of | |
interfaces specified for a particular programming language, one that | |
is widely used among developers working in that language. | |
The "System Libraries" of an executable work include anything, other | |
than the work as a whole, that (a) is included in the normal form of | |
packaging a Major Component, but which is not part of that Major | |
Component, and (b) serves only to enable use of the work with that | |
Major Component, or to implement a Standard Interface for which an | |
implementation is available to the public in source code form. A | |
"Major Component", in this context, means a major essential component | |
(kernel, window system, and so on) of the specific operating system | |
(if any) on which the executable work runs, or a compiler used to | |
produce the work, or an object code interpreter used to run it. | |
The "Corresponding Source" for a work in object code form means all | |
the source code needed to generate, install, and (for an executable | |
work) run the object code and to modify the work, including scripts to | |
control those activities. However, it does not include the work's | |
System Libraries, or general-purpose tools or generally available free | |
programs which are used unmodified in performing those activities but | |
which are not part of the work. For example, Corresponding Source | |
includes interface definition files associated with source files for | |
the work, and the source code for shared libraries and dynamically | |
linked subprograms that the work is specifically designed to require, | |
such as by intimate data communication or control flow between those | |
subprograms and other parts of the work. | |
The Corresponding Source need not include anything that users | |
can regenerate automatically from other parts of the Corresponding | |
Source. | |
The Corresponding Source for a work in source code form is that | |
same work. | |
2. Basic Permissions. | |
All rights granted under this License are granted for the term of | |
copyright on the Program, and are irrevocable provided the stated | |
conditions are met. This License explicitly affirms your unlimited | |
permission to run the unmodified Program. The output from running a | |
covered work is covered by this License only if the output, given its | |
content, constitutes a covered work. This License acknowledges your | |
rights of fair use or other equivalent, as provided by copyright law. | |
You may make, run and propagate covered works that you do not | |
convey, without conditions so long as your license otherwise remains | |
in force. You may convey covered works to others for the sole purpose | |
of having them make modifications exclusively for you, or provide you | |
with facilities for running those works, provided that you comply with | |
the terms of this License in conveying all material for which you do | |
not control copyright. Those thus making or running the covered works | |
for you must do so exclusively on your behalf, under your direction | |
and control, on terms that prohibit them from making any copies of | |
your copyrighted material outside their relationship with you. | |
Conveying under any other circumstances is permitted solely under | |
the conditions stated below. Sublicensing is not allowed; section 10 | |
makes it unnecessary. | |
3. Protecting Users' Legal Rights From Anti-Circumvention Law. | |
No covered work shall be deemed part of an effective technological | |
measure under any applicable law fulfilling obligations under article | |
11 of the WIPO copyright treaty adopted on 20 December 1996, or | |
similar laws prohibiting or restricting circumvention of such | |
measures. | |
When you convey a covered work, you waive any legal power to forbid | |
circumvention of technological measures to the extent such circumvention | |
is effected by exercising rights under this License with respect to | |
the covered work, and you disclaim any intention to limit operation or | |
modification of the work as a means of enforcing, against the work's | |
users, your or third parties' legal rights to forbid circumvention of | |
technological measures. | |
4. Conveying Verbatim Copies. | |
You may convey verbatim copies of the Program's source code as you | |
receive it, in any medium, provided that you conspicuously and | |
appropriately publish on each copy an appropriate copyright notice; | |
keep intact all notices stating that this License and any | |
non-permissive terms added in accord with section 7 apply to the code; | |
keep intact all notices of the absence of any warranty; and give all | |
recipients a copy of this License along with the Program. | |
You may charge any price or no price for each copy that you convey, | |
and you may offer support or warranty protection for a fee. | |
5. Conveying Modified Source Versions. | |
You may convey a work based on the Program, or the modifications to | |
produce it from the Program, in the form of source code under the | |
terms of section 4, provided that you also meet all of these conditions: | |
a) The work must carry prominent notices stating that you modified | |
it, and giving a relevant date. | |
b) The work must carry prominent notices stating that it is | |
released under this License and any conditions added under section | |
7. This requirement modifies the requirement in section 4 to | |
"keep intact all notices". | |
c) You must license the entire work, as a whole, under this | |
License to anyone who comes into possession of a copy. This | |
License will therefore apply, along with any applicable section 7 | |
additional terms, to the whole of the work, and all its parts, | |
regardless of how they are packaged. This License gives no | |
permission to license the work in any other way, but it does not | |
invalidate such permission if you have separately received it. | |
d) If the work has interactive user interfaces, each must display | |
Appropriate Legal Notices; however, if the Program has interactive | |
interfaces that do not display Appropriate Legal Notices, your | |
work need not make them do so. | |
A compilation of a covered work with other separate and independent | |
works, which are not by their nature extensions of the covered work, | |
and which are not combined with it such as to form a larger program, | |
in or on a volume of a storage or distribution medium, is called an | |
"aggregate" if the compilation and its resulting copyright are not | |
used to limit the access or legal rights of the compilation's users | |
beyond what the individual works permit. Inclusion of a covered work | |
in an aggregate does not cause this License to apply to the other | |
parts of the aggregate. | |
6. Conveying Non-Source Forms. | |
You may convey a covered work in object code form under the terms | |
of sections 4 and 5, provided that you also convey the | |
machine-readable Corresponding Source under the terms of this License, | |
in one of these ways: | |
a) Convey the object code in, or embodied in, a physical product | |
(including a physical distribution medium), accompanied by the | |
Corresponding Source fixed on a durable physical medium | |
customarily used for software interchange. | |
b) Convey the object code in, or embodied in, a physical product | |
(including a physical distribution medium), accompanied by a | |
written offer, valid for at least three years and valid for as | |
long as you offer spare parts or customer support for that product | |
model, to give anyone who possesses the object code either (1) a | |
copy of the Corresponding Source for all the software in the | |
product that is covered by this License, on a durable physical | |
medium customarily used for software interchange, for a price no | |
more than your reasonable cost of physically performing this | |
conveying of source, or (2) access to copy the | |
Corresponding Source from a network server at no charge. | |
c) Convey individual copies of the object code with a copy of the | |
written offer to provide the Corresponding Source. This | |
alternative is allowed only occasionally and noncommercially, and | |
only if you received the object code with such an offer, in accord | |
with subsection 6b. | |
d) Convey the object code by offering access from a designated | |
place (gratis or for a charge), and offer equivalent access to the | |
Corresponding Source in the same way through the same place at no | |
further charge. You need not require recipients to copy the | |
Corresponding Source along with the object code. If the place to | |
copy the object code is a network server, the Corresponding Source | |
may be on a different server (operated by you or a third party) | |
that supports equivalent copying facilities, provided you maintain | |
clear directions next to the object code saying where to find the | |
Corresponding Source. Regardless of what server hosts the | |
Corresponding Source, you remain obligated to ensure that it is | |
available for as long as needed to satisfy these requirements. | |
e) Convey the object code using peer-to-peer transmission, provided | |
you inform other peers where the object code and Corresponding | |
Source of the work are being offered to the general public at no | |
charge under subsection 6d. | |
A separable portion of the object code, whose source code is excluded | |
from the Corresponding Source as a System Library, need not be | |
included in conveying the object code work. | |
A "User Product" is either (1) a "consumer product", which means any | |
tangible personal property which is normally used for personal, family, | |
or household purposes, or (2) anything designed or sold for incorporation | |
into a dwelling. In determining whether a product is a consumer product, | |
doubtful cases shall be resolved in favor of coverage. For a particular | |
product received by a particular user, "normally used" refers to a | |
typical or common use of that class of product, regardless of the status | |
of the particular user or of the way in which the particular user | |
actually uses, or expects or is expected to use, the product. A product | |
is a consumer product regardless of whether the product has substantial | |
commercial, industrial or non-consumer uses, unless such uses represent | |
the only significant mode of use of the product. | |
"Installation Information" for a User Product means any methods, | |
procedures, authorization keys, or other information required to install | |
and execute modified versions of a covered work in that User Product from | |
a modified version of its Corresponding Source. The information must | |
suffice to ensure that the continued functioning of the modified object | |
code is in no case prevented or interfered with solely because | |
modification has been made. | |
If you convey an object code work under this section in, or with, or | |
specifically for use in, a User Product, and the conveying occurs as | |
part of a transaction in which the right of possession and use of the | |
User Product is transferred to the recipient in perpetuity or for a | |
fixed term (regardless of how the transaction is characterized), the | |
Corresponding Source conveyed under this section must be accompanied | |
by the Installation Information. But this requirement does not apply | |
if neither you nor any third party retains the ability to install | |
modified object code on the User Product (for example, the work has | |
been installed in ROM). | |
The requirement to provide Installation Information does not include a | |
requirement to continue to provide support service, warranty, or updates | |
for a work that has been modified or installed by the recipient, or for | |
the User Product in which it has been modified or installed. Access to a | |
network may be denied when the modification itself materially and | |
adversely affects the operation of the network or violates the rules and | |
protocols for communication across the network. | |
Corresponding Source conveyed, and Installation Information provided, | |
in accord with this section must be in a format that is publicly | |
documented (and with an implementation available to the public in | |
source code form), and must require no special password or key for | |
unpacking, reading or copying. | |
7. Additional Terms. | |
"Additional permissions" are terms that supplement the terms of this | |
License by making exceptions from one or more of its conditions. | |
Additional permissions that are applicable to the entire Program shall | |
be treated as though they were included in this License, to the extent | |
that they are valid under applicable law. If additional permissions | |
apply only to part of the Program, that part may be used separately | |
under those permissions, but the entire Program remains governed by | |
this License without regard to the additional permissions. | |
When you convey a copy of a covered work, you may at your option | |
remove any additional permissions from that copy, or from any part of | |
it. (Additional permissions may be written to require their own | |
removal in certain cases when you modify the work.) You may place | |
additional permissions on material, added by you to a covered work, | |
for which you have or can give appropriate copyright permission. | |
Notwithstanding any other provision of this License, for material you | |
add to a covered work, you may (if authorized by the copyright holders of | |
that material) supplement the terms of this License with terms: | |
a) Disclaiming warranty or limiting liability differently from the | |
terms of sections 15 and 16 of this License; or | |
b) Requiring preservation of specified reasonable legal notices or | |
author attributions in that material or in the Appropriate Legal | |
Notices displayed by works containing it; or | |
c) Prohibiting misrepresentation of the origin of that material, or | |
requiring that modified versions of such material be marked in | |
reasonable ways as different from the original version; or | |
d) Limiting the use for publicity purposes of names of licensors or | |
authors of the material; or | |
e) Declining to grant rights under trademark law for use of some | |
trade names, trademarks, or service marks; or | |
f) Requiring indemnification of licensors and authors of that | |
material by anyone who conveys the material (or modified versions of | |
it) with contractual assumptions of liability to the recipient, for | |
any liability that these contractual assumptions directly impose on | |
those licensors and authors. | |
All other non-permissive additional terms are considered "further | |
restrictions" within the meaning of section 10. If the Program as you | |
received it, or any part of it, contains a notice stating that it is | |
governed by this License along with a term that is a further | |
restriction, you may remove that term. If a license document contains | |
a further restriction but permits relicensing or conveying under this | |
License, you may add to a covered work material governed by the terms | |
of that license document, provided that the further restriction does | |
not survive such relicensing or conveying. | |
If you add terms to a covered work in accord with this section, you | |
must place, in the relevant source files, a statement of the | |
additional terms that apply to those files, or a notice indicating | |
where to find the applicable terms. | |
Additional terms, permissive or non-permissive, may be stated in the | |
form of a separately written license, or stated as exceptions; | |
the above requirements apply either way. | |
8. Termination. | |
You may not propagate or modify a covered work except as expressly | |
provided under this License. Any attempt otherwise to propagate or | |
modify it is void, and will automatically terminate your rights under | |
this License (including any patent licenses granted under the third | |
paragraph of section 11). | |
However, if you cease all violation of this License, then your | |
license from a particular copyright holder is reinstated (a) | |
provisionally, unless and until the copyright holder explicitly and | |
finally terminates your license, and (b) permanently, if the copyright | |
holder fails to notify you of the violation by some reasonable means | |
prior to 60 days after the cessation. | |
Moreover, your license from a particular copyright holder is | |
reinstated permanently if the copyright holder notifies you of the | |
violation by some reasonable means, this is the first time you have | |
received notice of violation of this License (for any work) from that | |
copyright holder, and you cure the violation prior to 30 days after | |
your receipt of the notice. | |
Termination of your rights under this section does not terminate the | |
licenses of parties who have received copies or rights from you under | |
this License. If your rights have been terminated and not permanently | |
reinstated, you do not qualify to receive new licenses for the same | |
material under section 10. | |
9. Acceptance Not Required for Having Copies. | |
You are not required to accept this License in order to receive or | |
run a copy of the Program. Ancillary propagation of a covered work | |
occurring solely as a consequence of using peer-to-peer transmission | |
to receive a copy likewise does not require acceptance. However, | |
nothing other than this License grants you permission to propagate or | |
modify any covered work. These actions infringe copyright if you do | |
not accept this License. Therefore, by modifying or propagating a | |
covered work, you indicate your acceptance of this License to do so. | |
10. Automatic Licensing of Downstream Recipients. | |
Each time you convey a covered work, the recipient automatically | |
receives a license from the original licensors, to run, modify and | |
propagate that work, subject to this License. You are not responsible | |
for enforcing compliance by third parties with this License. | |
An "entity transaction" is a transaction transferring control of an | |
organization, or substantially all assets of one, or subdividing an | |
organization, or merging organizations. If propagation of a covered | |
work results from an entity transaction, each party to that | |
transaction who receives a copy of the work also receives whatever | |
licenses to the work the party's predecessor in interest had or could | |
give under the previous paragraph, plus a right to possession of the | |
Corresponding Source of the work from the predecessor in interest, if | |
the predecessor has it or can get it with reasonable efforts. | |
You may not impose any further restrictions on the exercise of the | |
rights granted or affirmed under this License. For example, you may | |
not impose a license fee, royalty, or other charge for exercise of | |
rights granted under this License, and you may not initiate litigation | |
(including a cross-claim or counterclaim in a lawsuit) alleging that | |
any patent claim is infringed by making, using, selling, offering for | |
sale, or importing the Program or any portion of it. | |
11. Patents. | |
A "contributor" is a copyright holder who authorizes use under this | |
License of the Program or a work on which the Program is based. The | |
work thus licensed is called the contributor's "contributor version". | |
A contributor's "essential patent claims" are all patent claims | |
owned or controlled by the contributor, whether already acquired or | |
hereafter acquired, that would be infringed by some manner, permitted | |
by this License, of making, using, or selling its contributor version, | |
but do not include claims that would be infringed only as a | |
consequence of further modification of the contributor version. For | |
purposes of this definition, "control" includes the right to grant | |
patent sublicenses in a manner consistent with the requirements of | |
this License. | |
Each contributor grants you a non-exclusive, worldwide, royalty-free | |
patent license under the contributor's essential patent claims, to | |
make, use, sell, offer for sale, import and otherwise run, modify and | |
propagate the contents of its contributor version. | |
In the following three paragraphs, a "patent license" is any express | |
agreement or commitment, however denominated, not to enforce a patent | |
(such as an express permission to practice a patent or covenant not to | |
sue for patent infringement). To "grant" such a patent license to a | |
party means to make such an agreement or commitment not to enforce a | |
patent against the party. | |
If you convey a covered work, knowingly relying on a patent license, | |
and the Corresponding Source of the work is not available for anyone | |
to copy, free of charge and under the terms of this License, through a | |
publicly available network server or other readily accessible means, | |
then you must either (1) cause the Corresponding Source to be so | |
available, or (2) arrange to deprive yourself of the benefit of the | |
patent license for this particular work, or (3) arrange, in a manner | |
consistent with the requirements of this License, to extend the patent | |
license to downstream recipients. "Knowingly relying" means you have | |
actual knowledge that, but for the patent license, your conveying the | |
covered work in a country, or your recipient's use of the covered work | |
in a country, would infringe one or more identifiable patents in that | |
country that you have reason to believe are valid. | |
If, pursuant to or in connection with a single transaction or | |
arrangement, you convey, or propagate by procuring conveyance of, a | |
covered work, and grant a patent license to some of the parties | |
receiving the covered work authorizing them to use, propagate, modify | |
or convey a specific copy of the covered work, then the patent license | |
you grant is automatically extended to all recipients of the covered | |
work and works based on it. | |
A patent license is "discriminatory" if it does not include within | |
the scope of its coverage, prohibits the exercise of, or is | |
conditioned on the non-exercise of one or more of the rights that are | |
specifically granted under this License. You may not convey a covered | |
work if you are a party to an arrangement with a third party that is | |
in the business of distributing software, under which you make payment | |
to the third party based on the extent of your activity of conveying | |
the work, and under which the third party grants, to any of the | |
parties who would receive the covered work from you, a discriminatory | |
patent license (a) in connection with copies of the covered work | |
conveyed by you (or copies made from those copies), or (b) primarily | |
for and in connection with specific products or compilations that | |
contain the covered work, unless you entered into that arrangement, | |
or that patent license was granted, prior to 28 March 2007. | |
Nothing in this License shall be construed as excluding or limiting | |
any implied license or other defenses to infringement that may | |
otherwise be available to you under applicable patent law. | |
12. No Surrender of Others' Freedom. | |
If conditions are imposed on you (whether by court order, agreement or | |
otherwise) that contradict the conditions of this License, they do not | |
excuse you from the conditions of this License. If you cannot convey a | |
covered work so as to satisfy simultaneously your obligations under this | |
License and any other pertinent obligations, then as a consequence you may | |
not convey it at all. For example, if you agree to terms that obligate you | |
to collect a royalty for further conveying from those to whom you convey | |
the Program, the only way you could satisfy both those terms and this | |
License would be to refrain entirely from conveying the Program. | |
13. Use with the GNU Affero General Public License. | |
Notwithstanding any other provision of this License, you have | |
permission to link or combine any covered work with a work licensed | |
under version 3 of the GNU Affero General Public License into a single | |
combined work, and to convey the resulting work. The terms of this | |
License will continue to apply to the part which is the covered work, | |
but the special requirements of the GNU Affero General Public License, | |
section 13, concerning interaction through a network will apply to the | |
combination as such. | |
14. Revised Versions of this License. | |
The Free Software Foundation may publish revised and/or new versions of | |
the GNU General Public License from time to time. Such new versions will | |
be similar in spirit to the present version, but may differ in detail to | |
address new problems or concerns. | |
Each version is given a distinguishing version number. If the | |
Program specifies that a certain numbered version of the GNU General | |
Public License "or any later version" applies to it, you have the | |
option of following the terms and conditions either of that numbered | |
version or of any later version published by the Free Software | |
Foundation. If the Program does not specify a version number of the | |
GNU General Public License, you may choose any version ever published | |
by the Free Software Foundation. | |
If the Program specifies that a proxy can decide which future | |
versions of the GNU General Public License can be used, that proxy's | |
public statement of acceptance of a version permanently authorizes you | |
to choose that version for the Program. | |
Later license versions may give you additional or different | |
permissions. However, no additional obligations are imposed on any | |
author or copyright holder as a result of your choosing to follow a | |
later version. | |
15. Disclaimer of Warranty. | |
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY | |
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT | |
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY | |
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, | |
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM | |
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF | |
ALL NECESSARY SERVICING, REPAIR OR CORRECTION. | |
16. Limitation of Liability. | |
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING | |
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS | |
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY | |
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE | |
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF | |
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD | |
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), | |
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF | |
SUCH DAMAGES. | |
17. Interpretation of Sections 15 and 16. | |
If the disclaimer of warranty and limitation of liability provided | |
above cannot be given local legal effect according to their terms, | |
reviewing courts shall apply local law that most closely approximates | |
an absolute waiver of all civil liability in connection with the | |
Program, unless a warranty or assumption of liability accompanies a | |
copy of the Program in return for a fee. | |
END OF TERMS AND CONDITIONS | |
How to Apply These Terms to Your New Programs | |
If you develop a new program, and you want it to be of the greatest | |
possible use to the public, the best way to achieve this is to make it | |
free software which everyone can redistribute and change under these terms. | |
To do so, attach the following notices to the program. It is safest | |
to attach them to the start of each source file to most effectively | |
state the exclusion of warranty; and each file should have at least | |
the "copyright" line and a pointer to where the full notice is found. | |
<one line to give the program's name and a brief idea of what it does.> | |
Copyright (C) <year> <name of author> | |
This program is free software: you can redistribute it and/or modify | |
it under the terms of the GNU General Public License as published by | |
the Free Software Foundation, either version 3 of the License, or | |
(at your option) any later version. | |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
You should have received a copy of the GNU General Public License | |
along with this program. If not, see <http://www.gnu.org/licenses/>. | |
Also add information on how to contact you by electronic and paper mail. | |
If the program does terminal interaction, make it output a short | |
notice like this when it starts in an interactive mode: | |
<program> Copyright (C) <year> <name of author> | |
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. | |
This is free software, and you are welcome to redistribute it | |
under certain conditions; type `show c' for details. | |
The hypothetical commands `show w' and `show c' should show the appropriate | |
parts of the General Public License. Of course, your program's commands | |
might be different; for a GUI interface, you would use an "about box". | |
You should also get your employer (if you work as a programmer) or school, | |
if any, to sign a "copyright disclaimer" for the program, if necessary. | |
For more information on this, and how to apply and follow the GNU GPL, see | |
<http://www.gnu.org/licenses/>. | |
The GNU General Public License does not permit incorporating your program | |
into proprietary programs. If your program is a subroutine library, you | |
may consider it more useful to permit linking proprietary applications with | |
the library. If this is what you want to do, use the GNU Lesser General | |
Public License instead of this License. But first, please read | |
<http://www.gnu.org/philosophy/why-not-lgpl.html>. |
Metadata-Version: 2.1 | |
Name: numpy | |
Version: 1.16.4 | |
Summary: NumPy is the fundamental package for array computing with Python. | |
Home-page: https://www.numpy.org | |
Author: Travis E. Oliphant et al. | |
Maintainer: NumPy Developers | |
Maintainer-email: numpy-discussion@python.org | |
License: BSD | |
Download-URL: https://pypi.python.org/pypi/numpy | |
Platform: Windows | |
Platform: Linux | |
Platform: Solaris | |
Platform: Mac OS-X | |
Platform: Unix | |
Classifier: Development Status :: 5 - Production/Stable | |
Classifier: Intended Audience :: Science/Research | |
Classifier: Intended Audience :: Developers | |
Classifier: License :: OSI Approved | |
Classifier: Programming Language :: C | |
Classifier: Programming Language :: Python | |
Classifier: Programming Language :: Python :: 2 | |
Classifier: Programming Language :: Python :: 2.7 | |
Classifier: Programming Language :: Python :: 3 | |
Classifier: Programming Language :: Python :: 3.4 | |
Classifier: Programming Language :: Python :: 3.5 | |
Classifier: Programming Language :: Python :: 3.6 | |
Classifier: Programming Language :: Python :: 3.7 | |
Classifier: Programming Language :: Python :: Implementation :: CPython | |
Classifier: Topic :: Software Development | |
Classifier: Topic :: Scientific/Engineering | |
Classifier: Operating System :: Microsoft :: Windows | |
Classifier: Operating System :: POSIX | |
Classifier: Operating System :: Unix | |
Classifier: Operating System :: MacOS | |
Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.* | |
It provides: | |
- a powerful N-dimensional array object | |
- sophisticated (broadcasting) functions | |
- tools for integrating C/C++ and Fortran code | |
- useful linear algebra, Fourier transform, and random number capabilities | |
- and much more | |
Besides its obvious scientific uses, NumPy can also be used as an efficient | |
multi-dimensional container of generic data. Arbitrary data-types can be | |
defined. This allows NumPy to seamlessly and speedily integrate with a wide | |
variety of databases. | |
All NumPy wheels distributed on PyPI are BSD licensed. | |
../../../bin/f2py,sha256=WHlEpYa861xQw_xcIky8AzRti_5WZJbLH1p0EnBe2Q4,259 | |
../../../bin/f2py3,sha256=WHlEpYa861xQw_xcIky8AzRti_5WZJbLH1p0EnBe2Q4,259 | |
../../../bin/f2py3.7,sha256=WHlEpYa861xQw_xcIky8AzRti_5WZJbLH1p0EnBe2Q4,259 | |
numpy-1.16.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 | |
numpy-1.16.4.dist-info/LICENSE.txt,sha256=bRZkZOdT634zKp1g1FYioE-ZjlQ9CSDnim_7HDKEICE,42206 | |
numpy-1.16.4.dist-info/METADATA,sha256=WFga9C6F8FoqxVjRlHBye8TAKcoqOUhxHwLWdlsEoxY,1948 | |
numpy-1.16.4.dist-info/RECORD,, | |
numpy-1.16.4.dist-info/WHEEL,sha256=lzLSP8LQNrOglXOIpuQOEvoaoMcsCDhtfg39RiZGHxU,249 | |
numpy-1.16.4.dist-info/entry_points.txt,sha256=MA6o_IjpQrpZlNNxq1yxwYV0u_I689RuoWedrJLsZnk,113 | |
numpy-1.16.4.dist-info/top_level.txt,sha256=4J9lbBMLnAiyxatxh8iRKV5Entd_6-oqbO7pzJjMsPw,6 | |
numpy/.dylibs/libgcc_s.1.dylib,sha256=T2GQ6V3Q0PvfJx6bMnda_3HUc7HweQpBhsbjFAnrz9o,273072 | |
numpy/.dylibs/libgfortran.3.dylib,sha256=8p97e3by4BahWr2XsTf7OITKupmFNmmNdSPve3ljaUQ,1550456 | |
numpy/.dylibs/libopenblasp-r0.3.7.dev.dylib,sha256=RpEuFcnZ-PHzNJ6NjiPvaOBxk-XsSykIfXOZUnx2A1g,63581068 | |
numpy/.dylibs/libquadmath.0.dylib,sha256=qgkGTBRcZNPTxcbU93EvRMRRAD7E2DdHNkWBT9oE5Ds,279932 | |
numpy/LICENSE.txt,sha256=bRZkZOdT634zKp1g1FYioE-ZjlQ9CSDnim_7HDKEICE,42206 | |
numpy/__config__.py,sha256=WbvPieICxdWCXHqye7isTm70QGBWRZZkLv1KA9wJ-1U,1554 | |
numpy/__init__.py,sha256=6bqV6_Ot0ysgHlDh0mhaoowho85c5GC5OEtyZrScd9U,7110 | |
numpy/__pycache__/__config__.cpython-37.pyc,, | |
numpy/__pycache__/__init__.cpython-37.pyc,, | |
numpy/__pycache__/_distributor_init.cpython-37.pyc,, | |
numpy/__pycache__/_globals.cpython-37.pyc,, | |
numpy/__pycache__/_pytesttester.cpython-37.pyc,, | |
numpy/__pycache__/conftest.cpython-37.pyc,, | |
numpy/__pycache__/ctypeslib.cpython-37.pyc,, | |
numpy/__pycache__/dual.cpython-37.pyc,, | |
numpy/__pycache__/matlib.cpython-37.pyc,, | |
numpy/__pycache__/setup.cpython-37.pyc,, | |
numpy/__pycache__/version.cpython-37.pyc,, | |
numpy/_distributor_init.py,sha256=IgPkSK3H9bgjFeUfWuXhjKrgetQl5ztUW-rTyjGHK3c,331 | |
numpy/_globals.py,sha256=p8xxERZsxjGPUWV9pMY3jz75NZxDLppGeKaHbYGCDqM,2379 | |
numpy/_pytesttester.py,sha256=eLWMwBiqamHoev8-VlmtvCaxV_gitqK7js-UkjUW4qs,6854 | |
numpy/compat/__init__.py,sha256=MHle4gJcrXh1w4SNv0mz5rbUTAjAzHnyO3rtbSW3AUo,498 | |
numpy/compat/__pycache__/__init__.cpython-37.pyc,, | |
numpy/compat/__pycache__/_inspect.cpython-37.pyc,, | |
numpy/compat/__pycache__/py3k.cpython-37.pyc,, | |
numpy/compat/__pycache__/setup.cpython-37.pyc,, | |
numpy/compat/_inspect.py,sha256=xEImUFhm4VAzT2LJj2Va_yDAHJsdy0RwSi1JwOOhykU,7513 | |
numpy/compat/py3k.py,sha256=pBEgOIEsaycTciDcN33Dt2WdNA8c3JBtxkSuzA2g8TQ,6663 | |
numpy/compat/setup.py,sha256=REJcwNU7EbfwBFS1FHazGJcUhh50_5gYttr3BSczCiM,382 | |
numpy/compat/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/compat/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/compat/tests/__pycache__/test_compat.cpython-37.pyc,, | |
numpy/compat/tests/test_compat.py,sha256=KI-7Ru3Ia5TwGBmBWTM3Qjq6G_CFOzX1uId--9MecXo,691 | |
numpy/conftest.py,sha256=MQmihxXyjxbn1l9N97DFVewnN1PcFAnWoBO404unCw4,1912 | |
numpy/core/__init__.py,sha256=_QCag_dScAatINZX8mlDcGdabAemkT8iq9GiWWgWisE,5928 | |
numpy/core/__pycache__/__init__.cpython-37.pyc,, | |
numpy/core/__pycache__/_add_newdocs.cpython-37.pyc,, | |
numpy/core/__pycache__/_aliased_types.cpython-37.pyc,, | |
numpy/core/__pycache__/_dtype.cpython-37.pyc,, | |
numpy/core/__pycache__/_dtype_ctypes.cpython-37.pyc,, | |
numpy/core/__pycache__/_internal.cpython-37.pyc,, | |
numpy/core/__pycache__/_methods.cpython-37.pyc,, | |
numpy/core/__pycache__/_string_helpers.cpython-37.pyc,, | |
numpy/core/__pycache__/_type_aliases.cpython-37.pyc,, | |
numpy/core/__pycache__/arrayprint.cpython-37.pyc,, | |
numpy/core/__pycache__/cversions.cpython-37.pyc,, | |
numpy/core/__pycache__/defchararray.cpython-37.pyc,, | |
numpy/core/__pycache__/einsumfunc.cpython-37.pyc,, | |
numpy/core/__pycache__/fromnumeric.cpython-37.pyc,, | |
numpy/core/__pycache__/function_base.cpython-37.pyc,, | |
numpy/core/__pycache__/generate_numpy_api.cpython-37.pyc,, | |
numpy/core/__pycache__/getlimits.cpython-37.pyc,, | |
numpy/core/__pycache__/info.cpython-37.pyc,, | |
numpy/core/__pycache__/machar.cpython-37.pyc,, | |
numpy/core/__pycache__/memmap.cpython-37.pyc,, | |
numpy/core/__pycache__/multiarray.cpython-37.pyc,, | |
numpy/core/__pycache__/numeric.cpython-37.pyc,, | |
numpy/core/__pycache__/numerictypes.cpython-37.pyc,, | |
numpy/core/__pycache__/overrides.cpython-37.pyc,, | |
numpy/core/__pycache__/records.cpython-37.pyc,, | |
numpy/core/__pycache__/setup.cpython-37.pyc,, | |
numpy/core/__pycache__/setup_common.cpython-37.pyc,, | |
numpy/core/__pycache__/shape_base.cpython-37.pyc,, | |
numpy/core/__pycache__/umath.cpython-37.pyc,, | |
numpy/core/__pycache__/umath_tests.cpython-37.pyc,, | |
numpy/core/_add_newdocs.py,sha256=I_ndaFJhI6iD5ySeC2HGpGRdPgv5JIP2vd6_Wo2tbjo,203038 | |
numpy/core/_aliased_types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/core/_dtype.py,sha256=rvaeOf-ypqhRrb4_Aljt2CXX6uZrX0STQ0pQwlXTKv4,9688 | |
numpy/core/_dtype_ctypes.py,sha256=srVPSI6kJvXjzIvkBQDU1itAfT-vCZKeyjgCF3yC-6Q,3448 | |
numpy/core/_dummy.cpython-37m-darwin.so,sha256=qCx2FVl4n6ZMdYGEmSZN6GtuRPSVQ3vA-LS9Lig1ib4,21232 | |
numpy/core/_internal.py,sha256=U3CSOXeUC79XPV-ZaxlhniUt3q5iFJXlQVFeIcwcsY4,27779 | |
numpy/core/_methods.py,sha256=3QDUJ2FAN317yU5fE6lPdFgBVrnBaiYnUkuTu1vWmoU,5083 | |
numpy/core/_multiarray_tests.cpython-37m-darwin.so,sha256=bfjnJ3mqkRa0ENACw56qDe9hM-F0vQFio9mG7ujqInA,125476 | |
numpy/core/_multiarray_umath.cpython-37m-darwin.so,sha256=17fFaIoyW3L334HpL4dd7deUpTBWkpHQL16VGCLxUAc,3379852 | |
numpy/core/_operand_flag_tests.cpython-37m-darwin.so,sha256=dzocAMr2Y_iHDNLXJmI0cVGB2Gaj90d3X1g4M3brr6I,22744 | |
numpy/core/_rational_tests.cpython-37m-darwin.so,sha256=bxmYJcCItTqSFNrTkxIRn8CbuHbqEVG7lIAulkU3S30,68088 | |
numpy/core/_string_helpers.py,sha256=NGGGhaFdU5eGiUAj3GTIBoOgWs4r9aTNlsE2r9NgX6Q,2855 | |
numpy/core/_struct_ufunc_tests.cpython-37m-darwin.so,sha256=WFw2vbASiosiugxLrFYwjVC1Lq2Wikes_ngFEYbKVU4,22820 | |
numpy/core/_type_aliases.py,sha256=FA2Pz5OKqcLl1QKLJNu-ETHIzQ1ii3LH5pSdHhZkfZA,9181 | |
numpy/core/_umath_tests.cpython-37m-darwin.so,sha256=c_rfB0-kiXqLw-nMzOM6hMkKuTE5Vo-ZV7Ai3ii20MI,39040 | |
numpy/core/arrayprint.py,sha256=9-GT05iiKZvMAjhoN1AUraQ1BzRaWmmq5_n0eWPp9L4,60310 | |
numpy/core/cversions.py,sha256=ukYNpkei0Coi7DOcbroXuDoXc6kl5odxmcy_39pszA0,413 | |
numpy/core/defchararray.py,sha256=gRmZlrryuLgNQLsFjMzZz4X2hFnTZzzqfkpiTWwvthI,71118 | |
numpy/core/einsumfunc.py,sha256=-P82bltMMGjUCRpYjJYohGdPpxLExucGlWJLMA7XxDw,51207 | |
numpy/core/fromnumeric.py,sha256=gza7xG7bWMWNtrQ0mV4719FWHpOAmk27LnhXsdQ5Ics,109555 | |
numpy/core/function_base.py,sha256=yV62eeOgNJGkA7n7LyoPBdmM5m5tkfeU9utMJaybXZQ,16343 | |
numpy/core/generate_numpy_api.py,sha256=0JBYTvekUeJyhp7QMKtWJSK-L6lVNhev16y0F2qX2pU,7470 | |
numpy/core/getlimits.py,sha256=zGUdeXCw8GAWly8MCTL6xWEyoBDszviDpc47SbS_BhY,18936 | |
numpy/core/include/numpy/__multiarray_api.h,sha256=MS8l193p6aGZSSuV6ube6F9Ms7Wsn5TKVv9WHM7eCkw,60958 | |
numpy/core/include/numpy/__ufunc_api.h,sha256=szHiF_4UY3EY8wH0q1YW9UmoakOHQTsHog75MJwEyTg,12143 | |
numpy/core/include/numpy/_neighborhood_iterator_imp.h,sha256=hNiUJ3gmJRxdjByk5R5jmLeBKpNfaP_29KLHFuTrSIA,1861 | |
numpy/core/include/numpy/_numpyconfig.h,sha256=gLm20E7JclQaq7AwdPKysJEA-d3SK-bUtum6ymzgqSg,982 | |
numpy/core/include/numpy/arrayobject.h,sha256=SXj-2avTHV8mNWvv7sOYHLKkRKcafDG7_HNpQNot1GE,164 | |
numpy/core/include/numpy/arrayscalars.h,sha256=vC7QCznlT8vkyvxbIh4QNwi1LR7UkP7GJ1j_0ZiJa1E,3509 | |
numpy/core/include/numpy/halffloat.h,sha256=ohvyl3Kz3mB1hW3MRzxwPDH-0L9WWM_eKhvYLjtT_2w,1878 | |
numpy/core/include/numpy/multiarray_api.txt,sha256=Panvwe-mLDLFw9WU90x2M7nqrCE99JzBD186Xa5R4po,56385 | |
numpy/core/include/numpy/ndarrayobject.h,sha256=ZVCR5RE1W4QUJ8X6jeai-9gwWvgDkImR8ZEH1XI2wl0,11507 | |
numpy/core/include/numpy/ndarraytypes.h,sha256=eHZA7lbctLPjUPdyXu6ITCnyOmlt88IIG2rZkjs4DAA,64719 | |
numpy/core/include/numpy/noprefix.h,sha256=YE-lWegAdZKI5lf44AW5jiWbnmO6hircWzj_WMFrLT4,6786 | |
numpy/core/include/numpy/npy_1_7_deprecated_api.h,sha256=LLeZKLuJADU3RDfT04pu5FCxCBU5cEzY5Q9phR_HL78,4715 | |
numpy/core/include/numpy/npy_3kcompat.h,sha256=exFgMT6slmo2Zg3bFsY3mKLUrrkg3KU_66gUmu5IYKk,14666 | |
numpy/core/include/numpy/npy_common.h,sha256=FIVNq2pSdIRJsoi56GAruahhfp1OwafweqCDhakUc8w,37277 | |
numpy/core/include/numpy/npy_cpu.h,sha256=3frXChwN0Cxca-sAeTTOJCiZ6_2q1EuggUwqEotdXLg,3879 | |
numpy/core/include/numpy/npy_endian.h,sha256=HHanBydLvLC2anJJySvy6wZ_lYaC_xI6GNwT8cJ78rE,2596 | |
numpy/core/include/numpy/npy_interrupt.h,sha256=Eyddk806h30jxgymbr44b7eIZKrHXtNzXpPtUPp2Ng8,3439 | |
numpy/core/include/numpy/npy_math.h,sha256=AeaXjX76YzIMT67EdZMnjG1--x61UI2htuGtGwgfA24,18838 | |
numpy/core/include/numpy/npy_no_deprecated_api.h,sha256=X-wRYdpuwIuerTnBblKjR7Dqsv8rqxn01RFLVWUHvi8,567 | |
numpy/core/include/numpy/npy_os.h,sha256=cEvEvpD92EeFjsjRelw1dXJaHYL-0yPJDuz3VeSJs4E,817 | |
numpy/core/include/numpy/numpyconfig.h,sha256=J5BLHoCyhe383tIM4YriMgYDjOPC4xWzRvqBPyNCTOE,1207 | |
numpy/core/include/numpy/old_defines.h,sha256=7eiZoi7JrdVT9LXKCoeta5AoIncGa98GcVlWqDrLjwk,6306 | |
numpy/core/include/numpy/oldnumeric.h,sha256=Yo-LiSzVfDK2YyhlH41ff4gS0m-lv8XjI4JcAzpdy94,708 | |
numpy/core/include/numpy/ufunc_api.txt,sha256=2d31yVD80vKEgf5Pr5JtDjnVjc2DLfiXRWQuq7KT5wc,6889 | |
numpy/core/include/numpy/ufuncobject.h,sha256=ocjHj2QCTYkbxIdSmPi-2k3vzKNJ96oB7giwiWFS2i0,13051 | |
numpy/core/include/numpy/utils.h,sha256=KqJzngAvarYV3oZQu5fY0ARPVihUP7FsZjdljysaSUk,729 | |
numpy/core/info.py,sha256=SjDs9EfOswEy-ABgUr9f09v83sUdhmwFXRlaZbOGCnA,4692 | |
numpy/core/lib/libnpymath.a,sha256=RboOJlTjM74MkxTtYfgWAtZaJAxjdyfkYlhI_l5fLk0,134920 | |
numpy/core/lib/npy-pkg-config/mlib.ini,sha256=puARujEiRP-jFD3s9Jwpe6E1fgT9T3YIny-pWJRDYNE,139 | |
numpy/core/lib/npy-pkg-config/npymath.ini,sha256=kamUNrYKAmXqQa8BcNv7D5sLqHh6bnChM0_5rZCsTfY,360 | |
numpy/core/machar.py,sha256=StiB_u3QIWj55RQASESDLjv_8650DE8vuCd7evhlmDI,10854 | |
numpy/core/memmap.py,sha256=4SEtSehRX8SJtGseMP1hm4LgrRR4oLa20wlRLUy4aJU,11612 | |
numpy/core/multiarray.py,sha256=26mdC_rn84U2WwIjf6mZX7rW_YugkEHRIDZBkKrRImQ,50606 | |
numpy/core/numeric.py,sha256=OaLQxu1NQmiehpC5BHRLMBIKXUqiqmDTaqhIgvN8K5M,92560 | |
numpy/core/numerictypes.py,sha256=8esH4zpSWgjUnfpe9CK0D5MmBPojh6iWLGEDza9Pi2E,17849 | |
numpy/core/overrides.py,sha256=OSkSrhC7dl63cK9Pr0qt4X5rMsB025MNoPV-_o15VNM,6658 | |
numpy/core/records.py,sha256=6N9oqz53J_5eGPsR9DxltkYxJdzraA2dJil0xBiqJm4,30418 | |
numpy/core/setup.py,sha256=a1oJAddaPzPtTGcPIiAG7xWyLKhTUHJF09HStDogL6g,41513 | |
numpy/core/setup_common.py,sha256=8Ywhbj-uud98QbEgIZEQGvmpD6S2Ve-dlOYqb_9lkmw,16571 | |
numpy/core/shape_base.py,sha256=4Sjp5Jjs5BfLJ0rU6PUrxl-hNk6ye5IcrpbUNbD667U,28273 | |
numpy/core/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/core/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/_locales.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_abc.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_api.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_arrayprint.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_datetime.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_defchararray.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_deprecations.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_dtype.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_einsum.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_errstate.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_extint128.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_function_base.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_getlimits.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_half.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_indexerrors.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_indexing.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_item_selection.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_longdouble.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_machar.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_mem_overlap.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_memmap.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_multiarray.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_nditer.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_numeric.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_numerictypes.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_overrides.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_print.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_records.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_scalar_ctors.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_scalarbuffer.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_scalarinherit.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_scalarmath.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_scalarprint.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_shape_base.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_ufunc.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_umath.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_umath_complex.cpython-37.pyc,, | |
numpy/core/tests/__pycache__/test_unicode.cpython-37.pyc,, | |
numpy/core/tests/_locales.py,sha256=GQro3bha8c5msgQyvNzmDUrNwqS2cGkKKuN4gg4c6tI,2266 | |
numpy/core/tests/data/astype_copy.pkl,sha256=lWSzCcvzRB_wpuRGj92spGIw-rNPFcd9hwJaRVvfWdk,716 | |
numpy/core/tests/data/recarray_from_file.fits,sha256=NA0kliz31FlLnYxv3ppzeruONqNYkuEvts5wzXEeIc4,8640 | |
numpy/core/tests/test_abc.py,sha256=cpIqt3VFBZLHbuNpO4NuyCGgd--k1zij5aasu7FV77I,2402 | |
numpy/core/tests/test_api.py,sha256=k7pN6IGolFQHBT7CmaRZHJqR59amWk6QcVK4RdqQkKc,18902 | |
numpy/core/tests/test_arrayprint.py,sha256=PPUppm9m6cPx2mjU0lQevTdnLSq0_6JIe8LyumN3ujM,34701 | |
numpy/core/tests/test_datetime.py,sha256=wgGd2kjoG0J5eZODZB5xrMjk2ljqQvO-UTYvZyr84F8,102132 | |
numpy/core/tests/test_defchararray.py,sha256=L5EoOBTZVrRU1Vju5IhY8BSUlBOGPzEViKJwyQSlpXo,25481 | |
numpy/core/tests/test_deprecations.py,sha256=BkNax57OUQmuEvqnR4Lv2p2y0UNpXPHhsGAR6a4qjmQ,21435 | |
numpy/core/tests/test_dtype.py,sha256=l5W2TEVIS2qEiTOvRfegBcC8l2HPr4U7up8xNPT30_s,43702 | |
numpy/core/tests/test_einsum.py,sha256=bg9t1Hu_z0G8xGWqJuIOqS21QMR76xdQl8xQhlejXPc,44090 | |
numpy/core/tests/test_errstate.py,sha256=5vN5Xiv9cILQ0j62P7DrZPvTAiAddRY4EfoCUGQqgMk,1335 | |
numpy/core/tests/test_extint128.py,sha256=-0zEInkai1qRhXI0bdHCguU_meD3s6Td4vUIBwirYQI,5709 | |
numpy/core/tests/test_function_base.py,sha256=pLUQhUHqda7uzs05gp390gl-bDS3qALLLQXc7XrWezs,13165 | |
numpy/core/tests/test_getlimits.py,sha256=2fBK7Slo67kP6bThcN9bOKmeX9gGPQVUE17jGVydoXk,4427 | |
numpy/core/tests/test_half.py,sha256=Hnlj7T4-kAKT-2gPa7-vNrbOOJ6qfZoaq0jW4CbCtRs,22300 | |
numpy/core/tests/test_indexerrors.py,sha256=0Ku3Sy5jcaE3D2KsyDrFTvgQzMv2dyWja3hc4t5-n_k,4857 | |
numpy/core/tests/test_indexing.py,sha256=i0P9N0vV6RKU_n-sT8whMeG-6ydRVKAREg-b3veKKXM,50602 | |
numpy/core/tests/test_item_selection.py,sha256=pMjd_8v5YC4WVjLLIrhmisPOG_DMw4j3YGEZ-UY1kOA,3599 | |
numpy/core/tests/test_longdouble.py,sha256=rd-YvOKgY8W0PMEtLHWm4er8dXgoC1STi6QuUGPWiHs,7300 | |
numpy/core/tests/test_machar.py,sha256=FrKeGhC7j-z9tApS_uI1E0DUkzieKIdUHMQPfCSM0t8,1141 | |
numpy/core/tests/test_mem_overlap.py,sha256=AyBz4pm7HhTDdlW2pq9FR1AO0E5QAYdKpBoWbOdSrco,29505 | |
numpy/core/tests/test_memmap.py,sha256=mYreq9HqWDz5Z9r6mmdvz4iB6VnUR61ytVCzzd3mhc0,7316 | |
numpy/core/tests/test_multiarray.py,sha256=Tsw10APOKlJLdNr5HHdmE1svKew3UMA4s5u9e7HcVOk,305972 | |
numpy/core/tests/test_nditer.py,sha256=e2vzCwKhLjcjgE4zPv7YgFpCE78NWdDJpL__fCR-prU,112097 | |
numpy/core/tests/test_numeric.py,sha256=LIe8Vlh7uEGuDb7YpndSevhWJKXSno2cJB9hOdOnuDo,103017 | |
numpy/core/tests/test_numerictypes.py,sha256=5NMUrwVqBH_qUFk-62QhUxMwg7FPPdm9hcPdftE_4zo,18526 | |
numpy/core/tests/test_overrides.py,sha256=qRa8AOOBnejS6ZfW7vP3g-jioFdIwBBJ3hYrPZXy7ks,13196 | |
numpy/core/tests/test_print.py,sha256=Q53dqbjQQIlCzRp_1ZY0A-ptP7FlbBZVPeMeMLX0cVg,6876 | |
numpy/core/tests/test_records.py,sha256=jFWlwrYqBGKSdmtq55Qqrg8dg-3wvcagr9KRXCC2wMA,17734 | |
numpy/core/tests/test_regression.py,sha256=ocoDIIlhdFQ8OZDOHu99OAbDqml0wfDq7Q9hXJirRUw,87168 | |
numpy/core/tests/test_scalar_ctors.py,sha256=kjyYllJHyhMQGT49Xbjjc2tuFHXcQIM-PAZExMWczq8,2294 | |
numpy/core/tests/test_scalarbuffer.py,sha256=0U9U95ogctbQb9ggbmgiQ2UmzXEpO6mbXnn4ciuAO50,3561 | |
numpy/core/tests/test_scalarinherit.py,sha256=iP5lLn-z1vtongCvDt-JBnHwNWNREx4ovu12Iy-mGiA,1838 | |
numpy/core/tests/test_scalarmath.py,sha256=wyWKt80diGTE3AtoNiJDALZbQnCw8f-iu2czVWkUFKs,27255 | |
numpy/core/tests/test_scalarprint.py,sha256=AissnDOK_noK09FFQp-oioDk3WjgKkTuGf1-vUWDfFg,15469 | |
numpy/core/tests/test_shape_base.py,sha256=8NAbKisPKVc3z9CdPr_oCVRDWjjvui_sAGLCXweTkR4,24351 | |
numpy/core/tests/test_ufunc.py,sha256=tkfbCdwV_tDSLcZl79CsTev2cjBBP-o3bTwrOYeVjEo,77403 | |
numpy/core/tests/test_umath.py,sha256=AOt3f7EnCM8RZJ8enlkgNQ6IT8SkrYx-a98g5Lq46cM,108290 | |
numpy/core/tests/test_umath_complex.py,sha256=QJHRggzmZw0eFoqyc83I8mdcU5ayPLI9kAmj3MUPEF8,19323 | |
numpy/core/tests/test_unicode.py,sha256=PvWt5NLjgwulCgXakHEKMJ2pSpTLbUWgz9dZExEcSJ8,13656 | |
numpy/core/umath.py,sha256=rdEijDCvJClpWvMFCUVRUlrAQQ_lq4jeNJla-hTfpFU,1919 | |
numpy/core/umath_tests.py,sha256=Sr6VQTbH-sOMlXy-tg1-Unht7MKaaV4wtAYR6mQYNbU,455 | |
numpy/ctypeslib.py,sha256=4hiXYBIoMqqqzQ2a_DM3Kw5qTuS0--uyudxoHWgsv6s,17258 | |
numpy/distutils/__config__.py,sha256=WbvPieICxdWCXHqye7isTm70QGBWRZZkLv1KA9wJ-1U,1554 | |
numpy/distutils/__init__.py,sha256=b93HZiRpHfSC9E-GPiXk6PWDwQ3STJ4rlzvx6PhHH1k,1092 | |
numpy/distutils/__pycache__/__config__.cpython-37.pyc,, | |
numpy/distutils/__pycache__/__init__.cpython-37.pyc,, | |
numpy/distutils/__pycache__/__version__.cpython-37.pyc,, | |
numpy/distutils/__pycache__/_shell_utils.cpython-37.pyc,, | |
numpy/distutils/__pycache__/ccompiler.cpython-37.pyc,, | |
numpy/distutils/__pycache__/compat.cpython-37.pyc,, | |
numpy/distutils/__pycache__/conv_template.cpython-37.pyc,, | |
numpy/distutils/__pycache__/core.cpython-37.pyc,, | |
numpy/distutils/__pycache__/cpuinfo.cpython-37.pyc,, | |
numpy/distutils/__pycache__/exec_command.cpython-37.pyc,, | |
numpy/distutils/__pycache__/extension.cpython-37.pyc,, | |
numpy/distutils/__pycache__/from_template.cpython-37.pyc,, | |
numpy/distutils/__pycache__/info.cpython-37.pyc,, | |
numpy/distutils/__pycache__/intelccompiler.cpython-37.pyc,, | |
numpy/distutils/__pycache__/lib2def.cpython-37.pyc,, | |
numpy/distutils/__pycache__/line_endings.cpython-37.pyc,, | |
numpy/distutils/__pycache__/log.cpython-37.pyc,, | |
numpy/distutils/__pycache__/mingw32ccompiler.cpython-37.pyc,, | |
numpy/distutils/__pycache__/misc_util.cpython-37.pyc,, | |
numpy/distutils/__pycache__/msvc9compiler.cpython-37.pyc,, | |
numpy/distutils/__pycache__/msvccompiler.cpython-37.pyc,, | |
numpy/distutils/__pycache__/npy_pkg_config.cpython-37.pyc,, | |
numpy/distutils/__pycache__/numpy_distribution.cpython-37.pyc,, | |
numpy/distutils/__pycache__/pathccompiler.cpython-37.pyc,, | |
numpy/distutils/__pycache__/setup.cpython-37.pyc,, | |
numpy/distutils/__pycache__/system_info.cpython-37.pyc,, | |
numpy/distutils/__pycache__/unixccompiler.cpython-37.pyc,, | |
numpy/distutils/__version__.py,sha256=SSRZKvGfvg_GpYbXWtI5gaTK0NGW9nBBCyNghaaXBh8,151 | |
numpy/distutils/_shell_utils.py,sha256=zKjy56kw6erYPK71h-afpX9HYn2ZeQHSMpvvYiVwOu0,2603 | |
numpy/distutils/ccompiler.py,sha256=e9yDu-4BNfcrT_RHXJk-a91qWwxrQOrIVXbdj_X22D8,27417 | |
numpy/distutils/command/__init__.py,sha256=l5r9aYwIEq1D-JJc8WFUxABk6Ip28FpRK_ok7wSLRZE,1098 | |
numpy/distutils/command/__pycache__/__init__.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/autodist.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/bdist_rpm.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/build.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/build_clib.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/build_ext.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/build_py.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/build_scripts.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/build_src.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/config.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/config_compiler.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/develop.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/egg_info.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/install.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/install_clib.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/install_data.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/install_headers.cpython-37.pyc,, | |
numpy/distutils/command/__pycache__/sdist.cpython-37.pyc,, | |
numpy/distutils/command/autodist.py,sha256=1oytviCdsUjm3YxLLVePvWEqxyz0eDRRqvfokDm2sXQ,2048 | |
numpy/distutils/command/bdist_rpm.py,sha256=rhhIyFzkd5NGi6lZaft44EBPZB3zZFRDc75klJYnbw8,775 | |
numpy/distutils/command/build.py,sha256=6Q9bDubq5WfwR1K5woDFXed692szD0Rq-5Ckv2xpoK4,1618 | |
numpy/distutils/command/build_clib.py,sha256=_Y3upI_slekgMk2CI2vplOXj5p1_aEHa-F9_nJ0HOgg,13389 | |
numpy/distutils/command/build_ext.py,sha256=QXS_Z1vrpvTrTjUb6m9n1upo9zljo4o5yd27Su5LJfQ,25949 | |
numpy/distutils/command/build_py.py,sha256=7TBGLz0va0PW6sEX-aUjsXdzvhuSbJGgIrMim1JTwu4,1210 | |
numpy/distutils/command/build_scripts.py,sha256=ze19jHBhC3JggKLbL9wgs9I3mG7ls-V2NbykvleNwgQ,1731 | |
numpy/distutils/command/build_src.py,sha256=aUl2Zf8WktMCr8f1u6YoAlblNSVGjv-lz_7yTfOKC80,30908 | |
numpy/distutils/command/config.py,sha256=DxvvFqUtKPCXzHCfC2DOQcMBm67YkRdXyWeBa3bYFQE,19094 | |
numpy/distutils/command/config_compiler.py,sha256=SKJTEk_Y_Da-dVYOHAdf4c3yXxjlE1dsr-hJxY0m0PU,4435 | |
numpy/distutils/command/develop.py,sha256=nYM5yjhKtGKh_3wZwrvEQBLYHKldz64aU-0iSycSkXA,641 | |
numpy/distutils/command/egg_info.py,sha256=pdiCFQiQuIpf_xmVk9Njl7iowY9CxGn9KRbU-A9eBfg,987 | |
numpy/distutils/command/install.py,sha256=yBj3NM6sctAbG3QR5Y4qPs7YjxpW7EoKeMPEkNWf2qU,3127 | |
numpy/distutils/command/install_clib.py,sha256=6tUO3FbF_b_e_Ly31qod9rB4yHA2z8m2mh6qry1a4yk,1315 | |
numpy/distutils/command/install_data.py,sha256=7iWTw93ty2sBPwHwg_EEhgQhZSZe6SsKdfTS9RbUR9A,914 | |
numpy/distutils/command/install_headers.py,sha256=NbZwt-Joo80z_1TfxA-mIWXm2L9Mmh4ZLht7HAuveoo,985 | |
numpy/distutils/command/sdist.py,sha256=tHmlb0RzD8x04dswPXEua9H_b6GuHWY1V3hYkwJDKvA,799 | |
numpy/distutils/compat.py,sha256=xzkW8JgJgGTmye34QCYTIkLfsXBvmPu4tvgCwXNdiU0,218 | |
numpy/distutils/conv_template.py,sha256=5VAAMSjzrSe_mCxzMHVW6GQZ0ATqQr5N9EFYhuTeQvg,9702 | |
numpy/distutils/core.py,sha256=9GNNyWDTCqfnD7Jp2tzp9vOBVyeJmF8lsgv_xdlt59g,8230 | |
numpy/distutils/cpuinfo.py,sha256=AHJuQeg78_P5EReO1kLd-MAohvB-GfV8zuRh7F8hltI,23015 | |
numpy/distutils/exec_command.py,sha256=laMoxZ17D5I0cnkUce94wpfgTl1j3xWHn_A_jisdcu8,10795 | |
numpy/distutils/extension.py,sha256=q_NjgW-sOoeEBbeSEJwFh411mTgsF7BzGYso61Wf0qg,2967 | |
numpy/distutils/fcompiler/__init__.py,sha256=v3zk6W_xQXCI5H00aVBYDi5IgSug7zrGQKxpl74Qs_k,40154 | |
numpy/distutils/fcompiler/__pycache__/__init__.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/absoft.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/compaq.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/environment.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/g95.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/gnu.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/hpux.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/ibm.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/intel.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/lahey.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/mips.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/nag.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/none.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/pathf95.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/pg.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/sun.cpython-37.pyc,, | |
numpy/distutils/fcompiler/__pycache__/vast.cpython-37.pyc,, | |
numpy/distutils/fcompiler/absoft.py,sha256=AKbj5uGr8dpGDLzRIJbdUnXXAtF_5k4JqnqwTWvy-tQ,5565 | |
numpy/distutils/fcompiler/compaq.py,sha256=djulalEdV6b58ofcEw14Uoq5-aNgblJMqLIzNwmJ2SE,4109 | |
numpy/distutils/fcompiler/environment.py,sha256=-S6TM5xc_prXB9ks7BFBaUuW0lnYK2Je9mmcwQTUsi8,3457 | |
numpy/distutils/fcompiler/g95.py,sha256=K68RRAvOvyKoh-jsD9J4ZDsHltrGnJ_AllxULhy6iOE,1396 | |
numpy/distutils/fcompiler/gnu.py,sha256=oHipJDyfisSK9_Kdkv1Av8hDHY3UbLALgWfBO7cXkPA,20804 | |
numpy/distutils/fcompiler/hpux.py,sha256=xpNfy7vCKWPnJ5M3JPnjMAewKBAfKN5hFX3hvEL2zaM,1419 | |
numpy/distutils/fcompiler/ibm.py,sha256=66gCrBbbVvqu_LvdX7a9MA15NqNIY2DXPOPUFrU1zRc,3595 | |
numpy/distutils/fcompiler/intel.py,sha256=WlsBtvZnLpFke7oTpMCDYFlccNSUWWkB2p422iwQURU,6861 | |
numpy/distutils/fcompiler/lahey.py,sha256=pJ0-xgtYwyYXgt8JlN8PFeYYEWB3vOmFkNx6UUFXzuM,1393 | |
numpy/distutils/fcompiler/mips.py,sha256=IxLojWR1oi0VW93PxPpHQXRwZcYffD1dunllQW2w19A,1780 | |
numpy/distutils/fcompiler/nag.py,sha256=eiTvBopdCgVh5-HDTryVbRrYvf4r_Sqse1mruTt5Blo,2608 | |
numpy/distutils/fcompiler/none.py,sha256=N6adoFAf8inIQfCDEBzK5cGI3hLIWWpHmQXux8iJDfA,824 | |
numpy/distutils/fcompiler/pathf95.py,sha256=Xf1JMB30PDSoNpA1Y-vKPRBeNO0XfSi0dvVQvvdjfUQ,1127 | |
numpy/distutils/fcompiler/pg.py,sha256=G0uNPfedmbkYWfChg1UbxBKqo25RenzSVJN1BUtRDw0,4232 | |
numpy/distutils/fcompiler/sun.py,sha256=21DQ6Rprr9rEp4pp7Np8kCwOc0Xfqdxa1iX0O-yPJPM,1643 | |
numpy/distutils/fcompiler/vast.py,sha256=LJ21-WIJsiquLtjdDaNsJqblwN5wuM2FZsYl1R40vN8,1733 | |
numpy/distutils/from_template.py,sha256=671F-qa8R1gbJUe1tCZFjw64K7J98ZnfeSV1HvWbZas,7979 | |
numpy/distutils/info.py,sha256=lNxUhbJnzWjA47P2I_9NW-tuVrjGzL62jHDlQJ3pp6E,157 | |
numpy/distutils/intelccompiler.py,sha256=1qzr6PMxi0UkR0NUY3rt3gqww9GwJ-Gbe91yxQKlieU,4291 | |
numpy/distutils/lib2def.py,sha256=RWD0EpuUHoxIuc9VyyDCH2d73jgsdGG2PBKVisanlVU,3502 | |
numpy/distutils/line_endings.py,sha256=aBO2e754iin4Ylo7FNwlBg6nPudXMnQZYdhVhf-E3aA,2053 | |
numpy/distutils/log.py,sha256=yHzdtNdTg6YtvO50Hu-Le5WJ7Typ2TvaCYabelTaUO0,2745 | |
numpy/distutils/mingw/gfortran_vs2003_hack.c,sha256=cbsN3Lk9Hkwzr9c-yOP2xEBg1_ml1X7nwAMDWxGjzc8,77 | |
numpy/distutils/mingw32ccompiler.py,sha256=4fU0Qe_BcIZOFnjwy423AfEHXVepbyayWu89-UxDGGY,25178 | |
numpy/distutils/misc_util.py,sha256=0m7jm8aJm6BzjF9jRcHg-Qu_TMyor7ac7MBWSFZuNgw,82510 | |
numpy/distutils/msvc9compiler.py,sha256=TuPYjPFp3nYQSIG1goNxuOly7o3VMx-H35POMpycB3k,2258 | |
numpy/distutils/msvccompiler.py,sha256=7EUlHbgdKBBJG3AzgE94AQeUFnj0HcD6M7_YPN7vdCs,1994 | |
numpy/distutils/npy_pkg_config.py,sha256=k3lxSOC_InRBSGddbfbvMLRTGqnE-LliNXakwdZ3AH8,13154 | |
numpy/distutils/numpy_distribution.py,sha256=lbnEW1OxWxC_1n2sKd0Q3fC5QnNdFuAkNAlvXF99zIQ,700 | |
numpy/distutils/pathccompiler.py,sha256=FjNouOTL8u4gLMbJW7GdT0RlsD2nXV1_SEBNZj9QdpQ,779 | |
numpy/distutils/setup.py,sha256=q3DcCZNkK_jHsC0imocewd4uCKQWWXjkzd4nkBmkMFI,611 | |
numpy/distutils/system_info.py,sha256=DdvuMuRqTWcy_vpWrBNFT8JEHne54jVot8eemtuvWv0,89635 | |
numpy/distutils/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/distutils/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_exec_command.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_fcompiler.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_fcompiler_gnu.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_fcompiler_intel.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_fcompiler_nagfor.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_from_template.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_misc_util.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_npy_pkg_config.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_shell_utils.cpython-37.pyc,, | |
numpy/distutils/tests/__pycache__/test_system_info.cpython-37.pyc,, | |
numpy/distutils/tests/test_exec_command.py,sha256=SH9RaWmUnSu8uGEjwyoTrzqoVPclTUnM6UTGSKxW8qc,7146 | |
numpy/distutils/tests/test_fcompiler.py,sha256=t26JUHwJxl_m-ALPa5XABgVFrMthBw7gRKN4yYX1hYQ,2678 | |
numpy/distutils/tests/test_fcompiler_gnu.py,sha256=O57uCEHeQIS0XF8GloEas3OlaOfmIHDWEtgYS_q3x48,2218 | |
numpy/distutils/tests/test_fcompiler_intel.py,sha256=fOjd_jv0Od6bZyzFf4YpZMcnFva0OZK7yJV_4Hebb6A,1140 | |
numpy/distutils/tests/test_fcompiler_nagfor.py,sha256=5-Num0A3cN7_NS3BlAgYt174S-OGOWRLL9rXtv-h_fA,1176 | |
numpy/distutils/tests/test_from_template.py,sha256=SDYoe0XUpAayyEQDq7ZhrvEEz7U9upJDLYzhcdoVifc,1103 | |
numpy/distutils/tests/test_misc_util.py,sha256=8LIm12X83HmvgmpvJJ9inaU7FlGt287VwDM-rMKCOv4,3316 | |
numpy/distutils/tests/test_npy_pkg_config.py,sha256=wa0QMQ9JAye87t2gDbFaBHp0HGpNFgwxJrJ30ZrHvNk,2639 | |
numpy/distutils/tests/test_shell_utils.py,sha256=we9P8AvjCQky1NRDP3sXAJnNUek7rDmMR4Ar9cg9iSk,2030 | |
numpy/distutils/tests/test_system_info.py,sha256=Asv6c-N1I2JQHAcBZuObsoBlaaKOVepkhyFAmGp0zow,7730 | |
numpy/distutils/unixccompiler.py,sha256=M7Hn3ANMo8iP-sZtSAebI3RCLp0ViRYxawAbck0hlQM,5177 | |
numpy/doc/__init__.py,sha256=BDpxTM0iw2F4thjBkYqjIXX57F5KfIaH8xMd67N6Jh0,574 | |
numpy/doc/__pycache__/__init__.cpython-37.pyc,, | |
numpy/doc/__pycache__/basics.cpython-37.pyc,, | |
numpy/doc/__pycache__/broadcasting.cpython-37.pyc,, | |
numpy/doc/__pycache__/byteswapping.cpython-37.pyc,, | |
numpy/doc/__pycache__/constants.cpython-37.pyc,, | |
numpy/doc/__pycache__/creation.cpython-37.pyc,, | |
numpy/doc/__pycache__/glossary.cpython-37.pyc,, | |
numpy/doc/__pycache__/indexing.cpython-37.pyc,, | |
numpy/doc/__pycache__/internals.cpython-37.pyc,, | |
numpy/doc/__pycache__/misc.cpython-37.pyc,, | |
numpy/doc/__pycache__/structured_arrays.cpython-37.pyc,, | |
numpy/doc/__pycache__/subclassing.cpython-37.pyc,, | |
numpy/doc/__pycache__/ufuncs.cpython-37.pyc,, | |
numpy/doc/basics.py,sha256=5ygY_jESZpg9TENO0dszCttlq5v5IXvXjL2pWg5Atms,9658 | |
numpy/doc/broadcasting.py,sha256=0uofJxPfkwsaQaTSju8TwiOpsmXSw2F3bzG8CdkKviU,5603 | |
numpy/doc/byteswapping.py,sha256=ivf9jUApDmMijOj1f5BGYkGCRVh4OLa_Wybbcl3A9Zw,5349 | |
numpy/doc/constants.py,sha256=G-xVDfqRId16dKXe1Owy6-tlexIzJUTuir2yu3iQgMc,9290 | |
numpy/doc/creation.py,sha256=6FUALDWgqPWObcW-ZHDQMAnfo42I60rRR9pDpwb4-YE,5496 | |
numpy/doc/glossary.py,sha256=D5Ljv1ZOOIj-O0Peg2E2QaUBV3mZb60bJOI_4gQmFTo,13583 | |
numpy/doc/indexing.py,sha256=qhhsiAeG_7Y2rgziwj515Fsw0wFL4dq1quT-ja3-0zs,15669 | |
numpy/doc/internals.py,sha256=xYp6lv4yyV0ZIo_qCvLCAWxDa0rhu7FNrTmpXY1isO4,9669 | |
numpy/doc/misc.py,sha256=JWJqyiYL2qoSMVAb0QC8w_Pm5l7ZLxx2Z9D5ilgU4Uo,6191 | |
numpy/doc/structured_arrays.py,sha256=Kr2n-4TQjfVVBoOLt3Lv30e6j5et9y8zYXZx3wg_hCc,26108 | |
numpy/doc/subclassing.py,sha256=AqtEltybX__ghj91b73QgXcGpYd8gGlwoO-R7SQDwe8,28561 | |
numpy/doc/ufuncs.py,sha256=vsAkCLEMh7Qa_3x4WbDMY3IQsDCLdOCuB_6P2aEcVLg,5427 | |
numpy/dual.py,sha256=SZ3DLWXQFv1lRKN1TlG487xmexpJFa7faaBdnnexm3E,1865 | |
numpy/f2py/__init__.py,sha256=nHuShe3wj5HQ2Xyb42DEorG3DK63HaXRNUizZgqB83g,3101 | |
numpy/f2py/__main__.py,sha256=mnksAcMyLdK0So_DseQn0zalhnA7LflS7hHvo7QCVjU,134 | |
numpy/f2py/__pycache__/__init__.cpython-37.pyc,, | |
numpy/f2py/__pycache__/__main__.cpython-37.pyc,, | |
numpy/f2py/__pycache__/__version__.cpython-37.pyc,, | |
numpy/f2py/__pycache__/auxfuncs.cpython-37.pyc,, | |
numpy/f2py/__pycache__/capi_maps.cpython-37.pyc,, | |
numpy/f2py/__pycache__/cb_rules.cpython-37.pyc,, | |
numpy/f2py/__pycache__/cfuncs.cpython-37.pyc,, | |
numpy/f2py/__pycache__/common_rules.cpython-37.pyc,, | |
numpy/f2py/__pycache__/crackfortran.cpython-37.pyc,, | |
numpy/f2py/__pycache__/diagnose.cpython-37.pyc,, | |
numpy/f2py/__pycache__/f2py2e.cpython-37.pyc,, | |
numpy/f2py/__pycache__/f2py_testing.cpython-37.pyc,, | |
numpy/f2py/__pycache__/f90mod_rules.cpython-37.pyc,, | |
numpy/f2py/__pycache__/func2subr.cpython-37.pyc,, | |
numpy/f2py/__pycache__/info.cpython-37.pyc,, | |
numpy/f2py/__pycache__/rules.cpython-37.pyc,, | |
numpy/f2py/__pycache__/setup.cpython-37.pyc,, | |
numpy/f2py/__pycache__/use_rules.cpython-37.pyc,, | |
numpy/f2py/__version__.py,sha256=rEHB9hlWmpryhNa0EmMnlAlDCGI4GXILC9CZUEV3Wew,254 | |
numpy/f2py/auxfuncs.py,sha256=mDvaBo3Y8tYpXLZfq8DCv6UZ3-2JqWc_iNBZRxGesb0,21826 | |
numpy/f2py/capi_maps.py,sha256=FgizIHORFdaX5eIVZEQSlC9kVAidh0jfKoJYMK4Z86E,31416 | |
numpy/f2py/cb_rules.py,sha256=un1xn8goj4jFL8FzxRwWSAzpr0CVcvwObVUKdIGJyaA,22946 | |
numpy/f2py/cfuncs.py,sha256=NRxuXAaryWHOFh5205BvvDjajituolH6FvtsumCltvI,45114 | |
numpy/f2py/common_rules.py,sha256=DOCOo4brpFaKNll8hOjG_vCYuOfKyTBYMItaDC_osEc,4981 | |
numpy/f2py/crackfortran.py,sha256=onGQnPhpE8DyP4L4XinwHbdPwhXavetgPbKS3SG-REQ,128945 | |
numpy/f2py/diagnose.py,sha256=VNuNTGnQaXn9Fn2jlueYt47634CvLQSaAWJWy_Nxwnw,5295 | |
numpy/f2py/f2py2e.py,sha256=w9zSJG3tnCMyOrgZJqhZiEUoAnnI7oU61kYZzvsLdfo,23983 | |
numpy/f2py/f2py_testing.py,sha256=8rkBjUsNhBavpoBgi_bqDS8H8tBdd5BR8hrE6ENsIAo,1523 | |
numpy/f2py/f90mod_rules.py,sha256=YFK4MPkGHBxshAInbcapnumX3qlu0h6ya6GQpS8zWLk,9850 | |
numpy/f2py/func2subr.py,sha256=Oy12rqUa1vcXvzR6g8yx8jSYDwfKt5Jqiebf1QaWX1o,9224 | |
numpy/f2py/info.py,sha256=Mk1-neqpqYQ6njoVUCKHmMkyFkAqYeWH4cGZr8NfKiI,136 | |
numpy/f2py/rules.py,sha256=WijCZZXIQSbV5wRHGGgjUvQlh4gQ9tKaqbSYTjvOyRk,58526 | |
numpy/f2py/setup.py,sha256=qNCIqRPcpEUhJBjihtEXEe4Iil4XDYVRAI_sZm7xZhM,2444 | |
numpy/f2py/src/fortranobject.c,sha256=HauRK7LFVV8f_u2tsmqJm_mZ0lvC2slXKbLePNiubdI,35993 | |
numpy/f2py/src/fortranobject.h,sha256=ltMxueNeETQtEYSA_E7bpRtF8Jj1xuOBS-YNhjBMfOw,5227 | |
numpy/f2py/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/f2py/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_array_from_pyobj.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_assumed_shape.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_block_docstring.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_callback.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_common.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_compile_function.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_kind.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_mixed.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_parameter.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_quoted_character.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_return_character.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_return_complex.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_return_integer.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_return_logical.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_return_real.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_semicolon_split.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_size.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/test_string.cpython-37.pyc,, | |
numpy/f2py/tests/__pycache__/util.cpython-37.pyc,, | |
numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c,sha256=L_Z0GMyfYIoRiS0FIBlzy_nxlFoNbkLAatObVmKeGsk,9025 | |
numpy/f2py/tests/src/assumed_shape/.f2py_f2cmap,sha256=But9r9m4iL7EGq_haMW8IiQ4VivH0TgUozxX4pPvdpE,29 | |
numpy/f2py/tests/src/assumed_shape/foo_free.f90,sha256=oBwbGSlbr9MkFyhVO2aldjc01dr9GHrMrSiRQek8U64,460 | |
numpy/f2py/tests/src/assumed_shape/foo_mod.f90,sha256=rfzw3QdI-eaDSl-hslCgGpd5tHftJOVhXvb21Y9Gf6M,499 | |
numpy/f2py/tests/src/assumed_shape/foo_use.f90,sha256=rmT9k4jP9Ru1PLcGqepw9Jc6P9XNXM0axY7o4hi9lUw,269 | |
numpy/f2py/tests/src/assumed_shape/precision.f90,sha256=r08JeTVmTTExA-hYZ6HzaxVwBn1GMbPAuuwBhBDtJUk,130 | |
numpy/f2py/tests/src/common/block.f,sha256=GQ0Pd-VMX3H3a-__f2SuosSdwNXHpBqoGnQDjf8aG9g,224 | |
numpy/f2py/tests/src/kind/foo.f90,sha256=zIHpw1KdkWbTzbXb73hPbCg4N2Htj3XL8DIwM7seXpo,347 | |
numpy/f2py/tests/src/mixed/foo.f,sha256=90zmbSHloY1XQYcPb8B5d9bv9mCZx8Z8AMTtgDwJDz8,85 | |
numpy/f2py/tests/src/mixed/foo_fixed.f90,sha256=pxKuPzxF3Kn5khyFq9ayCsQiolxB3SaNtcWaK5j6Rv4,179 | |
numpy/f2py/tests/src/mixed/foo_free.f90,sha256=fIQ71wrBc00JUAVUj_r3QF9SdeNniBiMw6Ly7CGgPWU,139 | |
numpy/f2py/tests/src/parameter/constant_both.f90,sha256=-bBf2eqHb-uFxgo6Q7iAtVUUQzrGFqzhHDNaxwSICfQ,1939 | |
numpy/f2py/tests/src/parameter/constant_compound.f90,sha256=re7pfzcuaquiOia53UT7qNNrTYu2euGKOF4IhoLmT6g,469 | |
numpy/f2py/tests/src/parameter/constant_integer.f90,sha256=nEmMLitKoSAG7gBBEQLWumogN-KS3DBZOAZJWcSDnFw,612 | |
numpy/f2py/tests/src/parameter/constant_non_compound.f90,sha256=IcxESVLKJUZ1k9uYKoSb8Hfm9-O_4rVnlkiUU2diy8Q,609 | |
numpy/f2py/tests/src/parameter/constant_real.f90,sha256=quNbDsM1Ts2rN4WtPO67S9Xi_8l2cXabWRO00CPQSSQ,610 | |
numpy/f2py/tests/src/regression/inout.f90,sha256=CpHpgMrf0bqA1W3Ozo3vInDz0RP904S7LkpdAH6ODck,277 | |
numpy/f2py/tests/src/size/foo.f90,sha256=IlFAQazwBRr3zyT7v36-tV0-fXtB1d7WFp6S1JVMstg,815 | |
numpy/f2py/tests/src/string/char.f90,sha256=ihr_BH9lY7eXcQpHHDQhFoKcbu7VMOX5QP2Tlr7xlaM,618 | |
numpy/f2py/tests/test_array_from_pyobj.py,sha256=gLSX9JuF_8NNboUQRzRF3IYC7pWJ06Mw8m6sy2wQvCQ,22083 | |
numpy/f2py/tests/test_assumed_shape.py,sha256=QhSsSJ4gzrgACSO-dyasMPhJSfa7PzDAxAd9yN0M6zI,949 | |
numpy/f2py/tests/test_block_docstring.py,sha256=lbRnFEGeseQ-WP9grC8Y4J6xKu-Nwgp0_fSVuYn19Hw,568 | |
numpy/f2py/tests/test_callback.py,sha256=d5366rZPJMowFLd7AiwUMRdhKTe8BMeakcp1plQNBJI,3994 | |
numpy/f2py/tests/test_common.py,sha256=tLmi1JrfwFdTcBlUInxTn04f6Hf8eSB00sWRoKJvHrM,868 | |
numpy/f2py/tests/test_compile_function.py,sha256=gQG9PeMaXCcjlc233cEJb5eMoNFfvzAdkHf8qb7Css0,4256 | |
numpy/f2py/tests/test_kind.py,sha256=G6u6EWjVHenmPju3RQCa9bSeCJGDul3VyXFgp2_Yc7w,1078 | |
numpy/f2py/tests/test_mixed.py,sha256=nUgGcvcbyd_NB6NuwFCIH8ze5eeMztC-fE5lCHXc9Bg,931 | |
numpy/f2py/tests/test_parameter.py,sha256=_wX-gM-XGxA_mfDBM8np9NLjYiCF6LJbglwKf09JbdM,3976 | |
numpy/f2py/tests/test_quoted_character.py,sha256=Q0oDtl3STQqzSap5VYPpfzJJ72NtQchm6Vg-bwuoBl4,1029 | |
numpy/f2py/tests/test_regression.py,sha256=lPQUKx5RrVtGhyIvIcWS5GgA_CgQypabuuna-Q1z3hs,764 | |
numpy/f2py/tests/test_return_character.py,sha256=4a_JeEtY1AkT-Q-01iaZyqWLDGmZGW17d88JNFZoXTc,3864 | |
numpy/f2py/tests/test_return_complex.py,sha256=FO4oflCncNIft36R3Fe9uiyDtryiB-_d2PLMH3x64I4,4779 | |
numpy/f2py/tests/test_return_integer.py,sha256=cyyAbyHUepwYeyXlgIa2FD4B7A2dHnpp2jwx8ZDQiZQ,4749 | |
numpy/f2py/tests/test_return_logical.py,sha256=u3dazkOU1oz9kZKYXBd2GWaEr02MYfjGdLrb7kT8MiY,4974 | |
numpy/f2py/tests/test_return_real.py,sha256=QVRKzeO44ZuIlV8EycmtXaHT_i0rnX2bi3rOh7py4GM,5619 | |
numpy/f2py/tests/test_semicolon_split.py,sha256=v7YFx-oTbXUZZ4qjdblCYeVVtkD1YYa4CbuEf2LTOLs,1580 | |
numpy/f2py/tests/test_size.py,sha256=GV7S4tl8FhK60T_EpX86yVQo_bMVTdyOTB8fGVIQ24o,1352 | |
numpy/f2py/tests/test_string.py,sha256=LTQC9AFVsUAuJVFuH3Wltl-NfFIilVl0KvBNnEgdnmo,676 | |
numpy/f2py/tests/util.py,sha256=u06FJvpEGZM6P9WaZWkfTxR5TSdjCm7eXku45MO5R_o,9436 | |
numpy/f2py/use_rules.py,sha256=L6nTSJnxougQ2PVAzR7s-1spidcfDp9tzLIFAJe3gUI,3652 | |
numpy/fft/__init__.py,sha256=KGWBTdw_6ckUIfniIdikkgBwDy8riaGID8x4cdOf_Ds,252 | |
numpy/fft/__pycache__/__init__.cpython-37.pyc,, | |
numpy/fft/__pycache__/fftpack.cpython-37.pyc,, | |
numpy/fft/__pycache__/helper.cpython-37.pyc,, | |
numpy/fft/__pycache__/info.cpython-37.pyc,, | |
numpy/fft/__pycache__/setup.cpython-37.pyc,, | |
numpy/fft/fftpack.py,sha256=lsjJM82Zdhh9t68dQrzO6iPihdgE_QK3GtSSfpYEcxI,47089 | |
numpy/fft/fftpack_lite.cpython-37m-darwin.so,sha256=y9heiRn60lsETotgbwxq05GwlagBml-U-TOcTCwQ9xE,62096 | |
numpy/fft/helper.py,sha256=6Q_SGRP2hukwWRJjiL5OidSfdJIkvZo7AePSqYMLgJI,9710 | |
numpy/fft/info.py,sha256=831NwiCI33uiLx21G7kFCwzZuFxDfmU8n-2LG4FJm2w,7235 | |
numpy/fft/setup.py,sha256=VR1boee7xZd3lOQVRJ3083I0kYqq_-RCo6CK6UK8Lso,550 | |
numpy/fft/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/fft/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/fft/tests/__pycache__/test_fftpack.cpython-37.pyc,, | |
numpy/fft/tests/__pycache__/test_helper.cpython-37.pyc,, | |
numpy/fft/tests/test_fftpack.py,sha256=Ub9oaoyEILrev0kZoEh0hTaYlf-orOUtfKIsNbANwoU,7019 | |
numpy/fft/tests/test_helper.py,sha256=q8y7x0IaXRW2ai0dCEmhW48MRkQ6k8bSegti5gCE6ac,9774 | |
numpy/lib/__init__.py,sha256=NgwUuJaACbQF-qd9VLy6CEPwcZHk1GEdWPW0UhZPQl8,1358 | |
numpy/lib/__pycache__/__init__.cpython-37.pyc,, | |
numpy/lib/__pycache__/_datasource.cpython-37.pyc,, | |
numpy/lib/__pycache__/_iotools.cpython-37.pyc,, | |
numpy/lib/__pycache__/_version.cpython-37.pyc,, | |
numpy/lib/__pycache__/arraypad.cpython-37.pyc,, | |
numpy/lib/__pycache__/arraysetops.cpython-37.pyc,, | |
numpy/lib/__pycache__/arrayterator.cpython-37.pyc,, | |
numpy/lib/__pycache__/financial.cpython-37.pyc,, | |
numpy/lib/__pycache__/format.cpython-37.pyc,, | |
numpy/lib/__pycache__/function_base.cpython-37.pyc,, | |
numpy/lib/__pycache__/histograms.cpython-37.pyc,, | |
numpy/lib/__pycache__/index_tricks.cpython-37.pyc,, | |
numpy/lib/__pycache__/info.cpython-37.pyc,, | |
numpy/lib/__pycache__/mixins.cpython-37.pyc,, | |
numpy/lib/__pycache__/nanfunctions.cpython-37.pyc,, | |
numpy/lib/__pycache__/npyio.cpython-37.pyc,, | |
numpy/lib/__pycache__/polynomial.cpython-37.pyc,, | |
numpy/lib/__pycache__/recfunctions.cpython-37.pyc,, | |
numpy/lib/__pycache__/scimath.cpython-37.pyc,, | |
numpy/lib/__pycache__/setup.cpython-37.pyc,, | |
numpy/lib/__pycache__/shape_base.cpython-37.pyc,, | |
numpy/lib/__pycache__/stride_tricks.cpython-37.pyc,, | |
numpy/lib/__pycache__/twodim_base.cpython-37.pyc,, | |
numpy/lib/__pycache__/type_check.cpython-37.pyc,, | |
numpy/lib/__pycache__/ufunclike.cpython-37.pyc,, | |
numpy/lib/__pycache__/user_array.cpython-37.pyc,, | |
numpy/lib/__pycache__/utils.cpython-37.pyc,, | |
numpy/lib/_datasource.py,sha256=qKNDjPS0q8CJ4HD9I6IPgJEmr2MN2m-yinyYOJBqZ1w,25523 | |
numpy/lib/_iotools.py,sha256=JERUFZ1Xja8mq9T7AQdtO2RwoblLmmDcfhnL9iT0xOE,32683 | |
numpy/lib/_version.py,sha256=8ouI5DbgX1RuNbPhVX_Fn14_v7ZiwwQ1grQPX3_bXBs,4866 | |
numpy/lib/arraypad.py,sha256=dAhAzdE0AXL26VMyOj0XIzh_XqcjWv6SDSDaAGSFxyQ,45097 | |
numpy/lib/arraysetops.py,sha256=gpQmyj62NhyGms41X4J0L2qF5apzPqSCRsSN1Os8AVo,24175 | |
numpy/lib/arrayterator.py,sha256=niYNI2qhySUT5j_3gl07pPbkmY4GJqdwIGaMZyPil84,7191 | |
numpy/lib/financial.py,sha256=fYFVxvZmJJUxyWarmOpNz8rhFfs_Gv3vx8iB9L2LVwc,25985 | |
numpy/lib/format.py,sha256=S09TcSMpynUbvxHmFIBogSIqoPH4SAfmfg2k8MG6A_E,31068 | |
numpy/lib/function_base.py,sha256=oCjNZGSGegRLTaEMmOJwYoTTWR1eZuVEO9HJLCk26Gc,156000 | |
numpy/lib/histograms.py,sha256=0FviiX5FbM5sxHjbHHE54LQLday_AoX4LCF_N8_x4bk,39375 | |
numpy/lib/index_tricks.py,sha256=OymHt6Mzi16XZBWSvLdnwGr9ywaPker-3Dq2vQHXN7I,29087 | |
numpy/lib/info.py,sha256=oVczF_pC_CMZC2h2adb2HHza_1qF3qI065j4RBrd-I4,6616 | |
numpy/lib/mixins.py,sha256=GeOiq01E663Z_06xQfIUYKpl2JPkswqhaQEernjnO_Q,7268 | |
numpy/lib/nanfunctions.py,sha256=7LsFmh0meOwBJpzoNnR1V8e2nAJv61A8Ib9EWmYFRLg,57741 | |
numpy/lib/npyio.py,sha256=QLp2uO4Q6ShXOWeX3WmTS9W5wcQTE4qczviKi1NJF6Y,84853 | |
numpy/lib/polynomial.py,sha256=fFJNI4t53P5la5GLV720bsBLhQd_4Qzs0qwn-FipHIg,40469 | |
numpy/lib/recfunctions.py,sha256=33GtzQ9CFP6CvhvWf2hVGaHFYJKfTHIJo51zgSUVDMc,54864 | |
numpy/lib/scimath.py,sha256=axf_K8DphkbuKZXA6K2A5fbyIu1BdKQ6P74iFW9YpTc,14698 | |
numpy/lib/setup.py,sha256=os9eV9wSzwTQlfxeoQ33gYQ4wOj1_6EvqcROc8PyGbE,379 | |
numpy/lib/shape_base.py,sha256=T1RaVDs9X7GUBclWBf2SZkLZhY3xbpctMv8rtt0gdM0,37967 | |
numpy/lib/stride_tricks.py,sha256=P7koCHdGLg31K2aQPIPcAmqLKKsnY-HZw_eS3hqUpZA,9123 | |
numpy/lib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/lib/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test__datasource.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test__iotools.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test__version.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_arraypad.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_arraysetops.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_arrayterator.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_financial.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_format.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_function_base.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_histograms.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_index_tricks.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_io.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_mixins.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_nanfunctions.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_packbits.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_polynomial.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_recfunctions.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_shape_base.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_stride_tricks.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_twodim_base.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_type_check.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_ufunclike.cpython-37.pyc,, | |
numpy/lib/tests/__pycache__/test_utils.cpython-37.pyc,, | |
numpy/lib/tests/data/py2-objarr.npy,sha256=F4cyUC-_TB9QSFLAo2c7c44rC6NUYIgrfGx9PqWPSKk,258 | |
numpy/lib/tests/data/py2-objarr.npz,sha256=xo13HBT0FbFZ2qvZz0LWGDb3SuQASSaXh7rKfVcJjx4,366 | |
numpy/lib/tests/data/py3-objarr.npy,sha256=pTTVh8ezp-lwAK3fkgvdKU8Arp5NMKznVD-M6Ex_uA0,341 | |
numpy/lib/tests/data/py3-objarr.npz,sha256=qQR0gS57e9ta16d_vCQjaaKM74gPdlwCPkp55P-qrdw,449 | |
numpy/lib/tests/data/python3.npy,sha256=X0ad3hAaLGXig9LtSHAo-BgOvLlFfPYMnZuVIxRmj-0,96 | |
numpy/lib/tests/data/win64python2.npy,sha256=agOcgHVYFJrV-nrRJDbGnUnF4ZTPYXuSeF-Mtg7GMpc,96 | |
numpy/lib/tests/test__datasource.py,sha256=5LwfmvIysaLHlCYkmsj46S7YRF2zRG4BmKSjjJr6fdE,11463 | |
numpy/lib/tests/test__iotools.py,sha256=7ocNK0I-XKtiJLxnq2Fq_Yszi-e-70Km7crI28Jyqww,13714 | |
numpy/lib/tests/test__version.py,sha256=eCeeSqb8G3WNtCgkM3XGz9Zszyye-KFDlNQ7EY2J_UY,2055 | |
numpy/lib/tests/test_arraypad.py,sha256=ZIcLFH5ykizK2XT6WN1f9CyR48tqHQnx_b2Ojurs7eo,49320 | |
numpy/lib/tests/test_arraysetops.py,sha256=WsH_vJI5guzG9Mix0y-kwVceTZ8e-aKJxEaf_RNTcbE,22157 | |
numpy/lib/tests/test_arrayterator.py,sha256=run7iWWbvoHGGsDv_uB6G8QENFzOCSgUIxAMVp7ZMu4,1357 | |
numpy/lib/tests/test_financial.py,sha256=8cCdlpNixwl1Wrgblemxi3ndTxX_Sq2yr-30lSURnq0,17098 | |
numpy/lib/tests/test_format.py,sha256=rHbWPYqG601X2z-jky6euq7_LdLAS8aO_ASaEMth9IY,37266 | |
numpy/lib/tests/test_function_base.py,sha256=3qeRuK68MKs7Xkxe61M2ozQkCiSnTDpGFeJbrp807Uc,117817 | |
numpy/lib/tests/test_histograms.py,sha256=0EhLLKOADkyQZTucCcXfYyAC5X1n5_vglMid0VJO-DE,33415 | |
numpy/lib/tests/test_index_tricks.py,sha256=Ql-AUXqIgQSdLjbeRI7GmaiVVtdgH0YoR0m5DGS_V-g,16322 | |
numpy/lib/tests/test_io.py,sha256=DojLDyjBXLAe7jfxZY2BUowKFQ9-r0IxeeYYiRiSeVo,97338 | |
numpy/lib/tests/test_mixins.py,sha256=YNIKF716Jz7V8FJ8Zzww_F6laTD8j3A6SBxCXqt6rAQ,7233 | |
numpy/lib/tests/test_nanfunctions.py,sha256=wS-i0JsIwMb0p35vW2qGC_dpewDFVzI2YTnJ25pr1O8,36179 | |
numpy/lib/tests/test_packbits.py,sha256=W4gtoYBa5LbevvbXL9lvrZRT5Wt1fqyI3J7oDLtza_A,12851 | |
numpy/lib/tests/test_polynomial.py,sha256=NhCF2nGmc43KraPfR6LCBD8M-i-xZKwIsLYPFXNi0WE,10087 | |
numpy/lib/tests/test_recfunctions.py,sha256=oviLggJncK86KsFuVcF3qoyoGraJDLvRlhwsm1qispg,39023 | |
numpy/lib/tests/test_regression.py,sha256=96pKecYGHPZwAoHV3_kLvl3gIb0PN0m33R0H3dd7uSk,8472 | |
numpy/lib/tests/test_shape_base.py,sha256=nVUzbHADBmrOaOHeh5fA27gjxsnOC3r-S5lyo1n5MV8,23979 | |
numpy/lib/tests/test_stride_tricks.py,sha256=HUp9YL7eBTRfT8gs6iraMl6M3YvoDxfFmkkwwmroing,15392 | |
numpy/lib/tests/test_twodim_base.py,sha256=toC7eTjEuZxygJwQub0tC7_uGCtLVVMCHfa6EUkGJU4,17524 | |
numpy/lib/tests/test_type_check.py,sha256=KxnoWjY3iGTnr0pDWEah73ZAx_6a85S9SSnkKIG-sn0,13509 | |
numpy/lib/tests/test_ufunclike.py,sha256=VFt_8BDH7q80yXmYJSn1crolIMizKFN3mAJcigaazLU,3350 | |
numpy/lib/tests/test_utils.py,sha256=kIH7i6N_Gtsk8FgIHTuYvASeGxQB15UYPJwqvWBPWkY,2474 | |
numpy/lib/twodim_base.py,sha256=sFRiYvhrOG9EY_YMlTblXb4aJKZaE3WLUx1WBKoIgG4,27339 | |
numpy/lib/type_check.py,sha256=XsSXtj8bHk22iq4NNZpwqMKPM9FoddtYNWKFi63oZqc,18073 | |
numpy/lib/ufunclike.py,sha256=1df-LT8UlC_SRmc06DhAnsUZLHROx0p56jw6GUwcap8,7156 | |
numpy/lib/user_array.py,sha256=7nJPlDfP-04Lcq8iH_cqBbSEsx5cHCcj-2Py-oh-5t0,7817 | |
numpy/lib/utils.py,sha256=L_JtNhuRwM6_4YyhM5jCpeH36j_lj5-t6XJT3pB4In0,36161 | |
numpy/linalg/__init__.py,sha256=P2q5fyWhZEc-xhcruFEcHWmYhSBOWSr63i9UjE8x3fk,2326 | |
numpy/linalg/__pycache__/__init__.cpython-37.pyc,, | |
numpy/linalg/__pycache__/info.cpython-37.pyc,, | |
numpy/linalg/__pycache__/linalg.cpython-37.pyc,, | |
numpy/linalg/__pycache__/setup.cpython-37.pyc,, | |
numpy/linalg/_umath_linalg.cpython-37m-darwin.so,sha256=mQcYvUnRqlxw_MmVpJlLkbcVA-QnipvATVWc2x3KCds,171172 | |
numpy/linalg/info.py,sha256=AbXPYYabJK5In0F9IMk-oVWZgDyEaoU45Wnq6RtuCJs,1198 | |
numpy/linalg/lapack_lite.cpython-37m-darwin.so,sha256=_qne9zE4TSZpClUdIfM9PB7XuS40KTmhfMMInIfnfio,29172 | |
numpy/linalg/linalg.py,sha256=rZuKNk2u7c65Cp8yssB1BEeCSPauDClcgYF6N4dY2So,85094 | |
numpy/linalg/setup.py,sha256=k1X4EfRWACFtJYfb8Wiol_-pPnEMtqURxQ8H9FwFHWg,1878 | |
numpy/linalg/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/linalg/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/linalg/tests/__pycache__/test_build.cpython-37.pyc,, | |
numpy/linalg/tests/__pycache__/test_deprecations.cpython-37.pyc,, | |
numpy/linalg/tests/__pycache__/test_linalg.cpython-37.pyc,, | |
numpy/linalg/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/linalg/tests/test_build.py,sha256=xKcJ8JmGk-zTqxxMhDX5GFsw-ptn8uwOUOcxaTUuPHc,1704 | |
numpy/linalg/tests/test_deprecations.py,sha256=eGYDVF3rmGQyDEMGOc-p_zc84Cx1I3jQPyaJe7xOvEc,706 | |
numpy/linalg/tests/test_linalg.py,sha256=y87Nka03PZAqUl5GqzOb4LsLbgbGib5V9ehdoTIPLRI,69977 | |
numpy/linalg/tests/test_regression.py,sha256=zz7lprqDg7yU-z1d6AOdCDH3Tjqgw82QGiaPM7peixY,5671 | |
numpy/ma/__init__.py,sha256=fcmMCElT3MmCkjIGVhXyEAbjuWe_j1NVUiE65eAMvy0,1470 | |
numpy/ma/__pycache__/__init__.cpython-37.pyc,, | |
numpy/ma/__pycache__/bench.cpython-37.pyc,, | |
numpy/ma/__pycache__/core.cpython-37.pyc,, | |
numpy/ma/__pycache__/extras.cpython-37.pyc,, | |
numpy/ma/__pycache__/mrecords.cpython-37.pyc,, | |
numpy/ma/__pycache__/setup.cpython-37.pyc,, | |
numpy/ma/__pycache__/testutils.cpython-37.pyc,, | |
numpy/ma/__pycache__/timer_comparison.cpython-37.pyc,, | |
numpy/ma/__pycache__/version.cpython-37.pyc,, | |
numpy/ma/bench.py,sha256=q3y_e1wpHVEdg0iIxrBshWVt2LOFfYi6q-eIJ3RSVrU,4942 | |
numpy/ma/core.py,sha256=UVL30fxCdddsnMlLOgcCxl-ca_-Iqm6uyN8QHnczcH4,256431 | |
numpy/ma/extras.py,sha256=iNaY5jpgYLssKzzgS7FfhlZ3BM59gjLbsNb7CUylLDU,56986 | |
numpy/ma/mrecords.py,sha256=j8EituvbyOFG5oiTwHBnVdQX1mhD_qByBezBeB_R1hM,26937 | |
numpy/ma/setup.py,sha256=zkieH8BeiGVXl3Wlt_WeP9kciZlyAZY20DDu4SGk4b4,429 | |
numpy/ma/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/ma/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_core.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_deprecations.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_extras.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_mrecords.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_old_ma.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/ma/tests/__pycache__/test_subclassing.cpython-37.pyc,, | |
numpy/ma/tests/test_core.py,sha256=pM6Wn3jxw6cPV7wFlNKWyyAYFDAGAm0oYtw2txBudKw,196219 | |
numpy/ma/tests/test_deprecations.py,sha256=StN-maPV6dwIPn1LmJ_Fd9l_Ysrbzvl8BZy6zYeUru8,2340 | |
numpy/ma/tests/test_extras.py,sha256=f7wFcowBM60pnNwIJ221W9X6AWNS9pRK_VjVXV5Wqio,66791 | |
numpy/ma/tests/test_mrecords.py,sha256=SX0_-SgRqIQBlPaiDNVD0-oBAot0N9aLPWM7Gj4U804,19966 | |
numpy/ma/tests/test_old_ma.py,sha256=85NJMKj-TG8WGSMFT2KbDEsBsrgV58URkFUd1v9iuBc,32351 | |
numpy/ma/tests/test_regression.py,sha256=AGAA97e9_0q1VHSTOx6qIsh1qA56GzEKhWZWveuHf3w,2993 | |
numpy/ma/tests/test_subclassing.py,sha256=GEqxbqfodv1823cRitfJK3qMWTbDWIpv2HwuVB_kpXk,12997 | |
numpy/ma/testutils.py,sha256=meyy8_0sx4g2sebsVO1PrFSc6ogLzEU7vjOuu2VjY1U,10365 | |
numpy/ma/timer_comparison.py,sha256=Q1AyfHzNrWzVTrx6ebL9HgpQEkEJPHAkbWuTK_0bBkQ,15586 | |
numpy/ma/version.py,sha256=KpJAmUE1s1TpbgqgdBpDoslxm7kOMpczLjEzLMGv9Ag,380 | |
numpy/matlib.py,sha256=bfk5RflWhOjnBKhpU4L-WDafyzoNIy5-K-8MMyIauN8,9809 | |
numpy/matrixlib/__init__.py,sha256=W-2bi7zuMWQY5U1ikwfaBPubrcYkbxzPzzIeYz3RYPA,284 | |
numpy/matrixlib/__pycache__/__init__.cpython-37.pyc,, | |
numpy/matrixlib/__pycache__/defmatrix.cpython-37.pyc,, | |
numpy/matrixlib/__pycache__/setup.cpython-37.pyc,, | |
numpy/matrixlib/defmatrix.py,sha256=1tR1FsgapRQ2XbiAvV4ik4mwkKLJA1y6ABeztSeYr2k,30660 | |
numpy/matrixlib/setup.py,sha256=7DS-rWnyWlLTuOj31UuhkyW8QhLQ7KD5wirtWT_DUhc,437 | |
numpy/matrixlib/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/matrixlib/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_defmatrix.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_interaction.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_masked_matrix.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_matrix_linalg.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_multiarray.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_numeric.cpython-37.pyc,, | |
numpy/matrixlib/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/matrixlib/tests/test_defmatrix.py,sha256=FRkFPpDpgUEzEAgShORCVhPOuqclxBftHyEW5z2oV4o,15315 | |
numpy/matrixlib/tests/test_interaction.py,sha256=y0ldcMIKCeT_tRo_uON6Cvxuff-M4MxmqnzA0kDFHYU,12179 | |
numpy/matrixlib/tests/test_masked_matrix.py,sha256=4uslUEOdw_ACZ9R-VKX2m82HlDeNFrBs-zP8uuWj5gI,8962 | |
numpy/matrixlib/tests/test_matrix_linalg.py,sha256=XYsAcC02YgvlfqAQOLY2hOuggeRlRhkztNsLYWGb4QQ,2125 | |
numpy/matrixlib/tests/test_multiarray.py,sha256=jM-cFU_ktanoyJ0ScRYv5xwohhE3pKpVhBBtd31b-IQ,628 | |
numpy/matrixlib/tests/test_numeric.py,sha256=YPq5f11MUAV6WcLQbl8xKWcm17lMj9SJ09mamqGCpxA,515 | |
numpy/matrixlib/tests/test_regression.py,sha256=ou1TP5bFNpjRaL2-zQxzS11ChwvAkCVp3k71SBtOO9M,1001 | |
numpy/polynomial/__init__.py,sha256=boBgsbz2Rr49pBTyGNT3TnLRTPSauyjBNeCVGek7oUM,1134 | |
numpy/polynomial/__pycache__/__init__.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/_polybase.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/chebyshev.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/hermite.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/hermite_e.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/laguerre.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/legendre.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/polynomial.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/polyutils.cpython-37.pyc,, | |
numpy/polynomial/__pycache__/setup.cpython-37.pyc,, | |
numpy/polynomial/_polybase.py,sha256=GEr4yD6OnPtNo98Mf0p5r8vt_nEmTeqTRG3pljAADbQ,32779 | |
numpy/polynomial/chebyshev.py,sha256=79hcfdqUHgAtBofN4ZZHTkOJNzYeJ12KsCgZm4zng6I,67424 | |
numpy/polynomial/hermite.py,sha256=14FITYGMDQzZLsAIu6TwIeS_Eh4O3dTizXHRmVLddAI,58113 | |
numpy/polynomial/hermite_e.py,sha256=NuXm8lrR5s8yli_bUauvgUsX0p0yM-EwtvNj6srObtU,58237 | |
numpy/polynomial/laguerre.py,sha256=o3ZvgUi9ivpWGqPIk6hCAGcs8P9mljAxP54oXpQfYiM,56513 | |
numpy/polynomial/legendre.py,sha256=UvhOne6SyvBKj5DIYGyfD9bu_RcMqADY5ezbVkhBR8s,57701 | |
numpy/polynomial/polynomial.py,sha256=9XK_JxmTv4IjCgtJ03dd6ASNDIe8H0crEOa7MbstUTI,53443 | |
numpy/polynomial/polyutils.py,sha256=2qA03OFnMiXQj6aF0vIUWN2B99D4yd-dEMSMOYDOLf0,11529 | |
numpy/polynomial/setup.py,sha256=PKIUV6Jh7_0jBboPp3IHPmp6LWVs4tbIkdu_FtmI_5U,385 | |
numpy/polynomial/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/polynomial/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_chebyshev.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_classes.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_hermite.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_hermite_e.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_laguerre.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_legendre.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_polynomial.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_polyutils.cpython-37.pyc,, | |
numpy/polynomial/tests/__pycache__/test_printing.cpython-37.pyc,, | |
numpy/polynomial/tests/test_chebyshev.py,sha256=ntXcwISDcgmlGGfMhwYWiWdjqtTVVUfFdQHm-Msx5yc,20719 | |
numpy/polynomial/tests/test_classes.py,sha256=F07n2iV0_XPK2JC_8egdnO6FDETpttR7gW6e-8zDtEs,20056 | |
numpy/polynomial/tests/test_hermite.py,sha256=OQNcZGOg1FoqVVXrouHqanG2d2zCUEUdhutRV9WaIOs,18758 | |
numpy/polynomial/tests/test_hermite_e.py,sha256=W1akH58gTi9VrXh2GiyJfPhpeJzf57XnAFVIgM33hp4,19092 | |
numpy/polynomial/tests/test_laguerre.py,sha256=fyNsRBmdHn1Sd0uQaoXeL70kJb6nQCMeYE5X7utxkS0,17692 | |
numpy/polynomial/tests/test_legendre.py,sha256=Np3xtAMN5zlioM5HFsyLR4tdAJW0ibvfJbz9QHWMkOo,18456 | |
numpy/polynomial/tests/test_polynomial.py,sha256=xbrdofA-XCwZhsyPL5tMKDEZ5cWzxNp6Pz4SV_4nKEU,19552 | |
numpy/polynomial/tests/test_polyutils.py,sha256=GzRz3leypd2UrWE-EwuIWL0lbbj6ks6Mjli3tozDN9U,3081 | |
numpy/polynomial/tests/test_printing.py,sha256=_7O-05q3JEjdxmuzBdWxligQVdC6qGygKmbhfiYW9KQ,2067 | |
numpy/random/__init__.py,sha256=RvKHC6GpPCEcZdGLYiDG3RH5vEbJfOkL4fM0igfBAAA,6053 | |
numpy/random/__pycache__/__init__.cpython-37.pyc,, | |
numpy/random/__pycache__/info.cpython-37.pyc,, | |
numpy/random/__pycache__/setup.cpython-37.pyc,, | |
numpy/random/info.py,sha256=OzPLVv_aA7kxLu9WdGiRqO2_yA2163PWQi3Lwwrhs3E,109 | |
numpy/random/mtrand.cpython-37m-darwin.so,sha256=AvZKjfDhooK3ATWcs3zMqOl9fWiaMfHzdjAt2fkU0VI,1109276 | |
numpy/random/randomkit.h,sha256=GOfc27td8dO8YM0WeB_qM313pouCDUt9Ad7nc_lgKI0,6799 | |
numpy/random/setup.py,sha256=Zm-rZze8r6GWKT-o9tYq2DVym0AMh2tNwE_s6m1Z-Bc,2286 | |
numpy/random/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/random/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/random/tests/__pycache__/test_random.cpython-37.pyc,, | |
numpy/random/tests/__pycache__/test_regression.cpython-37.pyc,, | |
numpy/random/tests/test_random.py,sha256=ZFOdAoUGJZ6HMSQA3rrY6m1_0p_SFRT3wJpaow-3iR8,66641 | |
numpy/random/tests/test_regression.py,sha256=Eb0wEE5cbGklIwcSTNkPI6CpawspICJsx3lYtTDxl7A,5671 | |
numpy/setup.py,sha256=lsyhnRXfo0ybq63nVUX8HnYhQ1mI0bSic-mk-lK3wnc,920 | |
numpy/testing/__init__.py,sha256=MHRK5eimwrC9RE723HlOcOQGxu5HAmQ-qwlcVX1sZ1k,632 | |
numpy/testing/__pycache__/__init__.cpython-37.pyc,, | |
numpy/testing/__pycache__/decorators.cpython-37.pyc,, | |
numpy/testing/__pycache__/noseclasses.cpython-37.pyc,, | |
numpy/testing/__pycache__/nosetester.cpython-37.pyc,, | |
numpy/testing/__pycache__/print_coercion_tables.cpython-37.pyc,, | |
numpy/testing/__pycache__/setup.cpython-37.pyc,, | |
numpy/testing/__pycache__/utils.cpython-37.pyc,, | |
numpy/testing/_private/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/testing/_private/__pycache__/__init__.cpython-37.pyc,, | |
numpy/testing/_private/__pycache__/decorators.cpython-37.pyc,, | |
numpy/testing/_private/__pycache__/noseclasses.cpython-37.pyc,, | |
numpy/testing/_private/__pycache__/nosetester.cpython-37.pyc,, | |
numpy/testing/_private/__pycache__/parameterized.cpython-37.pyc,, | |
numpy/testing/_private/__pycache__/utils.cpython-37.pyc,, | |
numpy/testing/_private/decorators.py,sha256=JSIBsQH4t1rdMcr1-Cf2jBJ6CXzIGEFyZoWxUJuXI7M,9015 | |
numpy/testing/_private/noseclasses.py,sha256=nYtV16KcoqAcHswfYO-u6bRIrDBvCvpqjCNfl7zk-SA,14601 | |
numpy/testing/_private/nosetester.py,sha256=S1nEtDBvNT87Zrt8XmuSVIBWpanJwjtD1YiRlcf7eoA,20515 | |
numpy/testing/_private/parameterized.py,sha256=S_cqBegd7kdwVq1kg_DAnywwFPT_g1bjDJ6-LMq0LO4,18316 | |
numpy/testing/_private/utils.py,sha256=A9EsrakwUiCfFf2lMq6c7KF3piyePOsvTNXjtKvnDnM,78633 | |
numpy/testing/decorators.py,sha256=BEktn0PuVlmgUQ_zGVNXu0wQYh3W0_bu61LnQPrxY20,428 | |
numpy/testing/noseclasses.py,sha256=iZmGKPHAGQIshsEONB-oLt7gHPzx2Bg57oat_M4M5XE,423 | |
numpy/testing/nosetester.py,sha256=as3E0khSkTseCRpyvtOSSq4fJY1K1lrrAyIcXOErTMo,583 | |
numpy/testing/print_coercion_tables.py,sha256=F44AObcou_xytUWszku8t1bWuui-4I_18o7Z7zW8l18,2705 | |
numpy/testing/setup.py,sha256=9PnlgcejccUBzaGPi9Po-ElhmuQMAmWCBRdvCDwiKYw,676 | |
numpy/testing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/testing/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/testing/tests/__pycache__/test_decorators.cpython-37.pyc,, | |
numpy/testing/tests/__pycache__/test_doctesting.cpython-37.pyc,, | |
numpy/testing/tests/__pycache__/test_utils.cpython-37.pyc,, | |
numpy/testing/tests/test_decorators.py,sha256=JB3wOfG4SHAvGNBJgEnotP6Y7uHgNq00-Ji8-vpTL0M,5921 | |
numpy/testing/tests/test_doctesting.py,sha256=sKBXwuRZwMFSiem3R9egBzzSUB81kkpw9y-Y07iqU2M,1413 | |
numpy/testing/tests/test_utils.py,sha256=EVWoi-wP7tyIizguiXghqMk3mjvkI5BUY8xFCB6txts,52974 | |
numpy/testing/utils.py,sha256=3Z2wHEc2f-0lZrdDueAdbe96KQw1DqM_aFosea9VRtY,1232 | |
numpy/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 | |
numpy/tests/__pycache__/__init__.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_ctypeslib.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_matlib.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_numpy_version.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_public_api.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_reloading.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_scripts.cpython-37.pyc,, | |
numpy/tests/__pycache__/test_warnings.cpython-37.pyc,, | |
numpy/tests/test_ctypeslib.py,sha256=Fy_dBd80RrBufyeXISkBu6kS3X700qOD5ob0pDjRssg,12276 | |
numpy/tests/test_matlib.py,sha256=WKILeEOe3NdKP_XAy-uCs4VEi7r_ghQ7NUhIgH1LzoM,2158 | |
numpy/tests/test_numpy_version.py,sha256=VtTTZAPnsJ8xtKLy1qYqIwrpcjTtqJ9xP9qP5-p8DbU,647 | |
numpy/tests/test_public_api.py,sha256=CmikwegnRWXrMO7vvsZ4Z8O8bwDsfuq5tfOWO-0ccs8,3457 | |
numpy/tests/test_reloading.py,sha256=7sDoPGkvvZas7FhT4oOURt75A5JAcr_Ws2uoJ9cVMiY,1304 | |
numpy/tests/test_scripts.py,sha256=SxlQPb8EttfP4V5iGJyXMBtDWTS3EcYVBN-JWDTtSy4,1637 | |
numpy/tests/test_warnings.py,sha256=ye4TBGnOuPAZyu5bS5JDxYV5hLglUQQfKSrMWwY_phI,2594 | |
numpy/version.py,sha256=rVX5lm9SJUn1evDtWgRx3kArGiXzDCqRpevJTPQp-3U,294 |
numpy |
Wheel-Version: 1.0 | |
Generator: bdist_wheel (0.33.4) | |
Root-Is-Purelib: false | |
Tag: cp37-cp37m-macosx_10_6_intel | |
Tag: cp37-cp37m-macosx_10_9_intel | |
Tag: cp37-cp37m-macosx_10_9_x86_64 | |
Tag: cp37-cp37m-macosx_10_10_intel | |
Tag: cp37-cp37m-macosx_10_10_x86_64 | |
# This file is generated by numpy's setup.py
# It contains system_info results at the time of building this package.
__all__ = ["get_info","show"]

import os
import sys

# On Windows the bundled BLAS/LAPACK DLLs live in a '.libs' directory next to
# this file; it must be on PATH before the extension modules can load.
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
    os.environ.setdefault('PATH', '')
    os.environ['PATH'] += os.pathsep + extra_dll_dir

# system_info results captured at build time.
# An empty dict means the library was not found when the wheel was built.
blas_mkl_info={}
blis_info={}
openblas_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
blas_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_mkl_info={}
openblas_lapack_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
lapack_opt_info={'libraries': ['openblas', 'openblas'], 'library_dirs': ['/usr/local/lib'], 'language': 'c', 'define_macros': [('HAVE_CBLAS', None)]}
def get_info(name):
    """Return the build-time system_info dict recorded under *name*.

    Looks up ``name`` in this module's globals first, then falls back to
    ``<name>_info`` (so ``get_info('openblas')`` finds ``openblas_info``),
    and finally to an empty dict when neither exists.
    """
    module_vars = globals()
    if name in module_vars:
        return module_vars[name]
    return module_vars.get(name + "_info", {})
def show():
    """Print every system_info entry recorded in this module at build time."""
    for key, info in globals().items():
        # Config entries are the public module-level dicts; everything
        # else (imports, functions, dunders) is skipped.
        if key[0] == "_":
            continue
        if type(info) is not type({}):
            continue
        print(key + ":")
        if not info:
            print("  NOT AVAILABLE")
        for opt, text in info.items():
            text = str(text)
            # Abbreviate very long source lists to keep output readable.
            if opt == "sources" and len(text) > 200:
                text = text[:60] + " ...\n... " + text[-60:]
            print("  %s = %s" % (opt, text))
""" | |
NumPy | |
===== | |
Provides | |
1. An array object of arbitrary homogeneous items | |
2. Fast mathematical operations over arrays | |
3. Linear Algebra, Fourier Transforms, Random Number Generation | |
How to use the documentation | |
---------------------------- | |
Documentation is available in two forms: docstrings provided | |
with the code, and a loose standing reference guide, available from | |
`the NumPy homepage <https://www.scipy.org>`_. | |
We recommend exploring the docstrings using | |
`IPython <https://ipython.org>`_, an advanced Python shell with | |
TAB-completion and introspection capabilities. See below for further | |
instructions. | |
The docstring examples assume that `numpy` has been imported as `np`:: | |
>>> import numpy as np | |
Code snippets are indicated by three greater-than signs:: | |
>>> x = 42 | |
>>> x = x + 1 | |
Use the built-in ``help`` function to view a function's docstring:: | |
>>> help(np.sort) | |
... # doctest: +SKIP | |
For some objects, ``np.info(obj)`` may provide additional help. This is | |
particularly true if you see the line "Help on ufunc object:" at the top | |
of the help() page. Ufuncs are implemented in C, not Python, for speed. | |
The native Python help() does not know how to view their help, but our | |
np.info() function does. | |
To search for documents containing a keyword, do:: | |
>>> np.lookfor('keyword') | |
... # doctest: +SKIP | |
General-purpose documents like a glossary and help on the basic concepts | |
of numpy are available under the ``doc`` sub-module:: | |
>>> from numpy import doc | |
>>> help(doc) | |
... # doctest: +SKIP | |
Available subpackages | |
--------------------- | |
doc | |
Topical documentation on broadcasting, indexing, etc. | |
lib | |
Basic functions used by several sub-packages. | |
random | |
Core Random Tools | |
linalg | |
Core Linear Algebra Tools | |
fft | |
Core FFT routines | |
polynomial | |
Polynomial tools | |
testing | |
NumPy testing tools | |
f2py | |
Fortran to Python Interface Generator. | |
distutils | |
Enhancements to distutils with support for | |
Fortran compilers support and more. | |
Utilities | |
--------- | |
test | |
Run numpy unittests | |
show_config | |
Show numpy build configuration | |
dual | |
Overwrite certain functions with high-performance Scipy tools | |
matlib | |
Make everything matrices. | |
__version__ | |
NumPy version string | |
Viewing documentation using IPython | |
----------------------------------- | |
Start IPython with the NumPy profile (``ipython -p numpy``), which will | |
import `numpy` under the alias `np`. Then, use the ``cpaste`` command to | |
paste examples into the shell. To see which functions are available in | |
`numpy`, type ``np.<TAB>`` (where ``<TAB>`` refers to the TAB key), or use | |
``np.*cos*?<ENTER>`` (where ``<ENTER>`` refers to the ENTER key) to narrow | |
down the list. To view the docstring for a function, use | |
``np.cos?<ENTER>`` (to view the docstring) and ``np.cos??<ENTER>`` (to view | |
the source code). | |
Copies vs. in-place operation | |
----------------------------- | |
Most of the functions in `numpy` return a copy of the array argument | |
(e.g., `np.sort`). In-place versions of these functions are often | |
available as array methods, i.e. ``x = np.array([1,2,3]); x.sort()``. | |
Exceptions to this rule are documented. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import sys | |
import warnings | |
from ._globals import ModuleDeprecationWarning, VisibleDeprecationWarning | |
from ._globals import _NoValue | |
# We first need to detect if we're being called as part of the numpy setup
# procedure itself in a reliable manner.
try:
    __NUMPY_SETUP__
except NameError:
    # Not injected by setup.py, so this is a normal (installed) import.
    __NUMPY_SETUP__ = False

if __NUMPY_SETUP__:
    sys.stderr.write('Running from numpy source directory.\n')
else:
    try:
        from numpy.__config__ import show as show_config
    except ImportError:
        # __config__ is generated at build time; its absence means we are
        # importing from an unbuilt source tree.
        msg = """Error importing numpy: you should not try to import numpy from
        its source directory; please exit the numpy source tree, and relaunch
        your python interpreter from there."""
        raise ImportError(msg)

    from .version import git_revision as __git_revision__
    from .version import version as __version__

    __all__ = ['ModuleDeprecationWarning',
               'VisibleDeprecationWarning']

    # Allow distributors to run custom init code
    from . import _distributor_init

    from . import core
    from .core import *
    from . import compat
    from . import lib
    from .lib import *
    from . import linalg
    from . import fft
    from . import polynomial
    from . import random
    from . import ctypeslib
    from . import ma
    from . import matrixlib as _mat
    from .matrixlib import *
    from .compat import long

    # Make these accessible from numpy name-space
    # but not imported in from numpy import *
    if sys.version_info[0] >= 3:
        from builtins import bool, int, float, complex, object, str
        unicode = str
    else:
        from __builtin__ import bool, int, float, complex, object, unicode, str

    from .core import round, abs, max, min
    # now that numpy modules are imported, can initialize limits
    core.getlimits._register_known_types()

    __all__.extend(['__version__', 'show_config'])
    __all__.extend(core.__all__)
    __all__.extend(_mat.__all__)
    __all__.extend(lib.__all__)
    __all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])

    # Filter out Cython harmless warnings
    warnings.filterwarnings("ignore", message="numpy.dtype size changed")
    warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
    warnings.filterwarnings("ignore", message="numpy.ndarray size changed")

    # oldnumeric and numarray were removed in 1.9. In case some packages import
    # but do not use them, we define them here for backward compatibility.
    oldnumeric = 'removed'
    numarray = 'removed'

    # We don't actually use this ourselves anymore, but I'm not 100% sure that
    # no-one else in the world is using it (though I hope not)
    from .testing import Tester

    # Pytest testing
    from numpy._pytesttester import PytestTester
    test = PytestTester(__name__)
    del PytestTester

    def _sanity_check():
        """
        Quick sanity checks for common bugs caused by environment.
        There are some cases e.g. with wrong BLAS ABI that cause wrong
        results under specific runtime conditions that are not necessarily
        achieved during test suite runs, and it is useful to catch those early.
        See https://github.com/numpy/numpy/issues/8577 and other
        similar bug reports.
        """
        try:
            # ones(2, float32) dotted with itself must be ~2.0; a wrong BLAS
            # ABI typically garbles exactly this kind of small computation.
            x = ones(2, dtype=float32)
            if not abs(x.dot(x) - 2.0) < 1e-5:
                raise AssertionError()
        except AssertionError:
            msg = ("The current Numpy installation ({!r}) fails to "
                   "pass simple sanity checks. This can be caused for example "
                   "by incorrect BLAS library being linked in, or by mixing "
                   "package managers (pip, conda, apt, ...). Search closed "
                   "numpy issues for similar problems.")
            raise RuntimeError(msg.format(__file__))

    _sanity_check()
    del _sanity_check
""" Distributor init file | |
Distributors: you can add custom code here to support particular distributions | |
of numpy. | |
For example, this is a good place to put any checks for hardware requirements. | |
The numpy standard source distribution will not put code in this file, so you | |
can safely replace this file with your own version. | |
""" |
""" | |
Module defining global singleton classes. | |
This module raises a RuntimeError if an attempt to reload it is made. In that | |
way the identities of the classes defined here are fixed and will remain so | |
even if numpy itself is reloaded. In particular, a function like the following | |
will still work correctly after numpy is reloaded:: | |
def foo(arg=np._NoValue): | |
if arg is np._NoValue: | |
... | |
That was not the case when the singleton classes were defined in the numpy | |
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that | |
motivated this module. | |
""" | |
from __future__ import division, absolute_import, print_function

# Fix: this list was previously misspelled ``__ALL__``, which Python ignores
# entirely — it had no effect on ``from numpy._globals import *``. Renamed to
# the real ``__all__`` so the declared public API (including the ``_NoValue``
# sentinel, which star-import would otherwise skip as a private name) is
# actually exported, matching what this module's docstring promises.
__all__ = [
    'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
    ]

# Disallow reloading this module so as to preserve the identities of the
# classes defined here.
if '_is_loaded' in globals():
    raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class ModuleDeprecationWarning(DeprecationWarning):
    """Deprecation warning for whole modules.

    Ordinary DeprecationWarnings are turned into test failures by the nose
    tester, which makes deprecating an entire module (imported by default)
    impractical. This dedicated subclass is one the nose tester lets pass
    without failing the test run.
    """

    # Present this class as living in the top-level ``numpy`` namespace so
    # warning output and pickling refer to ``numpy.ModuleDeprecationWarning``.
    __module__ = 'numpy'
class VisibleDeprecationWarning(UserWarning):
    """Deprecation warning that is shown by default.

    Python suppresses DeprecationWarning by default; deriving from
    UserWarning makes this one visible, which is appropriate when the
    deprecated usage is most likely a bug in the user's code.
    """

    # Present this class as living in the top-level ``numpy`` namespace so
    # warning output and pickling refer to ``numpy.VisibleDeprecationWarning``.
    __module__ = 'numpy'
class _NoValueType(object): | |
"""Special keyword value. | |
The instance of this class may be used as the default value assigned to a | |
deprecated keyword in order to check if it has been given a user defined | |
value. | |
""" | |
__instance = None | |
def __new__(cls): | |
# ensure that only one instance exists | |
if not cls.__instance: | |
cls.__instance = super(_NoValueType, cls).__new__(cls) | |
return cls.__instance | |
# needed for python 2 to preserve identity through a pickle | |
def __reduce__(self): | |
return (self.__class__, ()) | |
def __repr__(self): | |
return "<no value>" | |
_NoValue = _NoValueType() |
""" | |
Pytest test running. | |
This module implements the ``test()`` function for NumPy modules. The usual | |
boiler plate for doing that is to put the following in the module | |
``__init__.py`` file:: | |
from numpy._pytesttester import PytestTester | |
test = PytestTester(__name__).test | |
del PytestTester | |
Warnings filtering and other runtime settings should be dealt with in the | |
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on | |
whether or not that file is found as follows: | |
* ``pytest.ini`` is present (develop mode) | |
All warnings except those explicily filtered out are raised as error. | |
* ``pytest.ini`` is absent (release mode) | |
DeprecationWarnings and PendingDeprecationWarnings are ignored, other | |
warnings are passed through. | |
In practice, tests run from the numpy repo are run in develop mode. That | |
includes the standard ``python runtests.py`` invocation. | |
This module is imported by every numpy subpackage, so lies at the top level to | |
simplify circular import issues. For the same reason, it contains no numpy | |
imports at module scope, instead importing numpy within function calls. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import sys | |
import os | |
__all__ = ['PytestTester'] | |
def _show_numpy_info(): | |
import numpy as np | |
print("NumPy version %s" % np.__version__) | |
relaxed_strides = np.ones((10, 1), order="C").flags.f_contiguous | |
print("NumPy relaxed strides checking option:", relaxed_strides) | |
class PytestTester(object):
    """
    Pytest-based test runner for a NumPy (sub)package.

    An instance is typically exposed from a package's ``__init__.py``::

        from numpy._pytesttester import PytestTester
        test = PytestTester(__name__).test
        del PytestTester

    Calling the instance discovers and runs all tests belonging to the
    named module and its sub-modules.

    Attributes
    ----------
    module_name : str
        Full dotted name of the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.
    """

    def __init__(self, module_name):
        self.module_name = module_name

    def __call__(self, label='fast', verbose=1, extra_argv=None,
                 doctests=False, coverage=False, durations=-1, tests=None):
        """
        Run tests for this module using pytest.

        Parameters
        ----------
        label : {'fast', 'full'}, optional
            'fast' skips tests marked ``pytest.mark.slow``; 'full' runs
            everything. Any other value is passed through as a ``-m`` marker
            expression.
        verbose : int, optional
            Verbosity in the range 1-3. Default is 1.
        extra_argv : list, optional
            Extra arguments forwarded verbatim to pytest.
        doctests : bool, optional
            .. note:: Not supported; raises ValueError if True.
        coverage : bool, optional
            If True, collect coverage for this module (requires pytest-cov).
            Default is False.
        durations : int, optional
            If < 0, do nothing. If 0, report the time of every test; if > 0,
            report that many slowest tests. Default is -1.
        tests : test or list of tests
            Tests to run via pytest's ``--pyargs``; defaults to this module.

        Returns
        -------
        result : bool
            True when the pytest run succeeded, False otherwise.

        Notes
        -----
        Each NumPy module exposes `test` in its namespace to run all tests
        for it, e.g. ``np.lib.test()``.
        """
        import pytest
        import warnings

        target = sys.modules[self.module_name]
        target_path = os.path.abspath(target.__path__[0])

        # "-l" shows local variables in tracebacks; "-q" offsets pytest's
        # default verbosity so that a later "-v" cancels it.
        pytest_args = ["-l", "-q"]

        # Import numpy.distutils.cpuinfo up front under a blanket filter:
        # its warnings would otherwise leak into the run, and ASV has
        # trouble importing it at top level.
        with warnings.catch_warnings():
            warnings.simplefilter("always")
            from numpy.distutils import cpuinfo

        # Silence import-time noise (wanted in both develop and release
        # mode), matrix-subclass PendingDeprecationWarnings, and the
        # python2 "-3" compatibility warnings.
        pytest_args += [
            "-W ignore:Not importing directory",
            "-W ignore:numpy.dtype size changed",
            "-W ignore:numpy.ufunc size changed",
            "-W ignore::UserWarning:cpuinfo",
            "-W ignore:the matrix subclass is not",
            r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning",
            r"-W ignore:in 3\.x, __setslice__:DeprecationWarning",
            r"-W ignore:in 3\.x, __getslice__:DeprecationWarning",
            r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning",
            r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning",
            r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning",
            r"-W ignore:the commands module has been removed in Python 3\.0:DeprecationWarning",
            r"-W ignore:The 'new' module has been removed in Python 3\.0:DeprecationWarning",
        ]

        if doctests:
            raise ValueError("Doctests not supported")

        if extra_argv:
            pytest_args += list(extra_argv)

        if verbose > 1:
            pytest_args += ["-" + "v" * (verbose - 1)]

        if coverage:
            pytest_args += ["--cov=" + target_path]

        if label == "fast":
            pytest_args += ["-m", "not slow"]
        elif label != "full":
            pytest_args += ["-m", label]

        if durations >= 0:
            pytest_args += ["--durations=%s" % durations]

        if tests is None:
            tests = [self.module_name]
        pytest_args += ["--pyargs"] + list(tests)

        # Show version/build info, then run. pytest may exit via SystemExit
        # when invoked programmatically; translate that into a return code.
        _show_numpy_info()
        try:
            code = pytest.main(pytest_args)
        except SystemExit as exc:
            code = exc.code

        return code == 0
""" | |
Compatibility module. | |
This module contains duplicated code from Python itself or 3rd party | |
extensions, which may be included for the following reasons: | |
* compatibility | |
* we may only need a small subset of the copied library/module | |
""" | |
from __future__ import division, absolute_import, print_function

from . import _inspect
from . import py3k
from ._inspect import getargspec, formatargspec
from .py3k import *

# The public API is the union of the two submodules' __all__ lists.
__all__ = []
__all__.extend(_inspect.__all__)
__all__.extend(py3k.__all__)
"""Subset of inspect module from upstream python | |
We use this instead of upstream because upstream inspect is slow to import, and | |
significantly contributes to numpy import times. Importing this copy has almost | |
no overhead. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import types | |
__all__ = ['getargspec', 'formatargspec'] | |
# ----------------------------------------------------------- type-checking | |
def ismethod(object):
    """Return true if the object is an instance method.

    Instance method objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this method was defined
        im_class        class object in which this method belongs
        im_func         function object containing implementation of method
        im_self         instance to which this method is bound, or None
    """
    # Bound methods of user-defined classes are types.MethodType; builtin
    # methods (e.g. list.append of an instance) are a different type.
    return isinstance(object, types.MethodType)
def isfunction(object):
    """Return true if the object is a user-defined function.

    Function objects provide these attributes:
        __doc__         documentation string
        __name__        name with which this function was defined
        func_code       code object containing compiled function bytecode
        func_defaults   tuple of any default values for arguments
        func_doc        (same as __doc__)
        func_globals    global namespace in which this function was defined
        func_name       (same as __name__)
    """
    # Pure-python functions only; C builtins are a different type.
    return isinstance(object, types.FunctionType)
def iscode(object):
    """Return true if the object is a code object.

    Code objects provide these attributes:
        co_argcount     number of arguments (not including * or ** args)
        co_code         string of raw compiled bytecode
        co_consts       tuple of constants used in the bytecode
        co_filename     name of file in which this code object was created
        co_firstlineno  number of first line in Python source code
        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
        co_lnotab       encoded mapping of line numbers to bytecode indices
        co_name         name with which this code object was defined
        co_names        tuple of names of local variables
        co_nlocals      number of local variables
        co_stacksize    virtual machine stack space required
        co_varnames     tuple of names of arguments and local variables
    """
    return isinstance(object, types.CodeType)


# ------------------------------------------------ argument list extraction
# These constants are from Python's compile.h (co_flags bits).
CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8


def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where 'args' is
    a list of argument names (possibly containing nested lists), and
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    """
    if not iscode(co):
        raise TypeError('arg is not a code object')

    pos_count = co.co_argcount
    positional = list(co.co_varnames[:pos_count])

    # Anonymous (python-2 tuple) arguments appear with '' or '.'-prefixed
    # names; this trimmed copy rejects them rather than import dis to
    # decode them.
    for name in positional:
        if name[:1] in ['', '.']:
            raise TypeError("tuple function arguments are not supported")

    # *args and **kwargs slots, when present, come right after the
    # positional names in co_varnames.
    next_slot = pos_count
    varargs = None
    if co.co_flags & CO_VARARGS:
        varargs = co.co_varnames[next_slot]
        next_slot += 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        varkw = co.co_varnames[next_slot]
    return positional, varargs, varkw
def getargspec(func):
    """Get the names and default values of a function's arguments.

    A tuple of four things is returned: (args, varargs, varkw, defaults).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'defaults' is an n-tuple of the default values of the last n arguments.
    """
    # Unwrap bound/unbound methods down to the plain function object first.
    if ismethod(func):
        func = func.__func__
    if not isfunction(func):
        raise TypeError('arg is not a Python function')
    names, varargs, varkw = getargs(func.__code__)
    return names, varargs, varkw, func.__defaults__
def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names (it may contain nested lists).
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame.
    """
    names, varargs, varkw = getargs(frame.f_code)
    return names, varargs, varkw, frame.f_locals
def joinseq(seq):
    """Render a sequence of strings as a tuple display, e.g. ``(a, b)``.

    A single element keeps the trailing comma (``(a,)``) so the output
    reads like Python tuple syntax.
    """
    if len(seq) == 1:
        return '(' + seq[0] + ',)'
    return '(' + ', '.join(seq) + ')'


def strseq(object, convert, join=joinseq):
    """Recursively walk a sequence, stringifying each element.

    Lists and tuples are descended into and rendered via *join*; anything
    else is passed to *convert*.
    """
    if type(object) in [list, tuple]:
        return join([strseq(item, convert, join) for item in object])
    return convert(object)
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  join=joinseq):
    """Format an argument spec from the 4 values returned by getargspec.

    The first four arguments are (args, varargs, varkw, defaults). The
    other four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments.
    """
    # Defaults apply to the last len(defaults) positional arguments.
    first_default = len(args) - len(defaults) if defaults else len(args)
    rendered = []
    for index, arg in enumerate(args):
        piece = strseq(arg, formatarg, join)
        if defaults and index >= first_default:
            piece = piece + formatvalue(defaults[index - first_default])
        rendered.append(piece)
    if varargs is not None:
        rendered.append(formatvarargs(varargs))
    if varkw is not None:
        rendered.append(formatvarkw(varkw))
    return '(' + ', '.join(rendered) + ')'
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value),
                    join=joinseq):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals). The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings. The ninth
    argument is an optional function to format the sequence of arguments.
    """
    # Each name is rendered together with its current value from *locals*.
    def convert(name, locals=locals,
                formatarg=formatarg, formatvalue=formatvalue):
        return formatarg(name) + formatvalue(locals[name])

    rendered = [strseq(arg, convert, join) for arg in args]
    if varargs:
        rendered.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        rendered.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(rendered) + ')'
""" | |
Python 3 compatibility tools. | |
""" | |
from __future__ import division, absolute_import, print_function | |
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', | |
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested', | |
'asstr', 'open_latin1', 'long', 'basestring', 'sixu', | |
'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path', | |
'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] | |
import sys | |
# pathlib is stdlib from python 3.4; Path/PurePath are None when absent.
try:
    from pathlib import Path, PurePath
except ImportError:
    Path = PurePath = None

if sys.version_info[0] >= 3:
    # Python 3: the py2 names are aliased onto their py3 equivalents, and
    # the text helpers use latin1 so round-tripping arbitrary bytes is safe.
    import io

    long = int
    integer_types = (int,)
    basestring = str
    unicode = str
    bytes = bytes

    def asunicode(s):
        # bytes -> str via latin1; anything else through str().
        if isinstance(s, bytes):
            return s.decode('latin1')
        return str(s)

    def asbytes(s):
        # str (or other) -> bytes via latin1; bytes pass through unchanged.
        if isinstance(s, bytes):
            return s
        return str(s).encode('latin1')

    def asstr(s):
        # Same as asunicode on python 3: native str out.
        if isinstance(s, bytes):
            return s.decode('latin1')
        return str(s)

    def isfileobj(f):
        # True only for real OS-level file objects (not StringIO etc.).
        return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))

    def open_latin1(filename, mode='r'):
        return open(filename, mode=mode, encoding='iso-8859-1')

    def sixu(s):
        # py3 string literals are already unicode.
        return s

    # Array-protocol type character for the native string type.
    strchar = 'U'
else:
    # Python 2: native str is bytes; keep the py2 builtins under the same
    # exported names so client code can be version-agnostic.
    bytes = str
    long = long
    basestring = basestring
    unicode = unicode
    integer_types = (int, long)
    asbytes = str
    asstr = str
    strchar = 'S'

    def isfileobj(f):
        return isinstance(f, file)

    def asunicode(s):
        if isinstance(s, unicode):
            return s
        return str(s).decode('ascii')

    def open_latin1(filename, mode='r'):
        # py2 open has no encoding parameter; bytes come back raw.
        return open(filename, mode=mode)

    def sixu(s):
        # Emulate the py2 u'...' literal with escape processing.
        return unicode(s, 'unicode_escape')
def getexception():
    """Return the exception instance currently being handled, or None."""
    _, value, _ = sys.exc_info()
    return value
def asbytes_nested(x):
    """Recursively convert strings inside (nested) iterables to bytes."""
    is_container = hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode))
    if is_container:
        return [asbytes_nested(item) for item in x]
    return asbytes(x)
def asunicode_nested(x):
    """Recursively convert strings inside (nested) iterables to unicode."""
    is_container = hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode))
    if is_container:
        return [asunicode_nested(item) for item in x]
    return asunicode(x)
def is_pathlib_path(obj):
    """
    Check whether obj is a pathlib.Path object.
    Prefer using `isinstance(obj, os_PathLike)` instead of this function.
    """
    if Path is None:
        # pathlib unavailable, so nothing can be a Path.
        return False
    return isinstance(obj, Path)
# from Python 3.7
class contextlib_nullcontext(object):
    """Context manager that performs no extra processing.

    Stand-in for a real context manager when a block of code is only
    sometimes run under one::

        cm = optional_cm if condition else nullcontext()
        with cm:
            # Perform operation, using optional_cm if condition is True
    """

    def __init__(self, enter_result=None):
        # Value handed back from __enter__.
        self.enter_result = enter_result

    def __enter__(self):
        return self.enter_result

    def __exit__(self, *excinfo):
        # Never suppress exceptions.
        pass
if sys.version_info[0] >= 3 and sys.version_info[1] >= 4:
    def npy_load_module(name, fn, info=None):
        """
        Load a module from a source file.

        .. versionadded:: 1.11.2

        Parameters
        ----------
        name : str
            Full module name.
        fn : str
            Path to module file.
        info : tuple, optional
            Only here for backward compatibility with Python 2.*.

        Returns
        -------
        mod : module
        """
        from importlib.machinery import SourceFileLoader
        return SourceFileLoader(name, fn).load_module()
else:
    def npy_load_module(name, fn, info=None):
        """
        Load a module from a source file (Python 2 / imp-based path).

        .. versionadded:: 1.11.2

        Parameters
        ----------
        name : str
            Full module name.
        fn : str
            Path to module file.
        info : tuple, optional
            Information as returned by `imp.find_module`
            (suffix, mode, type).

        Returns
        -------
        mod : module
        """
        import imp
        import os
        if info is None:
            # Locate the module next to `fn` when no find_module info given.
            path = os.path.dirname(fn)
            fo, fn, info = imp.find_module(name, [path])
        else:
            fo = open(fn, info[1])
        try:
            mod = imp.load_module(name, fo, fn, info)
        finally:
            fo.close()
        return mod
# backport abc.ABC: use the stdlib class when present (Python >= 3.4),
# otherwise synthesize an equivalent empty base through the metaclass.
import abc

abc_ABC = (abc.ABC if sys.version_info[:2] >= (3, 4)
           else abc.ABCMeta('ABC', (object,), {'__slots__': ()}))
# Backport os.fspath, os.PathLike, and PurePath.__fspath__
if sys.version_info[:2] >= (3, 6):
    # The real implementations exist; just re-export them.
    import os
    os_fspath = os.fspath
    os_PathLike = os.PathLike
else:
    def _PurePath__fspath__(self):
        # Pre-3.6 PurePath lacks __fspath__; str() is its path form.
        return str(self)

    class os_PathLike(abc_ABC):
        """Abstract base class for implementing the file system path protocol."""

        @abc.abstractmethod
        def __fspath__(self):
            """Return the file system path representation of the object."""
            raise NotImplementedError

        @classmethod
        def __subclasshook__(cls, subclass):
            # PurePath counts even though it predates __fspath__.
            if PurePath is not None and issubclass(subclass, PurePath):
                return True
            return hasattr(subclass, '__fspath__')

    def os_fspath(path):
        """Return the path representation of a path-like object.
        If str or bytes is passed in, it is returned unchanged. Otherwise the
        os.PathLike interface is used to get the path representation. If the
        path representation is not str or bytes, TypeError is raised. If the
        provided path is not str, bytes, or os.PathLike, TypeError is raised.
        """
        if isinstance(path, (unicode, bytes)):
            return path

        # Work from the object's type to match method resolution of other
        # magic methods.
        path_type = type(path)
        try:
            path_repr = path_type.__fspath__(path)
        except AttributeError:
            if hasattr(path_type, '__fspath__'):
                # __fspath__ itself raised AttributeError; propagate it.
                raise
            if PurePath is not None and issubclass(path_type, PurePath):
                return _PurePath__fspath__(path)
            raise TypeError("expected str, bytes or os.PathLike object, "
                            "not " + path_type.__name__)
        if isinstance(path_repr, (unicode, bytes)):
            return path_repr
        raise TypeError("expected {}.__fspath__() to return str or bytes, "
                        "not {}".format(path_type.__name__,
                                        type(path_repr).__name__))
from __future__ import division, print_function | |
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the compat package."""
    from numpy.distutils.misc_util import Configuration
    cfg = Configuration('compat', parent_package, top_path)
    # Ship the test suite alongside the package.
    cfg.add_data_dir('tests')
    return cfg
# Allow `python setup.py ...` to build this subpackage standalone.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(configuration=configuration)
from __future__ import division, absolute_import, print_function | |
from os.path import join | |
from numpy.compat import isfileobj, os_fspath | |
from numpy.testing import assert_ | |
from numpy.testing import tempdir | |
def test_isfileobj():
    """isfileobj() must recognize files opened for write, append and read."""
    with tempdir(prefix="numpy_test_compat_") as folder:
        filename = join(folder, 'a.bin')
        # Exercise each plain binary mode in turn: write, append, read.
        for mode in ('wb', 'ab', 'rb'):
            with open(filename, mode) as f:
                assert_(isfileobj(f))
def test_os_fspath_strings():
    """os_fspath() must pass str and bytes paths through unchanged."""
    byte_path = b'/a/b/c.d'
    text_path = u'/a/b/c.d'
    assert_(os_fspath(byte_path) == byte_path)
    assert_(os_fspath(text_path) == text_path)
""" | |
Pytest configuration and fixtures for the Numpy test suite. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import pytest | |
import numpy | |
from numpy.core._multiarray_tests import get_fpu_mode | |
# Shared state between the collection-time hook and the per-test fixture.
_old_fpu_mode = None      # FPU mode observed at the previously collected item
_collect_results = {}     # item -> (old_mode, new_mode) for collection-time changes
def pytest_configure(config):
    """Register the custom pytest markers used by the NumPy test suite."""
    marker_lines = (
        "valgrind_error: Tests that are known to error under valgrind.",
        "slow: Tests that are very slow.",
    )
    for line in marker_lines:
        config.addinivalue_line("markers", line)
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
    """
    Check FPU precision mode was not changed during test collection.

    The clumsy way we do it here is mainly necessary because numpy
    still uses yield tests, which can execute code at test collection
    time.
    """
    global _old_fpu_mode
    current = get_fpu_mode()
    if _old_fpu_mode is None:
        # First collected item establishes the baseline.
        _old_fpu_mode = current
        return
    if current != _old_fpu_mode:
        # Record the change so check_fpu_mode can report it for this item.
        _collect_results[item] = (_old_fpu_mode, current)
        _old_fpu_mode = current
@pytest.fixture(scope="function", autouse=True)
def check_fpu_mode(request):
    """
    Check FPU precision mode was not changed during the test.
    """
    mode_before = get_fpu_mode()
    yield
    mode_after = get_fpu_mode()

    if mode_before != mode_after:
        raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
                             " during the test".format(mode_before, mode_after))

    # Also surface any change detected while this item was being collected.
    collected = _collect_results.get(request.node)
    if collected is not None:
        first, second = collected
        raise AssertionError("FPU precision mode changed from {0:#x} to {1:#x}"
                             " when collecting the test".format(first, second))
@pytest.fixture(autouse=True)
def add_np(doctest_namespace):
    """Expose numpy as ``np`` inside every doctest namespace."""
    doctest_namespace['np'] = numpy
from __future__ import division, absolute_import, print_function | |
from .info import __doc__ | |
from numpy.version import version as __version__ | |
import os

# on Windows NumPy loads an important OpenBLAS-related DLL
# and the code below aims to alleviate issues with DLL
# path resolution portability with an absolute path DLL load
if os.name == 'nt':
    from ctypes import WinDLL
    import glob
    # convention for storing / loading the DLL from
    # numpy/.libs/, if present
    libs_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             '..', '.libs'))
    DLL_filenames = []
    if os.path.isdir(libs_path):
        for filename in glob.glob(os.path.join(libs_path, '*openblas*dll')):
            # NOTE: would it change behavior to load ALL
            # DLLs at this path vs. the name restriction?
            WinDLL(os.path.abspath(filename))
            DLL_filenames.append(filename)
    # Warn when several OpenBLAS DLLs were loaded; mixing them can misbehave.
    if len(DLL_filenames) > 1:
        import warnings
        warnings.warn("loaded more than 1 DLL from .libs:\n%s" %
                      "\n".join(DLL_filenames),
                      stacklevel=1)

# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
    if envkey not in os.environ:
        os.environ[envkey] = '1'
        env_added.append(envkey)

# Import the compiled core; on failure, replace the terse ImportError with
# actionable troubleshooting advice.
try:
    from . import multiarray
except ImportError as exc:
    import sys
    msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the multiarray numpy extension module failed. Most
likely you are trying to import a failed build of numpy.
Here is how to proceed:
- If you're working with a numpy git repository, try `git clean -xdf`
(removes all files not under version control) and rebuild numpy.
- If you are simply trying to use the numpy version that you have installed:
your installation is broken - please reinstall numpy.
- If you have already reinstalled and that did not fix the problem, then:
1. Check that you are using the Python you expect (you're using %s),
and that you have no directories in your PATH or PYTHONPATH that can
interfere with the Python and numpy versions you're trying to use.
2. If (1) looks fine, you can open a new issue at
https://github.com/numpy/numpy/issues. Please include details on:
- how you installed Python
- how you installed numpy
- your operating system
- whether or not you have multiple versions of Python installed
- if you built from source, your compiler versions and ideally a build log
Note: this error has many possible causes, so please don't comment on
an existing issue about this - open a new one instead.
Original error was: %s
""" % (sys.executable, exc)
    raise ImportError(msg)
finally:
    # Undo the temporary environment tweaks whether or not the import worked.
    for envkey in env_added:
        del os.environ[envkey]
    del envkey
    del env_added
    del os
from . import umath

# Check that multiarray,umath are pure python modules wrapping
# _multiarray_umath and not either of the old c-extension modules
if not (hasattr(multiarray, '_multiarray_umath') and
        hasattr(umath, '_multiarray_umath')):
    import sys
    path = sys.modules['numpy'].__path__
    msg = ("Something is wrong with the numpy installation. "
           "While importing we detected an older version of "
           "numpy in {}. One method of fixing this is to repeatedly uninstall "
           "numpy until none is found, then reinstall this version.")
    raise ImportError(msg.format(path))

from . import numerictypes as nt
# Register the scalar-type dictionary with the C layer.
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from . import defchararray as char
from . import records as rec
from .records import *
from .memmap import *
from .defchararray import chararray
from . import function_base
from .function_base import *
from . import machar
from .machar import *
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt

# Deliberately shadow the builtins max/min/round/abs with array-aware versions.
from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs

# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
# add these for module-freeze analysis (like PyInstaller)
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods

# Assemble the public API from the submodules' __all__ lists.
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
__all__ += fromnumeric.__all__
__all__ += rec.__all__
__all__ += ['chararray']
__all__ += function_base.__all__
__all__ += machar.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__

# Make it possible so that ufuncs can be pickled
# Here are the loading and unloading functions
# The name numpy.core._ufunc_reconstruct must be
# available for unpickling to work.
def _ufunc_reconstruct(module, name): | |
# The `fromlist` kwarg is required to ensure that `mod` points to the | |
# inner-most module rather than the parent package when module name is | |
# nested. This makes it possible to pickle non-toplevel ufuncs such as | |
# scipy.special.expit for instance. | |
mod = __import__(module, fromlist=[name]) | |
return getattr(mod, name) | |
def _ufunc_reduce(func):
    """Pickle support: reduce a ufunc to (reconstructor, (module, name))."""
    from pickle import whichmodule
    ufunc_name = func.__name__
    owning_module = whichmodule(func, ufunc_name)
    return _ufunc_reconstruct, (owning_module, ufunc_name)
import sys
# copyreg was renamed from copy_reg between Python 2 and 3.
if sys.version_info[0] >= 3:
    import copyreg
else:
    import copy_reg as copyreg

# Register the reduce/reconstruct pair so ufunc instances can be pickled.
copyreg.pickle(ufunc, _ufunc_reduce, _ufunc_reconstruct)
# Unclutter namespace (must keep _ufunc_reconstruct for unpickling)
del copyreg
del sys
del _ufunc_reduce

from numpy._pytesttester import PytestTester
# `numpy.core.test()` runs this subpackage's test suite.
test = PytestTester(__name__)
del PytestTester
""" | |
This is only meant to add docs to objects defined in C-extension modules. | |
The purpose is to allow easier editing of the docstrings without | |
requiring a re-compile. | |
NOTE: Many of the methods of ndarray have corresponding functions. | |
If you update these docstrings, please keep also the ones in | |
core/fromnumeric.py, core/defmatrix.py up-to-date. | |
""" | |
from __future__ import division, absolute_import, print_function | |
from numpy.core import numerictypes as _numerictypes | |
from numpy.core import dtype | |
from numpy.core.function_base import add_newdoc | |
###############################################################################
#
# flatiter
#
# flatiter needs a toplevel description
#
###############################################################################

add_newdoc('numpy.core', 'flatiter',
    """
    Flat iterator object to iterate over arrays.

    A `flatiter` iterator is returned by ``x.flat`` for any array `x`.
    It allows iterating over the array as if it were a 1-D array,
    either in a for-loop or by calling its `next` method.

    Iteration is done in row-major, C-style order (the last
    index varying the fastest). The iterator can also be indexed using
    basic slicing or advanced indexing.

    See Also
    --------
    ndarray.flat : Return a flat iterator over an array.
    ndarray.flatten : Returns a flattened copy of an array.

    Notes
    -----
    A `flatiter` iterator can not be constructed directly from Python code
    by calling the `flatiter` constructor.

    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> fl = x.flat
    >>> type(fl)
    <type 'numpy.flatiter'>
    >>> for item in fl:
    ...     print(item)
    ...
    0
    1
    2
    3
    4
    5

    >>> fl[2:4]
    array([2, 3])

    """)

# flatiter attributes

add_newdoc('numpy.core', 'flatiter', ('base',
    """
    A reference to the array that is iterated over.

    Examples
    --------
    >>> x = np.arange(5)
    >>> fl = x.flat
    >>> fl.base is x
    True

    """))

add_newdoc('numpy.core', 'flatiter', ('coords',
    """
    An N-dimensional tuple of current coordinates.

    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> fl = x.flat
    >>> fl.coords
    (0, 0)
    >>> fl.next()
    0
    >>> fl.coords
    (0, 1)

    """))

add_newdoc('numpy.core', 'flatiter', ('index',
    """
    Current flat index into the array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> fl = x.flat
    >>> fl.index
    0
    >>> fl.next()
    0
    >>> fl.index
    1

    """))

# flatiter functions

add_newdoc('numpy.core', 'flatiter', ('__array__',
    """__array__(type=None) Get array from iterator

    """))

add_newdoc('numpy.core', 'flatiter', ('copy',
    """
    copy()

    Get a copy of the iterator as a 1-D array.

    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> x
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> fl = x.flat
    >>> fl.copy()
    array([0, 1, 2, 3, 4, 5])

    """))
############################################################################### | |
# | |
# nditer | |
# | |
############################################################################### | |
add_newdoc('numpy.core', 'nditer', | |
""" | |
Efficient multi-dimensional iterator object to iterate over arrays. | |
To get started using this object, see the | |
:ref:`introductory guide to array iteration <arrays.nditer>`. | |
Parameters | |
---------- | |
op : ndarray or sequence of array_like | |
The array(s) to iterate over. | |
flags : sequence of str, optional | |
Flags to control the behavior of the iterator. | |
* "buffered" enables buffering when required. | |
* "c_index" causes a C-order index to be tracked. | |
* "f_index" causes a Fortran-order index to be tracked. | |
* "multi_index" causes a multi-index, or a tuple of indices | |
with one per iteration dimension, to be tracked. | |
* "common_dtype" causes all the operands to be converted to | |
a common data type, with copying or buffering as necessary. | |
* "copy_if_overlap" causes the iterator to determine if read | |
operands have overlap with write operands, and make temporary | |
copies as necessary to avoid overlap. False positives (needless | |
copying) are possible in some cases. | |
* "delay_bufalloc" delays allocation of the buffers until | |
a reset() call is made. Allows "allocate" operands to | |
be initialized before their values are copied into the buffers. | |
* "external_loop" causes the `values` given to be | |
one-dimensional arrays with multiple values instead of | |
zero-dimensional arrays. | |
* "grow_inner" allows the `value` array sizes to be made | |
larger than the buffer size when both "buffered" and | |
"external_loop" is used. | |
* "ranged" allows the iterator to be restricted to a sub-range | |
of the iterindex values. | |
* "refs_ok" enables iteration of reference types, such as | |
object arrays. | |
* "reduce_ok" enables iteration of "readwrite" operands | |
which are broadcasted, also known as reduction operands. | |
* "zerosize_ok" allows `itersize` to be zero. | |
op_flags : list of list of str, optional | |
This is a list of flags for each operand. At minimum, one of | |
"readonly", "readwrite", or "writeonly" must be specified. | |
* "readonly" indicates the operand will only be read from. | |
* "readwrite" indicates the operand will be read from and written to. | |
* "writeonly" indicates the operand will only be written to. | |
* "no_broadcast" prevents the operand from being broadcasted. | |
* "contig" forces the operand data to be contiguous. | |
* "aligned" forces the operand data to be aligned. | |
* "nbo" forces the operand data to be in native byte order. | |
* "copy" allows a temporary read-only copy if required. | |
* "updateifcopy" allows a temporary read-write copy if required. | |
* "allocate" causes the array to be allocated if it is None | |
in the `op` parameter. | |
* "no_subtype" prevents an "allocate" operand from using a subtype. | |
* "arraymask" indicates that this operand is the mask to use | |
for selecting elements when writing to operands with the | |
'writemasked' flag set. The iterator does not enforce this, | |
but when writing from a buffer back to the array, it only | |
copies those elements indicated by this mask. | |
* 'writemasked' indicates that only elements where the chosen | |
'arraymask' operand is True will be written to. | |
* "overlap_assume_elementwise" can be used to mark operands that are | |
accessed only in the iterator order, to allow less conservative | |
copying when "copy_if_overlap" is present. | |
op_dtypes : dtype or tuple of dtype(s), optional | |
The required data type(s) of the operands. If copying or buffering | |
is enabled, the data will be converted to/from their original types. | |
order : {'C', 'F', 'A', 'K'}, optional | |
Controls the iteration order. 'C' means C order, 'F' means | |
Fortran order, 'A' means 'F' order if all the arrays are Fortran | |
contiguous, 'C' order otherwise, and 'K' means as close to the | |
order the array elements appear in memory as possible. This also | |
affects the element memory order of "allocate" operands, as they | |
are allocated to be compatible with iteration order. | |
Default is 'K'. | |
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional | |
Controls what kind of data casting may occur when making a copy | |
or buffering. Setting this to 'unsafe' is not recommended, | |
as it can adversely affect accumulations. | |
* 'no' means the data types should not be cast at all. | |
* 'equiv' means only byte-order changes are allowed. | |
* 'safe' means only casts which can preserve values are allowed. | |
* 'same_kind' means only safe casts or casts within a kind, | |
like float64 to float32, are allowed. | |
* 'unsafe' means any data conversions may be done. | |
op_axes : list of list of ints, optional | |
If provided, is a list of ints or None for each operands. | |
The list of axes for an operand is a mapping from the dimensions | |
of the iterator to the dimensions of the operand. A value of | |
-1 can be placed for entries, causing that dimension to be | |
treated as "newaxis". | |
itershape : tuple of ints, optional | |
The desired shape of the iterator. This allows "allocate" operands | |
with a dimension mapped by op_axes not corresponding to a dimension | |
of a different operand to get a value not equal to 1 for that | |
dimension. | |
buffersize : int, optional | |
When buffering is enabled, controls the size of the temporary | |
buffers. Set to 0 for the default value. | |
Attributes | |
---------- | |
dtypes : tuple of dtype(s) | |
The data types of the values provided in `value`. This may be | |
different from the operand data types if buffering is enabled. | |
Valid only before the iterator is closed. | |
finished : bool | |
Whether the iteration over the operands is finished or not. | |
has_delayed_bufalloc : bool | |
If True, the iterator was created with the "delay_bufalloc" flag, | |
and no reset() function was called on it yet. | |
has_index : bool | |
If True, the iterator was created with either the "c_index" or | |
the "f_index" flag, and the property `index` can be used to | |
retrieve it. | |
has_multi_index : bool | |
If True, the iterator was created with the "multi_index" flag, | |
and the property `multi_index` can be used to retrieve it. | |
index | |
When the "c_index" or "f_index" flag was used, this property | |
provides access to the index. Raises a ValueError if accessed | |
and `has_index` is False. | |
iterationneedsapi : bool | |
Whether iteration requires access to the Python API, for example | |
if one of the operands is an object array. | |
iterindex : int | |
An index which matches the order of iteration. | |
itersize : int | |
Size of the iterator. | |
itviews | |
Structured view(s) of `operands` in memory, matching the reordered | |
and optimized iterator access pattern. Valid only before the iterator | |
is closed. | |
multi_index | |
When the "multi_index" flag was used, this property | |
provides access to the index. Raises a ValueError if accessed | |
accessed and `has_multi_index` is False. | |
ndim : int | |
The iterator's dimension. | |
nop : int | |
The number of iterator operands. | |
operands : tuple of operand(s) | |
The array(s) to be iterated over. Valid only before the iterator is | |
closed. | |
shape : tuple of ints | |
Shape tuple, the shape of the iterator. | |
value | |
Value of `operands` at current iteration. Normally, this is a | |
tuple of array scalars, but if the flag "external_loop" is used, | |
it is a tuple of one dimensional arrays. | |
Notes | |
----- | |
`nditer` supersedes `flatiter`. The iterator implementation behind | |
`nditer` is also exposed by the NumPy C API. | |
The Python exposure supplies two iteration interfaces, one which follows | |
the Python iterator protocol, and another which mirrors the C-style | |
do-while pattern. The native Python approach is better in most cases, but | |
if you need the iterator's coordinates or index, use the C-style pattern. | |
Examples | |
-------- | |
Here is how we might write an ``iter_add`` function, using the | |
Python iterator protocol:: | |
def iter_add_py(x, y, out=None): | |
addop = np.add | |
it = np.nditer([x, y, out], [], | |
[['readonly'], ['readonly'], ['writeonly','allocate']]) | |
with it: | |
for (a, b, c) in it: | |
addop(a, b, out=c) | |
return it.operands[2] | |
Here is the same function, but following the C-style pattern:: | |
def iter_add(x, y, out=None): | |
addop = np.add | |
it = np.nditer([x, y, out], [], | |
[['readonly'], ['readonly'], ['writeonly','allocate']]) | |
with it: | |
while not it.finished: | |
addop(it[0], it[1], out=it[2]) | |
it.iternext() | |
return it.operands[2] | |
Here is an example outer product function:: | |
def outer_it(x, y, out=None): | |
mulop = np.multiply | |
it = np.nditer([x, y, out], ['external_loop'], | |
[['readonly'], ['readonly'], ['writeonly', 'allocate']], | |
op_axes=[list(range(x.ndim)) + [-1] * y.ndim, | |
[-1] * x.ndim + list(range(y.ndim)), | |
None]) | |
with it: | |
for (a, b, c) in it: | |
mulop(a, b, out=c) | |
return it.operands[2] | |
>>> a = np.arange(2)+1 | |
>>> b = np.arange(3)+1 | |
>>> outer_it(a,b) | |
array([[1, 2, 3], | |
[2, 4, 6]]) | |
Here is an example function which operates like a "lambda" ufunc:: | |
def luf(lamdaexpr, *args, **kwargs): | |
"luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" | |
nargs = len(args) | |
op = (kwargs.get('out',None),) + args | |
it = np.nditer(op, ['buffered','external_loop'], | |
[['writeonly','allocate','no_broadcast']] + | |
[['readonly','nbo','aligned']]*nargs, | |
order=kwargs.get('order','K'), | |
casting=kwargs.get('casting','safe'), | |
buffersize=kwargs.get('buffersize',0)) | |
while not it.finished: | |
it[0] = lamdaexpr(*it[1:]) | |
it.iternext() | |
return it.operands[0] | |
>>> a = np.arange(5) | |
>>> b = np.ones(5) | |
>>> luf(lambda i,j:i*i + j/2, a, b) | |
array([ 0.5, 1.5, 4.5, 9.5, 16.5]) | |
If operand flags `"writeonly"` or `"readwrite"` are used the operands may | |
be views into the original data with the `WRITEBACKIFCOPY` flag. In this case | |
nditer must be used as a context manager or the nditer.close | |
method must be called before using the result. The temporary | |
data will be written back to the original data when the `__exit__` | |
function is called but not before: | |
>>> a = np.arange(6, dtype='i4')[::-2] | |
>>> with nditer(a, [], | |
... [['writeonly', 'updateifcopy']], | |
... casting='unsafe', | |
... op_dtypes=[np.dtype('f4')]) as i: | |
... x = i.operands[0] | |
... x[:] = [-1, -2, -3] | |
... # a still unchanged here | |
>>> a, x
(array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling | |
references (like `x` in the example) may or may not share data with | |
the original data `a`. If writeback semantics were active, i.e. if | |
`x.base.flags.writebackifcopy` is `True`, then exiting the iterator | |
will sever the connection between `x` and `a`, writing to `x` will | |
no longer write to `a`. If writeback semantics are not active, then | |
`x.data` will still point at some part of `a.data`, and writing to | |
one will affect the other. | |
""") | |
# nditer methods | |
add_newdoc('numpy.core', 'nditer', ('copy', | |
""" | |
copy() | |
Get a copy of the iterator in its current state. | |
Examples | |
-------- | |
>>> x = np.arange(10) | |
>>> y = x + 1 | |
>>> it = np.nditer([x, y]) | |
>>> it.next() | |
(array(0), array(1)) | |
>>> it2 = it.copy() | |
>>> it2.next() | |
(array(1), array(2)) | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('operands', | |
""" | |
operands[`Slice`] | |
The array(s) to be iterated over. Valid only before the iterator is closed. | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('debug_print', | |
""" | |
debug_print() | |
Print the current state of the `nditer` instance and debug info to stdout. | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('enable_external_loop', | |
""" | |
enable_external_loop() | |
When the "external_loop" was not used during construction, but | |
is desired, this modifies the iterator to behave as if the flag | |
was specified. | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('iternext', | |
""" | |
iternext() | |
Check whether iterations are left, and perform a single internal iteration | |
without returning the result. Used in the C-style pattern do-while | |
pattern. For an example, see `nditer`. | |
Returns | |
------- | |
iternext : bool | |
Whether or not there are iterations left. | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('remove_axis', | |
""" | |
remove_axis(i) | |
Removes axis `i` from the iterator. Requires that the flag "multi_index" | |
be enabled. | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('remove_multi_index', | |
""" | |
remove_multi_index() | |
When the "multi_index" flag was specified, this removes it, allowing | |
the internal iteration structure to be optimized further. | |
""")) | |
add_newdoc('numpy.core', 'nditer', ('reset', | |
""" | |
reset() | |
Reset the iterator to its initial state. | |
""")) | |
add_newdoc('numpy.core', 'nested_iters', | |
""" | |
Create nditers for use in nested loops | |
Create a tuple of `nditer` objects which iterate in nested loops over | |
different axes of the op argument. The first iterator is used in the | |
outermost loop, the last in the innermost loop. Advancing one will change | |
the subsequent iterators to point at its new element. | |
Parameters | |
---------- | |
op : ndarray or sequence of array_like | |
The array(s) to iterate over. | |
axes : list of list of int | |
Each item is used as an "op_axes" argument to an nditer | |
flags, op_flags, op_dtypes, order, casting, buffersize (optional) | |
See `nditer` parameters of the same name | |
Returns | |
------- | |
iters : tuple of nditer | |
An nditer for each item in `axes`, outermost first | |
See Also | |
-------- | |
nditer | |
Examples | |
-------- | |
Basic usage. Note how y is the "flattened" version of | |
[a[:, 0, :], a[:, 1, 0], a[:, 2, :]] since we specified | |
the first iter's axes as [1] | |
>>> a = np.arange(12).reshape(2, 3, 2) | |
>>> i, j = np.nested_iters(a, [[1], [0, 2]], flags=["multi_index"]) | |
>>> for x in i: | |
... print(i.multi_index) | |
... for y in j: | |
... print('', j.multi_index, y) | |
(0,) | |
(0, 0) 0 | |
(0, 1) 1 | |
(1, 0) 6 | |
(1, 1) 7 | |
(1,) | |
(0, 0) 2 | |
(0, 1) 3 | |
(1, 0) 8 | |
(1, 1) 9 | |
(2,) | |
(0, 0) 4 | |
(0, 1) 5 | |
(1, 0) 10 | |
(1, 1) 11 | |
""") | |
add_newdoc('numpy.core', 'nditer', ('close', | |
""" | |
close() | |
Resolve all writeback semantics in writeable operands. | |
See Also | |
-------- | |
:ref:`nditer-context-manager` | |
""")) | |
############################################################################### | |
# | |
# broadcast | |
# | |
############################################################################### | |
add_newdoc('numpy.core', 'broadcast', | |
""" | |
Produce an object that mimics broadcasting. | |
Parameters | |
---------- | |
in1, in2, ... : array_like | |
Input parameters. | |
Returns | |
------- | |
b : broadcast object | |
Broadcast the input parameters against one another, and | |
return an object that encapsulates the result. | |
Amongst others, it has ``shape`` and ``nd`` properties, and | |
may be used as an iterator. | |
See Also | |
-------- | |
broadcast_arrays | |
broadcast_to | |
Examples | |
-------- | |
Manually adding two vectors, using broadcasting: | |
>>> x = np.array([[1], [2], [3]]) | |
>>> y = np.array([4, 5, 6]) | |
>>> b = np.broadcast(x, y) | |
>>> out = np.empty(b.shape) | |
>>> out.flat = [u+v for (u,v) in b] | |
>>> out | |
array([[ 5., 6., 7.], | |
[ 6., 7., 8.], | |
[ 7., 8., 9.]]) | |
Compare against built-in broadcasting: | |
>>> x + y | |
array([[5, 6, 7], | |
[6, 7, 8], | |
[7, 8, 9]]) | |
""") | |
# attributes | |
add_newdoc('numpy.core', 'broadcast', ('index', | |
""" | |
current index in broadcasted result | |
Examples | |
-------- | |
>>> x = np.array([[1], [2], [3]]) | |
>>> y = np.array([4, 5, 6]) | |
>>> b = np.broadcast(x, y) | |
>>> b.index | |
0 | |
>>> b.next(), b.next(), b.next() | |
((1, 4), (1, 5), (1, 6)) | |
>>> b.index | |
3 | |
""")) | |
add_newdoc('numpy.core', 'broadcast', ('iters', | |
""" | |
tuple of iterators along ``self``'s "components." | |
Returns a tuple of `numpy.flatiter` objects, one for each "component" | |
of ``self``. | |
See Also | |
-------- | |
numpy.flatiter | |
Examples | |
-------- | |
>>> x = np.array([1, 2, 3]) | |
>>> y = np.array([[4], [5], [6]]) | |
>>> b = np.broadcast(x, y) | |
>>> row, col = b.iters | |
>>> row.next(), col.next() | |
(1, 4) | |
""")) | |
add_newdoc('numpy.core', 'broadcast', ('ndim', | |
""" | |
Number of dimensions of broadcasted result. Alias for `nd`. | |
.. versionadded:: 1.12.0 | |
Examples | |
-------- | |
>>> x = np.array([1, 2, 3]) | |
>>> y = np.array([[4], [5], [6]]) | |
>>> b = np.broadcast(x, y) | |
>>> b.ndim | |
2 | |
""")) | |
add_newdoc('numpy.core', 'broadcast', ('nd', | |
""" | |
Number of dimensions of broadcasted result. For code intended for NumPy | |
1.12.0 and later the more consistent `ndim` is preferred. | |
Examples | |
-------- | |
>>> x = np.array([1, 2, 3]) | |
>>> y = np.array([[4], [5], [6]]) | |
>>> b = np.broadcast(x, y) | |
>>> b.nd | |
2 | |
""")) | |
add_newdoc('numpy.core', 'broadcast', ('numiter', | |
""" | |
Number of iterators possessed by the broadcasted result. | |
Examples | |
-------- | |
>>> x = np.array([1, 2, 3]) | |
>>> y = np.array([[4], [5], [6]]) | |
>>> b = np.broadcast(x, y) | |
>>> b.numiter | |
2 | |
""")) | |
add_newdoc('numpy.core', 'broadcast', ('shape', | |
""" | |
Shape of broadcasted result. | |
Examples | |
-------- | |
>>> x = np.array([1, 2, 3]) | |
>>> y = np.array([[4], [5], [6]]) | |
>>> b = np.broadcast(x, y) | |
>>> b.shape | |
(3, 3) | |
""")) | |
add_newdoc('numpy.core', 'broadcast', ('size', | |
""" | |
Total size of broadcasted result. | |
Examples | |
-------- | |
>>> x = np.array([1, 2, 3]) | |
>>> y = np.array([[4], [5], [6]]) | |
>>> b = np.broadcast(x, y) | |
>>> b.size | |
9 | |
""")) | |
# Docstring for numpy.broadcast.reset. Fix: the example line creating `y`
# was missing its closing parenthesis, which made the doctest a syntax
# error when copy-pasted.
add_newdoc('numpy.core', 'broadcast', ('reset',
    """
    reset()

    Reset the broadcasted result's iterator(s).

    Parameters
    ----------
    None

    Returns
    -------
    None

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> y = np.array([[4], [5], [6]])
    >>> b = np.broadcast(x, y)
    >>> b.index
    0
    >>> b.next(), b.next(), b.next()
    ((1, 4), (2, 4), (3, 4))
    >>> b.index
    3
    >>> b.reset()
    >>> b.index
    0

    """))
############################################################################### | |
# | |
# numpy functions | |
# | |
############################################################################### | |
add_newdoc('numpy.core.multiarray', 'array', | |
""" | |
array(object, dtype=None, copy=True, order='K', subok=False, ndmin=0) | |
Create an array. | |
Parameters | |
---------- | |
object : array_like | |
An array, any object exposing the array interface, an object whose | |
__array__ method returns an array, or any (nested) sequence. | |
dtype : data-type, optional | |
The desired data-type for the array. If not given, then the type will | |
be determined as the minimum type required to hold the objects in the | |
sequence. This argument can only be used to 'upcast' the array. For | |
downcasting, use the .astype(t) method. | |
copy : bool, optional | |
If true (default), then the object is copied. Otherwise, a copy will | |
only be made if __array__ returns a copy, if obj is a nested sequence, | |
or if a copy is needed to satisfy any of the other requirements | |
(`dtype`, `order`, etc.). | |
order : {'K', 'A', 'C', 'F'}, optional | |
Specify the memory layout of the array. If object is not an array, the | |
newly created array will be in C order (row major) unless 'F' is | |
specified, in which case it will be in Fortran order (column major). | |
If object is an array the following holds. | |
===== ========= =================================================== | |
order no copy copy=True | |
===== ========= =================================================== | |
'K' unchanged F & C order preserved, otherwise most similar order | |
'A' unchanged F order if input is F and not C, otherwise C order | |
'C' C order C order | |
'F' F order F order | |
===== ========= =================================================== | |
When ``copy=False`` and a copy is made for other reasons, the result is | |
the same as if ``copy=True``, with some exceptions for `A`, see the | |
Notes section. The default order is 'K'. | |
subok : bool, optional | |
If True, then sub-classes will be passed-through, otherwise | |
the returned array will be forced to be a base-class array (default). | |
ndmin : int, optional | |
Specifies the minimum number of dimensions that the resulting | |
array should have. Ones will be pre-pended to the shape as | |
needed to meet this requirement. | |
Returns | |
------- | |
out : ndarray | |
An array object satisfying the specified requirements. | |
See Also | |
-------- | |
empty_like : Return an empty array with shape and type of input. | |
ones_like : Return an array of ones with shape and type of input. | |
zeros_like : Return an array of zeros with shape and type of input. | |
full_like : Return a new array with shape of input filled with value. | |
empty : Return a new uninitialized array. | |
ones : Return a new array setting values to one. | |
zeros : Return a new array setting values to zero. | |
full : Return a new array of given shape filled with value. | |
Notes | |
----- | |
When order is 'A' and `object` is an array in neither 'C' nor 'F' order, | |
and a copy is forced by a change in dtype, then the order of the result is | |
not necessarily 'C' as expected. This is likely a bug. | |
Examples | |
-------- | |
>>> np.array([1, 2, 3]) | |
array([1, 2, 3]) | |
Upcasting: | |
>>> np.array([1, 2, 3.0]) | |
array([ 1., 2., 3.]) | |
More than one dimension: | |
>>> np.array([[1, 2], [3, 4]]) | |
array([[1, 2], | |
[3, 4]]) | |
Minimum dimensions 2: | |
>>> np.array([1, 2, 3], ndmin=2) | |
array([[1, 2, 3]]) | |
Type provided: | |
>>> np.array([1, 2, 3], dtype=complex) | |
array([ 1.+0.j, 2.+0.j, 3.+0.j]) | |
Data-type consisting of more than one element: | |
>>> x = np.array([(1,2),(3,4)],dtype=[('a','<i4'),('b','<i4')]) | |
>>> x['a'] | |
array([1, 3]) | |
Creating an array from sub-classes: | |
>>> np.array(np.mat('1 2; 3 4')) | |
array([[1, 2], | |
[3, 4]]) | |
>>> np.array(np.mat('1 2; 3 4'), subok=True) | |
matrix([[1, 2], | |
[3, 4]]) | |
""") | |
add_newdoc('numpy.core.multiarray', 'empty', | |
""" | |
empty(shape, dtype=float, order='C') | |
Return a new array of given shape and type, without initializing entries. | |
Parameters | |
---------- | |
shape : int or tuple of int | |
Shape of the empty array, e.g., ``(2, 3)`` or ``2``. | |
dtype : data-type, optional | |
Desired output data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`. | |
order : {'C', 'F'}, optional, default: 'C' | |
Whether to store multi-dimensional data in row-major | |
(C-style) or column-major (Fortran-style) order in | |
memory. | |
Returns | |
------- | |
out : ndarray | |
Array of uninitialized (arbitrary) data of the given shape, dtype, and | |
order. Object arrays will be initialized to None. | |
See Also | |
-------- | |
empty_like : Return an empty array with shape and type of input. | |
ones : Return a new array setting values to one. | |
zeros : Return a new array setting values to zero. | |
full : Return a new array of given shape filled with value. | |
Notes | |
----- | |
`empty`, unlike `zeros`, does not set the array values to zero, | |
and may therefore be marginally faster. On the other hand, it requires | |
the user to manually set all the values in the array, and should be | |
used with caution. | |
Examples | |
-------- | |
>>> np.empty([2, 2]) | |
array([[ -9.74499359e+001, 6.69583040e-309], | |
[ 2.13182611e-314, 3.06959433e-309]]) #random | |
>>> np.empty([2, 2], dtype=int) | |
array([[-1073741821, -1067949133], | |
[ 496041986, 19249760]]) #random | |
""") | |
add_newdoc('numpy.core.multiarray', 'scalar', | |
""" | |
scalar(dtype, obj) | |
Return a new scalar array of the given type initialized with obj. | |
This function is meant mainly for pickle support. `dtype` must be a | |
valid data-type descriptor. If `dtype` corresponds to an object | |
descriptor, then `obj` can be any object, otherwise `obj` must be a | |
string. If `obj` is not given, it will be interpreted as None for object | |
type and as zeros for all other types. | |
""") | |
add_newdoc('numpy.core.multiarray', 'zeros', | |
""" | |
zeros(shape, dtype=float, order='C') | |
Return a new array of given shape and type, filled with zeros. | |
Parameters | |
---------- | |
shape : int or tuple of ints | |
Shape of the new array, e.g., ``(2, 3)`` or ``2``. | |
dtype : data-type, optional | |
The desired data-type for the array, e.g., `numpy.int8`. Default is | |
`numpy.float64`. | |
order : {'C', 'F'}, optional, default: 'C' | |
Whether to store multi-dimensional data in row-major | |
(C-style) or column-major (Fortran-style) order in | |
memory. | |
Returns | |
------- | |
out : ndarray | |
Array of zeros with the given shape, dtype, and order. | |
See Also | |
-------- | |
zeros_like : Return an array of zeros with shape and type of input. | |
empty : Return a new uninitialized array. | |
ones : Return a new array setting values to one. | |
full : Return a new array of given shape filled with value. | |
Examples | |
-------- | |
>>> np.zeros(5) | |
array([ 0., 0., 0., 0., 0.]) | |
>>> np.zeros((5,), dtype=int) | |
array([0, 0, 0, 0, 0]) | |
>>> np.zeros((2, 1)) | |
array([[ 0.], | |
[ 0.]]) | |
>>> s = (2,2) | |
>>> np.zeros(s) | |
array([[ 0., 0.], | |
[ 0., 0.]]) | |
>>> np.zeros((2,), dtype=[('x', 'i4'), ('y', 'i4')]) # custom dtype | |
array([(0, 0), (0, 0)], | |
dtype=[('x', '<i4'), ('y', '<i4')]) | |
""") | |
add_newdoc('numpy.core.multiarray', 'set_typeDict', | |
"""set_typeDict(dict) | |
Set the internal dictionary that can look up an array type using a | |
registered code. | |
""") | |
add_newdoc('numpy.core.multiarray', 'fromstring', | |
""" | |
fromstring(string, dtype=float, count=-1, sep='') | |
A new 1-D array initialized from text data in a string. | |
Parameters | |
---------- | |
string : str | |
A string containing the data. | |
dtype : data-type, optional | |
The data type of the array; default: float. For binary input data, | |
the data must be in exactly this format. | |
count : int, optional | |
Read this number of `dtype` elements from the data. If this is | |
negative (the default), the count will be determined from the | |
length of the data. | |
sep : str, optional | |
The string separating numbers in the data; extra whitespace between | |
elements is also ignored. | |
.. deprecated:: 1.14 | |
If this argument is not provided, `fromstring` falls back on the | |
behaviour of `frombuffer` after encoding unicode string inputs as | |
either utf-8 (python 3), or the default encoding (python 2). | |
Returns | |
------- | |
arr : ndarray | |
The constructed array. | |
Raises | |
------ | |
ValueError | |
If the string is not the correct size to satisfy the requested | |
`dtype` and `count`. | |
See Also | |
-------- | |
frombuffer, fromfile, fromiter | |
Examples | |
-------- | |
>>> np.fromstring('1 2', dtype=int, sep=' ') | |
array([1, 2]) | |
>>> np.fromstring('1, 2', dtype=int, sep=',') | |
array([1, 2]) | |
""") | |
add_newdoc('numpy.core.multiarray', 'compare_chararrays', | |
""" | |
compare_chararrays(a, b, cmp_op, rstrip) | |
Performs element-wise comparison of two string arrays using the | |
comparison operator specified by `cmp_op`. | |
Parameters | |
---------- | |
a, b : array_like | |
Arrays to be compared. | |
cmp_op : {"<", "<=", "==", ">=", ">", "!="} | |
Type of comparison. | |
rstrip : Boolean | |
If True, the spaces at the end of Strings are removed before the comparison. | |
Returns | |
------- | |
out : ndarray | |
The output array of type Boolean with the same shape as a and b. | |
Raises | |
------ | |
ValueError | |
If `cmp_op` is not valid. | |
TypeError | |
If at least one of `a` or `b` is a non-string array | |
Examples | |
-------- | |
>>> a = np.array(["a", "b", "cde"]) | |
>>> b = np.array(["a", "a", "dec"]) | |
>>> np.compare_chararrays(a, b, ">", True) | |
array([False, True, False]) | |
""") | |
add_newdoc('numpy.core.multiarray', 'fromiter', | |
""" | |
fromiter(iterable, dtype, count=-1) | |
Create a new 1-dimensional array from an iterable object. | |
Parameters | |
---------- | |
iterable : iterable object | |
An iterable object providing data for the array. | |
dtype : data-type | |
The data-type of the returned array. | |
count : int, optional | |
The number of items to read from *iterable*. The default is -1, | |
which means all data is read. | |
Returns | |
------- | |
out : ndarray | |
The output array. | |
Notes | |
----- | |
Specify `count` to improve performance. It allows ``fromiter`` to | |
pre-allocate the output array, instead of resizing it on demand. | |
Examples | |
-------- | |
>>> iterable = (x*x for x in range(5)) | |
>>> np.fromiter(iterable, float) | |
array([ 0., 1., 4., 9., 16.]) | |
""") | |
add_newdoc('numpy.core.multiarray', 'fromfile', | |
""" | |
fromfile(file, dtype=float, count=-1, sep='') | |
Construct an array from data in a text or binary file. | |
A highly efficient way of reading binary data with a known data-type, | |
as well as parsing simply formatted text files. Data written using the | |
`tofile` method can be read using this function. | |
Parameters | |
---------- | |
file : file or str | |
Open file object or filename. | |
dtype : data-type | |
Data type of the returned array. | |
For binary files, it is used to determine the size and byte-order | |
of the items in the file. | |
count : int | |
Number of items to read. ``-1`` means all items (i.e., the complete | |
file). | |
sep : str | |
Separator between items if file is a text file. | |
Empty ("") separator means the file should be treated as binary. | |
Spaces (" ") in the separator match zero or more whitespace characters. | |
A separator consisting only of spaces must match at least one | |
whitespace. | |
See also | |
-------- | |
load, save | |
ndarray.tofile | |
loadtxt : More flexible way of loading data from a text file. | |
Notes | |
----- | |
Do not rely on the combination of `tofile` and `fromfile` for | |
data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is | |
saved. Data can be stored in the platform independent ``.npy`` format | |
using `save` and `load` instead. | |
Examples | |
-------- | |
Construct an ndarray: | |
>>> dt = np.dtype([('time', [('min', int), ('sec', int)]), | |
... ('temp', float)]) | |
>>> x = np.zeros((1,), dtype=dt) | |
>>> x['time']['min'] = 10; x['temp'] = 98.25 | |
>>> x | |
array([((10, 0), 98.25)], | |
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) | |
Save the raw data to disk: | |
>>> import os | |
>>> fname = os.tmpnam() | |
>>> x.tofile(fname) | |
Read the raw data from disk: | |
>>> np.fromfile(fname, dtype=dt) | |
array([((10, 0), 98.25)], | |
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) | |
The recommended way to store and load data: | |
>>> np.save(fname, x) | |
>>> np.load(fname + '.npy') | |
array([((10, 0), 98.25)], | |
dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) | |
""") | |
add_newdoc('numpy.core.multiarray', 'frombuffer', | |
""" | |
frombuffer(buffer, dtype=float, count=-1, offset=0) | |
Interpret a buffer as a 1-dimensional array. | |
Parameters | |
---------- | |
buffer : buffer_like | |
An object that exposes the buffer interface. | |
dtype : data-type, optional | |
Data-type of the returned array; default: float. | |
count : int, optional | |
Number of items to read. ``-1`` means all data in the buffer. | |
offset : int, optional | |
Start reading the buffer from this offset (in bytes); default: 0. | |
Notes | |
----- | |
If the buffer has data that is not in machine byte-order, this should | |
be specified as part of the data-type, e.g.:: | |
>>> dt = np.dtype(int) | |
>>> dt = dt.newbyteorder('>') | |
>>> np.frombuffer(buf, dtype=dt) | |
The data of the resulting array will not be byteswapped, but will be | |
interpreted correctly. | |
Examples | |
-------- | |
>>> s = 'hello world' | |
>>> np.frombuffer(s, dtype='S1', count=5, offset=6) | |
array(['w', 'o', 'r', 'l', 'd'], | |
dtype='|S1') | |
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) | |
array([1, 2], dtype=uint8) | |
>>> np.frombuffer(b'\\x01\\x02\\x03\\x04\\x05', dtype=np.uint8, count=3) | |
array([1, 2, 3], dtype=uint8) | |
""") | |
add_newdoc('numpy.core', 'fastCopyAndTranspose', | |
"""_fastCopyAndTranspose(a)""") | |
add_newdoc('numpy.core.multiarray', 'correlate', | |
"""cross_correlate(a,v, mode=0)""") | |
add_newdoc('numpy.core.multiarray', 'arange', | |
""" | |
arange([start,] stop[, step,], dtype=None) | |
Return evenly spaced values within a given interval. | |
Values are generated within the half-open interval ``[start, stop)`` | |
(in other words, the interval including `start` but excluding `stop`). | |
For integer arguments the function is equivalent to the Python built-in | |
`range` function, but returns an ndarray rather than a list. | |
When using a non-integer step, such as 0.1, the results will often not | |
be consistent. It is better to use `numpy.linspace` for these cases. | |
Parameters | |
---------- | |
start : number, optional | |
Start of interval. The interval includes this value. The default | |
start value is 0. | |
stop : number | |
End of interval. The interval does not include this value, except | |
in some cases where `step` is not an integer and floating point | |
round-off affects the length of `out`. | |
step : number, optional | |
Spacing between values. For any output `out`, this is the distance | |
between two adjacent values, ``out[i+1] - out[i]``. The default | |
step size is 1. If `step` is specified as a positional argument,
`start` must also be given. | |
dtype : dtype | |
The type of the output array. If `dtype` is not given, infer the data | |
type from the other input arguments. | |
Returns | |
------- | |
arange : ndarray | |
Array of evenly spaced values. | |
For floating point arguments, the length of the result is | |
``ceil((stop - start)/step)``. Because of floating point overflow, | |
this rule may result in the last element of `out` being greater | |
than `stop`. | |
See Also | |
-------- | |
linspace : Evenly spaced numbers with careful handling of endpoints. | |
ogrid: Arrays of evenly spaced numbers in N-dimensions. | |
mgrid: Grid-shaped arrays of evenly spaced numbers in N-dimensions. | |
Examples | |
-------- | |
>>> np.arange(3) | |
array([0, 1, 2]) | |
>>> np.arange(3.0) | |
array([ 0., 1., 2.]) | |
>>> np.arange(3,7) | |
array([3, 4, 5, 6]) | |
>>> np.arange(3,7,2) | |
array([3, 5]) | |
""") | |
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version', | |
"""_get_ndarray_c_version() | |
Return the compile time NDARRAY_VERSION number. | |
""") | |
add_newdoc('numpy.core.multiarray', '_reconstruct', | |
"""_reconstruct(subtype, shape, dtype) | |
Construct an empty array. Used by Pickles. | |
""") | |
add_newdoc('numpy.core.multiarray', 'set_string_function', | |
""" | |
set_string_function(f, repr=1) | |
Internal method to set a function to be used when pretty printing arrays. | |
""") | |
add_newdoc('numpy.core.multiarray', 'set_numeric_ops', | |
""" | |
set_numeric_ops(op1=func1, op2=func2, ...) | |
Set numerical operators for array objects. | |
.. deprecated:: 1.16 | |
For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`. | |
For ndarray subclasses, define the ``__array_ufunc__`` method and | |
override the relevant ufunc. | |
Parameters | |
---------- | |
op1, op2, ... : callable | |
Each ``op = func`` pair describes an operator to be replaced. | |
For example, ``add = lambda x, y: np.add(x, y) % 5`` would replace | |
addition by modulus 5 addition. | |
Returns | |
------- | |
saved_ops : list of callables | |
A list of all operators, stored before making replacements. | |
Notes | |
----- | |
.. WARNING:: | |
Use with care! Incorrect usage may lead to memory errors. | |
A function replacing an operator cannot make use of that operator. | |
For example, when replacing add, you may not use ``+``. Instead, | |
directly call ufuncs. | |
Examples | |
-------- | |
>>> def add_mod5(x, y): | |
... return np.add(x, y) % 5 | |
... | |
>>> old_funcs = np.set_numeric_ops(add=add_mod5) | |
>>> x = np.arange(12).reshape((3, 4)) | |
>>> x + x | |
array([[0, 2, 4, 1], | |
[3, 0, 2, 4], | |
[1, 3, 0, 2]]) | |
>>> ignore = np.set_numeric_ops(**old_funcs) # restore operators | |
""") | |
add_newdoc('numpy.core.multiarray', 'promote_types', | |
""" | |
promote_types(type1, type2) | |
Returns the data type with the smallest size and smallest scalar | |
kind to which both ``type1`` and ``type2`` may be safely cast. | |
The returned data type is always in native byte order. | |
This function is symmetric, but rarely associative. | |
Parameters | |
---------- | |
type1 : dtype or dtype specifier | |
First data type. | |
type2 : dtype or dtype specifier | |
Second data type. | |
Returns | |
------- | |
out : dtype | |
The promoted data type. | |
Notes | |
----- | |
.. versionadded:: 1.6.0 | |
Starting in NumPy 1.9, promote_types function now returns a valid string | |
length when given an integer or float dtype as one argument and a string | |
dtype as another argument. Previously it always returned the input string | |
dtype, even if it wasn't long enough to store the max integer/float value | |
converted to a string. | |
See Also | |
-------- | |
result_type, dtype, can_cast | |
Examples | |
-------- | |
>>> np.promote_types('f4', 'f8') | |
dtype('float64') | |
>>> np.promote_types('i8', 'f4') | |
dtype('float64') | |
>>> np.promote_types('>i8', '<c8') | |
dtype('complex128') | |
>>> np.promote_types('i4', 'S8') | |
dtype('S11') | |
An example of a non-associative case: | |
>>> p = np.promote_types | |
>>> p('S', p('i1', 'u1')) | |
dtype('S6') | |
>>> p(p('S', 'i1'), 'u1') | |
dtype('S4') | |
""") | |
add_newdoc('numpy.core.multiarray', 'newbuffer', | |
""" | |
newbuffer(size) | |
Return a new uninitialized buffer object. | |
Parameters | |
---------- | |
size : int | |
Size in bytes of returned buffer object. | |
Returns | |
------- | |
newbuffer : buffer object | |
Returned, uninitialized buffer object of `size` bytes. | |
""") | |
add_newdoc('numpy.core.multiarray', 'getbuffer', | |
""" | |
getbuffer(obj [,offset[, size]]) | |
Create a buffer object from the given object referencing a slice of | |
length size starting at offset. | |
Default is the entire buffer. A read-write buffer is attempted followed | |
by a read-only buffer. | |
Parameters | |
---------- | |
obj : object | |
offset : int, optional | |
size : int, optional | |
Returns | |
------- | |
buffer_obj : buffer | |
Examples | |
-------- | |
>>> buf = np.getbuffer(np.ones(5), 1, 3) | |
>>> len(buf) | |
3 | |
>>> buf[0] | |
'\\x00' | |
>>> buf | |
<read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0> | |
""") | |
add_newdoc('numpy.core.multiarray', 'c_einsum', | |
""" | |
c_einsum(subscripts, *operands, out=None, dtype=None, order='K', | |
casting='safe') | |
*This documentation shadows that of the native python implementation of the `einsum` function, | |
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.* | |
Evaluates the Einstein summation convention on the operands. | |
Using the Einstein summation convention, many common multi-dimensional, | |
linear algebraic array operations can be represented in a simple fashion. | |
In *implicit* mode `einsum` computes these values. | |
In *explicit* mode, `einsum` provides further flexibility to compute | |
other array operations that might not be considered classical Einstein | |
summation operations, by disabling, or forcing summation over specified | |
subscript labels. | |
See the notes and examples for clarification. | |
Parameters | |
---------- | |
subscripts : str | |
Specifies the subscripts for summation as comma separated list of | |
subscript labels. An implicit (classical Einstein summation) | |
calculation is performed unless the explicit indicator '->' is | |
included as well as subscript labels of the precise output form. | |
operands : list of array_like | |
These are the arrays for the operation. | |
out : ndarray, optional | |
If provided, the calculation is done into this array. | |
dtype : {data-type, None}, optional | |
If provided, forces the calculation to use the data type specified. | |
Note that you may have to also give a more liberal `casting` | |
parameter to allow the conversions. Default is None. | |
order : {'C', 'F', 'A', 'K'}, optional | |
Controls the memory layout of the output. 'C' means it should | |
be C contiguous. 'F' means it should be Fortran contiguous, | |
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise. | |
'K' means it should be as close to the layout as the inputs as | |
is possible, including arbitrarily permuted axes. | |
Default is 'K'. | |
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional | |
Controls what kind of data casting may occur. Setting this to | |
'unsafe' is not recommended, as it can adversely affect accumulations. | |
* 'no' means the data types should not be cast at all. | |
* 'equiv' means only byte-order changes are allowed. | |
* 'safe' means only casts which can preserve values are allowed. | |
* 'same_kind' means only safe casts or casts within a kind, | |
like float64 to float32, are allowed. | |
* 'unsafe' means any data conversions may be done. | |
Default is 'safe'. | |
optimize : {False, True, 'greedy', 'optimal'}, optional | |
Controls if intermediate optimization should occur. No optimization | |
will occur if False and True will default to the 'greedy' algorithm. | |
Also accepts an explicit contraction list from the ``np.einsum_path`` | |
function. See ``np.einsum_path`` for more details. Defaults to False. | |
Returns | |
------- | |
output : ndarray | |
The calculation based on the Einstein summation convention. | |
See Also | |
-------- | |
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot | |
Notes | |
----- | |
.. versionadded:: 1.6.0 | |
The Einstein summation convention can be used to compute | |
many multi-dimensional, linear algebraic array operations. `einsum` | |
provides a succinct way of representing these. | |
A non-exhaustive list of these operations, | |
which can be computed by `einsum`, is shown below along with examples: | |
* Trace of an array, :py:func:`numpy.trace`. | |
* Return a diagonal, :py:func:`numpy.diag`. | |
* Array axis summations, :py:func:`numpy.sum`. | |
* Transpositions and permutations, :py:func:`numpy.transpose`. | |
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`. | |
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`. | |
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`. | |
* Tensor contractions, :py:func:`numpy.tensordot`. | |
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`. | |
The subscripts string is a comma-separated list of subscript labels, | |
where each label refers to a dimension of the corresponding operand. | |
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` | |
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label | |
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a | |
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` | |
describes traditional matrix multiplication and is equivalent to | |
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one | |
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent | |
to :py:func:`np.trace(a) <numpy.trace>`. | |
In *implicit mode*, the chosen subscripts are important | |
since the axes of the output are reordered alphabetically. This | |
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while | |
``np.einsum('ji', a)`` takes its transpose. Additionally, | |
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, | |
``np.einsum('ij,jh', a, b)`` returns the transpose of the | |
multiplication since subscript 'h' precedes subscript 'i'. | |
In *explicit mode* the output can be directly controlled by | |
specifying output subscript labels. This requires the | |
identifier '->' as well as the list of output subscript labels. | |
This feature increases the flexibility of the function since | |
summing can be disabled or forced when required. The call | |
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`, | |
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`. | |
The difference is that `einsum` does not allow broadcasting by default. | |
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the | |
order of the output subscript labels and therefore returns matrix | |
multiplication, unlike the example above in implicit mode. | |
To enable and control broadcasting, use an ellipsis. Default | |
NumPy-style broadcasting is done by adding an ellipsis | |
to the left of each term, like ``np.einsum('...ii->...i', a)``. | |
To take the trace along the first and last axes, | |
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix | |
product with the left-most indices instead of rightmost, one can do | |
``np.einsum('ij...,jk...->ik...', a, b)``. | |
When there is only one operand, no axes are summed, and no output | |
parameter is provided, a view into the operand is returned instead | |
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` | |
produces a view (changed in version 1.10.0). | |
`einsum` also provides an alternative way to provide the subscripts | |
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. | |
If the output shape is not provided in this format `einsum` will be | |
calculated in implicit mode, otherwise it will be performed explicitly. | |
The examples below have corresponding `einsum` calls with the two | |
parameter methods. | |
.. versionadded:: 1.10.0 | |
Views returned from einsum are now writeable whenever the input array | |
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now | |
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>` | |
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal | |
of a 2D array. | |
Examples | |
-------- | |
>>> a = np.arange(25).reshape(5,5) | |
>>> b = np.arange(5) | |
>>> c = np.arange(6).reshape(2,3) | |
Trace of a matrix: | |
>>> np.einsum('ii', a) | |
60 | |
>>> np.einsum(a, [0,0]) | |
60 | |
>>> np.trace(a) | |
60 | |
Extract the diagonal (requires explicit form): | |
>>> np.einsum('ii->i', a) | |
array([ 0, 6, 12, 18, 24]) | |
>>> np.einsum(a, [0,0], [0]) | |
array([ 0, 6, 12, 18, 24]) | |
>>> np.diag(a) | |
array([ 0, 6, 12, 18, 24]) | |
Sum over an axis (requires explicit form): | |
>>> np.einsum('ij->i', a) | |
array([ 10, 35, 60, 85, 110]) | |
>>> np.einsum(a, [0,1], [0]) | |
array([ 10, 35, 60, 85, 110]) | |
>>> np.sum(a, axis=1) | |
array([ 10, 35, 60, 85, 110]) | |
For higher dimensional arrays summing a single axis can be done with ellipsis: | |
>>> np.einsum('...j->...', a) | |
array([ 10, 35, 60, 85, 110]) | |
>>> np.einsum(a, [Ellipsis,1], [Ellipsis]) | |
array([ 10, 35, 60, 85, 110]) | |
Compute a matrix transpose, or reorder any number of axes: | |
>>> np.einsum('ji', c) | |
array([[0, 3], | |
[1, 4], | |
[2, 5]]) | |
>>> np.einsum('ij->ji', c) | |
array([[0, 3], | |
[1, 4], | |
[2, 5]]) | |
>>> np.einsum(c, [1,0]) | |
array([[0, 3], | |
[1, 4], | |
[2, 5]]) | |
>>> np.transpose(c) | |
array([[0, 3], | |
[1, 4], | |
[2, 5]]) | |
Vector inner products: | |
>>> np.einsum('i,i', b, b) | |
30 | |
>>> np.einsum(b, [0], b, [0]) | |
30 | |
>>> np.inner(b,b) | |
30 | |
Matrix vector multiplication: | |
>>> np.einsum('ij,j', a, b) | |
array([ 30, 80, 130, 180, 230]) | |
>>> np.einsum(a, [0,1], b, [1]) | |
array([ 30, 80, 130, 180, 230]) | |
>>> np.dot(a, b) | |
array([ 30, 80, 130, 180, 230]) | |
>>> np.einsum('...j,j', a, b) | |
array([ 30, 80, 130, 180, 230]) | |
Broadcasting and scalar multiplication: | |
>>> np.einsum('..., ...', 3, c) | |
array([[ 0, 3, 6], | |
[ 9, 12, 15]]) | |
>>> np.einsum(',ij', 3, c) | |
array([[ 0, 3, 6], | |
[ 9, 12, 15]]) | |
>>> np.einsum(3, [Ellipsis], c, [Ellipsis]) | |
array([[ 0, 3, 6], | |
[ 9, 12, 15]]) | |
>>> np.multiply(3, c) | |
array([[ 0, 3, 6], | |
[ 9, 12, 15]]) | |
Vector outer product: | |
>>> np.einsum('i,j', np.arange(2)+1, b) | |
array([[0, 1, 2, 3, 4], | |
[0, 2, 4, 6, 8]]) | |
>>> np.einsum(np.arange(2)+1, [0], b, [1]) | |
array([[0, 1, 2, 3, 4], | |
[0, 2, 4, 6, 8]]) | |
>>> np.outer(np.arange(2)+1, b) | |
array([[0, 1, 2, 3, 4], | |
[0, 2, 4, 6, 8]]) | |
Tensor contraction: | |
>>> a = np.arange(60.).reshape(3,4,5) | |
>>> b = np.arange(24.).reshape(4,3,2) | |
>>> np.einsum('ijk,jil->kl', a, b) | |
array([[ 4400., 4730.], | |
[ 4532., 4874.], | |
[ 4664., 5018.], | |
[ 4796., 5162.], | |
[ 4928., 5306.]]) | |
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) | |
array([[ 4400., 4730.], | |
[ 4532., 4874.], | |
[ 4664., 5018.], | |
[ 4796., 5162.], | |
[ 4928., 5306.]]) | |
>>> np.tensordot(a,b, axes=([1,0],[0,1])) | |
array([[ 4400., 4730.], | |
[ 4532., 4874.], | |
[ 4664., 5018.], | |
[ 4796., 5162.], | |
[ 4928., 5306.]]) | |
Writeable returned arrays (since version 1.10.0): | |
>>> a = np.zeros((3, 3)) | |
>>> np.einsum('ii->i', a)[:] = 1 | |
>>> a | |
array([[ 1., 0., 0.], | |
[ 0., 1., 0.], | |
[ 0., 0., 1.]]) | |
Example of ellipsis use: | |
>>> a = np.arange(6).reshape((3,2)) | |
>>> b = np.arange(12).reshape((4,3)) | |
>>> np.einsum('ki,jk->ij', a, b) | |
array([[10, 28, 46, 64], | |
[13, 40, 67, 94]]) | |
>>> np.einsum('ki,...k->i...', a, b) | |
array([[10, 28, 46, 64], | |
[13, 40, 67, 94]]) | |
>>> np.einsum('k...,jk', a, b) | |
array([[10, 28, 46, 64], | |
[13, 40, 67, 94]]) | |
""") | |
############################################################################## | |
# | |
# Documentation for ndarray attributes and methods | |
# | |
############################################################################## | |
############################################################################## | |
# | |
# ndarray object | |
# | |
############################################################################## | |
# Main docstring for the ndarray type itself.  ndarray is implemented in C
# (numpy.core.multiarray), so its documentation is attached here from Python
# via add_newdoc rather than written in the C source.
add_newdoc('numpy.core.multiarray', 'ndarray',
    """
    ndarray(shape, dtype=float, buffer=None, offset=0,
            strides=None, order=None)

    An array object represents a multidimensional, homogeneous array
    of fixed-size items.  An associated data-type object describes the
    format of each element in the array (its byte-order, how many bytes it
    occupies in memory, whether it is an integer, a floating point number,
    or something else, etc.)

    Arrays should be constructed using `array`, `zeros` or `empty` (refer
    to the See Also section below).  The parameters given here refer to
    a low-level method (`ndarray(...)`) for instantiating an array.

    For more information, refer to the `numpy` module and examine the
    methods and attributes of an array.

    Parameters
    ----------
    (for the __new__ method; see Notes below)

    shape : tuple of ints
        Shape of created array.
    dtype : data-type, optional
        Any object that can be interpreted as a numpy data type.
    buffer : object exposing buffer interface, optional
        Used to fill the array with data.
    offset : int, optional
        Offset of array data in buffer.
    strides : tuple of ints, optional
        Strides of data in memory.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) order.

    Attributes
    ----------
    T : ndarray
        Transpose of the array.
    data : buffer
        The array's elements, in memory.
    dtype : dtype object
        Describes the format of the elements in the array.
    flags : dict
        Dictionary containing information related to memory use, e.g.,
        'C_CONTIGUOUS', 'OWNDATA', 'WRITEABLE', etc.
    flat : numpy.flatiter object
        Flattened version of the array as an iterator.  The iterator
        allows assignments, e.g., ``x.flat = 3`` (See `ndarray.flat` for
        assignment examples; TODO).
    imag : ndarray
        Imaginary part of the array.
    real : ndarray
        Real part of the array.
    size : int
        Number of elements in the array.
    itemsize : int
        The memory use of each array element in bytes.
    nbytes : int
        The total number of bytes required to store the array data,
        i.e., ``itemsize * size``.
    ndim : int
        The array's number of dimensions.
    shape : tuple of ints
        Shape of the array.
    strides : tuple of ints
        The step-size required to move from one element to the next in
        memory. For example, a contiguous ``(3, 4)`` array of type
        ``int16`` in C-order has strides ``(8, 2)``.  This implies that
        to move from element to element in memory requires jumps of 2 bytes.
        To move from row-to-row, one needs to jump 8 bytes at a time
        (``2 * 4``).
    ctypes : ctypes object
        Class containing properties of the array needed for interaction
        with ctypes.
    base : ndarray
        If the array is a view into another array, that array is its `base`
        (unless that array is also a view).  The `base` array is where the
        array data is actually stored.

    See Also
    --------
    array : Construct an array.
    zeros : Create an array, each element of which is zero.
    empty : Create an array, but leave its allocated memory unchanged (i.e.,
            it contains "garbage").
    dtype : Create a data-type.

    Notes
    -----
    There are two modes of creating an array using ``__new__``:

    1. If `buffer` is None, then only `shape`, `dtype`, and `order`
       are used.
    2. If `buffer` is an object exposing the buffer interface, then
       all keywords are interpreted.

    No ``__init__`` method is needed because the array is fully initialized
    after the ``__new__`` method.

    Examples
    --------
    These examples illustrate the low-level `ndarray` constructor.  Refer
    to the `See Also` section above for easier ways of constructing an
    ndarray.

    First mode, `buffer` is None:

    >>> np.ndarray(shape=(2,2), dtype=float, order='F')
    array([[ -1.13698227e+002,   4.25087011e-303],
           [  2.88528414e-306,   3.27025015e-309]])         #random

    Second mode:

    >>> np.ndarray((2,), buffer=np.array([1,2,3]),
    ...            offset=np.int_().itemsize,
    ...            dtype=int) # offset = 1*itemsize, i.e. skip first element
    array([2, 3])

    """)
############################################################################## | |
# | |
# ndarray attributes | |
# | |
############################################################################## | |
# Docstrings for ndarray's array-protocol / ctypes-interop attributes.
# These are C-level slots, so their one-line docs are attached here.
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_interface__',
    """Array protocol: Python side."""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_finalize__',
    """None."""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_priority__',
    """Array priority."""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
    """Array protocol: C-struct side."""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
    """Allow the array to be interpreted as a ctypes object by returning the
    data-memory location as an integer

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
    """
    Base object if memory is from some other object.

    Examples
    --------
    The base of an array that owns its memory is None:

    >>> x = np.array([1,2,3,4])
    >>> x.base is None
    True

    Slicing creates a view, whose memory is shared with x:

    >>> y = x[2:]
    >>> y.base is x
    True

    """))
# Docstring for the ndarray.ctypes accessor (wraps the array for use with
# the ctypes foreign-function module; the wrapper lives in
# numpy.core._internal._ctypes, referenced by the autodoc directives below).
add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
    """
    An object to simplify the interaction of the array with the ctypes
    module.

    This attribute creates an object that makes it easier to use arrays
    when calling shared libraries with the ctypes module. The returned
    object has, among others, data, shape, and strides attributes (see
    Notes below) which themselves return ctypes objects that can be used
    as arguments to a shared library.

    Parameters
    ----------
    None

    Returns
    -------
    c : Python object
        Possessing attributes data, shape, strides, etc.

    See Also
    --------
    numpy.ctypeslib

    Notes
    -----
    Below are the public attributes of this object which were documented
    in "Guide to NumPy" (we have omitted undocumented public attributes,
    as well as documented private attributes):

    .. autoattribute:: numpy.core._internal._ctypes.data

    .. autoattribute:: numpy.core._internal._ctypes.shape

    .. autoattribute:: numpy.core._internal._ctypes.strides

    .. automethod:: numpy.core._internal._ctypes.data_as

    .. automethod:: numpy.core._internal._ctypes.shape_as

    .. automethod:: numpy.core._internal._ctypes.strides_as

    If the ctypes module is not available, then the ctypes attribute
    of array objects still returns something useful, but ctypes objects
    are not returned and errors may be raised instead. In particular,
    the object will still have the as parameter attribute which will
    return an integer equal to the data attribute.

    Examples
    --------
    >>> import ctypes
    >>> x
    array([[0, 1],
           [2, 3]])
    >>> x.ctypes.data
    30439712
    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long))
    <ctypes.LP_c_long object at 0x01F01300>
    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_long)).contents
    c_long(0)
    >>> x.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong)).contents
    c_longlong(4294967296L)
    >>> x.ctypes.shape
    <numpy.core._internal.c_long_Array_2 object at 0x01FFD580>
    >>> x.ctypes.shape_as(ctypes.c_long)
    <numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
    >>> x.ctypes.strides
    <numpy.core._internal.c_long_Array_2 object at 0x01FCE620>
    >>> x.ctypes.strides_as(ctypes.c_longlong)
    <numpy.core._internal.c_longlong_Array_2 object at 0x01F01300>

    """))
# Docstrings for the basic per-array descriptor attributes:
# data (raw buffer), dtype (element type), imag, and itemsize.
add_newdoc('numpy.core.multiarray', 'ndarray', ('data',
    """Python buffer object pointing to the start of the array's data."""))


add_newdoc('numpy.core.multiarray', 'ndarray', ('dtype',
    """
    Data-type of the array's elements.

    Parameters
    ----------
    None

    Returns
    -------
    d : numpy dtype object

    See Also
    --------
    numpy.dtype

    Examples
    --------
    >>> x
    array([[0, 1],
           [2, 3]])
    >>> x.dtype
    dtype('int32')
    >>> type(x.dtype)
    <type 'numpy.dtype'>

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('imag',
    """
    The imaginary part of the array.

    Examples
    --------
    >>> x = np.sqrt([1+0j, 0+1j])
    >>> x.imag
    array([ 0.        ,  0.70710678])
    >>> x.imag.dtype
    dtype('float64')

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('itemsize',
    """
    Length of one array element in bytes.

    Examples
    --------
    >>> x = np.array([1,2,3], dtype=np.float64)
    >>> x.itemsize
    8
    >>> x = np.array([1,2,3], dtype=np.complex128)
    >>> x.itemsize
    16

    """))
# Docstring for ndarray.flags (the flagsobj describing memory layout:
# contiguity, ownership, writeability, alignment, writeback semantics).
add_newdoc('numpy.core.multiarray', 'ndarray', ('flags',
    """
    Information about the memory layout of the array.

    Attributes
    ----------
    C_CONTIGUOUS (C)
        The data is in a single, C-style contiguous segment.
    F_CONTIGUOUS (F)
        The data is in a single, Fortran-style contiguous segment.
    OWNDATA (O)
        The array owns the memory it uses or borrows it from another object.
    WRITEABLE (W)
        The data area can be written to.  Setting this to False locks
        the data, making it read-only.  A view (slice, etc.) inherits WRITEABLE
        from its base array at creation time, but a view of a writeable
        array may be subsequently locked while the base array remains writeable.
        (The opposite is not true, in that a view of a locked array may not
        be made writeable.  However, currently, locking a base object does not
        lock any views that already reference it, so under that circumstance it
        is possible to alter the contents of a locked array via a previously
        created writeable view onto it.)  Attempting to change a non-writeable
        array raises a RuntimeError exception.
    ALIGNED (A)
        The data and all elements are aligned appropriately for the hardware.
    WRITEBACKIFCOPY (X)
        This array is a copy of some other array. The C-API function
        PyArray_ResolveWritebackIfCopy must be called before deallocating
        to the base array will be updated with the contents of this array.
    UPDATEIFCOPY (U)
        (Deprecated, use WRITEBACKIFCOPY) This array is a copy of some other array.
        When this array is
        deallocated, the base array will be updated with the contents of
        this array.
    FNC
        F_CONTIGUOUS and not C_CONTIGUOUS.
    FORC
        F_CONTIGUOUS or C_CONTIGUOUS (one-segment test).
    BEHAVED (B)
        ALIGNED and WRITEABLE.
    CARRAY (CA)
        BEHAVED and C_CONTIGUOUS.
    FARRAY (FA)
        BEHAVED and F_CONTIGUOUS and not C_CONTIGUOUS.

    Notes
    -----
    The `flags` object can be accessed dictionary-like (as in ``a.flags['WRITEABLE']``),
    or by using lowercased attribute names (as in ``a.flags.writeable``). Short flag
    names are only supported in dictionary access.

    Only the WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED flags can be
    changed by the user, via direct assignment to the attribute or dictionary
    entry, or by calling `ndarray.setflags`.

    The array flags cannot be set arbitrarily:

    - UPDATEIFCOPY can only be set ``False``.
    - WRITEBACKIFCOPY can only be set ``False``.
    - ALIGNED can only be set ``True`` if the data is truly aligned.
    - WRITEABLE can only be set ``True`` if the array owns its own memory
      or the ultimate owner of the memory exposes a writeable buffer
      interface or is a string.

    Arrays can be both C-style and Fortran-style contiguous simultaneously.
    This is clear for 1-dimensional arrays, but can also be true for higher
    dimensional arrays.

    Even for contiguous arrays a stride for a given dimension
    ``arr.strides[dim]`` may be *arbitrary* if ``arr.shape[dim] == 1``
    or the array has no elements.
    It does *not* generally hold that ``self.strides[-1] == self.itemsize``
    for C-style contiguous arrays or ``self.strides[0] == self.itemsize`` for
    Fortran-style contiguous arrays is true.
    """))
# Docstrings for flat (1-D flatiter view), nbytes (element storage size),
# and ndim (number of dimensions).
add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
    """
    A 1-D iterator over the array.

    This is a `numpy.flatiter` instance, which acts similarly to, but is not
    a subclass of, Python's built-in iterator object.

    See Also
    --------
    flatten : Return a copy of the array collapsed into one dimension.

    flatiter

    Examples
    --------
    >>> x = np.arange(1, 7).reshape(2, 3)
    >>> x
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> x.flat[3]
    4
    >>> x.T
    array([[1, 4],
           [2, 5],
           [3, 6]])
    >>> x.T.flat[3]
    5
    >>> type(x.flat)
    <type 'numpy.flatiter'>

    An assignment example:

    >>> x.flat = 3; x
    array([[3, 3, 3],
           [3, 3, 3]])
    >>> x.flat[[1,4]] = 1; x
    array([[3, 1, 3],
           [3, 1, 3]])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('nbytes',
    """
    Total bytes consumed by the elements of the array.

    Notes
    -----
    Does not include memory consumed by non-element attributes of the
    array object.

    Examples
    --------
    >>> x = np.zeros((3,5,2), dtype=np.complex128)
    >>> x.nbytes
    480
    >>> np.prod(x.shape) * x.itemsize
    480

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('ndim',
    """
    Number of array dimensions.

    Examples
    --------
    >>> x = np.array([1, 2, 3])
    >>> x.ndim
    1
    >>> y = np.zeros((2, 3, 4))
    >>> y.ndim
    3

    """))
# Docstrings for real (real component), shape (dimensions tuple, assignable
# for in-place reshape), and size (total element count).
add_newdoc('numpy.core.multiarray', 'ndarray', ('real',
    """
    The real part of the array.

    Examples
    --------
    >>> x = np.sqrt([1+0j, 0+1j])
    >>> x.real
    array([ 1.        ,  0.70710678])
    >>> x.real.dtype
    dtype('float64')

    See Also
    --------
    numpy.real : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('shape',
    """
    Tuple of array dimensions.

    The shape property is usually used to get the current shape of an array,
    but may also be used to reshape the array in-place by assigning a tuple of
    array dimensions to it.  As with `numpy.reshape`, one of the new shape
    dimensions can be -1, in which case its value is inferred from the size of
    the array and the remaining dimensions. Reshaping an array in-place will
    fail if a copy is required.

    Examples
    --------
    >>> x = np.array([1, 2, 3, 4])
    >>> x.shape
    (4,)
    >>> y = np.zeros((2, 3, 4))
    >>> y.shape
    (2, 3, 4)
    >>> y.shape = (3, 8)
    >>> y
    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
           [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.]])
    >>> y.shape = (3, 6)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: total size of new array must be unchanged
    >>> np.zeros((4,2))[::2].shape = (-1,)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    AttributeError: incompatible shape for a non-contiguous array

    See Also
    --------
    numpy.reshape : similar function
    ndarray.reshape : similar method

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
    """
    Number of elements in the array.

    Equal to ``np.prod(a.shape)``, i.e., the product of the array's
    dimensions.

    Notes
    -----
    `a.size` returns a standard arbitrary precision Python integer. This
    may not be the case with other methods of obtaining the same value
    (like the suggested ``np.prod(a.shape)``, which returns an instance
    of ``np.int_``), and may be relevant if the value is used further in
    calculations that may overflow a fixed size integer type.

    Examples
    --------
    >>> x = np.zeros((3, 5, 2), dtype=np.complex128)
    >>> x.size
    30
    >>> np.prod(x.shape)
    30

    """))
# Docstrings for strides (per-axis byte steps through memory) and
# T (transpose convenience attribute).
add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
    """
    Tuple of bytes to step in each dimension when traversing an array.

    The byte offset of element ``(i[0], i[1], ..., i[n])`` in an array `a`
    is::

        offset = sum(np.array(i) * a.strides)

    A more detailed explanation of strides can be found in the
    "ndarray.rst" file in the NumPy reference guide.

    Notes
    -----
    Imagine an array of 32-bit integers (each 4 bytes)::

      x = np.array([[0, 1, 2, 3, 4],
                    [5, 6, 7, 8, 9]], dtype=np.int32)

    This array is stored in memory as 40 bytes, one after the other
    (known as a contiguous block of memory). The strides of an array tell
    us how many bytes we have to skip in memory to move to the next position
    along a certain axis. For example, we have to skip 4 bytes (1 value) to
    move to the next column, but 20 bytes (5 values) to get to the same
    position in the next row. As such, the strides for the array `x` will be
    ``(20, 4)``.

    See Also
    --------
    numpy.lib.stride_tricks.as_strided

    Examples
    --------
    >>> y = np.reshape(np.arange(2*3*4), (2,3,4))
    >>> y
    array([[[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]],
           [[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]]])
    >>> y.strides
    (48, 16, 4)
    >>> y[1,1,1]
    17
    >>> offset=sum(y.strides * np.array((1,1,1)))
    >>> offset/y.itemsize
    17

    >>> x = np.reshape(np.arange(5*6*7*8), (5,6,7,8)).transpose(2,3,1,0)
    >>> x.strides
    (32, 4, 224, 1344)
    >>> i = np.array([3,5,2,2])
    >>> offset = sum(i * x.strides)
    >>> x[3,5,2,2]
    813
    >>> offset / x.itemsize
    813

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
    """
    Same as self.transpose(), except that self is returned if
    self.ndim < 2.

    Examples
    --------
    >>> x = np.array([[1.,2.],[3.,4.]])
    >>> x
    array([[ 1.,  2.],
           [ 3.,  4.]])
    >>> x.T
    array([[ 1.,  3.],
           [ 2.,  4.]])
    >>> x = np.array([1.,2.,3.,4.])
    >>> x
    array([ 1.,  2.,  3.,  4.])
    >>> x.T
    array([ 1.,  2.,  3.,  4.])

    """))
############################################################################## | |
# | |
# ndarray methods | |
# | |
############################################################################## | |
# Docstrings for ndarray's special methods: array-protocol hooks
# (__array__, __array_prepare__, __array_wrap__), copy-module hooks
# (__copy__, __deepcopy__), and pickle support (__reduce__, __setstate__).
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
    """ a.__array__(|dtype) -> reference if type unchanged, copy otherwise.

    Returns either a new reference to self if dtype is not given or a new array
    of provided data type if dtype is different from the current dtype of the
    array.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
    """a.__array_prepare__(obj) -> Object of same type as ndarray object obj.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
    """a.__array_wrap__(obj) -> Object of same type as ndarray object a.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__copy__',
    """a.__copy__()

    Used if :func:`copy.copy` is called on an array. Returns a copy of the array.

    Equivalent to ``a.copy(order='K')``.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__deepcopy__',
    """a.__deepcopy__(memo, /) -> Deep copy of array.

    Used if :func:`copy.deepcopy` is called on an array.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__reduce__',
    """a.__reduce__()

    For pickling.

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('__setstate__',
    """a.__setstate__(state, /)

    For unpickling.

    The `state` argument must be a sequence that contains the following
    elements:

    Parameters
    ----------
    version : int
        optional pickle version. If omitted defaults to 0.
    shape : tuple
    dtype : data-type
    isFortran : bool
    rawdata : string or list
        a binary string with the data (or a list if 'a' is an object array)

    """))
# Short method docstrings that defer to the equivalent free functions:
# all, any, argmax, argmin, argsort, argpartition.
add_newdoc('numpy.core.multiarray', 'ndarray', ('all',
    """
    a.all(axis=None, out=None, keepdims=False)

    Returns True if all elements evaluate to True.

    Refer to `numpy.all` for full documentation.

    See Also
    --------
    numpy.all : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('any',
    """
    a.any(axis=None, out=None, keepdims=False)

    Returns True if any of the elements of `a` evaluate to True.

    Refer to `numpy.any` for full documentation.

    See Also
    --------
    numpy.any : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('argmax',
    """
    a.argmax(axis=None, out=None)

    Return indices of the maximum values along the given axis.

    Refer to `numpy.argmax` for full documentation.

    See Also
    --------
    numpy.argmax : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
    """
    a.argmin(axis=None, out=None)

    Return indices of the minimum values along the given axis of `a`.

    Refer to `numpy.argmin` for detailed documentation.

    See Also
    --------
    numpy.argmin : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
    """
    a.argsort(axis=-1, kind='quicksort', order=None)

    Returns the indices that would sort this array.

    Refer to `numpy.argsort` for full documentation.

    See Also
    --------
    numpy.argsort : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('argpartition',
    """
    a.argpartition(kth, axis=-1, kind='introselect', order=None)

    Returns the indices that would partition this array.

    Refer to `numpy.argpartition` for full documentation.

    .. versionadded:: 1.8.0

    See Also
    --------
    numpy.argpartition : equivalent function

    """))
# Docstring for ndarray.astype (type-casting copy of the array).
add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
    """
    a.astype(dtype, order='K', casting='unsafe', subok=True, copy=True)

    Copy of the array, cast to a specified type.

    Parameters
    ----------
    dtype : str or dtype
        Typecode or data-type to which the array is cast.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout order of the result.
        'C' means C order, 'F' means Fortran order, 'A'
        means 'F' order if all the arrays are Fortran contiguous,
        'C' order otherwise, and 'K' means as close to the
        order the array elements appear in memory as possible.
        Default is 'K'.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'unsafe'
        for backwards compatibility.

          * 'no' means the data types should not be cast at all.
          * 'equiv' means only byte-order changes are allowed.
          * 'safe' means only casts which can preserve values are allowed.
          * 'same_kind' means only safe casts or casts within a kind,
            like float64 to float32, are allowed.
          * 'unsafe' means any data conversions may be done.
    subok : bool, optional
        If True, then sub-classes will be passed-through (default), otherwise
        the returned array will be forced to be a base-class array.
    copy : bool, optional
        By default, astype always returns a newly allocated array. If this
        is set to false, and the `dtype`, `order`, and `subok`
        requirements are satisfied, the input array is returned instead
        of a copy.

    Returns
    -------
    arr_t : ndarray
        Unless `copy` is False and the other conditions for returning the input
        array are satisfied (see description for `copy` input parameter), `arr_t`
        is a new array of the same shape as the input array, with dtype, order
        given by `dtype`, `order`.

    Notes
    -----
    Starting in NumPy 1.9, astype method now returns an error if the string
    dtype to cast to is not long enough in 'safe' casting mode to hold the max
    value of integer/float array that is being casted. Previously the casting
    was allowed even if the result was truncated.

    Raises
    ------
    ComplexWarning
        When casting from complex to float or int. To avoid this,
        one should use ``a.real.astype(t)``.

    Examples
    --------
    >>> x = np.array([1, 2, 2.5])
    >>> x
    array([ 1. ,  2. ,  2.5])

    >>> x.astype(int)
    array([1, 2, 2])

    """))
# ndarray.byteswap: fixed "low-endian" -> "little-endian" (the correct term
# for the byte order; "low-endian" is not standard terminology).
add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
    """
    a.byteswap(inplace=False)
    Swap the bytes of the array elements
    Toggle between little-endian and big-endian data representation by
    returning a byteswapped array, optionally swapped in-place.
    Parameters
    ----------
    inplace : bool, optional
        If ``True``, swap bytes in-place, default is ``False``.
    Returns
    -------
    out : ndarray
        The byteswapped array. If `inplace` is ``True``, this is
        a view to self.
    Examples
    --------
    >>> A = np.array([1, 256, 8755], dtype=np.int16)
    >>> map(hex, A)
    ['0x1', '0x100', '0x2233']
    >>> A.byteswap(inplace=True)
    array([ 256, 1, 13090], dtype=int16)
    >>> map(hex, A)
    ['0x100', '0x1', '0x3322']
    Arrays of strings are not swapped
    >>> A = np.array(['ceg', 'fac'])
    >>> A.byteswap()
    array(['ceg', 'fac'],
          dtype='|S3')
    """))
# ndarray.choose: delegates to numpy.choose for the full documentation.
add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
    """
    a.choose(choices, out=None, mode='raise')
    Use an index array to construct a new array from a set of choices.
    Refer to `numpy.choose` for full documentation.
    See Also
    --------
    numpy.choose : equivalent function
    """))

# ndarray.clip: delegates to numpy.clip.
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
    """
    a.clip(min=None, max=None, out=None)
    Return an array whose values are limited to ``[min, max]``.
    One of max or min must be given.
    Refer to `numpy.clip` for full documentation.
    See Also
    --------
    numpy.clip : equivalent function
    """))

# ndarray.compress: delegates to numpy.compress.
add_newdoc('numpy.core.multiarray', 'ndarray', ('compress',
    """
    a.compress(condition, axis=None, out=None)
    Return selected slices of this array along given axis.
    Refer to `numpy.compress` for full documentation.
    See Also
    --------
    numpy.compress : equivalent function
    """))

# ndarray.conj: alias of conjugate; both point at numpy.conjugate.
add_newdoc('numpy.core.multiarray', 'ndarray', ('conj',
    """
    a.conj()
    Complex-conjugate all elements.
    Refer to `numpy.conjugate` for full documentation.
    See Also
    --------
    numpy.conjugate : equivalent function
    """))

# ndarray.conjugate: delegates to numpy.conjugate.
add_newdoc('numpy.core.multiarray', 'ndarray', ('conjugate',
    """
    a.conjugate()
    Return the complex conjugate, element-wise.
    Refer to `numpy.conjugate` for full documentation.
    See Also
    --------
    numpy.conjugate : equivalent function
    """))

# ndarray.copy: note the order= default ('C') differs from numpy.copy ('K').
add_newdoc('numpy.core.multiarray', 'ndarray', ('copy',
    """
    a.copy(order='C')
    Return a copy of the array.
    Parameters
    ----------
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the copy. 'C' means C-order,
        'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
        'C' otherwise. 'K' means match the layout of `a` as closely
        as possible. (Note that this function and :func:`numpy.copy` are very
        similar, but have different default values for their order=
        arguments.)
    See also
    --------
    numpy.copy
    numpy.copyto
    Examples
    --------
    >>> x = np.array([[1,2,3],[4,5,6]], order='F')
    >>> y = x.copy()
    >>> x.fill(0)
    >>> x
    array([[0, 0, 0],
           [0, 0, 0]])
    >>> y
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> y.flags['C_CONTIGUOUS']
    True
    """))
# ndarray.cumprod: delegates to numpy.cumprod.
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumprod',
    """
    a.cumprod(axis=None, dtype=None, out=None)
    Return the cumulative product of the elements along the given axis.
    Refer to `numpy.cumprod` for full documentation.
    See Also
    --------
    numpy.cumprod : equivalent function
    """))

# ndarray.cumsum: delegates to numpy.cumsum.
add_newdoc('numpy.core.multiarray', 'ndarray', ('cumsum',
    """
    a.cumsum(axis=None, dtype=None, out=None)
    Return the cumulative sum of the elements along the given axis.
    Refer to `numpy.cumsum` for full documentation.
    See Also
    --------
    numpy.cumsum : equivalent function
    """))

# ndarray.diagonal: documents the 1.9 change to a read-only view.
add_newdoc('numpy.core.multiarray', 'ndarray', ('diagonal',
    """
    a.diagonal(offset=0, axis1=0, axis2=1)
    Return specified diagonals. In NumPy 1.9 the returned array is a
    read-only view instead of a copy as in previous NumPy versions. In
    a future version the read-only restriction will be removed.
    Refer to :func:`numpy.diagonal` for full documentation.
    See Also
    --------
    numpy.diagonal : equivalent function
    """))

# ndarray.dot: delegates to numpy.dot; shows method chaining.
add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
    """
    a.dot(b, out=None)
    Dot product of two arrays.
    Refer to `numpy.dot` for full documentation.
    See Also
    --------
    numpy.dot : equivalent function
    Examples
    --------
    >>> a = np.eye(2)
    >>> b = np.ones((2, 2)) * 2
    >>> a.dot(b)
    array([[ 2., 2.],
           [ 2., 2.]])
    This array method can be conveniently chained:
    >>> a.dot(b).dot(b)
    array([[ 8., 8.],
           [ 8., 8.]])
    """))

# ndarray.dump: pickle the array to a named file.
add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
    """a.dump(file)
    Dump a pickle of the array to the specified file.
    The array can be read back with pickle.load or numpy.load.
    Parameters
    ----------
    file : str
        A string naming the dump file.
    """))

# ndarray.dumps: pickle the array to an in-memory string.
add_newdoc('numpy.core.multiarray', 'ndarray', ('dumps',
    """
    a.dumps()
    Returns the pickle of the array as a string.
    pickle.loads or numpy.loads will convert the string back to an array.
    Parameters
    ----------
    None
    """))

# ndarray.fill: in-place scalar fill.
add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
    """
    a.fill(value)
    Fill the array with a scalar value.
    Parameters
    ----------
    value : scalar
        All elements of `a` will be assigned this value.
    Examples
    --------
    >>> a = np.array([1, 2])
    >>> a.fill(0)
    >>> a
    array([0, 0])
    >>> a = np.empty(2)
    >>> a.fill(1)
    >>> a
    array([ 1., 1.])
    """))
# ndarray.flatten: copy collapsed to 1-D (contrast with ravel, which may view).
add_newdoc('numpy.core.multiarray', 'ndarray', ('flatten',
    """
    a.flatten(order='C')
    Return a copy of the array collapsed into one dimension.
    Parameters
    ----------
    order : {'C', 'F', 'A', 'K'}, optional
        'C' means to flatten in row-major (C-style) order.
        'F' means to flatten in column-major (Fortran-
        style) order. 'A' means to flatten in column-major
        order if `a` is Fortran *contiguous* in memory,
        row-major order otherwise. 'K' means to flatten
        `a` in the order the elements occur in memory.
        The default is 'C'.
    Returns
    -------
    y : ndarray
        A copy of the input array, flattened to one dimension.
    See Also
    --------
    ravel : Return a flattened array.
    flat : A 1-D flat iterator over the array.
    Examples
    --------
    >>> a = np.array([[1,2], [3,4]])
    >>> a.flatten()
    array([1, 2, 3, 4])
    >>> a.flatten('F')
    array([1, 3, 2, 4])
    """))

# ndarray.getfield: reinterpret a byte-offset slice of each element as a dtype.
add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
    """
    a.getfield(dtype, offset=0)
    Returns a field of the given array as a certain type.
    A field is a view of the array data with a given data-type. The values in
    the view are determined by the given type and the offset into the current
    array in bytes. The offset needs to be such that the view dtype fits in the
    array dtype; for example an array of dtype complex128 has 16-byte elements.
    If taking a view with a 32-bit integer (4 bytes), the offset needs to be
    between 0 and 12 bytes.
    Parameters
    ----------
    dtype : str or dtype
        The data type of the view. The dtype size of the view can not be larger
        than that of the array itself.
    offset : int
        Number of bytes to skip before beginning the element view.
    Examples
    --------
    >>> x = np.diag([1.+1.j]*2)
    >>> x[1, 1] = 2 + 4.j
    >>> x
    array([[ 1.+1.j, 0.+0.j],
           [ 0.+0.j, 2.+4.j]])
    >>> x.getfield(np.float64)
    array([[ 1., 0.],
           [ 0., 2.]])
    By choosing an offset of 8 bytes we can select the complex part of the
    array for our view:
    >>> x.getfield(np.float64, offset=8)
    array([[ 1., 0.],
           [ 0., 4.]])
    """))

# ndarray.item: extract one element as a native Python scalar.
add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
    """
    a.item(*args)
    Copy an element of an array to a standard Python scalar and return it.
    Parameters
    ----------
    \\*args : Arguments (variable number and type)
        * none: in this case, the method only works for arrays
          with one element (`a.size == 1`), which element is
          copied into a standard Python scalar object and returned.
        * int_type: this argument is interpreted as a flat index into
          the array, specifying which element to copy and return.
        * tuple of int_types: functions as does a single int_type argument,
          except that the argument is interpreted as an nd-index into the
          array.
    Returns
    -------
    z : Standard Python scalar object
        A copy of the specified element of the array as a suitable
        Python scalar
    Notes
    -----
    When the data type of `a` is longdouble or clongdouble, item() returns
    a scalar array object because there is no available Python scalar that
    would not lose information. Void arrays return a buffer object for item(),
    unless fields are defined, in which case a tuple is returned.
    `item` is very similar to a[args], except, instead of an array scalar,
    a standard Python scalar is returned. This can be useful for speeding up
    access to elements of the array and doing arithmetic on elements of the
    array using Python's optimized math.
    Examples
    --------
    >>> x = np.random.randint(9, size=(3, 3))
    >>> x
    array([[3, 1, 7],
           [2, 8, 3],
           [8, 5, 3]])
    >>> x.item(3)
    2
    >>> x.item(7)
    5
    >>> x.item((0, 1))
    1
    >>> x.item((2, 2))
    3
    """))

# ndarray.itemset: fast in-place scalar assignment counterpart to item().
add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
    """
    a.itemset(*args)
    Insert scalar into an array (scalar is cast to array's dtype, if possible)
    There must be at least 1 argument, and define the last argument
    as *item*. Then, ``a.itemset(*args)`` is equivalent to but faster
    than ``a[args] = item``. The item should be a scalar value and `args`
    must select a single item in the array `a`.
    Parameters
    ----------
    \\*args : Arguments
        If one argument: a scalar, only used in case `a` is of size 1.
        If two arguments: the last argument is the value to be set
        and must be a scalar, the first argument specifies a single array
        element location. It is either an int or a tuple.
    Notes
    -----
    Compared to indexing syntax, `itemset` provides some speed increase
    for placing a scalar into a particular location in an `ndarray`,
    if you must do this. However, generally this is discouraged:
    among other problems, it complicates the appearance of the code.
    Also, when using `itemset` (and `item`) inside a loop, be sure
    to assign the methods to a local variable to avoid the attribute
    look-up at each loop iteration.
    Examples
    --------
    >>> x = np.random.randint(9, size=(3, 3))
    >>> x
    array([[3, 1, 7],
           [2, 8, 3],
           [8, 5, 3]])
    >>> x.itemset(4, 0)
    >>> x.itemset((2, 2), 9)
    >>> x
    array([[3, 1, 7],
           [2, 0, 3],
           [8, 5, 9]])
    """))
# ndarray.max: delegates to numpy.amax.
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
    """
    a.max(axis=None, out=None, keepdims=False)
    Return the maximum along a given axis.
    Refer to `numpy.amax` for full documentation.
    See Also
    --------
    numpy.amax : equivalent function
    """))

# ndarray.mean: delegates to numpy.mean.
add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
    """
    a.mean(axis=None, dtype=None, out=None, keepdims=False)
    Returns the average of the array elements along given axis.
    Refer to `numpy.mean` for full documentation.
    See Also
    --------
    numpy.mean : equivalent function
    """))

# ndarray.min: delegates to numpy.amin.
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
    """
    a.min(axis=None, out=None, keepdims=False)
    Return the minimum along a given axis.
    Refer to `numpy.amin` for full documentation.
    See Also
    --------
    numpy.amin : equivalent function
    """))

# Module-level function (no ('name', ...) tuple): exact memory-overlap test.
add_newdoc('numpy.core.multiarray', 'shares_memory',
    """
    shares_memory(a, b, max_work=None)
    Determine if two arrays share memory
    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : int, optional
        Effort to spend on solving the overlap problem (maximum number
        of candidate solutions to consider). The following special
        values are recognized:
        max_work=MAY_SHARE_EXACT (default)
            The problem is solved exactly. In this case, the function returns
            True only if there is an element shared between the arrays.
        max_work=MAY_SHARE_BOUNDS
            Only the memory bounds of a and b are checked.
    Raises
    ------
    numpy.TooHardError
        Exceeded max_work.
    Returns
    -------
    out : bool
    See Also
    --------
    may_share_memory
    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    """)

# Module-level function: cheap bounds-only overlap test (may report false
# positives, never false negatives).
add_newdoc('numpy.core.multiarray', 'may_share_memory',
    """
    may_share_memory(a, b, max_work=None)
    Determine if two arrays might share memory
    A return of True does not necessarily mean that the two arrays
    share any element. It just means that they *might*.
    Only the memory bounds of a and b are checked by default.
    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : int, optional
        Effort to spend on solving the overlap problem. See
        `shares_memory` for details. Default for ``may_share_memory``
        is to do a bounds check.
    Returns
    -------
    out : bool
    See Also
    --------
    shares_memory
    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    >>> x = np.zeros([3, 4])
    >>> np.may_share_memory(x[:,0], x[:,1])
    True
    """)
# ndarray.newbyteorder: fixed typo in the equivalence example — the dtype
# method is `newbyteorder`, not `newbytorder` (copying the old example
# verbatim raised AttributeError).
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
    """
    arr.newbyteorder(new_order='S')
    Return the array with the same data viewed with a different byte order.
    Equivalent to::
        arr.view(arr.dtype.newbyteorder(new_order))
    Changes are also made in all fields and sub-arrays of the array data
    type.
    Parameters
    ----------
    new_order : string, optional
        Byte order to force; a value from the byte order specifications
        below. `new_order` codes can be any of:
        * 'S' - swap dtype from current to opposite endian
        * {'<', 'L'} - little endian
        * {'>', 'B'} - big endian
        * {'=', 'N'} - native order
        * {'|', 'I'} - ignore (no change to byte order)
        The default value ('S') results in swapping the current
        byte order. The code does a case-insensitive check on the first
        letter of `new_order` for the alternatives above. For example,
        any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
    Returns
    -------
    new_arr : array
        New array object with the dtype reflecting given change to the
        byte order.
    """))
# ndarray.nonzero: delegates to numpy.nonzero.
add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
    """
    a.nonzero()
    Return the indices of the elements that are non-zero.
    Refer to `numpy.nonzero` for full documentation.
    See Also
    --------
    numpy.nonzero : equivalent function
    """))

# ndarray.prod: delegates to numpy.prod.
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
    """
    a.prod(axis=None, dtype=None, out=None, keepdims=False)
    Return the product of the array elements over the given axis
    Refer to `numpy.prod` for full documentation.
    See Also
    --------
    numpy.prod : equivalent function
    """))

# ndarray.ptp: delegates to numpy.ptp.
add_newdoc('numpy.core.multiarray', 'ndarray', ('ptp',
    """
    a.ptp(axis=None, out=None, keepdims=False)
    Peak to peak (maximum - minimum) value along a given axis.
    Refer to `numpy.ptp` for full documentation.
    See Also
    --------
    numpy.ptp : equivalent function
    """))

# ndarray.put: delegates to numpy.put.
add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
    """
    a.put(indices, values, mode='raise')
    Set ``a.flat[n] = values[n]`` for all `n` in indices.
    Refer to `numpy.put` for full documentation.
    See Also
    --------
    numpy.put : equivalent function
    """))

# Module-level function: broadcasting copy with casting control and mask.
add_newdoc('numpy.core.multiarray', 'copyto',
    """
    copyto(dst, src, casting='same_kind', where=True)
    Copies values from one array to another, broadcasting as necessary.
    Raises a TypeError if the `casting` rule is violated, and if
    `where` is provided, it selects which elements to copy.
    .. versionadded:: 1.7.0
    Parameters
    ----------
    dst : ndarray
        The array into which values are copied.
    src : array_like
        The array from which values are copied.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur when copying.
        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.
    where : array_like of bool, optional
        A boolean array which is broadcasted to match the dimensions
        of `dst`, and selects elements to copy from `src` to `dst`
        wherever it contains the value True.
    """)

# Module-level function: masked scatter; note values repeat, unlike a[mask]=v.
add_newdoc('numpy.core.multiarray', 'putmask',
    """
    putmask(a, mask, values)
    Changes elements of an array based on conditional and input values.
    Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
    If `values` is not the same size as `a` and `mask` then it will repeat.
    This gives behavior different from ``a[mask] = values``.
    Parameters
    ----------
    a : array_like
        Target array.
    mask : array_like
        Boolean mask array. It has to be the same shape as `a`.
    values : array_like
        Values to put into `a` where `mask` is True. If `values` is smaller
        than `a` it will be repeated.
    See Also
    --------
    place, put, take, copyto
    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> np.putmask(x, x>2, x**2)
    >>> x
    array([[ 0, 1, 2],
           [ 9, 16, 25]])
    If `values` is smaller than `a` it is repeated:
    >>> x = np.arange(5)
    >>> np.putmask(x, x>1, [-33, -44])
    >>> x
    array([ 0, 1, -33, -44, -33])
    """)
# ndarray.ravel: delegates to numpy.ravel.
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
    """
    a.ravel([order])
    Return a flattened array.
    Refer to `numpy.ravel` for full documentation.
    See Also
    --------
    numpy.ravel : equivalent function
    ndarray.flat : a flat iterator on the array.
    """))

# ndarray.repeat: delegates to numpy.repeat.
add_newdoc('numpy.core.multiarray', 'ndarray', ('repeat',
    """
    a.repeat(repeats, axis=None)
    Repeat elements of an array.
    Refer to `numpy.repeat` for full documentation.
    See Also
    --------
    numpy.repeat : equivalent function
    """))

# ndarray.reshape: method form additionally accepts unpacked shape args.
add_newdoc('numpy.core.multiarray', 'ndarray', ('reshape',
    """
    a.reshape(shape, order='C')
    Returns an array containing the same data with a new shape.
    Refer to `numpy.reshape` for full documentation.
    See Also
    --------
    numpy.reshape : equivalent function
    Notes
    -----
    Unlike the free function `numpy.reshape`, this method on `ndarray` allows
    the elements of the shape parameter to be passed in as separate arguments.
    For example, ``a.reshape(10, 11)`` is equivalent to
    ``a.reshape((10, 11))``.
    """))

# ndarray.resize: in-place resize; documents the refcheck safety valve.
add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
    """
    a.resize(new_shape, refcheck=True)
    Change shape and size of array in-place.
    Parameters
    ----------
    new_shape : tuple of ints, or `n` ints
        Shape of resized array.
    refcheck : bool, optional
        If False, reference count will not be checked. Default is True.
    Returns
    -------
    None
    Raises
    ------
    ValueError
        If `a` does not own its own data or references or views to it exist,
        and the data memory must be changed.
        PyPy only: will always raise if the data memory must be changed, since
        there is no reliable way to determine if references or views to it
        exist.
    SystemError
        If the `order` keyword argument is specified. This behaviour is a
        bug in NumPy.
    See Also
    --------
    resize : Return a new array with the specified shape.
    Notes
    -----
    This reallocates space for the data area if necessary.
    Only contiguous arrays (data elements consecutive in memory) can be
    resized.
    The purpose of the reference count check is to make sure you
    do not use this array as a buffer for another Python object and then
    reallocate the memory. However, reference counts can increase in
    other ways so if you are sure that you have not shared the memory
    for this array with another Python object, then you may safely set
    `refcheck` to False.
    Examples
    --------
    Shrinking an array: array is flattened (in the order that the data are
    stored in memory), resized, and reshaped:
    >>> a = np.array([[0, 1], [2, 3]], order='C')
    >>> a.resize((2, 1))
    >>> a
    array([[0],
           [1]])
    >>> a = np.array([[0, 1], [2, 3]], order='F')
    >>> a.resize((2, 1))
    >>> a
    array([[0],
           [2]])
    Enlarging an array: as above, but missing entries are filled with zeros:
    >>> b = np.array([[0, 1], [2, 3]])
    >>> b.resize(2, 3) # new_shape parameter doesn't have to be a tuple
    >>> b
    array([[0, 1, 2],
           [3, 0, 0]])
    Referencing an array prevents resizing...
    >>> c = a
    >>> a.resize((1, 1))
    Traceback (most recent call last):
    ...
    ValueError: cannot resize an array that has been referenced ...
    Unless `refcheck` is False:
    >>> a.resize((1, 1), refcheck=False)
    >>> a
    array([[0]])
    >>> c
    array([[0]])
    """))
# ndarray.round: delegates to numpy.around.
add_newdoc('numpy.core.multiarray', 'ndarray', ('round',
    """
    a.round(decimals=0, out=None)
    Return `a` with each element rounded to the given number of decimals.
    Refer to `numpy.around` for full documentation.
    See Also
    --------
    numpy.around : equivalent function
    """))

# ndarray.searchsorted: delegates to numpy.searchsorted.
add_newdoc('numpy.core.multiarray', 'ndarray', ('searchsorted',
    """
    a.searchsorted(v, side='left', sorter=None)
    Find indices where elements of v should be inserted in a to maintain order.
    For full documentation, see `numpy.searchsorted`
    See Also
    --------
    numpy.searchsorted : equivalent function
    """))

# ndarray.setfield: write counterpart of getfield; reinterprets element bytes.
add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
    """
    a.setfield(val, dtype, offset=0)
    Put a value into a specified place in a field defined by a data-type.
    Place `val` into `a`'s field defined by `dtype` and beginning `offset`
    bytes into the field.
    Parameters
    ----------
    val : object
        Value to be placed in field.
    dtype : dtype object
        Data-type of the field in which to place `val`.
    offset : int, optional
        The number of bytes into the field at which to place `val`.
    Returns
    -------
    None
    See Also
    --------
    getfield
    Examples
    --------
    >>> x = np.eye(3)
    >>> x.getfield(np.float64)
    array([[ 1., 0., 0.],
           [ 0., 1., 0.],
           [ 0., 0., 1.]])
    >>> x.setfield(3, np.int32)
    >>> x.getfield(np.int32)
    array([[3, 3, 3],
           [3, 3, 3],
           [3, 3, 3]])
    >>> x
    array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
           [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
           [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
    >>> x.setfield(np.eye(3), np.int32)
    >>> x
    array([[ 1., 0., 0.],
           [ 0., 1., 0.],
           [ 0., 0., 1.]])
    """))

# ndarray.setflags: toggle WRITEABLE/ALIGNED/WRITEBACKIFCOPY flags.
add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
    """
    a.setflags(write=None, align=None, uic=None)
    Set array flags WRITEABLE, ALIGNED, (WRITEBACKIFCOPY and UPDATEIFCOPY),
    respectively.
    These Boolean-valued flags affect how numpy interprets the memory
    area used by `a` (see Notes below). The ALIGNED flag can only
    be set to True if the data is actually aligned according to the type.
    The WRITEBACKIFCOPY and (deprecated) UPDATEIFCOPY flags can never be set
    to True. The flag WRITEABLE can only be set to True if the array owns its
    own memory, or the ultimate owner of the memory exposes a writeable buffer
    interface, or is a string. (The exception for string is made so that
    unpickling can be done without copying memory.)
    Parameters
    ----------
    write : bool, optional
        Describes whether or not `a` can be written to.
    align : bool, optional
        Describes whether or not `a` is aligned properly for its type.
    uic : bool, optional
        Describes whether or not `a` is a copy of another "base" array.
    Notes
    -----
    Array flags provide information about how the memory area used
    for the array is to be interpreted. There are 7 Boolean flags
    in use, only four of which can be changed by the user:
    WRITEBACKIFCOPY, UPDATEIFCOPY, WRITEABLE, and ALIGNED.
    WRITEABLE (W) the data area can be written to;
    ALIGNED (A) the data and strides are aligned appropriately for the hardware
    (as determined by the compiler);
    UPDATEIFCOPY (U) (deprecated), replaced by WRITEBACKIFCOPY;
    WRITEBACKIFCOPY (X) this array is a copy of some other array (referenced
    by .base). When the C-API function PyArray_ResolveWritebackIfCopy is
    called, the base array will be updated with the contents of this array.
    All flags can be accessed using the single (upper case) letter as well
    as the full name.
    Examples
    --------
    >>> y
    array([[3, 1, 7],
           [2, 0, 0],
           [8, 5, 9]])
    >>> y.flags
      C_CONTIGUOUS : True
      F_CONTIGUOUS : False
      OWNDATA : True
      WRITEABLE : True
      ALIGNED : True
      WRITEBACKIFCOPY : False
      UPDATEIFCOPY : False
    >>> y.setflags(write=0, align=0)
    >>> y.flags
      C_CONTIGUOUS : True
      F_CONTIGUOUS : False
      OWNDATA : True
      WRITEABLE : False
      ALIGNED : False
      WRITEBACKIFCOPY : False
      UPDATEIFCOPY : False
    >>> y.setflags(uic=1)
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: cannot set WRITEBACKIFCOPY flag to True
    """))

# ndarray.sort: in-place sort; see numpy.sort for the out-of-place version.
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
    """
    a.sort(axis=-1, kind='quicksort', order=None)
    Sort an array, in-place.
    Parameters
    ----------
    axis : int, optional
        Axis along which to sort. Default is -1, which means sort along the
        last axis.
    kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
        Sorting algorithm. Default is 'quicksort'.
    order : str or list of str, optional
        When `a` is an array with fields defined, this argument specifies
        which fields to compare first, second, etc. A single field can
        be specified as a string, and not all fields need be specified,
        but unspecified fields will still be used, in the order in which
        they come up in the dtype, to break ties.
    See Also
    --------
    numpy.sort : Return a sorted copy of an array.
    argsort : Indirect sort.
    lexsort : Indirect stable sort on multiple keys.
    searchsorted : Find elements in sorted array.
    partition: Partial sort.
    Notes
    -----
    See ``sort`` for notes on the different sorting algorithms.
    Examples
    --------
    >>> a = np.array([[1,4], [3,1]])
    >>> a.sort(axis=1)
    >>> a
    array([[1, 4],
           [1, 3]])
    >>> a.sort(axis=0)
    >>> a
    array([[1, 3],
           [1, 4]])
    Use the `order` keyword to specify a field to use when sorting a
    structured array:
    >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
    >>> a.sort(order='y')
    >>> a
    array([('c', 1), ('a', 2)],
          dtype=[('x', '|S1'), ('y', '<i4')])
    """))
# ndarray.partition: fixed two docstring defects —
#   * typo "parititioned" -> "partitioned" in the See Also entry;
#   * the sequence-of-kth example was missing the `>>> a` line: partition()
#     sorts in place and returns None, so the shown `array([1, 2, 3, 4])`
#     could not be the output of the call itself.
add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
    """
    a.partition(kth, axis=-1, kind='introselect', order=None)
    Rearranges the elements in the array in such a way that the value of the
    element in kth position is in the position it would be in a sorted array.
    All elements smaller than the kth element are moved before this element and
    all equal or greater are moved behind it. The ordering of the elements in
    the two partitions is undefined.
    .. versionadded:: 1.8.0
    Parameters
    ----------
    kth : int or sequence of ints
        Element index to partition by. The kth element value will be in its
        final sorted position and all smaller elements will be moved before it
        and all equal or greater elements behind it.
        The order of all elements in the partitions is undefined.
        If provided with a sequence of kth it will partition all elements
        indexed by kth of them into their sorted position at once.
    axis : int, optional
        Axis along which to sort. Default is -1, which means sort along the
        last axis.
    kind : {'introselect'}, optional
        Selection algorithm. Default is 'introselect'.
    order : str or list of str, optional
        When `a` is an array with fields defined, this argument specifies
        which fields to compare first, second, etc. A single field can
        be specified as a string, and not all fields need to be specified,
        but unspecified fields will still be used, in the order in which
        they come up in the dtype, to break ties.
    See Also
    --------
    numpy.partition : Return a partitioned copy of an array.
    argpartition : Indirect partition.
    sort : Full sort.
    Notes
    -----
    See ``np.partition`` for notes on the different algorithms.
    Examples
    --------
    >>> a = np.array([3, 4, 2, 1])
    >>> a.partition(3)
    >>> a
    array([2, 1, 3, 4])
    >>> a.partition((1, 3))
    >>> a
    array([1, 2, 3, 4])
    """))
# ndarray.squeeze: delegates to numpy.squeeze.
add_newdoc('numpy.core.multiarray', 'ndarray', ('squeeze',
    """
    a.squeeze(axis=None)
    Remove single-dimensional entries from the shape of `a`.
    Refer to `numpy.squeeze` for full documentation.
    See Also
    --------
    numpy.squeeze : equivalent function
    """))

# ndarray.std: delegates to numpy.std.
add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
    """
    a.std(axis=None, dtype=None, out=None, ddof=0, keepdims=False)
    Returns the standard deviation of the array elements along given axis.
    Refer to `numpy.std` for full documentation.
    See Also
    --------
    numpy.std : equivalent function
    """))

# ndarray.sum: delegates to numpy.sum.
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
    """
    a.sum(axis=None, dtype=None, out=None, keepdims=False)
    Return the sum of the array elements over the given axis.
    Refer to `numpy.sum` for full documentation.
    See Also
    --------
    numpy.sum : equivalent function
    """))

# ndarray.swapaxes: delegates to numpy.swapaxes.
add_newdoc('numpy.core.multiarray', 'ndarray', ('swapaxes',
    """
    a.swapaxes(axis1, axis2)
    Return a view of the array with `axis1` and `axis2` interchanged.
    Refer to `numpy.swapaxes` for full documentation.
    See Also
    --------
    numpy.swapaxes : equivalent function
    """))

# ndarray.take: delegates to numpy.take.
add_newdoc('numpy.core.multiarray', 'ndarray', ('take',
    """
    a.take(indices, axis=None, out=None, mode='raise')
    Return an array formed from the elements of `a` at the given indices.
    Refer to `numpy.take` for full documentation.
    See Also
    --------
    numpy.take : equivalent function
    """))

# ndarray.tofile: raw binary/text dump; warns about endianness and
# incompatibility with compressed or fileno-less file-like objects.
add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
    """
    a.tofile(fid, sep="", format="%s")
    Write array to a file as text or binary (default).
    Data is always written in 'C' order, independent of the order of `a`.
    The data produced by this method can be recovered using the function
    fromfile().
    Parameters
    ----------
    fid : file or str
        An open file object, or a string containing a filename.
    sep : str
        Separator between array items for text output.
        If "" (empty), a binary file is written, equivalent to
        ``file.write(a.tobytes())``.
    format : str
        Format string for text file output.
        Each entry in the array is formatted to text by first converting
        it to the closest Python type, and then using "format" % item.
    Notes
    -----
    This is a convenience function for quick storage of array data.
    Information on endianness and precision is lost, so this method is not a
    good choice for files intended to archive data or transport data between
    machines with different endianness. Some of these problems can be overcome
    by outputting the data as text files, at the expense of speed and file
    size.
    When fid is a file object, array contents are directly written to the
    file, bypassing the file object's ``write`` method. As a result, tofile
    cannot be used with files objects supporting compression (e.g., GzipFile)
    or file-like objects that do not support ``fileno()`` (e.g., BytesIO).
    """))
add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist', | |
""" | |
a.tolist() | |
Return the array as a (possibly nested) list. | |
Return a copy of the array data as a (nested) Python list. | |
Data items are converted to the nearest compatible Python type. | |
Parameters | |
---------- | |
none | |
Returns | |
------- | |
y : list | |
The possibly nested list of array elements. | |
Notes | |
----- | |
The array may be recreated, ``a = np.array(a.tolist())``. | |
Examples | |
-------- | |
>>> a = np.array([1, 2]) | |
>>> a.tolist() | |
[1, 2] | |
>>> a = np.array([[1, 2], [3, 4]]) | |
>>> list(a) | |
[array([1, 2]), array([3, 4])] | |
>>> a.tolist() | |
[[1, 2], [3, 4]] | |
""")) | |
# Shared docstring template for tostring/tobytes: the two methods behave
# identically, so the doc differs only in the method name and the
# per-name "deprecated" note substituted via str.format below.
tobytesdoc = """
    a.{name}(order='C')

    Construct Python bytes containing the raw data bytes in the array.

    Constructs Python bytes showing a copy of the raw contents of
    data memory. The bytes object can be produced in either 'C' or 'Fortran',
    or 'Any' order (the default is 'C'-order). 'Any' order means C-order
    unless the F_CONTIGUOUS flag in the array is set, in which case it
    means 'Fortran' order.

    {deprecated}

    Parameters
    ----------
    order : {{'C', 'F', None}}, optional
        Order of the data for multidimensional arrays:
        C, Fortran, or the same as for the original array.

    Returns
    -------
    s : bytes
        Python bytes exhibiting a copy of `a`'s raw data.

    Examples
    --------
    >>> x = np.array([[0, 1], [2, 3]])
    >>> x.tobytes()
    b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
    >>> x.tobytes('C') == x.tobytes()
    True
    >>> x.tobytes('F')
    b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'

    """

add_newdoc('numpy.core.multiarray', 'ndarray',
           ('tostring', tobytesdoc.format(name='tostring',
                                          deprecated=
                                              'This function is a compatibility '
                                              'alias for tobytes. Despite its '
                                              'name it returns bytes not '
                                              'strings.')))

add_newdoc('numpy.core.multiarray', 'ndarray',
           ('tobytes', tobytesdoc.format(name='tobytes',
                                         deprecated='.. versionadded:: 1.9.0')))
# Docstrings for trace, transpose and var.
add_newdoc('numpy.core.multiarray', 'ndarray', ('trace',
    """
    a.trace(offset=0, axis1=0, axis2=1, dtype=None, out=None)

    Return the sum along diagonals of the array.

    Refer to `numpy.trace` for full documentation.

    See Also
    --------
    numpy.trace : equivalent function

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
    """
    a.transpose(*axes)

    Returns a view of the array with axes transposed.

    For a 1-D array, this has no effect. (To change between column and
    row vectors, first cast the 1-D array into a matrix object.)
    For a 2-D array, this is the usual matrix transpose.
    For an n-D array, if axes are given, their order indicates how the
    axes are permuted (see Examples). If axes are not provided and
    ``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
    ``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.

    Parameters
    ----------
    axes : None, tuple of ints, or `n` ints

     * None or no argument: reverses the order of the axes.

     * tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
       `i`-th axis becomes `a.transpose()`'s `j`-th axis.

     * `n` ints: same as an n-tuple of the same ints (this form is
       intended simply as a "convenience" alternative to the tuple form)

    Returns
    -------
    out : ndarray
        View of `a`, with axes suitably permuted.

    See Also
    --------
    ndarray.T : Array property returning the array transposed.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> a
    array([[1, 2],
           [3, 4]])
    >>> a.transpose()
    array([[1, 3],
           [2, 4]])
    >>> a.transpose((1, 0))
    array([[1, 3],
           [2, 4]])
    >>> a.transpose(1, 0)
    array([[1, 3],
           [2, 4]])

    """))


add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
    """
    a.var(axis=None, dtype=None, out=None, ddof=0, keepdims=False)

    Returns the variance of the array elements, along given axis.

    Refer to `numpy.var` for full documentation.

    See Also
    --------
    numpy.var : equivalent function

    """))
# Docstring for ndarray.view.
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
    """
    a.view(dtype=None, type=None)

    New view of array with the same data.

    Parameters
    ----------
    dtype : data-type or ndarray sub-class, optional
        Data-type descriptor of the returned view, e.g., float32 or int16. The
        default, None, results in the view having the same data-type as `a`.
        This argument can also be specified as an ndarray sub-class, which
        then specifies the type of the returned object (this is equivalent to
        setting the ``type`` parameter).
    type : Python type, optional
        Type of the returned view, e.g., ndarray or matrix.  Again, the
        default None results in type preservation.

    Notes
    -----
    ``a.view()`` is used two different ways:

    ``a.view(some_dtype)`` or ``a.view(dtype=some_dtype)`` constructs a view
    of the array's memory with a different data-type.  This can cause a
    reinterpretation of the bytes of memory.

    ``a.view(ndarray_subclass)`` or ``a.view(type=ndarray_subclass)`` just
    returns an instance of `ndarray_subclass` that looks at the same array
    (same shape, dtype, etc.)  This does not cause a reinterpretation of the
    memory.

    For ``a.view(some_dtype)``, if ``some_dtype`` has a different number of
    bytes per entry than the previous dtype (for example, converting a
    regular array to a structured array), then the behavior of the view
    cannot be predicted just from the superficial appearance of ``a`` (shown
    by ``print(a)``). It also depends on exactly how ``a`` is stored in
    memory. Therefore if ``a`` is C-ordered versus fortran-ordered, versus
    defined as a slice or transpose, etc., the view may give different
    results.

    Examples
    --------
    >>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])

    Viewing array data using a different type and dtype:

    >>> y = x.view(dtype=np.int16, type=np.matrix)
    >>> y
    matrix([[513]], dtype=int16)
    >>> print(type(y))
    <class 'numpy.matrixlib.defmatrix.matrix'>

    Creating a view on a structured array so it can be used in calculations

    >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
    >>> xv = x.view(dtype=np.int8).reshape(-1,2)
    >>> xv
    array([[1, 2],
           [3, 4]], dtype=int8)
    >>> xv.mean(0)
    array([ 2.,  3.])

    Making changes to the view changes the underlying array

    >>> xv[0,1] = 20
    >>> print(x)
    [(1, 20) (3, 4)]

    Using a view to convert an array to a recarray:

    >>> z = x.view(np.recarray)
    >>> z.a
    array([1], dtype=int8)

    Views share data:

    >>> x[0] = (9, 10)
    >>> z[0]
    (9, 10)

    Views that change the dtype size (bytes per entry) should normally be
    avoided on arrays defined by slices, transposes, fortran-ordering, etc.:

    >>> x = np.array([[1,2,3],[4,5,6]], dtype=np.int16)
    >>> y = x[:, 0:2]
    >>> y
    array([[1, 2],
           [4, 5]], dtype=int16)
    >>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
    ValueError: new type not compatible with array.
    >>> z = y.copy()
    >>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
    array([[(1, 2)],
           [(4, 5)]], dtype=[('width', '<i2'), ('length', '<i2')])

    """))
##############################################################################
#
# umath functions
#
##############################################################################

add_newdoc('numpy.core.umath', 'frompyfunc',
    """
    frompyfunc(func, nin, nout)

    Takes an arbitrary Python function and returns a NumPy ufunc.

    Can be used, for example, to add broadcasting to a built-in Python
    function (see Examples section).

    Parameters
    ----------
    func : Python function object
        An arbitrary Python function.
    nin : int
        The number of input arguments.
    nout : int
        The number of objects returned by `func`.

    Returns
    -------
    out : ufunc
        Returns a NumPy universal function (``ufunc``) object.

    See Also
    --------
    vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy

    Notes
    -----
    The returned ufunc always returns PyObject arrays.

    Examples
    --------
    Use frompyfunc to add broadcasting to the Python function ``oct``:

    >>> oct_array = np.frompyfunc(oct, 1, 1)
    >>> oct_array(np.array((10, 30, 100)))
    array([012, 036, 0144], dtype=object)
    >>> np.array((oct(10), oct(30), oct(100))) # for comparison
    array(['012', '036', '0144'],
          dtype='|S4')

    """)
# Docstring for the low-level error-state accessor used by geterr/seterr.
add_newdoc('numpy.core.umath', 'geterrobj',
    """
    geterrobj()

    Return the current object that defines floating-point error handling.

    The error object contains all information that defines the error handling
    behavior in NumPy. `geterrobj` is used internally by the other
    functions that get and set error handling behavior (`geterr`, `seterr`,
    `geterrcall`, `seterrcall`).

    Returns
    -------
    errobj : list
        The error object, a list containing three elements:
        [internal numpy buffer size, error mask, error callback function].

        The error mask is a single integer that holds the treatment information
        on all four floating point errors. The information for each error type
        is contained in three bits of the integer. If we print it in base 8, we
        can see what treatment is set for "invalid", "under", "over", and
        "divide" (in that order). The printed string can be interpreted with

        * 0 : 'ignore'
        * 1 : 'warn'
        * 2 : 'raise'
        * 3 : 'call'
        * 4 : 'print'
        * 5 : 'log'

    See Also
    --------
    seterrobj, seterr, geterr, seterrcall, geterrcall
    getbufsize, setbufsize

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterrobj()  # first get the defaults
    [10000, 0, None]

    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    ...
    >>> old_bufsize = np.setbufsize(20000)
    >>> old_err = np.seterr(divide='raise')
    >>> old_handler = np.seterrcall(err_handler)
    >>> np.geterrobj()
    [20000, 2, <function err_handler at 0x91dcaac>]

    >>> old_err = np.seterr(all='ignore')
    >>> np.base_repr(np.geterrobj()[1], 8)
    '0'
    >>> old_err = np.seterr(divide='warn', over='log', under='call',
    ...                     invalid='print')
    >>> np.base_repr(np.geterrobj()[1], 8)
    '4351'

    """)
# Docstring for the low-level error-state setter, the counterpart of geterrobj.
add_newdoc('numpy.core.umath', 'seterrobj',
    """
    seterrobj(errobj)

    Set the object that defines floating-point error handling.

    The error object contains all information that defines the error handling
    behavior in NumPy. `seterrobj` is used internally by the other
    functions that set error handling behavior (`seterr`, `seterrcall`).

    Parameters
    ----------
    errobj : list
        The error object, a list containing three elements:
        [internal numpy buffer size, error mask, error callback function].

        The error mask is a single integer that holds the treatment information
        on all four floating point errors. The information for each error type
        is contained in three bits of the integer. If we print it in base 8, we
        can see what treatment is set for "invalid", "under", "over", and
        "divide" (in that order). The printed string can be interpreted with

        * 0 : 'ignore'
        * 1 : 'warn'
        * 2 : 'raise'
        * 3 : 'call'
        * 4 : 'print'
        * 5 : 'log'

    See Also
    --------
    geterrobj, seterr, geterr, seterrcall, geterrcall
    getbufsize, setbufsize

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> old_errobj = np.geterrobj()  # first get the defaults
    >>> old_errobj
    [10000, 0, None]

    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    ...
    >>> new_errobj = [20000, 12, err_handler]
    >>> np.seterrobj(new_errobj)
    >>> np.base_repr(12, 8)  # int for divide=4 ('print') and over=1 ('warn')
    '14'
    >>> np.geterr()
    {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
    >>> np.geterrcall() is err_handler
    True

    """)
##############################################################################
#
# compiled_base functions
#
##############################################################################

add_newdoc('numpy.core.multiarray', 'add_docstring',
    """
    add_docstring(obj, docstring)

    Add a docstring to a built-in obj if possible.
    If the obj already has a docstring raise a RuntimeError
    If this routine does not know how to add a docstring to the object
    raise a TypeError

    """)

add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
    """
    add_ufunc_docstring(ufunc, new_docstring)

    Replace the docstring for a ufunc with new_docstring.
    This method will only work if the current docstring for
    the ufunc is NULL. (At the C level, i.e. when ufunc->doc is NULL.)

    Parameters
    ----------
    ufunc : numpy.ufunc
        A ufunc whose current doc is NULL.
    new_docstring : string
        The new docstring for the ufunc.

    Notes
    -----
    This method allocates memory for new_docstring on
    the heap. Technically this creates a memory leak, since this
    memory will not be reclaimed until the end of the program
    even if the ufunc itself is removed. However this will only
    be a problem if the user is repeatedly creating ufuncs with
    no documentation, adding documentation via add_newdoc_ufunc,
    and then throwing away the ufunc.

    """)
# Docstrings for the bit-packing pair packbits/unpackbits.
add_newdoc('numpy.core.multiarray', 'packbits',
    """
    packbits(myarray, axis=None)

    Packs the elements of a binary-valued array into bits in a uint8 array.

    The result is padded to full bytes by inserting zero bits at the end.

    Parameters
    ----------
    myarray : array_like
        An array of integers or booleans whose elements should be packed to
        bits.
    axis : int, optional
        The dimension over which bit-packing is done.
        ``None`` implies packing the flattened array.

    Returns
    -------
    packed : ndarray
        Array of type uint8 whose elements represent bits corresponding to the
        logical (0 or nonzero) value of the input elements. The shape of
        `packed` has the same number of dimensions as the input (unless `axis`
        is None, in which case the output is 1-D).

    See Also
    --------
    unpackbits: Unpacks elements of a uint8 array into a binary-valued output
                array.

    Examples
    --------
    >>> a = np.array([[[1,0,1],
    ...                [0,1,0]],
    ...               [[1,1,0],
    ...                [0,0,1]]])
    >>> b = np.packbits(a, axis=-1)
    >>> b
    array([[[160],[64]],[[192],[32]]], dtype=uint8)

    Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
    and 32 = 0010 0000.

    """)

add_newdoc('numpy.core.multiarray', 'unpackbits',
    """
    unpackbits(myarray, axis=None)

    Unpacks elements of a uint8 array into a binary-valued output array.

    Each element of `myarray` represents a bit-field that should be unpacked
    into a binary-valued output array. The shape of the output array is either
    1-D (if `axis` is None) or the same shape as the input array with unpacking
    done along the axis specified.

    Parameters
    ----------
    myarray : ndarray, uint8 type
       Input array.
    axis : int, optional
        The dimension over which bit-unpacking is done.
        ``None`` implies unpacking the flattened array.

    Returns
    -------
    unpacked : ndarray, uint8 type
       The elements are binary-valued (0 or 1).

    See Also
    --------
    packbits : Packs the elements of a binary-valued array into bits in a uint8
               array.

    Examples
    --------
    >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
    >>> a
    array([[ 2],
           [ 7],
           [23]], dtype=uint8)
    >>> b = np.unpackbits(a, axis=1)
    >>> b
    array([[0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1],
           [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)

    """)
# Docstring for the printf cross-validation test helper.
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
    """
    format_float_OSprintf_g(val, precision)

    Print a floating point scalar using the system's printf function,
    equivalent to:

        printf("%.*g", precision, val);

    for half/float/double, or replacing 'g' by 'Lg' for longdouble. This
    method is designed to help cross-validate the format_float_* methods.

    Parameters
    ----------
    val : python float or numpy floating scalar
        Value to format.

    precision : non-negative integer, optional
        Precision given to printf.

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_scientific
    format_float_positional

    """)

##############################################################################
#
# Documentation for ufunc attributes and methods
#
##############################################################################

##############################################################################
#
# ufunc object
#
##############################################################################
# Class-level docstring for the ufunc type itself.
add_newdoc('numpy.core', 'ufunc',
    """
    Functions that operate element by element on whole arrays.

    To see the documentation for a specific ufunc, use `info`.  For
    example, ``np.info(np.sin)``.  Because ufuncs are written in C
    (for speed) and linked into Python with NumPy's ufunc facility,
    Python's help() function finds this page whenever help() is called
    on a ufunc.

    A detailed explanation of ufuncs can be found in the docs for :ref:`ufuncs`.

    Calling ufuncs:
    ===============

    op(*x[, out], where=True, **kwargs)
    Apply `op` to the arguments `*x` elementwise, broadcasting the arguments.

    The broadcasting rules are:

    * Dimensions of length 1 may be prepended to either array.
    * Arrays may be repeated along dimensions of length 1.

    Parameters
    ----------
    *x : array_like
        Input arrays.
    out : ndarray, None, or tuple of ndarray and None, optional
        Alternate array object(s) in which to put the result; if provided, it
        must have a shape that the inputs broadcast to. A tuple of arrays
        (possible only as a keyword argument) must have length equal to the
        number of outputs; use `None` for uninitialized outputs to be
        allocated by the ufunc.
    where : array_like, optional
        Values of True indicate to calculate the ufunc at that position, values
        of False indicate to leave the value in the output alone.  Note that if
        an uninitialized return array is created via the default ``out=None``,
        then the elements where the values are False will remain uninitialized.
    **kwargs
        For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.

    Returns
    -------
    r : ndarray or tuple of ndarray
        `r` will have the shape that the arrays in `x` broadcast to; if `out` is
        provided, it will be returned. If not, `r` will be allocated and
        may contain uninitialized values. If the function has more than one
        output, then the result will be a tuple of arrays.

    """)

##############################################################################
#
# ufunc attributes
#
##############################################################################
# Docstrings for the simple ufunc data attributes: identity and the
# argument-count attributes nargs/nin/nout.
add_newdoc('numpy.core', 'ufunc', ('identity',
    """
    The identity value.

    Data attribute containing the identity element for the ufunc, if it has one.
    If it does not, the attribute value is None.

    Examples
    --------
    >>> np.add.identity
    0
    >>> np.multiply.identity
    1
    >>> np.power.identity
    1
    >>> print(np.exp.identity)
    None

    """))

add_newdoc('numpy.core', 'ufunc', ('nargs',
    """
    The number of arguments.

    Data attribute containing the number of arguments the ufunc takes, including
    optional ones.

    Notes
    -----
    Typically this value will be one more than what you might expect because all
    ufuncs take the optional "out" argument.

    Examples
    --------
    >>> np.add.nargs
    3
    >>> np.multiply.nargs
    3
    >>> np.power.nargs
    3
    >>> np.exp.nargs
    2

    """))

add_newdoc('numpy.core', 'ufunc', ('nin',
    """
    The number of inputs.

    Data attribute containing the number of arguments the ufunc treats as input.

    Examples
    --------
    >>> np.add.nin
    2
    >>> np.multiply.nin
    2
    >>> np.power.nin
    2
    >>> np.exp.nin
    1

    """))

add_newdoc('numpy.core', 'ufunc', ('nout',
    """
    The number of outputs.

    Data attribute containing the number of arguments the ufunc treats as output.

    Notes
    -----
    Since all ufuncs can take output arguments, this will always be (at least) 1.

    Examples
    --------
    >>> np.add.nout
    1
    >>> np.multiply.nout
    1
    >>> np.power.nout
    1
    >>> np.exp.nout
    1

    """))
# Docstrings for the type-resolution attributes ntypes/types and the
# generalized-ufunc signature attribute.
add_newdoc('numpy.core', 'ufunc', ('ntypes',
    """
    The number of types.

    The number of numerical NumPy types - of which there are 18 total - on which
    the ufunc can operate.

    See Also
    --------
    numpy.ufunc.types

    Examples
    --------
    >>> np.add.ntypes
    18
    >>> np.multiply.ntypes
    18
    >>> np.power.ntypes
    17
    >>> np.exp.ntypes
    7
    >>> np.remainder.ntypes
    14

    """))

add_newdoc('numpy.core', 'ufunc', ('types',
    """
    Returns a list with types grouped input->output.

    Data attribute listing the data-type "Domain-Range" groupings the ufunc can
    deliver. The data-types are given using the character codes.

    See Also
    --------
    numpy.ufunc.ntypes

    Examples
    --------
    >>> np.add.types
    ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
    'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
    'GG->G', 'OO->O']

    >>> np.multiply.types
    ['??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
    'LL->L', 'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D',
    'GG->G', 'OO->O']

    >>> np.power.types
    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
    'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'FF->F', 'DD->D', 'GG->G',
    'OO->O']

    >>> np.exp.types
    ['f->f', 'd->d', 'g->g', 'F->F', 'D->D', 'G->G', 'O->O']

    >>> np.remainder.types
    ['bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l', 'LL->L',
    'qq->q', 'QQ->Q', 'ff->f', 'dd->d', 'gg->g', 'OO->O']

    """))

add_newdoc('numpy.core', 'ufunc', ('signature',
    """
    Definition of the core elements a generalized ufunc operates on.

    The signature determines how the dimensions of each input/output array
    are split into core and loop dimensions:

    1. Each dimension in the signature is matched to a dimension of the
       corresponding passed-in array, starting from the end of the shape tuple.
    2. Core dimensions assigned to the same label in the signature must have
       exactly matching sizes, no broadcasting is performed.
    3. The core dimensions are removed from all inputs and the remaining
       dimensions are broadcast together, defining the loop dimensions.

    Notes
    -----
    Generalized ufuncs are used internally in many linalg functions, and in
    the testing suite; the examples below are taken from these.
    For ufuncs that operate on scalars, the signature is `None`, which is
    equivalent to '()' for every argument.

    Examples
    --------
    >>> np.core.umath_tests.matrix_multiply.signature
    '(m,n),(n,p)->(m,p)'
    >>> np.linalg._umath_linalg.det.signature
    '(m,m)->()'
    >>> np.add.signature is None
    True  # equivalent to '(),()->()'

    """))
##############################################################################
#
# ufunc methods
#
##############################################################################

add_newdoc('numpy.core', 'ufunc', ('reduce',
    """
    reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial)

    Reduces `a`'s dimension by one, by applying ufunc along one axis.

    Let :math:`a.shape = (N_0, ..., N_i, ..., N_{M-1})`.  Then
    :math:`ufunc.reduce(a, axis=i)[k_0, ..,k_{i-1}, k_{i+1}, .., k_{M-1}]` =
    the result of iterating `j` over :math:`range(N_i)`, cumulatively applying
    ufunc to each :math:`a[k_0, ..,k_{i-1}, j, k_{i+1}, .., k_{M-1}]`.
    For a one-dimensional array, reduce produces results equivalent to:
    ::

     r = op.identity # op = ufunc
     for i in range(len(A)):
       r = op(r, A[i])
     return r

    For example, add.reduce() is equivalent to sum().

    Parameters
    ----------
    a : array_like
        The array to act on.
    axis : None or int or tuple of ints, optional
        Axis or axes along which a reduction is performed.
        The default (`axis` = 0) is perform a reduction over the first
        dimension of the input array. `axis` may be negative, in
        which case it counts from the last to the first axis.

        .. versionadded:: 1.7.0

        If this is `None`, a reduction is performed over all the axes.
        If this is a tuple of ints, a reduction is performed on multiple
        axes, instead of a single axis or all the axes as before.

        For operations which are either not commutative or not associative,
        doing a reduction over multiple axes is not well-defined. The
        ufuncs do not currently raise an exception in this case, but will
        likely do so in the future.
    dtype : data-type code, optional
        The type used to represent the intermediate results. Defaults
        to the data-type of the output array if this is provided, or
        the data-type of the input array if no output array is provided.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If not provided or `None`,
        a freshly-allocated array is returned. For consistency with
        :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
        1-element tuple.

        .. versionchanged:: 1.13.0
           Tuples are allowed for keyword argument.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `arr`.

        .. versionadded:: 1.7.0
    initial : scalar, optional
        The value with which to start the reduction.
        If the ufunc has no identity or the dtype is object, this defaults
        to None - otherwise it defaults to ufunc.identity.
        If ``None`` is given, the first element of the reduction is used,
        and an error is thrown if the reduction is empty.

        .. versionadded:: 1.15.0

    Returns
    -------
    r : ndarray
        The reduced array. If `out` was supplied, `r` is a reference to it.

    Examples
    --------
    >>> np.multiply.reduce([2,3,5])
    30

    A multi-dimensional array example:

    >>> X = np.arange(8).reshape((2,2,2))
    >>> X
    array([[[0, 1],
            [2, 3]],
           [[4, 5],
            [6, 7]]])
    >>> np.add.reduce(X, 0)
    array([[ 4,  6],
           [ 8, 10]])
    >>> np.add.reduce(X) # confirm: default axis value is 0
    array([[ 4,  6],
           [ 8, 10]])
    >>> np.add.reduce(X, 1)
    array([[ 2,  4],
           [10, 12]])
    >>> np.add.reduce(X, 2)
    array([[ 1,  5],
           [ 9, 13]])

    You can use the ``initial`` keyword argument to initialize the reduction with a
    different value.

    >>> np.add.reduce([10], initial=5)
    15
    >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
    array([14., 14.])

    Allows reductions of empty arrays where they would normally fail, i.e.
    for ufuncs without an identity.

    >>> np.minimum.reduce([], initial=np.inf)
    inf
    >>> np.minimum.reduce([])
    Traceback (most recent call last):
        ...
    ValueError: zero-size array to reduction operation minimum which has no identity

    """))
# Docstring for ufunc.accumulate.
add_newdoc('numpy.core', 'ufunc', ('accumulate',
    """
    accumulate(array, axis=0, dtype=None, out=None)

    Accumulate the result of applying the operator to all elements.

    For a one-dimensional array, accumulate produces results equivalent to::

      r = np.empty(len(A))
      t = op.identity        # op = the ufunc being applied to A's elements
      for i in range(len(A)):
          t = op(t, A[i])
          r[i] = t
      return r

    For example, add.accumulate() is equivalent to np.cumsum().

    For a multi-dimensional array, accumulate is applied along only one
    axis (axis zero by default; see Examples below) so repeated use is
    necessary if one wants to accumulate over multiple axes.

    Parameters
    ----------
    array : array_like
        The array to act on.
    axis : int, optional
        The axis along which to apply the accumulation; default is zero.
    dtype : data-type code, optional
        The data-type used to represent the intermediate results. Defaults
        to the data-type of the output array if such is provided, or the
        data-type of the input array if no output array is provided.
    out : ndarray, None, or tuple of ndarray and None, optional
        A location into which the result is stored. If not provided or `None`,
        a freshly-allocated array is returned. For consistency with
        :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
        1-element tuple.

        .. versionchanged:: 1.13.0
           Tuples are allowed for keyword argument.

    Returns
    -------
    r : ndarray
        The accumulated values. If `out` was supplied, `r` is a reference to
        `out`.

    Examples
    --------
    1-D array examples:

    >>> np.add.accumulate([2, 3, 5])
    array([ 2,  5, 10])
    >>> np.multiply.accumulate([2, 3, 5])
    array([ 2,  6, 30])

    2-D array examples:

    >>> I = np.eye(2)
    >>> I
    array([[ 1.,  0.],
           [ 0.,  1.]])

    Accumulate along axis 0 (rows), down columns:

    >>> np.add.accumulate(I, 0)
    array([[ 1.,  0.],
           [ 1.,  1.]])
    >>> np.add.accumulate(I) # no axis specified = axis zero
    array([[ 1.,  0.],
           [ 1.,  1.]])

    Accumulate along axis 1 (columns), through rows:

    >>> np.add.accumulate(I, 1)
    array([[ 1.,  1.],
           [ 0.,  1.]])

    """))
add_newdoc('numpy.core', 'ufunc', ('reduceat', | |
""" | |
reduceat(a, indices, axis=0, dtype=None, out=None) | |
Performs a (local) reduce with specified slices over a single axis. | |
For i in ``range(len(indices))``, `reduceat` computes | |
``ufunc.reduce(a[indices[i]:indices[i+1]])``, which becomes the i-th | |
generalized "row" parallel to `axis` in the final result (i.e., in a | |
2-D array, for example, if `axis = 0`, it becomes the i-th row, but if | |
`axis = 1`, it becomes the i-th column). There are three exceptions to this: | |
* when ``i = len(indices) - 1`` (so for the last index), | |
``indices[i+1] = a.shape[axis]``. | |
* if ``indices[i] >= indices[i + 1]``, the i-th generalized "row" is | |
simply ``a[indices[i]]``. | |
* if ``indices[i] >= len(a)`` or ``indices[i] < 0``, an error is raised. | |
The shape of the output depends on the size of `indices`, and may be | |
larger than `a` (this happens if ``len(indices) > a.shape[axis]``). | |
Parameters | |
---------- | |
a : array_like | |
The array to act on. | |
indices : array_like | |
Paired indices, comma separated (not colon), specifying slices to | |
reduce. | |
axis : int, optional | |
The axis along which to apply the reduceat. | |
dtype : data-type code, optional | |
The type used to represent the intermediate results. Defaults | |
to the data type of the output array if this is provided, or | |
the data type of the input array if no output array is provided. | |
out : ndarray, None, or tuple of ndarray and None, optional | |
A location into which the result is stored. If not provided or `None`, | |
a freshly-allocated array is returned. For consistency with | |
:ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a | |
1-element tuple. | |
.. versionchanged:: 1.13.0 | |
Tuples are allowed for keyword argument. | |
Returns | |
------- | |
r : ndarray | |
The reduced values. If `out` was supplied, `r` is a reference to | |
`out`. | |
Notes | |
----- | |
A descriptive example: | |
If `a` is 1-D, the function `ufunc.accumulate(a)` is the same as | |
``ufunc.reduceat(a, indices)[::2]`` where `indices` is | |
``range(len(array) - 1)`` with a zero placed | |
in every other element: | |
``indices = zeros(2 * len(a) - 1)``, ``indices[1::2] = range(1, len(a))``. | |
Don't be fooled by this attribute's name: `reduceat(a)` is not | |
necessarily smaller than `a`. | |
Examples | |
-------- | |
To take the running sum of four successive values: | |
>>> np.add.reduceat(np.arange(8),[0,4, 1,5, 2,6, 3,7])[::2] | |
array([ 6, 10, 14, 18]) | |
A 2-D example: | |
>>> x = np.linspace(0, 15, 16).reshape(4,4) | |
>>> x | |
array([[ 0., 1., 2., 3.], | |
[ 4., 5., 6., 7.], | |
[ 8., 9., 10., 11.], | |
[ 12., 13., 14., 15.]]) | |
:: | |
# reduce such that the result has the following five rows: | |
# [row1 + row2 + row3] | |
# [row4] | |
# [row2] | |
# [row3] | |
# [row1 + row2 + row3 + row4] | |
>>> np.add.reduceat(x, [0, 3, 1, 2, 0]) | |
array([[ 12., 15., 18., 21.], | |
[ 12., 13., 14., 15.], | |
[ 4., 5., 6., 7.], | |
[ 8., 9., 10., 11.], | |
[ 24., 28., 32., 36.]]) | |
:: | |
# reduce such that result has the following two columns: | |
# [col1 * col2 * col3, col4] | |
>>> np.multiply.reduceat(x, [0, 3], 1) | |
array([[ 0., 3.], | |
[ 120., 7.], | |
[ 720., 11.], | |
[ 2184., 15.]]) | |
""")) | |
add_newdoc('numpy.core', 'ufunc', ('outer', | |
""" | |
outer(A, B, **kwargs) | |
Apply the ufunc `op` to all pairs (a, b) with a in `A` and b in `B`. | |
Let ``M = A.ndim``, ``N = B.ndim``. Then the result, `C`, of | |
``op.outer(A, B)`` is an array of dimension M + N such that: | |
.. math:: C[i_0, ..., i_{M-1}, j_0, ..., j_{N-1}] = | |
op(A[i_0, ..., i_{M-1}], B[j_0, ..., j_{N-1}]) | |
For `A` and `B` one-dimensional, this is equivalent to:: | |
r = empty(len(A),len(B)) | |
for i in range(len(A)): | |
for j in range(len(B)): | |
r[i,j] = op(A[i], B[j]) # op = ufunc in question | |
Parameters | |
---------- | |
A : array_like | |
First array | |
B : array_like | |
Second array | |
kwargs : any | |
Arguments to pass on to the ufunc. Typically `dtype` or `out`. | |
Returns | |
------- | |
r : ndarray | |
Output array | |
See Also | |
-------- | |
numpy.outer | |
Examples | |
-------- | |
>>> np.multiply.outer([1, 2, 3], [4, 5, 6]) | |
array([[ 4, 5, 6], | |
[ 8, 10, 12], | |
[12, 15, 18]]) | |
A multi-dimensional example: | |
>>> A = np.array([[1, 2, 3], [4, 5, 6]]) | |
>>> A.shape | |
(2, 3) | |
>>> B = np.array([[1, 2, 3, 4]]) | |
>>> B.shape | |
(1, 4) | |
>>> C = np.multiply.outer(A, B) | |
>>> C.shape; C | |
(2, 3, 1, 4) | |
array([[[[ 1, 2, 3, 4]], | |
[[ 2, 4, 6, 8]], | |
[[ 3, 6, 9, 12]]], | |
[[[ 4, 8, 12, 16]], | |
[[ 5, 10, 15, 20]], | |
[[ 6, 12, 18, 24]]]]) | |
""")) | |
add_newdoc('numpy.core', 'ufunc', ('at', | |
""" | |
at(a, indices, b=None) | |
Performs unbuffered in place operation on operand 'a' for elements | |
specified by 'indices'. For addition ufunc, this method is equivalent to | |
``a[indices] += b``, except that results are accumulated for elements that | |
are indexed more than once. For example, ``a[[0,0]] += 1`` will only | |
increment the first element once because of buffering, whereas | |
``add.at(a, [0,0], 1)`` will increment the first element twice. | |
.. versionadded:: 1.8.0 | |
Parameters | |
---------- | |
a : array_like | |
The array to perform in place operation on. | |
indices : array_like or tuple | |
Array like index object or slice object for indexing into first | |
operand. If first operand has multiple dimensions, indices can be a | |
tuple of array like index objects or slice objects. | |
b : array_like | |
Second operand for ufuncs requiring two operands. Operand must be | |
broadcastable over first operand after indexing or slicing. | |
Examples | |
-------- | |
Set items 0 and 1 to their negative values: | |
>>> a = np.array([1, 2, 3, 4]) | |
>>> np.negative.at(a, [0, 1]) | |
>>> a
array([-1, -2, 3, 4]) | |
Increment items 0 and 1, and increment item 2 twice: | |
>>> a = np.array([1, 2, 3, 4]) | |
>>> np.add.at(a, [0, 1, 2, 2], 1) | |
>>> a
array([2, 3, 5, 4]) | |
Add items 0 and 1 in first array to second array, | |
and store results in first array: | |
>>> a = np.array([1, 2, 3, 4]) | |
>>> b = np.array([1, 2]) | |
>>> np.add.at(a, [0, 1], b) | |
>>> a
array([2, 4, 3, 4]) | |
""")) | |
############################################################################## | |
# | |
# Documentation for dtype attributes and methods | |
# | |
############################################################################## | |
############################################################################## | |
# | |
# dtype object | |
# | |
############################################################################## | |
add_newdoc('numpy.core.multiarray', 'dtype', | |
""" | |
dtype(obj, align=False, copy=False) | |
Create a data type object. | |
A numpy array is homogeneous, and contains elements described by a | |
dtype object. A dtype object can be constructed from different | |
combinations of fundamental numeric types. | |
Parameters | |
---------- | |
obj | |
Object to be converted to a data type object. | |
align : bool, optional | |
Add padding to the fields to match what a C compiler would output | |
for a similar C-struct. Can be ``True`` only if `obj` is a dictionary | |
or a comma-separated string. If a struct dtype is being created, | |
this also sets a sticky alignment flag ``isalignedstruct``. | |
copy : bool, optional | |
Make a new copy of the data-type object. If ``False``, the result | |
may just be a reference to a built-in data-type object. | |
See also | |
-------- | |
result_type | |
Examples | |
-------- | |
Using array-scalar type: | |
>>> np.dtype(np.int16) | |
dtype('int16') | |
Structured type, one field name 'f1', containing int16: | |
>>> np.dtype([('f1', np.int16)]) | |
dtype([('f1', '<i2')]) | |
Structured type, one field named 'f1', in itself containing a structured | |
type with one field: | |
>>> np.dtype([('f1', [('f1', np.int16)])]) | |
dtype([('f1', [('f1', '<i2')])]) | |
Structured type, two fields: the first field contains an unsigned int, the | |
second an int32: | |
>>> np.dtype([('f1', np.uint), ('f2', np.int32)]) | |
dtype([('f1', '<u4'), ('f2', '<i4')]) | |
Using array-protocol type strings: | |
>>> np.dtype([('a','f8'),('b','S10')]) | |
dtype([('a', '<f8'), ('b', '|S10')]) | |
Using comma-separated field formats. The shape is (2,3): | |
>>> np.dtype("i4, (2,3)f8") | |
dtype([('f0', '<i4'), ('f1', '<f8', (2, 3))]) | |
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void`` | |
is a flexible type, here of size 10: | |
>>> np.dtype([('hello',(int,3)),('world',np.void,10)]) | |
dtype([('hello', '<i4', 3), ('world', '|V10')]) | |
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are | |
the offsets in bytes: | |
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) | |
dtype(('<i2', [('x', '|i1'), ('y', '|i1')])) | |
Using dictionaries. Two fields named 'gender' and 'age': | |
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) | |
dtype([('gender', '|S1'), ('age', '|u1')]) | |
Offsets in bytes, here 0 and 25: | |
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) | |
dtype([('surname', '|S25'), ('age', '|u1')]) | |
""") | |
############################################################################## | |
# | |
# dtype attributes | |
# | |
############################################################################## | |
# dtype.alignment — add_newdoc attaches the string below as the docstring of
# the `alignment` attribute of numpy.core.multiarray.dtype at import time.
add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
    """
    The required alignment (bytes) of this data-type according to the compiler.
    More information is available in the C-API section of the manual.
    """))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder', | |
""" | |
A character indicating the byte-order of this data-type object. | |
One of: | |
=== ============== | |
'=' native | |
'<' little-endian | |
'>' big-endian | |
'|' not applicable | |
=== ============== | |
All built-in data-type objects have byteorder either '=' or '|'. | |
Examples | |
-------- | |
>>> dt = np.dtype('i2') | |
>>> dt.byteorder | |
'=' | |
>>> # endian is not relevant for 8 bit numbers | |
>>> np.dtype('i1').byteorder | |
'|' | |
>>> # or ASCII strings | |
>>> np.dtype('S2').byteorder | |
'|' | |
>>> # Even if specific code is given, and it is native | |
>>> # '=' is the byteorder | |
>>> import sys | |
>>> sys_is_le = sys.byteorder == 'little' | |
>>> native_code = sys_is_le and '<' or '>' | |
>>> swapped_code = sys_is_le and '>' or '<' | |
>>> dt = np.dtype(native_code + 'i2') | |
>>> dt.byteorder | |
'=' | |
>>> # Swapped code shows up as itself | |
>>> dt = np.dtype(swapped_code + 'i2') | |
>>> dt.byteorder == swapped_code | |
True | |
""")) | |
# dtype.char — single-character type code (compare dtype.kind and dtype.num).
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
    """A unique character code for each of the 21 different built-in types."""))

# dtype.descr — the '__array_interface__'-format description of the dtype;
# explicitly NOT a description that can be fed back to np.dtype().
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
    """
    `__array_interface__` description of the data-type.
    The format is that required by the 'descr' key in the
    `__array_interface__` attribute.
    Warning: This attribute exists specifically for `__array_interface__`,
    and is not a datatype description compatible with `np.dtype`.
    """))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields', | |
""" | |
Dictionary of named fields defined for this data type, or ``None``. | |
The dictionary is indexed by keys that are the names of the fields. | |
Each entry in the dictionary is a tuple fully describing the field:: | |
(dtype, offset[, title]) | |
Offset is limited to C int, which is signed and usually 32 bits. | |
If present, the optional title can be any object (if it is a string | |
or unicode then it will also be a key in the fields dictionary, | |
otherwise it's meta-data). Notice also that the first two elements | |
of the tuple can be passed directly as arguments to the ``ndarray.getfield`` | |
and ``ndarray.setfield`` methods. | |
See Also | |
-------- | |
ndarray.getfield, ndarray.setfield | |
Examples | |
-------- | |
>>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))]) | |
>>> print(dt.fields) | |
{'grades': (dtype(('float64',(2,))), 16), 'name': (dtype('<U16'), 0)}
""")) | |
# dtype.flags — raw interpretation bit-flags; the named bit-masks listed in
# the text live as constants in numpy.core.multiarray.
add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
    """
    Bit-flags describing how this data type is to be interpreted.
    Bit-masks are in `numpy.core.multiarray` as the constants
    `ITEM_HASOBJECT`, `LIST_PICKLE`, `ITEM_IS_POINTER`, `NEEDS_INIT`,
    `NEEDS_PYAPI`, `USE_GETITEM`, `USE_SETITEM`. A full explanation
    of these flags is in C-API documentation; they are largely useful
    for user-defined data-types.
    """))

# dtype.hasobject — True when the dtype (or any field / sub-dtype) stores
# reference-counted Python objects, i.e. the array memory holds pointers.
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
    """
    Boolean indicating whether this dtype contains any reference-counted
    objects in any fields or sub-dtypes.
    Recall that what is actually in the ndarray memory representing
    the Python object is the memory address of that object (a pointer).
    Special handling may be required, and this attribute is useful for
    distinguishing data types that may contain arbitrary Python objects
    and data-types that won't.
    """))
add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin', | |
""" | |
Integer indicating how this dtype relates to the built-in dtypes. | |
Read-only. | |
= ======================================================================== | |
0 if this is a structured array type, with fields | |
1 if this is a dtype compiled into numpy (such as ints, floats etc) | |
2 if the dtype is for a user-defined numpy type | |
A user-defined type uses the numpy C-API machinery to extend | |
numpy to handle a new array type. See | |
:ref:`user.user-defined-data-types` in the NumPy manual. | |
= ======================================================================== | |
Examples | |
-------- | |
>>> dt = np.dtype('i2') | |
>>> dt.isbuiltin | |
1 | |
>>> dt = np.dtype('f8') | |
>>> dt.isbuiltin | |
1 | |
>>> dt = np.dtype([('field1', 'f8')]) | |
>>> dt.isbuiltin | |
0 | |
""")) | |
# dtype.isnative — platform byte-order check.
add_newdoc('numpy.core.multiarray', 'dtype', ('isnative',
    """
    Boolean indicating whether the byte order of this dtype is native
    to the platform.
    """))

# dtype.isalignedstruct — "sticky" alignment flag set by dtype(..., align=True);
# survives when aligned struct dtypes are combined.
add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
    """
    Boolean indicating whether the dtype is a struct which maintains
    field alignment. This flag is sticky, so when combining multiple
    structs together, it is preserved and produces new dtypes which
    are also aligned.
    """))

# dtype.itemsize — per-element size in bytes; only flexible types vary.
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
    """
    The element size of this data-type object.
    For 18 of the 21 types this number is fixed by the data-type.
    For the flexible data-types, this number can be anything.
    """))
# dtype.kind — coarse one-letter classification of the data (the table below
# is rendered as reST in the built docs).
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
    """
    A character code (one of 'biufcmMOSUV') identifying the general kind of data.
    =  ======================
    b  boolean
    i  signed integer
    u  unsigned integer
    f  floating-point
    c  complex floating-point
    m  timedelta
    M  datetime
    O  object
    S  (byte-)string
    U  Unicode
    V  void
    =  ======================
    """))
# dtype.name — e.g. 'int16', 'float64'; absent on un-sized flexible dtypes.
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
    """
    A bit-width name for this data-type.
    Un-sized flexible data-type objects do not have this attribute.
    """))
# dtype.names — field names ordered by byte offset (None for unstructured
# dtypes); handy for walking fields in memory-layout order.
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
    """
    Ordered list of field names, or ``None`` if there are no fields.
    The names are ordered according to increasing byte offset. This can be
    used, for example, to walk through all of the named fields in offset order.
    Examples
    --------
    >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
    >>> dt.names
    ('name', 'grades')
    """))
# dtype.num — integer type number, roughly ordered by precision.
add_newdoc('numpy.core.multiarray', 'dtype', ('num',
    """
    A unique number for each of the 21 different built-in types.
    These are roughly ordered from least-to-most precision.
    """))

# dtype.shape — fixed shape of a sub-array dtype, () otherwise.
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
    """
    Shape tuple of the sub-array if this data type describes a sub-array,
    and ``()`` otherwise.
    """))

# dtype.ndim — rank of the sub-array dtype, 0 otherwise (new in 1.13.0).
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
    """
    Number of dimensions of the sub-array if this data type describes a
    sub-array, and ``0`` otherwise.
    .. versionadded:: 1.13.0
    """))

# dtype.str — array-protocol typestring, e.g. '<i4'.
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
    """The array-protocol typestring of this data-type object."""))

# dtype.subdtype — (item_dtype, shape) pair for sub-array dtypes; the extra
# dimensions in `shape` are appended when such a field is retrieved.
add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
    """
    Tuple ``(item_dtype, shape)`` if this `dtype` describes a sub-array, and
    None otherwise.
    The *shape* is the fixed shape of the sub-array described by this
    data type, and *item_dtype* the data type of the array.
    If a field whose dtype object has this attribute is retrieved,
    then the extra dimensions implied by *shape* are tacked on to
    the end of the retrieved array.
    """))

# dtype.type — the scalar type object corresponding to this dtype.
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
    """The type object used to instantiate a scalar of this data-type."""))
############################################################################## | |
# | |
# dtype methods | |
# | |
############################################################################## | |
add_newdoc('numpy.core.multiarray', 'dtype', ('newbyteorder', | |
""" | |
newbyteorder(new_order='S') | |
Return a new dtype with a different byte order. | |
Changes are also made in all fields and sub-arrays of the data type. | |
Parameters | |
---------- | |
new_order : string, optional | |
Byte order to force; a value from the byte order specifications | |
below. The default value ('S') results in swapping the current | |
byte order. `new_order` codes can be any of: | |
* 'S' - swap dtype from current to opposite endian | |
* {'<', 'L'} - little endian | |
* {'>', 'B'} - big endian | |
* {'=', 'N'} - native order | |
* {'|', 'I'} - ignore (no change to byte order) | |
The code does a case-insensitive check on the first letter of | |
`new_order` for these alternatives. For example, any of '>' | |
or 'B' or 'b' or 'brian' are valid to specify big-endian. | |
Returns | |
------- | |
new_dtype : dtype | |
New dtype object with the given change to the byte order. | |
Notes | |
----- | |
Changes are also made in all fields and sub-arrays of the data type. | |
Examples | |
-------- | |
>>> import sys | |
>>> sys_is_le = sys.byteorder == 'little' | |
>>> native_code = sys_is_le and '<' or '>' | |
>>> swapped_code = sys_is_le and '>' or '<' | |
>>> native_dt = np.dtype(native_code+'i2') | |
>>> swapped_dt = np.dtype(swapped_code+'i2') | |
>>> native_dt.newbyteorder('S') == swapped_dt | |
True | |
>>> native_dt.newbyteorder() == swapped_dt | |
True | |
>>> native_dt == swapped_dt.newbyteorder('S') | |
True | |
>>> native_dt == swapped_dt.newbyteorder('=') | |
True | |
>>> native_dt == swapped_dt.newbyteorder('N') | |
True | |
>>> native_dt == native_dt.newbyteorder('|') | |
True | |
>>> np.dtype('<i2') == native_dt.newbyteorder('<') | |
True | |
>>> np.dtype('<i2') == native_dt.newbyteorder('L') | |
True | |
>>> np.dtype('>i2') == native_dt.newbyteorder('>') | |
True | |
>>> np.dtype('>i2') == native_dt.newbyteorder('B') | |
True | |
""")) | |
############################################################################## | |
# | |
# Datetime-related Methods | |
# | |
############################################################################## | |
add_newdoc('numpy.core.multiarray', 'busdaycalendar', | |
""" | |
busdaycalendar(weekmask='1111100', holidays=None) | |
A business day calendar object that efficiently stores information | |
defining valid days for the busday family of functions. | |
The default valid days are Monday through Friday ("business days"). | |
A busdaycalendar object can be specified with any set of weekly | |
valid days, plus an optional "holiday" dates that always will be invalid. | |
Once a busdaycalendar object is created, the weekmask and holidays | |
cannot be modified. | |
.. versionadded:: 1.7.0 | |
Parameters | |
---------- | |
weekmask : str or array_like of bool, optional | |
A seven-element array indicating which of Monday through Sunday are | |
valid days. May be specified as a length-seven list or array, like | |
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string | |
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for | |
weekdays, optionally separated by white space. Valid abbreviations | |
are: Mon Tue Wed Thu Fri Sat Sun | |
holidays : array_like of datetime64[D], optional | |
An array of dates to consider as invalid dates, no matter which | |
weekday they fall upon. Holiday dates may be specified in any | |
order, and NaT (not-a-time) dates are ignored. This list is | |
saved in a normalized form that is suited for fast calculations | |
of valid days. | |
Returns | |
------- | |
out : busdaycalendar | |
A business day calendar object containing the specified | |
weekmask and holidays values. | |
See Also | |
-------- | |
is_busday : Returns a boolean array indicating valid days. | |
busday_offset : Applies an offset counted in valid days. | |
busday_count : Counts how many valid days are in a half-open date range. | |
Attributes | |
---------- | |
Note: once a busdaycalendar object is created, you cannot modify the | |
weekmask or holidays. The attributes return copies of internal data. | |
weekmask : (copy) seven-element array of bool | |
holidays : (copy) sorted array of datetime64[D] | |
Examples | |
-------- | |
>>> # Some important days in July | |
... bdd = np.busdaycalendar( | |
... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) | |
>>> # Default is Monday to Friday weekdays | |
... bdd.weekmask | |
array([ True, True, True, True, True, False, False], dtype='bool') | |
>>> # Any holidays already on the weekend are removed | |
... bdd.holidays | |
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') | |
""") | |
# busdaycalendar exposes read-only copies of its (immutable) configuration:
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
    """A copy of the seven-element boolean mask indicating valid days."""))

add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
    """A copy of the holiday array indicating additional invalid days."""))
add_newdoc('numpy.core.multiarray', 'normalize_axis_index', | |
""" | |
normalize_axis_index(axis, ndim, msg_prefix=None) | |
Normalizes an axis index, `axis`, such that is a valid positive index into | |
the shape of array with `ndim` dimensions. Raises an AxisError with an | |
appropriate message if this is not possible. | |
Used internally by all axis-checking logic. | |
.. versionadded:: 1.13.0 | |
Parameters | |
---------- | |
axis : int | |
The un-normalized index of the axis. Can be negative | |
ndim : int | |
The number of dimensions of the array that `axis` should be normalized | |
against | |
msg_prefix : str | |
A prefix to put before the message, typically the name of the argument | |
Returns | |
------- | |
normalized_axis : int | |
The normalized axis index, such that `0 <= normalized_axis < ndim` | |
Raises | |
------ | |
AxisError | |
If the axis index is invalid, when `-ndim <= axis < ndim` is false. | |
Examples | |
-------- | |
>>> normalize_axis_index(0, ndim=3) | |
0 | |
>>> normalize_axis_index(1, ndim=3) | |
1 | |
>>> normalize_axis_index(-1, ndim=3) | |
2 | |
>>> normalize_axis_index(3, ndim=3) | |
Traceback (most recent call last): | |
... | |
AxisError: axis 3 is out of bounds for array of dimension 3 | |
>>> normalize_axis_index(-4, ndim=3, msg_prefix='axes_arg') | |
Traceback (most recent call last): | |
... | |
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3 | |
""") | |
add_newdoc('numpy.core.multiarray', 'datetime_data', | |
""" | |
datetime_data(dtype, /) | |
Get information about the step size of a date or time type. | |
The returned tuple can be passed as the second argument of `numpy.datetime64` and | |
`numpy.timedelta64`. | |
Parameters | |
---------- | |
dtype : dtype | |
The dtype object, which must be a `datetime64` or `timedelta64` type. | |
Returns | |
------- | |
unit : str | |
The :ref:`datetime unit <arrays.dtypes.dateunits>` on which this dtype | |
is based. | |
count : int | |
The number of base units in a step. | |
Examples | |
-------- | |
>>> dt_25s = np.dtype('timedelta64[25s]') | |
>>> np.datetime_data(dt_25s) | |
('s', 25) | |
>>> np.array(10, dt_25s).astype('timedelta64[s]') | |
array(250, dtype='timedelta64[s]') | |
The result can be used to construct a datetime that uses the same units | |
as a timedelta | |
>>> np.datetime64('2010', np.datetime_data(dt_25s)) | |
numpy.datetime64('2010-01-01T00:00:00', '25s') | |
""") | |
############################################################################## | |
# | |
# Documentation for `generic` attributes and methods | |
# | |
############################################################################## | |
add_newdoc('numpy.core.numerictypes', 'generic', | |
""" | |
Base class for numpy scalar types. | |
Class from which most (all?) numpy scalar types are derived. For | |
consistency, exposes the same API as `ndarray`, despite many | |
consequent attributes being either "get-only," or completely irrelevant. | |
This is the class from which it is strongly suggested users should derive | |
custom scalar types. | |
""") | |
# Attributes | |
add_newdoc('numpy.core.numerictypes', 'generic', ('T', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class so as to | |
provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
add_newdoc('numpy.core.numerictypes', 'generic', ('base', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class so as to | |
provide a uniform API.
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
# One-line docstrings for the ndarray-mirroring attributes of the `generic`
# scalar base class, each attached via add_newdoc.
add_newdoc('numpy.core.numerictypes', 'generic', ('data',
    """Pointer to start of data."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('dtype',
    """Get array data-descriptor."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('flags',
    """The integer value of flags."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('flat',
    """A 1-D view of the scalar."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('imag',
    """The imaginary part of the scalar."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('itemsize',
    """The length of one element in bytes."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('nbytes',
    """The length of the scalar in bytes."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('ndim',
    """The number of array dimensions."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('real',
    """The real part of the scalar."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('shape',
    """Tuple of array dimensions."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('size',
    """The number of elements in the gentype."""))

add_newdoc('numpy.core.numerictypes', 'generic', ('strides',
    """Tuple of bytes steps in each dimension."""))
# Methods | |
add_newdoc('numpy.core.numerictypes', 'generic', ('all', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class | |
so as to provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
add_newdoc('numpy.core.numerictypes', 'generic', ('any', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class | |
so as to provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
add_newdoc('numpy.core.numerictypes', 'generic', ('argmax', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class | |
so as to provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
add_newdoc('numpy.core.numerictypes', 'generic', ('argmin', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class | |
so as to provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
add_newdoc('numpy.core.numerictypes', 'generic', ('argsort', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class | |
so as to provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
add_newdoc('numpy.core.numerictypes', 'generic', ('astype', | |
""" | |
Not implemented (virtual attribute) | |
Class generic exists solely to derive numpy scalars from, and possesses, | |
albeit unimplemented, all the attributes of the ndarray class | |
so as to provide a uniform API. | |
See Also | |
-------- | |
The corresponding attribute of the derived class of interest. | |
""")) | |
# ----------------------------------------------------------------------------
# np.generic stubs out every ndarray attribute so that scalars present a
# uniform API; all of those stubs share the same boilerplate docstring.
# Register them in bulk instead of one hand-written call per attribute.
# ----------------------------------------------------------------------------
_UNIMPLEMENTED_ATTR_DOC = """
    Not implemented (virtual attribute)
    Class generic exists solely to derive numpy scalars from, and possesses,
    albeit unimplemented, all the attributes of the ndarray class
    so as to provide a uniform API.
    See Also
    --------
    The corresponding attribute of the derived class of interest.
    """

for _attr in ('choose', 'clip', 'compress', 'conjugate', 'copy', 'cumprod',
              'cumsum', 'diagonal', 'dump', 'dumps', 'fill', 'flatten',
              'getfield', 'item', 'itemset', 'max', 'mean', 'min', 'nonzero',
              'prod', 'ptp', 'put', 'ravel', 'repeat', 'reshape', 'resize',
              'round', 'searchsorted', 'setfield', 'sort', 'squeeze', 'std',
              'sum', 'swapaxes', 'take', 'tofile', 'tolist', 'tostring',
              'trace', 'transpose', 'var', 'view'):
    add_newdoc('numpy.core.numerictypes', 'generic',
               (_attr, _UNIMPLEMENTED_ATTR_DOC))

# 'byteswap' and 'setflags' historically line-wrap the same text slightly
# differently; keep their docstrings byte-for-byte identical to before.
_UNIMPLEMENTED_ATTR_DOC_ALT = """
    Not implemented (virtual attribute)
    Class generic exists solely to derive numpy scalars from, and possesses,
    albeit unimplemented, all the attributes of the ndarray class so as to
    provide a uniform API.
    See Also
    --------
    The corresponding attribute of the derived class of interest.
    """

for _attr in ('byteswap', 'setflags'):
    add_newdoc('numpy.core.numerictypes', 'generic',
               (_attr, _UNIMPLEMENTED_ATTR_DOC_ALT))

# 'newbyteorder' is the one attribute with real documentation of its own.
add_newdoc('numpy.core.numerictypes', 'generic', ('newbyteorder',
    """
    newbyteorder(new_order='S')
    Return a new `dtype` with a different byte order.
    Changes are also made in all fields and sub-arrays of the data type.
    The `new_order` code can be any from the following:
    * 'S' - swap dtype from current to opposite endian
    * {'<', 'L'} - little endian
    * {'>', 'B'} - big endian
    * {'=', 'N'} - native order
    * {'|', 'I'} - ignore (no change to byte order)
    Parameters
    ----------
    new_order : str, optional
        Byte order to force; a value from the byte order specifications
        above. The default value ('S') results in swapping the current
        byte order. The code does a case-insensitive check on the first
        letter of `new_order` for the alternatives above. For example,
        any of 'B' or 'b' or 'biggish' are valid to specify big-endian.
    Returns
    -------
    new_dtype : dtype
        New `dtype` object with the given change to the byte order.
    """))
############################################################################## | |
# | |
# Documentation for scalar type abstract base classes in type hierarchy | |
# | |
############################################################################## | |
# Register the docstrings of the abstract scalar base classes in one pass;
# each (name, docstring) pair below is attached verbatim.
for _name, _doc in [
    ('number', """
    Abstract base class of all numeric scalar types.
    """),
    ('integer', """
    Abstract base class of all integer scalar types.
    """),
    ('signedinteger', """
    Abstract base class of all signed integer scalar types.
    """),
    ('unsignedinteger', """
    Abstract base class of all unsigned integer scalar types.
    """),
    ('inexact', """
    Abstract base class of all numeric scalar types with a (potentially)
    inexact representation of the values in its range, such as
    floating-point numbers.
    """),
    ('floating', """
    Abstract base class of all floating-point scalar types.
    """),
    ('complexfloating', """
    Abstract base class of all complex number scalar types that are made up of
    floating-point numbers.
    """),
    ('flexible', """
    Abstract base class of all scalar types without predefined length.
    The actual size of these types depends on the specific `np.dtype`
    instantiation.
    """),
    ('character', """
    Abstract base class of all character string scalar types.
    """),
]:
    add_newdoc('numpy.core.numerictypes', _name, _doc)
############################################################################## | |
# | |
# Documentation for concrete scalar classes | |
# | |
############################################################################## | |
def numeric_type_aliases(aliases):
    """
    Filter ``(alias, doc)`` pairs down to the aliases that exist on this
    platform, returning ``(alias_type, alias, doc)`` triples.
    """
    existing = []
    for alias, doc in aliases:
        try:
            alias_type = getattr(_numerictypes, alias)
        except AttributeError:
            # The set of aliases that actually exist varies between platforms
            continue
        existing.append((alias_type, alias, doc))
    return existing
# Candidate (alias, doc) pairs for the sized scalar aliases; filtered to the
# ones that actually exist on this platform (e.g. float96 vs float128 are
# mutually exclusive depending on the size of C long double).
possible_aliases = numeric_type_aliases([
    ('int8', '8-bit signed integer (-128 to 127)'),
    ('int16', '16-bit signed integer (-32768 to 32767)'),
    ('int32', '32-bit signed integer (-2147483648 to 2147483647)'),
    ('int64', '64-bit signed integer (-9223372036854775808 to 9223372036854775807)'),
    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
    ('uint8', '8-bit unsigned integer (0 to 255)'),
    ('uint16', '16-bit unsigned integer (0 to 65535)'),
    ('uint32', '32-bit unsigned integer (0 to 4294967295)'),
    ('uint64', '64-bit unsigned integer (0 to 18446744073709551615)'),
    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
    ('float96', '96-bit extended-precision floating-point number type'),
    ('float128', '128-bit extended-precision floating-point number type'),
    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
    ])
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
    """
    Attach `doc` to the scalar type named `obj`, appending its character
    code and its aliases (both the `fixed_aliases` given explicitly and the
    platform-dependent ones found in `possible_aliases`).
    """
    o = getattr(_numerictypes, obj)
    character_code = dtype(o).char

    if obj == o.__name__:
        canonical_name_doc = ""
    else:
        canonical_name_doc = "Canonical name: ``np.{}``.\n    ".format(obj)

    alias_lines = ["Alias: ``np.{}``.\n    ".format(alias)
                   for alias in fixed_aliases]
    alias_lines += [
        "Alias *on this platform*: ``np.{}``: {}.\n    ".format(alias, adoc)
        for (alias_type, alias, adoc) in possible_aliases
        if alias_type is o]
    alias_doc = ''.join(alias_lines)

    docstring = """
    {doc}
    Character code: ``'{character_code}'``.
    {canonical_name_doc}{alias_doc}
    """.format(doc=doc.strip(), character_code=character_code,
               canonical_name_doc=canonical_name_doc, alias_doc=alias_doc)
    add_newdoc('numpy.core.numerictypes', obj, docstring)
# --- Concrete scalar types: boolean and the smaller C signed integers ---
add_newdoc_for_scalar_type('bool_', ['bool8'],
    """
    Boolean type (True or False), stored as a byte.
    """)
add_newdoc_for_scalar_type('byte', [],
    """
    Signed integer type, compatible with C ``char``.
    """)
add_newdoc_for_scalar_type('short', [],
    """
    Signed integer type, compatible with C ``short``.
    """)
add_newdoc_for_scalar_type('intc', [],
    """
    Signed integer type, compatible with C ``int``.
    """)
# `int_` is numpy's default integer scalar.
# Fix doc typo: "anc C" -> "and C".
add_newdoc_for_scalar_type('int_', [],
    """
    Signed integer type, compatible with Python `int` and C ``long``.
    """)
# --- Concrete scalar types: long long and the C unsigned integers ---
add_newdoc_for_scalar_type('longlong', [],
    """
    Signed integer type, compatible with C ``long long``.
    """)
add_newdoc_for_scalar_type('ubyte', [],
    """
    Unsigned integer type, compatible with C ``unsigned char``.
    """)
add_newdoc_for_scalar_type('ushort', [],
    """
    Unsigned integer type, compatible with C ``unsigned short``.
    """)
add_newdoc_for_scalar_type('uintc', [],
    """
    Unsigned integer type, compatible with C ``unsigned int``.
    """)
add_newdoc_for_scalar_type('uint', [],
    """
    Unsigned integer type, compatible with C ``unsigned long``.
    """)
# Fix doc bug: ulonglong is an *unsigned* type but was described as "Signed".
add_newdoc_for_scalar_type('ulonglong', [],
    """
    Unsigned integer type, compatible with C ``unsigned long long``.
    """)
# --- Concrete scalar types: floating point, complex, and object ---
add_newdoc_for_scalar_type('half', [],
    """
    Half-precision floating-point number type.
    """)
add_newdoc_for_scalar_type('single', [],
    """
    Single-precision floating-point number type, compatible with C ``float``.
    """)
add_newdoc_for_scalar_type('double', ['float_'],
    """
    Double-precision floating-point number type, compatible with Python `float`
    and C ``double``.
    """)
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
    """
    Extended-precision floating-point number type, compatible with C
    ``long double`` but not necessarily with IEEE 754 quadruple-precision.
    """)
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
    """
    Complex number type composed of two single-precision floating-point
    numbers.
    """)
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
    """
    Complex number type composed of two double-precision floating-point
    numbers, compatible with Python `complex`.
    """)
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
    """
    Complex number type composed of two extended-precision floating-point
    numbers.
    """)
add_newdoc_for_scalar_type('object_', [],
    """
    Any Python object.
    """)
""" | |
A place for code to be called from the implementation of np.dtype | |
String handling is much easier to do correctly in python. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import sys | |
import numpy as np | |
# Map each dtype.kind character code to the English stem of the type name;
# the bit count is appended elsewhere (e.g. 'f' + '64' -> 'float64').
_kind_to_stem = {
    'u': 'uint',
    'i': 'int',
    'c': 'complex',
    'f': 'float',
    'b': 'bool',
    'V': 'void',
    'O': 'object',
    'M': 'datetime',
    'm': 'timedelta'
}
# The string kinds are named after the native text/bytes types, which
# differ between Python 2 and Python 3.
if sys.version_info[0] >= 3:
    _kind_to_stem.update({
        'S': 'bytes',
        'U': 'str'
    })
else:
    _kind_to_stem.update({
        'S': 'string',
        'U': 'unicode'
    })
def _kind_name(dtype):
    """Return the type-name stem (e.g. 'int', 'float') for ``dtype.kind``."""
    kind = dtype.kind
    try:
        return _kind_to_stem[kind]
    except KeyError:
        # every kind numpy can produce should be in the table
        raise RuntimeError(
            "internal dtype error, unknown kind {!r}".format(kind)
        )
def __str__(dtype): | |
if dtype.fields is not None: | |
return _struct_str(dtype, include_align=True) | |
elif dtype.subdtype: | |
return _subarray_str(dtype) | |
elif issubclass(dtype.type, np.flexible) or not dtype.isnative: | |
return dtype.str | |
else: | |
return dtype.name | |
def __repr__(dtype):
    """Implementation of ``dtype.__repr__``, e.g. "dtype('int32')"."""
    inner = _construction_repr(dtype, include_align=False)
    # aligned structs carry the flag as a second constructor argument
    if dtype.isalignedstruct:
        inner += ", align=True"
    return "dtype({})".format(inner)
def _unpack_field(dtype, offset, title=None): | |
""" | |
Helper function to normalize the items in dtype.fields. | |
Call as: | |
dtype, offset, title = _unpack_field(*dtype.fields[name]) | |
""" | |
return dtype, offset, title | |
def _isunsized(dtype): | |
# PyDataType_ISUNSIZED | |
return dtype.itemsize == 0 | |
def _construction_repr(dtype, include_align=False, short=False):
    """
    Create a string repr of the dtype, excluding the 'dtype()' part
    surrounding the object.

    The result is the object passed as the first parameter to the dtype
    constructor (a string, list, or dict depending on the dtype), and if no
    additional constructor parameters are given, reproduces the exact
    memory layout.

    Parameters
    ----------
    include_align : bool
        If True, include the 'align=True' key inside a struct construction
        dict when needed, giving a proper repr string without the 'dtype()'
        part around it.  If False, 'align=True' (and the sticky
        NPY_ALIGNED_STRUCT flag) is not preserved, because the 'align' flag
        is not part of the first dtype constructor parameter; this mode is
        intended for a full 'repr' where 'align=True' is supplied as the
        second parameter.
    short : bool
        If True, produce a shorter repr using 'kind' and 'itemsize' instead
        of the longer type name.
    """
    # dispatch on the three structural categories of dtype
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=include_align)
    if dtype.subdtype:
        return _subarray_str(dtype)
    return _scalar_str(dtype, short=short)
def _scalar_str(dtype, short):
    """
    Return the construction repr for a scalar (non-struct, non-subarray)
    dtype, e.g. "'<f8'" or "'float64'".

    Parameters
    ----------
    short : bool
        If True, prefer the compact kind+itemsize form over the long name.

    Raises
    ------
    RuntimeError
        If the dtype's type number is not recognized.
    """
    byteorder = _byte_order_str(dtype)

    if dtype.type == np.bool_:
        if short:
            return "'?'"
        else:
            return "'bool'"

    elif dtype.type == np.object_:
        # The object reference may be different sizes on different
        # platforms, so it should never include the itemsize here.
        return "'O'"

    elif dtype.type == np.string_:
        if _isunsized(dtype):
            return "'S'"
        else:
            return "'S%d'" % dtype.itemsize

    elif dtype.type == np.unicode_:
        if _isunsized(dtype):
            return "'%sU'" % byteorder
        else:
            # itemsize is always a multiple of 4 (UCS4); use floor division
            # so %d receives an int — under `from __future__ import
            # division`, `/` yields a float and relied on %d's coercion.
            return "'%sU%d'" % (byteorder, dtype.itemsize // 4)

    # unlike the other types, subclasses of void are preserved - but
    # historically the repr does not actually reveal the subclass
    elif issubclass(dtype.type, np.void):
        if _isunsized(dtype):
            return "'V'"
        else:
            return "'V%d'" % dtype.itemsize

    elif dtype.type == np.datetime64:
        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))

    elif dtype.type == np.timedelta64:
        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))

    elif np.issubdtype(dtype, np.number):
        # Short repr with endianness, like '<f8'
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
        # Longer repr, like 'float64'
        else:
            return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)

    elif dtype.isbuiltin == 2:
        # user-registered dtypes fall back to the type's own name
        return dtype.type.__name__

    else:
        raise RuntimeError(
            "Internal error: NumPy dtype unrecognized type number")
def _byte_order_str(dtype): | |
""" Normalize byteorder to '<' or '>' """ | |
# hack to obtain the native and swapped byte order characters | |
swapped = np.dtype(int).newbyteorder('s') | |
native = swapped.newbyteorder('s') | |
byteorder = dtype.byteorder | |
if byteorder == '=': | |
return native.byteorder | |
if byteorder == 's': | |
# TODO: this path can never be reached | |
return swapped.byteorder | |
elif byteorder == '|': | |
return '' | |
else: | |
return byteorder | |
def _datetime_metadata_str(dtype): | |
# TODO: this duplicates the C append_metastr_to_string | |
unit, count = np.datetime_data(dtype) | |
if unit == 'generic': | |
return '' | |
elif count == 1: | |
return '[{}]'.format(unit) | |
else: | |
return '[{}{}]'.format(count, unit) | |
def _struct_dict_str(dtype, includealignedflag):
    """
    Return the dict-form construction repr of a structured dtype, e.g.
    "{'names':[...], 'formats':[...], 'offsets':[...], 'itemsize':N}".

    Parameters
    ----------
    includealignedflag : bool
        If True and the dtype is an aligned struct, append "'aligned':True"
        to the dict.
    """
    # unpack the fields dictionary into ls
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)
    # Build up a string to make the dictionary
    # First, the names
    ret = "{'names':["
    ret += ",".join(repr(name) for name in names)
    # Second, the formats
    ret += "], 'formats':["
    ret += ",".join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
    # Third, the offsets
    ret += "], 'offsets':["
    ret += ",".join("%d" % offset for offset in offsets)
    # Fourth, the titles — only emitted when at least one field has one
    if any(title is not None for title in titles):
        ret += "], 'titles':["
        ret += ",".join(repr(title) for title in titles)
    # Fifth, the itemsize
    ret += "], 'itemsize':%d" % dtype.itemsize
    if (includealignedflag and dtype.isalignedstruct):
        # Finally, the aligned flag
        ret += ", 'aligned':True}"
    else:
        ret += "}"
    return ret
def _is_packed(dtype):
    """
    Checks whether the structured data type in 'dtype'
    has a simple layout, where all the fields are in order,
    and follow each other with no alignment padding.
    When this returns true, the dtype can be reconstructed
    from a list of the field names and dtypes with no additional
    dtype parameters.
    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    # walk the fields in declaration order; any gap means padding
    total_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
        if fld_offset != total_offset:
            return False
        total_offset += fld_dtype.itemsize
    # trailing padding also disqualifies the packed layout
    if total_offset != dtype.itemsize:
        return False
    return True
def _struct_list_str(dtype):
    """Return the list-of-tuples construction repr of a packed structured
    dtype, e.g. "[('a', '<i4'), ('b', '<f8')]"."""
    entries = []
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        # titled fields are shown as ((title, name), ...)
        if title is not None:
            head = "({!r}, {!r}), ".format(title, name)
        else:
            head = "{!r}, ".format(name)

        # Special case subarray handling here
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            tail = "{}, {}".format(
                _construction_repr(base, short=True), shape)
        else:
            tail = _construction_repr(fld_dtype, short=True)

        entries.append("(" + head + tail + ")")

    return "[" + ", ".join(entries) + "]"
def _struct_str(dtype, include_align):
    """Return the construction repr of a structured dtype, choosing between
    the list form and the dict form."""
    # The list str representation can't include the 'align=' flag, so when
    # that flag is requested and set — or the layout has padding — fall
    # back to the dict representation.
    needs_dict = (include_align and dtype.isalignedstruct) or not _is_packed(dtype)
    if needs_dict:
        sub = _struct_dict_str(dtype, include_align)
    else:
        sub = _struct_list_str(dtype)

    # the default type is void; anything else is shown explicitly
    if dtype.type == np.void:
        return sub
    return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
def _subarray_str(dtype):
    """Return the "(base, shape)" construction repr of a subarray dtype."""
    base, shape = dtype.subdtype
    return "({}, {})".format(_construction_repr(base, short=True), shape)
def _name_get(dtype):
    """Implements the getter for the ``dtype.name`` property."""
    # user dtypes don't promise to do anything special with their name
    if dtype.isbuiltin == 2:
        return dtype.type.__name__
    # builtin classes are documented as returning a "bit name";
    # drop the trailing underscore of bool_, str_, etc.
    name = dtype.type.__name__
    if name.endswith('_'):
        name = name[:-1]
    if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
        # sized flexible types (str, unicode, void) carry their bit count
        name += str(dtype.itemsize * 8)
    elif dtype.type in (np.datetime64, np.timedelta64):
        # datetimes/timedeltas carry their unit metadata
        name += _datetime_metadata_str(dtype)
    return name
""" | |
Conversion from ctypes to dtype. | |
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
something like:: | |
def dtype_from_ctypes_type(t): | |
# needed to ensure that the shape of `t` is within memoryview.format | |
class DummyStruct(ctypes.Structure): | |
_fields_ = [('a', t)] | |
# empty to avoid memory allocation | |
ctype_0 = (DummyStruct * 0)() | |
mv = memoryview(ctype_0) | |
# convert the struct, and slice back out the field | |
return _dtype_from_pep3118(mv.format)['a'] | |
Unfortunately, this fails because: | |
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782) | |
* PEP3118 cannot represent unions, but both numpy and ctypes can | |
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780) | |
""" | |
import _ctypes | |
import ctypes | |
import numpy as np | |
def _from_ctypes_array(t):
    """Convert a ctypes array type into an equivalent subarray dtype."""
    element = dtype_from_ctypes_type(t._type_)
    return np.dtype((element, (t._length_,)))
def _from_ctypes_structure(t):
    """
    Convert a ctypes Structure into an equivalent structured dtype.

    Bitfields (3-tuples in ``_fields_``) have no dtype equivalent and are
    rejected.  When the struct declares ``_pack_`` the field offsets are
    computed here by hand; otherwise the aligned layout is delegated to
    ``np.dtype(fields, align=True)``.

    Raises
    ------
    TypeError
        If any field is a bitfield.
    """
    for item in t._fields_:
        if len(item) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")
    if hasattr(t, "_pack_"):
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent for some types.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # round the running offset up to the effective packing boundary
            current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)
        # itemsize comes from ctypes so any trailing padding is preserved
        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))
        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
def _from_ctypes_scalar(t): | |
""" | |
Return the dtype type with endianness included if it's the case | |
""" | |
if getattr(t, '__ctype_be__', None) is t: | |
return np.dtype('>' + t._type_) | |
elif getattr(t, '__ctype_le__', None) is t: | |
return np.dtype('<' + t._type_) | |
else: | |
return np.dtype(t._type_) | |
def _from_ctypes_union(t):
    """Convert a ctypes Union into a dtype whose fields all overlap at 0."""
    names = []
    formats = []
    for fname, ftyp in t._fields_:
        names.append(fname)
        formats.append(dtype_from_ctypes_type(ftyp))
    return np.dtype(dict(
        formats=formats,
        offsets=[0] * len(names),  # union members all start at offset 0
        names=names,
        itemsize=ctypes.sizeof(t)))
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type

    Raises TypeError for pointer types (no dtype equivalent exists) and
    NotImplementedError for anything unrecognized.
    """
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    # simple ctypes scalars carry their type code in the _type_ attribute
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        "Unknown ctypes type {}".format(t.__name__))
""" | |
A place for internal code | |
Some things are more easily handled in Python.
""" | |
from __future__ import division, absolute_import, print_function | |
import re | |
import sys | |
from numpy.compat import unicode | |
from numpy.core.overrides import set_module | |
from .multiarray import dtype, array, ndarray | |
try: | |
import ctypes | |
except ImportError: | |
ctypes = None | |
if (sys.byteorder == 'little'): | |
_nbo = b'<' | |
else: | |
_nbo = b'>' | |
def _makenames_list(adict, align): | |
allfields = [] | |
fnames = list(adict.keys()) | |
for fname in fnames: | |
obj = adict[fname] | |
n = len(obj) | |
if not isinstance(obj, tuple) or n not in [2, 3]: | |
raise ValueError("entry not a 2- or 3- tuple") | |
if (n > 2) and (obj[2] == fname): | |
continue | |
num = int(obj[1]) | |
if (num < 0): | |
raise ValueError("invalid offset.") | |
format = dtype(obj[0], align=align) | |
if (n > 2): | |
title = obj[2] | |
else: | |
title = None | |
allfields.append((fname, format, num, title)) | |
# sort by offsets | |
allfields.sort(key=lambda x: x[2]) | |
names = [x[0] for x in allfields] | |
formats = [x[1] for x in allfields] | |
offsets = [x[2] for x in allfields] | |
titles = [x[3] for x in allfields] | |
return names, formats, offsets, titles | |
# Called in PyArray_DescrConverter function when | |
# a dictionary without "names" and "formats" | |
# fields is used as a data-type descriptor. | |
def _usefields(adict, align): | |
try: | |
names = adict[-1] | |
except KeyError: | |
names = None | |
if names is None: | |
names, formats, offsets, titles = _makenames_list(adict, align) | |
else: | |
formats = [] | |
offsets = [] | |
titles = [] | |
for name in names: | |
res = adict[name] | |
formats.append(res[0]) | |
offsets.append(res[1]) | |
if (len(res) > 2): | |
titles.append(res[2]) | |
else: | |
titles.append(None) | |
return dtype({"names": names, | |
"formats": formats, | |
"offsets": offsets, | |
"titles": titles}, align) | |
# construct an array_protocol descriptor list | |
# from the fields attribute of a descriptor | |
# This calls itself recursively but should eventually hit | |
# a descriptor that has no fields and then return | |
# a simple typestring | |
def _array_descr(descriptor):
    """
    Build an array-protocol descriptor list from a dtype's fields,
    recursing through nested fields and subarrays until it bottoms out
    at simple typestrings.  Padding between fields is emitted as unnamed
    void entries ('', '|V%d'); overlapping or out-of-order fields raise.
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            # plain scalar dtype: the typestring, with metadata if any
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            # subarray: (base description, shape)
            return (_array_descr(subdtype[0]), subdtype[1])
    names = descriptor.names
    # each entry is (field dtype, offset[, title]) + (name,)
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            # gap before this field: represent it as unnamed void padding
            num = field[1] - offset
            result.append(('', '|V%d' % num))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            # titled field: the label is the (title, name) pair
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)
    if descriptor.itemsize > offset:
        # trailing padding up to the dtype's declared itemsize
        num = descriptor.itemsize - offset
        result.append(('', '|V%d' % num))
    return result
# Build a new array from the information in a pickle. | |
# Note that the name numpy.core._internal._reconstruct is embedded in | |
# pickles of ndarrays made with NumPy before release 1.0 | |
# so don't remove the name here, or you'll | |
# break backward compatibility. | |
def _reconstruct(subtype, shape, dtype):
    """Construct an empty array. Used by ndarray pickles made before NumPy 1.0."""
    return ndarray.__new__(subtype, shape, dtype)
# format_re was originally from numarray by J. Todd Miller
# Matches one item of a comma-separated format string: an optional byte
# order, an optional repeat count or shape, a second optional byte-order
# position, and finally the type specifier itself.
format_re = re.compile(br'(?P<order1>[<>|=]?)'
                       br'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
                       br'(?P<order2>[<>|=]?)'
                       br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(br'\s*,\s*')
space_re = re.compile(br'\s+$')
# astr is a string (perhaps comma separated)
# '=' means native order, so normalize it to the _nbo character
_convorder = {b'=': _nbo}
def _commastring(astr):
    """
    Parse a comma-separated format string (bytes) into a list of items,
    each either a type string or a (type string, repeat shape) tuple.

    Raises
    ------
    ValueError
        If an entry is not recognized, or the byte-order characters
        before and after the repeat count disagree.
    """
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None when nothing matched at this position
            raise ValueError('format number %d of "%s" is not recognized' %
                             (len(result)+1, astr))
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()
        # the byte order may appear either before or after the repeat
        # count; both positions are accepted but must agree
        if order2 == b'':
            order = order1
        elif order1 == b'':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1
        # native/no-op byte orders are dropped from the type string
        if order in [b'|', b'=', _nbo]:
            order = b''
        dtype = order + dtype
        if (repeats == b''):
            newitem = dtype
        else:
            # repeats is e.g. b'3' or b'(2,3)'; eval yields the int/tuple.
            # NOTE(review): eval assumes the format string is trusted —
            # format_re restricts it to digits, commas and parentheses.
            newitem = (dtype, eval(repeats))
        result.append(newitem)
    return result
class dummy_ctype(object):
    """
    Minimal stand-in for a ctypes scalar type, used when the ctypes
    module is unavailable.  Mimics just enough of the interface:
    multiplication (array-type creation) is a no-op and calling wraps
    the arguments in the underlying class.
    """

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        # (ctype * n) would build an array type; the dummy stays itself
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls
def _getintp_ctype(): | |
val = _getintp_ctype.cache | |
if val is not None: | |
return val | |
if ctypes is None: | |
import numpy as np | |
val = dummy_ctype(np.intp) | |
else: | |
char = dtype('p').char | |
if (char == 'i'): | |
val = ctypes.c_int | |
elif char == 'l': | |
val = ctypes.c_long | |
elif char == 'q': | |
val = ctypes.c_longlong | |
else: | |
val = ctypes.c_long | |
_getintp_ctype.cache = val | |
return val | |
_getintp_ctype.cache = None | |
# Used for .ctypes attribute of ndarray | |
class _missing_ctypes(object): | |
def cast(self, num, obj): | |
return num.value | |
class c_void_p(object): | |
def __init__(self, ptr): | |
self.value = ptr | |
class _unsafe_first_element_pointer(object): | |
""" | |
Helper to allow viewing an array as a ctypes pointer to the first element | |
This avoids: | |
* dealing with strides | |
* `.view` rejecting object-containing arrays | |
* `memoryview` not supporting overlapping fields | |
""" | |
def __init__(self, arr): | |
self.base = arr | |
@property | |
def __array_interface__(self): | |
i = dict( | |
shape=(), | |
typestr='|V0', | |
data=(self.base.__array_interface__['data'][0], False), | |
strides=(), | |
version=3, | |
) | |
return i | |
def _get_void_ptr(arr):
    """
    Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
    """
    import numpy as np
    # convert to a 0d array that has a data pointer referring to the start
    # of arr. This holds a reference to arr.
    simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
    # create a `char[0]` using the same memory.
    c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
    # finally cast to void*
    return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
class _ctypes(object):
    """
    Implementation backing the ``ndarray.ctypes`` attribute: convenient
    access to the array's data pointer, shape and strides as ctypes
    objects.  When the ctypes module is unavailable, pointer-like
    stand-ins from `_missing_ctypes` are used instead.
    """
    def __init__(self, array, ptr=None):
        self._arr = array
        if ctypes:
            self._ctypes = ctypes
            # get a void pointer to the buffer, which keeps the array alive
            self._data = _get_void_ptr(array)
            assert self._data.value == ptr
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array
        # 0-d arrays have no shape/strides to expose
        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False
    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
        pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
        The returned pointer will keep a reference to the array.
        """
        return self._ctypes.cast(self._data, obj)
    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)
    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)
    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in correct
        byte-order. The memory area may not even be writeable. The array
        flags and data-type of this array should be respected when passing this
        attribute to arbitrary C-code to avoid trouble that can include Python
        crashing. User Beware! The value of this attribute is exactly the same
        as ``self._array_interface_['data'][0]``.
        Note that unlike `data_as`, a reference will not be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value
    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform. This base-type could be `ctypes.c_int`, `ctypes.c_long`, or
        `ctypes.c_longlong` depending on the platform.
        The c_intp type is defined accordingly in `numpy.ctypeslib`.
        The ctypes array contains the shape of the underlying array.
        """
        return self.shape_as(_getintp_ctype())
    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes array
        contains the strides information from the underlying array. This strides
        information is important for showing how many bytes must be jumped to
        get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())
    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method
        Enables `c_func(some_array.ctypes)`
        """
        return self._data
    # kept for compatibility: the old get_* method names alias the
    # property getters directly
    get_data = data.fget
    get_shape = shape.fget
    get_strides = strides.fget
    get_as_parameter = _as_parameter_.fget
def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple, with the
    order indicated
    """
    remaining = list(datatype.names)
    if isinstance(order, (str, unicode)):
        order = [order]
    if not isinstance(order, (list, tuple)):
        raise ValueError("unsupported order value: %s" % (order,))
    seen = set()
    for name in order:
        try:
            remaining.remove(name)
        except ValueError:
            # distinguish a repeated request from a genuinely unknown name
            if name in seen:
                raise ValueError("duplicate field name: %s" % (name,))
            raise ValueError("unknown field name: %s" % (name,))
        seen.add(name)
    return tuple(list(order) + remaining)
def _copy_fields(ary): | |
"""Return copy of structured array with padding between fields removed. | |
Parameters | |
---------- | |
ary : ndarray | |
Structured array from which to remove padding bytes | |
Returns | |
------- | |
ary_copy : ndarray | |
Copy of ary with padding bytes removed | |
""" | |
dt = ary.dtype | |
copy_dtype = {'names': dt.names, | |
'formats': [dt.fields[name][0] for name in dt.names]} | |
return array(ary, dtype=copy_dtype, copy=True) | |
def _getfield_is_safe(oldtype, newtype, offset): | |
""" Checks safety of getfield for object arrays. | |
As in _view_is_safe, we need to check that memory containing objects is not | |
reinterpreted as a non-object datatype and vice versa. | |
Parameters | |
---------- | |
oldtype : data-type | |
Data type of the original ndarray. | |
newtype : data-type | |
Data type of the field being accessed by ndarray.getfield | |
offset : int | |
Offset of the field being accessed by ndarray.getfield | |
Raises | |
------ | |
TypeError | |
If the field access is invalid | |
""" | |
if newtype.hasobject or oldtype.hasobject: | |
if offset == 0 and newtype == oldtype: | |
return | |
if oldtype.names: | |
for name in oldtype.names: | |
if (oldtype.fields[name][1] == offset and | |
oldtype.fields[name][0] == newtype): | |
return | |
raise TypeError("Cannot get/set field of an object array") | |
return | |
def _view_is_safe(oldtype, newtype): | |
""" Checks safety of a view involving object arrays, for example when | |
doing:: | |
np.zeros(10, dtype=oldtype).view(newtype) | |
Parameters | |
---------- | |
oldtype : data-type | |
Data type of original ndarray | |
newtype : data-type | |
Data type of the view | |
Raises | |
------ | |
TypeError | |
If the new type is incompatible with the old type. | |
""" | |
# if the types are equivalent, there is no problem. | |
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) | |
if oldtype == newtype: | |
return | |
if newtype.hasobject or oldtype.hasobject: | |
raise TypeError("Cannot change data-type for object array.") | |
return | |
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

# native-size mode ('@' or '^'): PEP 3118 codes map to platform-sized
# numpy type characters
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
    }
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# standard-size mode ('<', '>', '=', '!'): codes map to fixed-width numpy
# types; note 'g' (long double) has no standard-size equivalent
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
    }
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# codes that PEP 3118 defines but numpy cannot represent; used to raise a
# descriptive NotImplementedError
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
    }
class _Stream(object): | |
def __init__(self, s): | |
self.s = s | |
self.byteorder = '@' | |
def advance(self, n): | |
res = self.s[:n] | |
self.s = self.s[n:] | |
return res | |
def consume(self, c): | |
if self.s[:len(c)] == c: | |
self.advance(len(c)) | |
return True | |
return False | |
def consume_until(self, c): | |
if callable(c): | |
i = 0 | |
while i < len(self.s) and not c(self.s[i]): | |
i = i + 1 | |
return self.advance(i) | |
else: | |
i = self.s.index(c) | |
res = self.advance(i) | |
self.advance(len(c)) | |
return res | |
@property | |
def next(self): | |
return self.s[0] | |
def __bool__(self): | |
return bool(self.s) | |
__nonzero__ = __bool__ | |
def _dtype_from_pep3118(spec):
    """Construct a dtype from a PEP 3118 format string."""
    result, _ = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return result
def __dtype_from_pep3118(stream, is_subdtype):
    """
    Parse a PEP 3118 format from `stream`, returning
    (dtype, common_alignment).  Recurses for 'T{...}' sub-structures;
    `is_subdtype` is False only at the top level, where a trivial
    single unnamed field may be unwrapped to a plain dtype.
    """
    # accumulate fields into a dict-form dtype spec
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False
    # Parse spec
    while stream:
        value = None
        # End of structure, bail out to upper level
        if stream.consume('}'):
            break
        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))
        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                # '!' is network order, i.e. big-endian
                byteorder = '>'
            stream.byteorder = byteorder
        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars
        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1
        # Data types
        is_padding = False
        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            # 'Z' prefixes complex types, making a two-character code
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)
            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # for flexible types the repeat count is the item length,
                # not a subarray count
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align
            offset += start_padding
            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding
        # Update common alignment
        common_alignment = _lcm(align, common_alignment)
        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))
        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))
        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None
        if not (is_padding and name is None):
            # anonymous padding is dropped; everything else becomes a field
            if name is not None and name in field_spec['names']:
                raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
                                   % name)
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)
        offset += value.itemsize
        offset += extra_offset
    field_spec['itemsize'] = offset
    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment
    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)
    # Finished
    return ret, common_alignment
def _fix_names(field_spec): | |
""" Replace names which are None with the next unused f%d name """ | |
names = field_spec['names'] | |
for i, name in enumerate(names): | |
if name is not None: | |
continue | |
j = 0 | |
while True: | |
name = 'f{}'.format(j) | |
if name not in names: | |
break | |
j = j + 1 | |
names[i] = name | |
def _add_trailing_padding(value, padding): | |
"""Inject the specified number of padding bytes at the end of a dtype""" | |
if value.fields is None: | |
field_spec = dict( | |
names=['f0'], | |
formats=[value], | |
offsets=[0], | |
itemsize=value.itemsize | |
) | |
else: | |
fields = value.fields | |
names = value.names | |
field_spec = dict( | |
names=names, | |
formats=[fields[name][0] for name in names], | |
offsets=[fields[name][1] for name in names], | |
itemsize=value.itemsize | |
) | |
field_spec['itemsize'] += padding | |
return dtype(field_spec) | |
def _prod(a): | |
p = 1 | |
for x in a: | |
p *= x | |
return p | |
def _gcd(a, b): | |
"""Calculate the greatest common divisor of a and b""" | |
while b: | |
a, b = b, a % b | |
return a | |
def _lcm(a, b): | |
return a // _gcd(a, b) * b | |
# Exception used in shares_memory()
@set_module('numpy')
class TooHardError(RuntimeError):
    """Raised when a computation (e.g. in shares_memory) exceeds the allowed effort."""
    pass
@set_module('numpy')
class AxisError(ValueError, IndexError):
    """ Axis supplied was invalid. """

    def __init__(self, axis, ndim=None, msg_prefix=None):
        if ndim is None and msg_prefix is None:
            # single-argument form: `axis` is already the complete message
            msg = axis
        else:
            # format the message here, to save work in the C code
            msg = ("axis {} is out of bounds for array of dimension {}"
                   .format(axis, ndim))
            if msg_prefix is not None:
                msg = "{}: {}".format(msg_prefix, msg)
        super(AxisError, self).__init__(msg)
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    arg_reprs = ['{!r}'.format(arg) for arg in inputs]
    arg_reprs += ['{}={!r}'.format(k, v) for k, v in kwargs.items()]
    args_string = ', '.join(arg_reprs)
    # the relevant operands are the inputs plus any explicit outputs
    relevant = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in relevant)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))
def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    qualified_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    return ("no implementation found for '{}' on types that implement "
            '__array_function__: {}'.format(qualified_name, list(types)))
def _ufunc_doc_signature_formatter(ufunc): | |
""" | |
Builds a signature string which resembles PEP 457 | |
This is used to construct the first line of the docstring | |
""" | |
# input arguments are simple | |
if ufunc.nin == 1: | |
in_args = 'x' | |
else: | |
in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin)) | |
# output arguments are both keyword or positional | |
if ufunc.nout == 0: | |
out_args = ', /, out=()' | |
elif ufunc.nout == 1: | |
out_args = ', /, out=None' | |
else: | |
out_args = '[, {positional}], / [, out={default}]'.format( | |
positional=', '.join( | |
'out{}'.format(i+1) for i in range(ufunc.nout)), | |
default=repr((None,)*ufunc.nout) | |
) | |
# keyword only args depend on whether this is a gufunc | |
kwargs = ( | |
", casting='same_kind'" | |
", order='K'" | |
", dtype=None" | |
", subok=True" | |
"[, signature" | |
", extobj]" | |
) | |
if ufunc.signature is None: | |
kwargs = ", where=True" + kwargs | |
# join all the parts together | |
return '{name}({in_args}{out_args}, *{kwargs})'.format( | |
name=ufunc.__name__, | |
in_args=in_args, | |
out_args=out_args, | |
kwargs=kwargs | |
) | |
def npy_ctypes_check(cls):
    """
    Determine whether `cls` comes from ctypes, in order to work around a
    bug in the buffer protocol for those objects (bpo-10746).
    """
    try:
        # ctypes classes are new-style, so they have an __mro__; the entry
        # just above `object` is the ctypes base class. This probably fails
        # for ctypes classes with multiple inheritance.
        base = cls.__mro__[-2]
        # right now, those base classes are part of the _ctypes module
        return 'ctypes' in base.__module__
    except Exception:
        return False
class recursive(object):
    '''
    A decorator class for recursive nested functions.

    A naive recursive nested function holds a reference to itself::

        def outer(*args):
            def stringify_leaky(arg0, *arg1):
                if len(arg1) > 0:
                    return stringify_leaky(*arg1)  # <- HERE
                return str(arg0)
            stringify_leaky(*args)

    which creates a reference cycle that is hard for the garbage
    collector to resolve.  This decorator breaks the cycle by passing
    the wrapper in as the first argument `self`::

        def outer(*args):
            @recursive
            def stringify(self, arg0, *arg1):
                if len(arg1) > 0:
                    return self(*arg1)
                return str(arg0)
            stringify(*args)
    '''

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kwargs):
        # hand the wrapper itself to the wrapped function so it can
        # recurse without referencing its own name
        return self.func(self, *args, **kwargs)
""" | |
Array methods which are called by both the C-code for the method | |
and the Python code for the NumPy-namespace function | |
""" | |
from __future__ import division, absolute_import, print_function | |
import warnings | |
from numpy.core import multiarray as mu | |
from numpy.core import umath as um | |
from numpy.core.numeric import asanyarray | |
from numpy.core import numerictypes as nt | |
from numpy._globals import _NoValue | |
# save those O(100) nanoseconds!
# (bind the ufunc reduce methods once at import time)
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue):
    # max reduction via um.maximum.reduce
    return umr_maximum(a, axis, None, out, keepdims, initial)
def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue):
    # min reduction via um.minimum.reduce
    return umr_minimum(a, axis, None, out, keepdims, initial)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue):
    # sum reduction via um.add.reduce
    return umr_sum(a, axis, dtype, out, keepdims, initial)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue):
    # product reduction via um.multiply.reduce
    return umr_prod(a, axis, dtype, out, keepdims, initial)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
    # logical-or reduction via um.logical_or.reduce
    return umr_any(a, axis, dtype, out, keepdims)
def _all(a, axis=None, dtype=None, out=None, keepdims=False):
    # logical-and reduction via um.logical_and.reduce
    return umr_all(a, axis, dtype, out, keepdims)
def _count_reduce_items(arr, axis): | |
if axis is None: | |
axis = tuple(range(arr.ndim)) | |
if not isinstance(axis, tuple): | |
axis = (axis,) | |
items = 1 | |
for ax in axis: | |
items *= arr.shape[ax] | |
return items | |
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
    """
    Arithmetic mean: add.reduce over `axis` divided by the element count.
    Integer/bool input is accumulated in float64; float16 is accumulated
    in float32 and cast back to float16 at the end.
    """
    arr = asanyarray(a)
    is_float16_result = False
    rcount = _count_reduce_items(arr, axis)
    # Make this warning show up first
    if rcount == 0:
        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)
    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None:
        if issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
            dtype = mu.dtype('f8')
        elif issubclass(arr.dtype.type, nt.float16):
            # accumulate in f4 for accuracy, remember to cast back to f2
            dtype = mu.dtype('f4')
            is_float16_result = True
    ret = umr_sum(arr, axis, dtype, out, keepdims)
    if isinstance(ret, mu.ndarray):
        # in-place unsafe divide keeps the accumulation dtype
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
        if is_float16_result and out is None:
            ret = arr.dtype.type(ret)
    elif hasattr(ret, 'dtype'):
        # 0-d result: produce a scalar of the appropriate type
        if is_float16_result:
            ret = arr.dtype.type(ret / rcount)
        else:
            ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount
    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Variance backend: mean of squared deviations, with `ddof` correction.

    Parameters mirror ``ndarray.var``; ``ddof`` is subtracted from the
    element count before the final division (clamped at zero).
    """
    arr = asanyarray(a)

    rcount = _count_reduce_items(arr, axis)
    # Make this warning show up on top.
    if ddof >= rcount:
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
                      stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
        dtype = mu.dtype('f8')

    # Compute the mean.
    # Note that if dtype is not of inexact type then arraymean will
    # not be either.
    arrmean = umr_sum(arr, axis, dtype, keepdims=True)
    if isinstance(arrmean, mu.ndarray):
        arrmean = um.true_divide(
                arrmean, rcount, out=arrmean, casting='unsafe', subok=False)
    else:
        arrmean = arrmean.dtype.type(arrmean / rcount)

    # Compute sum of squared deviations from mean
    # Note that x may not be inexact and that we need it to be an array,
    # not a scalar.
    x = asanyarray(arr - arrmean)
    if issubclass(arr.dtype.type, nt.complexfloating):
        # |z|**2 == z * conj(z); keep only the real magnitude
        x = um.multiply(x, um.conjugate(x), out=x).real
    else:
        x = um.multiply(x, x, out=x)
    ret = umr_sum(x, axis, dtype, out, keepdims)

    # Compute degrees of freedom and make sure it is not negative.
    # Two-argument max() — no need to allocate a throwaway list here.
    rcount = max(rcount - ddof, 0)

    # divide by degrees of freedom
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount

    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
    """Standard deviation backend: square root of `_var` with the same args."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims)

    if isinstance(variance, mu.ndarray):
        # take the root in place to reuse the buffer
        return um.sqrt(variance, out=variance)
    if hasattr(variance, 'dtype'):
        # numpy scalar: preserve its dtype through the sqrt
        return variance.dtype.type(um.sqrt(variance))
    # plain Python scalar
    return um.sqrt(variance)
def _ptp(a, axis=None, out=None, keepdims=False):
    """Peak-to-peak backend: maximum minus minimum along `axis`."""
    peak = umr_maximum(a, axis, None, out, keepdims)
    trough = umr_minimum(a, axis, None, None, keepdims)
    # subtract writes into `out` when one was supplied
    return um.subtract(peak, trough, out)
""" | |
String-handling utilities to avoid locale-dependence. | |
Used primarily to generate type name aliases. | |
""" | |
# "import string" is costly to import! | |
# Construct the translation tables directly | |
# "A" = chr(65), "a" = chr(97) | |
_all_chars = [chr(_m) for _m in range(256)] | |
_ascii_upper = _all_chars[65:65+26] | |
_ascii_lower = _all_chars[97:97+26] | |
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:]) | |
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:]) | |
def english_lower(s):
    """Lower-case an ASCII string using English rules only.

    Replacement for ``str.lower()`` whose result never depends on the
    active locale; e.g. in a "tr" locale, "I".lower() != "i" because
    Turkish has dotted and dotless variants of the Latin "I".

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_lower
    >>> english_lower('ABCdef0123_')
    'abcdef0123_'
    >>> english_lower('')
    ''
    """
    return s.translate(LOWER_TABLE)
def english_upper(s):
    """Upper-case an ASCII string using English rules only.

    Replacement for ``str.upper()`` whose result never depends on the
    active locale; e.g. in a "tr" locale, "i".upper() != "I" because
    Turkish has dotted and dotless variants of the Latin "I".

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_upper
    >>> english_upper('abcDEF0123_')
    'ABCDEF0123_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
def english_capitalize(s):
    """Upper-case the first character of an ASCII string, locale-independent.

    Replacement for ``str.capitalize()`` built on `english_upper`.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    # s[:1] is "" for an empty string, so no explicit emptiness guard needed
    return english_upper(s[:1]) + s[1:]
""" | |
Due to compatibility, numpy has a very large number of different naming | |
conventions for the scalar types (those subclassing from `numpy.generic`). | |
This file produces a convoluted set of dictionaries mapping names to types, | |
and sometimes other mappings too. | |
.. data:: allTypes | |
A dictionary of names to types that will be exposed as attributes through | |
``np.core.numerictypes.*`` | |
.. data:: sctypeDict | |
Similar to `allTypes`, but maps a broader set of aliases to their types. | |
.. data:: sctypeNA | |
NumArray-compatible names for the scalar types. Contains not only | |
``name: type`` mappings, but ``char: name`` mappings too. | |
.. deprecated:: 1.16 | |
.. data:: sctypes | |
A dictionary keyed by a "type group" string, providing a list of types | |
under that group. | |
""" | |
import warnings | |
import sys | |
from numpy.compat import unicode | |
from numpy._globals import VisibleDeprecationWarning | |
from numpy.core._string_helpers import english_lower, english_capitalize | |
from numpy.core.multiarray import typeinfo, dtype | |
from numpy.core._dtype import _kind_name | |
# name/char/typenum -> scalar type; filled in by the registration helpers below
sctypeDict = {}      # Contains all leaf-node scalar types with aliases
class TypeNADict(dict):
    """dict subclass that warns about its own deprecation on every read."""

    def __getitem__(self, key):
        # 2018-06-24, 1.16
        warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
                      'of numpy', VisibleDeprecationWarning, stacklevel=2)
        return super(TypeNADict, self).__getitem__(key)

    def get(self, key, default=None):
        # 2018-06-24, 1.16
        warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
                      'of numpy', VisibleDeprecationWarning, stacklevel=2)
        return super(TypeNADict, self).get(key, default)
sctypeNA = TypeNADict()  # Contains all leaf-node types -> numarray type equivalences
allTypes = {}            # Collect the types we will add to the module

# separate the actual type info from the abstract base classes
_abstract_types = {}
_concrete_typeinfo = {}
for k, v in typeinfo.items():
    # make all the keys lowercase too
    k = english_lower(k)
    if isinstance(v, type):
        # abstract base classes arrive as plain type objects
        _abstract_types[k] = v
    else:
        # concrete entries are typeinfo records (carry .char/.num/.bits/.type)
        _concrete_typeinfo[k] = v

# the set of concrete scalar type objects themselves
_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
def _bits_of(obj):
    """Return the bit width of scalar type `obj`.

    Falls back to ``dtype(obj).itemsize * 8`` for types not present in the
    concrete typeinfo table; raises for abstract types.
    """
    candidates = (v for v in _concrete_typeinfo.values() if v.type is obj)
    try:
        info = next(candidates)
    except StopIteration:
        if obj in _abstract_types.values():
            raise ValueError("Cannot count the bits of an abstract type")
        # some third-party type - make a best-guess
        return dtype(obj).itemsize * 8
    else:
        return info.bits
def bitname(obj):
    """Return a bit-width name for a given type object"""
    dt = dtype(obj)
    bits = _bits_of(obj)
    base = _kind_name(dt)
    char = dt.kind

    # object types carry no meaningful bit width
    if base == 'object':
        bits = 0

    if bits != 0:
        # char codes are sized in bytes, e.g. 'i' -> 'i4'
        char = "%s%d" % (char, bits // 8)

    return base, bits, char
def _add_types():
    """Register every typeinfo entry in allTypes and sctypeDict."""
    for type_name, info in _concrete_typeinfo.items():
        # concrete types are reachable by name, char code and type number
        allTypes[type_name] = info.type
        sctypeDict[type_name] = info.type
        sctypeDict[info.char] = info.type
        sctypeDict[info.num] = info.type

    for type_name, cls in _abstract_types.items():
        # abstract base classes only become module attributes
        allTypes[type_name] = cls
_add_types()
# This is the priority order used to assign the bit-sized NPY_INTxx names, which | |
# must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be | |
# consistent. | |
# If two C types have the same size, then the earliest one in this list is used | |
# as the sized name. | |
_int_ctypes = ['long', 'longlong', 'int', 'short', 'byte'] | |
_uint_ctypes = list('u' + t for t in _int_ctypes) | |
def _add_aliases():
    # Register bit-width aliases (e.g. 'float64') and numarray-style names
    # (e.g. 'Float64') for every concrete non-integer type. Iteration order
    # matters: later entries overwrite earlier ones for a shared alias.
    for name, info in _concrete_typeinfo.items():
        # these are handled by _add_integer_aliases
        if name in _int_ctypes or name in _uint_ctypes:
            continue

        # insert bit-width version for this class (if relevant)
        base, bit, char = bitname(info.type)

        myname = "%s%d" % (base, bit)

        # ensure that (c)longdouble does not overwrite the aliases assigned to
        # (c)double
        if name in ('longdouble', 'clongdouble') and myname in allTypes:
            continue

        base_capitalize = english_capitalize(base)
        if base == 'complex':
            # numarray sized complex names by component width, hence bit//2
            na_name = '%s%d' % (base_capitalize, bit//2)
        elif base == 'bool':
            # no bit suffix for bool
            na_name = base_capitalize
        else:
            na_name = "%s%d" % (base_capitalize, bit)

        allTypes[myname] = info.type

        # add mapping for both the bit name and the numarray name
        sctypeDict[myname] = info.type
        sctypeDict[na_name] = info.type

        # add forward, reverse, and string mapping to numarray
        sctypeNA[na_name] = info.type
        sctypeNA[info.type] = na_name
        sctypeNA[info.char] = na_name

        # sized char code (from bitname) also resolves to this type
        sctypeDict[char] = info.type
        sctypeNA[char] = na_name
_add_aliases()
def _add_integer_aliases():
    # Register sized integer aliases (int8/uint8, i1/u1, Int8/UInt8, ...)
    # following the priority order of _int_ctypes/_uint_ctypes above.
    seen_bits = set()
    for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
        i_info = _concrete_typeinfo[i_ctype]
        u_info = _concrete_typeinfo[u_ctype]
        bits = i_info.bits  # same for both

        for info, charname, intname, Intname in [
                (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
                (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
            if bits not in seen_bits:
                # sometimes two different types have the same number of bits
                # if so, the one iterated over first takes precedence
                allTypes[intname] = info.type
                sctypeDict[intname] = info.type
                sctypeDict[Intname] = info.type
                sctypeDict[charname] = info.type
                sctypeNA[Intname] = info.type
                sctypeNA[charname] = info.type
                sctypeNA[info.type] = Intname
                sctypeNA[info.char] = Intname

        # mark this width as claimed only after both signednesses are done
        seen_bits.add(bits)
_add_integer_aliases()
# We use these later
void = allTypes['void']  # referenced below when building `sctypes`

#
# Rework the Python names (so that float and complex and int are consistent
# with Python usage)
#
def _set_up_aliases():
    # Map Python-flavoured alias names onto the canonical scalar types, then
    # drop entries that would shadow builtins or stdlib module names.
    type_pairs = [('complex_', 'cdouble'),
                  ('int0', 'intp'),
                  ('uint0', 'uintp'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('singlecomplex', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('longcomplex', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('bytes_', 'string'),
                  ('string_', 'string'),
                  ('unicode_', 'unicode'),
                  ('object_', 'object')]
    if sys.version_info[0] >= 3:
        # on Python 3, str_ is the unicode type
        type_pairs.extend([('str_', 'unicode')])
    else:
        type_pairs.extend([('str_', 'string')])
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]

    # Remove aliases overriding python types and modules
    to_remove = ['ulong', 'object', 'int', 'float',
                 'complex', 'bool', 'string', 'datetime', 'timedelta']
    if sys.version_info[0] >= 3:
        to_remove.extend(['bytes', 'str'])
    else:
        to_remove.extend(['unicode', 'long'])

    for t in to_remove:
        try:
            # not every name is guaranteed to exist in both dicts
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            pass
_set_up_aliases()
# "type group" name -> list of member scalar types; the numeric groups are
# populated later by _set_array_types().
sctypes = dict(
    int=[],
    uint=[],
    float=[],
    complex=[],
    others=[bool, object, bytes, unicode, void],
)
def _add_array_type(typename, bits):
    """Append the ``<typename><bits>`` scalar type to ``sctypes[typename]``,
    silently skipping widths that do not exist on this platform."""
    # allTypes values are always type objects, so None means "absent"
    t = allTypes.get('%s%d' % (typename, bits))
    if t is not None:
        sctypes[typename].append(t)
def _set_array_types():
    # Fill the sctypes groups with every sized type that exists on this
    # platform, in increasing size order.
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8*bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8*bytes
        _add_array_type('float', bits)
        # a complex type is two floats wide
        _add_array_type('complex', 2*bits)
    # ensure the pointer-sized integer types ('p'/'P') are in the lists,
    # inserted so the lists stay sorted by item size
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
# Add additional strings to the sctypeDict
_toadd = ['int', 'float', 'complex', 'bool', 'object']
if sys.version_info[0] >= 3:
    _toadd.extend(['str', 'bytes', ('a', 'bytes_')])
else:
    _toadd.extend(['string', ('str', 'string_'), 'unicode', ('a', 'string_')])

for name in _toadd:
    if isinstance(name, tuple):
        # (alias, canonical-name) pair: alias maps to the named type
        sctypeDict[name[0]] = allTypes[name[1]]
    else:
        # bare entry: e.g. 'int' maps to the 'int_' scalar type
        sctypeDict[name] = allTypes['%s_' % name]
del _toadd, name
"""Array printing function | |
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $ | |
""" | |
from __future__ import division, absolute_import, print_function | |
__all__ = ["array2string", "array_str", "array_repr", "set_string_function", | |
"set_printoptions", "get_printoptions", "printoptions", | |
"format_float_positional", "format_float_scientific"] | |
__docformat__ = 'restructuredtext' | |
# | |
# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca> | |
# last revision: 1996-3-13 | |
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details) | |
# and by Perry Greenfield 2000-4-1 for numarray | |
# and by Travis Oliphant 2005-8-22 for numpy | |
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy | |
# scalars but for different purposes. scalartypes.c.src has str/reprs for when | |
# the scalar is printed on its own, while arrayprint.py has strs for when | |
# scalars are printed inside an ndarray. Only the latter strs are currently | |
# user-customizable. | |
import sys | |
import functools | |
import numbers | |
if sys.version_info[0] >= 3: | |
try: | |
from _thread import get_ident | |
except ImportError: | |
from _dummy_thread import get_ident | |
else: | |
try: | |
from thread import get_ident | |
except ImportError: | |
from dummy_thread import get_ident | |
import numpy as np | |
from . import numerictypes as _nt | |
from .umath import absolute, not_equal, isnan, isinf, isfinite, isnat | |
from . import multiarray | |
from .multiarray import (array, dragon4_positional, dragon4_scientific, | |
datetime_as_string, datetime_data, ndarray, | |
set_legacy_print_mode) | |
from .fromnumeric import ravel, any | |
from .numeric import concatenate, asarray, errstate | |
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_, | |
flexible) | |
from .overrides import array_function_dispatch, set_module | |
import warnings | |
import contextlib | |
# Module-global print options; read by the formatters and mutated in place
# by set_printoptions(). See set_printoptions for the meaning of each key.
_format_options = {
    'edgeitems': 3,  # repr N leading and trailing items of each dimension
    'threshold': 1000,  # total items > triggers array summarization
    'floatmode': 'maxprec',  # how `precision` is interpreted for floats
    'precision': 8,  # precision of floating point representations
    'suppress': False,  # suppress printing small floating values in exp format
    'linewidth': 75,  # characters per line before inserting line breaks
    'nanstr': 'nan',  # string shown for floating-point not-a-number
    'infstr': 'inf',  # string shown for floating-point infinity
    'sign': '-',  # '-' means: no sign character for positive values
    'formatter': None,  # optional dict of per-type formatting callables
    'legacy': False}  # '1.13' switches to numpy-1.13-compatible printing
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
                       linewidth=None, suppress=None, nanstr=None, infstr=None,
                       sign=None, formatter=None, floatmode=None, legacy=None):
    """ make a dictionary out of the non-None arguments, plus sanity checks """
    # NOTE: locals() captures exactly the keyword parameters above, so this
    # must remain the first statement of the function body.
    options = {k: v for k, v in locals().items() if v is not None}

    if suppress is not None:
        # normalize any truthy value to a real bool
        options['suppress'] = bool(suppress)

    modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
    if floatmode not in modes + [None]:
        raise ValueError("floatmode option must be one of " +
                         ", ".join('"{}"'.format(m) for m in modes))

    if sign not in [None, '-', '+', ' ']:
        raise ValueError("sign option must be one of ' ', '+', or '-'")

    # unrecognized legacy values only warn (forward compatibility), they
    # are still passed through in `options`
    if legacy not in [None, False, '1.13']:
        warnings.warn("legacy printing option can currently only be '1.13' or "
                      "`False`", stacklevel=3)

    if threshold is not None:
        # forbid the bad threshold arg suggested by stack overflow, gh-12351
        if not isinstance(threshold, numbers.Number) or np.isnan(threshold):
            raise ValueError("threshold must be numeric and non-NAN, try "
                             "sys.maxsize for untruncated representation")
    return options
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
                     linewidth=None, suppress=None, nanstr=None, infstr=None,
                     formatter=None, sign=None, floatmode=None, **kwarg):
    """
    Set printing options.

    These options determine the way floating point numbers, arrays and
    other NumPy objects are displayed.

    Parameters
    ----------
    precision : int or None, optional
        Number of digits of precision for floating point output (default 8).
        May be `None` if `floatmode` is not `fixed`, to print as many digits
        as necessary to uniquely specify the value.
    threshold : int, optional
        Total number of array elements which trigger summarization rather
        than full repr (default 1000).
    edgeitems : int, optional
        Number of array items in summary at beginning and end of each
        dimension (default 3).
    linewidth : int, optional
        Number of characters per line for the purpose of inserting line
        breaks (default 75).
    suppress : bool, optional
        If True, always print floating point numbers using fixed point
        notation; if False, scientific notation is used when the smallest
        absolute value is < 1e-4 or the max/min ratio is > 1e3
        (default False).
    nanstr : str, optional
        String representation of floating point not-a-number (default nan).
    infstr : str, optional
        String representation of floating point infinity (default inf).
    sign : string, either '-', '+', or ' ', optional
        Sign printing for positive floating-point values: '+' always prints
        a sign, ' ' prints a space, '-' omits it (default '-').
    formatter : dict of callables, optional
        Per-type formatting callables, keyed by type name ('bool', 'int',
        'timedelta', 'datetime', 'float', 'longfloat', 'complexfloat',
        'longcomplexfloat', 'numpystr', 'object', 'str') or by group
        ('all', 'int_kind', 'float_kind', 'complex_kind', 'str_kind').
        Always reset by a call to `set_printoptions`.
    floatmode : str, optional
        How `precision` is interpreted for floats: 'fixed' (exactly
        `precision` digits), 'unique' (minimal unique digits, `precision`
        ignored), 'maxprec' (at most `precision` digits, fewer if unique),
        or 'maxprec_equal' (like 'maxprec' but equal digit count across
        the array).
    legacy : string or `False`, optional
        If `'1.13'`, approximate numpy 1.13 print output (space in the sign
        position of floats, different 0d array behavior). `False` disables
        legacy mode. Unrecognized strings are ignored with a warning.

        .. versionadded:: 1.14.0

    See Also
    --------
    get_printoptions, set_string_function, array2string

    Notes
    -----
    `formatter` is always reset with a call to `set_printoptions`.
    """
    # `legacy` arrives via **kwarg; extract it before rejecting leftovers
    legacy = kwarg.pop('legacy', None)
    if kwarg:
        bad_arg = kwarg.popitem()[0]
        raise TypeError(
            "set_printoptions() got unexpected keyword argument "
            "'{}'".format(bad_arg))

    new_opts = _make_options_dict(precision, threshold, edgeitems, linewidth,
                                  suppress, nanstr, infstr, sign, formatter,
                                  floatmode, legacy)
    # formatter is always reset
    new_opts['formatter'] = formatter
    _format_options.update(new_opts)

    # keep the C-side legacy flag in sync with the Python-side option
    if _format_options['legacy'] == '1.13':
        set_legacy_print_mode(113)
        # reset the sign option in legacy mode to avoid confusion
        _format_options['sign'] = '-'
    elif _format_options['legacy'] is False:
        set_legacy_print_mode(0)
@set_module('numpy')
def get_printoptions():
    """
    Return a copy of the current print options as a dictionary.

    Returns
    -------
    print_opts : dict
        Current options under the keys precision, threshold, edgeitems,
        linewidth, suppress, nanstr, infstr, formatter and sign; see
        `set_printoptions` for a full description of each.

    See Also
    --------
    set_printoptions, set_string_function
    """
    # shallow copy so callers cannot mutate the live options
    return dict(_format_options)
@set_module('numpy')
def printoptions(*args, **kwargs):
    """Context manager that temporarily applies print options.

    All arguments are forwarded to `set_printoptions`; the previous options
    are restored when the ``with`` block exits. The manager yields the
    active options, so they are available via the `as`-clause:

    >>> with np.printoptions(precision=2) as opts:
    ...     assert_equal(opts, np.get_printoptions())

    See Also
    --------
    set_printoptions, get_printoptions
    """
    saved = np.get_printoptions()
    try:
        np.set_printoptions(*args, **kwargs)
        yield np.get_printoptions()
    finally:
        # restore whatever was active before entering the block
        np.set_printoptions(**saved)
def _leading_trailing(a, edgeitems, index=()): | |
""" | |
Keep only the N-D corners (leading and trailing edges) of an array. | |
Should be passed a base-class ndarray, since it makes no guarantees about | |
preserving subclasses. | |
""" | |
axis = len(index) | |
if axis == a.ndim: | |
return a[index] | |
if a.shape[axis] > 2*edgeitems: | |
return concatenate(( | |
_leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]), | |
_leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:]) | |
), axis=axis) | |
else: | |
return _leading_trailing(a, edgeitems, index + np.index_exp[:]) | |
def _object_format(o): | |
""" Object arrays containing lists should be printed unambiguously """ | |
if type(o) is list: | |
fmt = 'list({!r})' | |
else: | |
fmt = '{!r}' | |
return fmt.format(o) | |
def repr_format(x):
    """Format a scalar with its repr (used for string-like array elements)."""
    return '{!r}'.format(x)
def str_format(x):
    """Format a scalar with its plain str() conversion."""
    return '{}'.format(x)
def _get_formatdict(data, **opt):
    # Build a mapping from type-category name to a zero-argument factory that
    # returns the element formatter for `data`; `opt` are the print options.
    prec, fmode = opt['precision'], opt['floatmode']
    supp, sign = opt['suppress'], opt['sign']
    legacy = opt['legacy']

    # wrapped in lambdas to avoid taking a code path with the wrong type of data
    formatdict = {
        'bool': lambda: BoolFormat(data),
        'int': lambda: IntegerFormat(data),
        'float': lambda:
            FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'longfloat': lambda:
            FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'complexfloat': lambda:
            ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'longcomplexfloat': lambda:
            ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
        'datetime': lambda: DatetimeFormat(data, legacy=legacy),
        'timedelta': lambda: TimedeltaFormat(data),
        'object': lambda: _object_format,
        'void': lambda: str_format,
        'numpystr': lambda: repr_format,
        'str': lambda: str}

    # we need to wrap values in `formatter` in a lambda, so that the interface
    # is the same as the above values.
    def indirect(x):
        return lambda: x

    # user-supplied overrides: 'all' first, then group keys, then per-type
    # keys — so the most specific key wins
    formatter = opt['formatter']
    if formatter is not None:
        fkeys = [k for k in formatter.keys() if formatter[k] is not None]
        if 'all' in fkeys:
            for key in formatdict.keys():
                formatdict[key] = indirect(formatter['all'])
        if 'int_kind' in fkeys:
            for key in ['int']:
                formatdict[key] = indirect(formatter['int_kind'])
        if 'float_kind' in fkeys:
            for key in ['float', 'longfloat']:
                formatdict[key] = indirect(formatter['float_kind'])
        if 'complex_kind' in fkeys:
            for key in ['complexfloat', 'longcomplexfloat']:
                formatdict[key] = indirect(formatter['complex_kind'])
        if 'str_kind' in fkeys:
            for key in ['numpystr', 'str']:
                formatdict[key] = indirect(formatter['str_kind'])
        for key in formatdict.keys():
            if key in fkeys:
                formatdict[key] = indirect(formatter[key])

    return formatdict
def _get_format_function(data, **options):
    """
    find the right formatting function for the dtype_
    """
    dtype_ = data.dtype
    dtypeobj = dtype_.type
    formatdict = _get_formatdict(data, **options)
    # dispatch on the scalar-type hierarchy, most specific subclass first
    if issubclass(dtypeobj, _nt.bool_):
        return formatdict['bool']()
    elif issubclass(dtypeobj, _nt.integer):
        # timedelta64 is checked here because it matches the integer branch
        if issubclass(dtypeobj, _nt.timedelta64):
            return formatdict['timedelta']()
        else:
            return formatdict['int']()
    elif issubclass(dtypeobj, _nt.floating):
        if issubclass(dtypeobj, _nt.longfloat):
            return formatdict['longfloat']()
        else:
            return formatdict['float']()
    elif issubclass(dtypeobj, _nt.complexfloating):
        if issubclass(dtypeobj, _nt.clongfloat):
            return formatdict['longcomplexfloat']()
        else:
            return formatdict['complexfloat']()
    elif issubclass(dtypeobj, (_nt.unicode_, _nt.string_)):
        return formatdict['numpystr']()
    elif issubclass(dtypeobj, _nt.datetime64):
        return formatdict['datetime']()
    elif issubclass(dtypeobj, _nt.object_):
        return formatdict['object']()
    elif issubclass(dtypeobj, _nt.void):
        # structured void dtypes get the field-aware formatter
        if dtype_.names is not None:
            return StructuredVoidFormat.from_data(data, **options)
        else:
            return formatdict['void']()
    else:
        # unknown scalar types fall back to their repr
        return formatdict['numpystr']()
def _recursive_guard(fillvalue='...'): | |
""" | |
Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs | |
Decorates a function such that if it calls itself with the same first | |
argument, it returns `fillvalue` instead of recursing. | |
Largely copied from reprlib.recursive_repr | |
""" | |
def decorating_function(f): | |
repr_running = set() | |
@functools.wraps(f) | |
def wrapper(self, *args, **kwargs): | |
key = id(self), get_ident() | |
if key in repr_running: | |
return fillvalue | |
repr_running.add(key) | |
try: | |
return f(self, *args, **kwargs) | |
finally: | |
repr_running.discard(key) | |
return wrapper | |
return decorating_function | |
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
    # Core worker behind array2string: pick a formatter for `a` and hand
    # off to _formatArray for the actual line-wrapped rendering.
    # The formatter __init__s in _get_format_function cannot deal with
    # subclasses yet, and we also need to avoid recursion issues in
    # _formatArray with subclasses which return 0d arrays in place of scalars
    data = asarray(a)
    if a.shape == ():
        a = data

    if a.size > options['threshold']:
        # too large: print only the edges and insert "..." between them
        summary_insert = "..."
        data = _leading_trailing(data, options['edgeitems'])
    else:
        summary_insert = ""

    # find the right formatting function for the array
    format_function = _get_format_function(data, **options)

    # skip over "["
    next_line_prefix = " "
    # skip over array(
    next_line_prefix += " "*len(prefix)

    lst = _formatArray(a, format_function, options['linewidth'],
                       next_line_prefix, separator, options['edgeitems'],
                       summary_insert, options['legacy'])
    return lst
def _array2string_dispatcher( | |
a, max_line_width=None, precision=None, | |
suppress_small=None, separator=None, prefix=None, | |
style=None, formatter=None, threshold=None, | |
edgeitems=None, sign=None, floatmode=None, suffix=None, | |
**kwarg): | |
return (a,) | |
@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=np._NoValue, formatter=None, threshold=None,
                 edgeitems=None, sign=None, floatmode=None, suffix="",
                 **kwarg):
    """
    Return a string representation of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters splits the string appropriately after array elements.
    precision : int or None, optional
        Floating point precision. Default is the current printing
        precision (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero. A number is "very small" if it
        is smaller than the current printing precision.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
    suffix: str, optional
        The length of the prefix and suffix strings are used to respectively
        align and wrap the output. An array is typically printed as::

          prefix + array2string(a) + suffix

        The output is left-padded by the length of the prefix string, and
        wrapping is forced at the column ``max_line_width - len(suffix)``.
        It should be noted that the content of prefix and suffix strings are
        not included in the output.
    style : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.14.0
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are:

        - 'bool'
        - 'int'
        - 'timedelta' : a `numpy.timedelta64`
        - 'datetime' : a `numpy.datetime64`
        - 'float'
        - 'longfloat' : 128-bit floats
        - 'complexfloat'
        - 'longcomplexfloat' : composed of two 128-bit floats
        - 'void' : type `numpy.void`
        - 'numpystr' : types `numpy.string_` and `numpy.unicode_`
        - 'str' : all other strings

        Other keys that can be used to set a group of types at once are:

        - 'all' : sets all types
        - 'int_kind' : sets 'int'
        - 'float_kind' : sets 'float' and 'longfloat'
        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
        - 'str_kind' : sets 'str' and 'numpystr'
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr.
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension.
    sign : string, either '-', '+', or ' ', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values.
    floatmode : str, optional
        Controls the interpretation of the `precision` option for
        floating-point types. Can take the following values:

        - 'fixed': Always print exactly `precision` fractional digits,
          even if this would print more or fewer digits than
          necessary to specify the value uniquely.
        - 'unique': Print the minimum number of fractional digits necessary
          to represent each value uniquely. Different elements may
          have a different number of digits. The value of the
          `precision` option is ignored.
        - 'maxprec': Print at most `precision` fractional digits, but if
          an element can be uniquely represented with fewer digits
          only print it with that many.
        - 'maxprec_equal': Print at most `precision` fractional digits,
          but if every element in the array can be uniquely
          represented with an equal number of fewer digits, use that
          many digits for all elements.
    legacy : string or `False`, optional
        If set to the string `'1.13'` enables 1.13 legacy printing mode. This
        approximates numpy 1.13 print output by including a space in the sign
        position of floats and different behavior for 0d arrays. If set to
        `False`, disables legacy mode. Unrecognized strings will be ignored
        with a warning for forward compatibility.

        .. versionadded:: 1.14.0

    Returns
    -------
    array_str : str
        String representation of the array.

    Raises
    ------
    TypeError
        if a callable in `formatter` does not return a string.

    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions

    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.

    This is a very flexible function; `array_repr` and `array_str` are using
    `array2string` internally so keywords with the same name should work
    identically in all three functions.

    Examples
    --------
    >>> x = np.array([1e-16,1,2,3])
    >>> print(np.array2string(x, precision=2, separator=',',
    ... suppress_small=True))
    [ 0., 1., 2., 3.]

    >>> x = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'

    >>> x = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0 0x1 0x2]'
    """
    # 'legacy' is accepted only as a keyword via **kwarg; any other keyword
    # left in kwarg is an error
    legacy = kwarg.pop('legacy', None)
    if kwarg:
        msg = "array2string() got unexpected keyword argument '{}'"
        raise TypeError(msg.format(kwarg.popitem()[0]))
    overrides = _make_options_dict(precision, threshold, edgeitems,
                                   max_line_width, suppress_small, None, None,
                                   sign, formatter, floatmode, legacy)
    # start from the global print options and layer the explicit overrides
    options = _format_options.copy()
    options.update(overrides)
    if options['legacy'] == '1.13':
        if style is np._NoValue:
            style = repr
        if a.shape == () and not a.dtype.names:
            # legacy mode delegates 0d arrays to the `style` callable
            return style(a.item())
    elif style is not np._NoValue:
        # Deprecation 11-9-2017 v1.14
        warnings.warn("'style' argument is deprecated and no longer functional"
                      " except in 1.13 'legacy' mode",
                      DeprecationWarning, stacklevel=3)
    if options['legacy'] != '1.13':
        # reserve room so wrapping happens at max_line_width - len(suffix)
        options['linewidth'] -= len(suffix)
    # treat as a null array if any of shape elements == 0
    if a.size == 0:
        return "[]"
    return _array2string(a, options, separator, prefix)
def _extendLine(s, line, word, line_width, next_line_prefix, legacy): | |
needs_wrap = len(line) + len(word) > line_width | |
if legacy != '1.13': | |
s# don't wrap lines if it won't help | |
if len(line) <= len(next_line_prefix): | |
needs_wrap = False | |
if needs_wrap: | |
s += line.rstrip() + "\n" | |
line = next_line_prefix | |
line += word | |
return s, line | |
def _formatArray(a, format_function, line_width, next_line_prefix,
                 separator, edge_items, summary_insert, legacy):
    """formatArray is designed for two modes of operation:
    1. Full output
    2. Summarized output

    Returns the bracketed string form of `a` (no dtype/prefix decoration);
    `format_function` renders each scalar element, `summary_insert` is the
    '...' marker (empty string disables summarization).
    """
    def recurser(index, hanging_indent, curr_width):
        """
        By using this local function, we don't need to recurse with all the
        arguments. Since this function is not created recursively, the cost is
        not significant
        """
        axis = len(index)
        axes_left = a.ndim - axis
        if axes_left == 0:
            # `index` now addresses a single scalar element
            return format_function(a[index])
        # when recursing, add a space to align with the [ added, and reduce the
        # length of the line by 1
        next_hanging_indent = hanging_indent + ' '
        if legacy == '1.13':
            next_width = curr_width
        else:
            next_width = curr_width - len(']')
        a_len = a.shape[axis]
        # summarize only when the edges don't already cover the whole axis
        show_summary = summary_insert and 2*edge_items < a_len
        if show_summary:
            leading_items = edge_items
            trailing_items = edge_items
        else:
            leading_items = 0
            trailing_items = a_len
        # stringify the array with the hanging indent on the first line too
        s = ''
        # last axis (rows) - wrap elements if they would not fit on one line
        if axes_left == 1:
            # the length up until the beginning of the separator / bracket
            if legacy == '1.13':
                elem_width = curr_width - len(separator.rstrip())
            else:
                elem_width = curr_width - max(len(separator.rstrip()), len(']'))
            line = hanging_indent
            for i in range(leading_items):
                word = recurser(index + (i,), next_hanging_indent, next_width)
                s, line = _extendLine(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator
            if show_summary:
                s, line = _extendLine(
                    s, line, summary_insert, elem_width, hanging_indent, legacy)
                if legacy == '1.13':
                    line += ", "
                else:
                    line += separator
            # the last element is emitted separately below so that no
            # trailing separator follows it
            for i in range(trailing_items, 1, -1):
                word = recurser(index + (-i,), next_hanging_indent, next_width)
                s, line = _extendLine(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator
            if legacy == '1.13':
                # width of the separator is not considered on 1.13
                elem_width = curr_width
            word = recurser(index + (-1,), next_hanging_indent, next_width)
            s, line = _extendLine(
                s, line, word, elem_width, hanging_indent, legacy)
            s += line
        # other axes - insert newlines between rows
        else:
            s = ''
            # deeper axes get more blank lines between their sub-blocks
            line_sep = separator.rstrip() + '\n'*(axes_left - 1)
            for i in range(leading_items):
                nested = recurser(index + (i,), next_hanging_indent, next_width)
                s += hanging_indent + nested + line_sep
            if show_summary:
                if legacy == '1.13':
                    # trailing space, fixed nbr of newlines, and fixed separator
                    s += hanging_indent + summary_insert + ", \n"
                else:
                    s += hanging_indent + summary_insert + line_sep
            for i in range(trailing_items, 1, -1):
                nested = recurser(index + (-i,), next_hanging_indent,
                                  next_width)
                s += hanging_indent + nested + line_sep
            nested = recurser(index + (-1,), next_hanging_indent, next_width)
            s += hanging_indent + nested
        # remove the hanging indent, and wrap in []
        s = '[' + s[len(hanging_indent):] + ']'
        return s
    try:
        # invoke the recursive part with an initial index and prefix
        return recurser(index=(),
                        hanging_indent=next_line_prefix,
                        curr_width=line_width)
    finally:
        # recursive closures have a cyclic reference to themselves, which
        # requires gc to collect (gh-10620). To avoid this problem, for
        # performance and PyPy friendliness, we break the cycle:
        recurser = None
def _none_or_positive_arg(x, name): | |
if x is None: | |
return -1 | |
if x < 0: | |
raise ValueError("{} must be >= 0".format(name)) | |
return x | |
class FloatingFormat(object):
    """ Formatter for subtypes of np.floating

    Scans the data once (fillFormat) to choose notation (positional vs
    scientific), precision and padding, then formats scalars on call.
    """
    def __init__(self, data, precision, floatmode, suppress_small, sign=False,
                 **kwarg):
        # for backcompatibility, accept bools
        if isinstance(sign, bool):
            sign = '+' if sign else '-'
        self._legacy = kwarg.get('legacy', False)
        if self._legacy == '1.13':
            # when not 0d, legacy does not support '-'
            if data.shape != () and sign == '-':
                sign = ' '
        self.floatmode = floatmode
        if floatmode == 'unique':
            # 'unique' mode ignores precision; shortest-unique digits are used
            self.precision = None
        else:
            self.precision = precision
        # None -> -1 sentinel; negative values rejected
        self.precision = _none_or_positive_arg(self.precision, 'precision')
        self.suppress_small = suppress_small
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        self.fillFormat(data)
    def fillFormat(self, data):
        """Scan `data` to choose notation, precision and padding fields."""
        # only the finite values are used to compute the number of digits
        finite_vals = data[isfinite(data)]
        # choose exponential mode based on the non-zero finite values:
        abs_non_zero = absolute(finite_vals[finite_vals != 0])
        if len(abs_non_zero) != 0:
            max_val = np.max(abs_non_zero)
            min_val = np.min(abs_non_zero)
            with errstate(over='ignore'):  # division can overflow
                if max_val >= 1.e8 or (not self.suppress_small and
                        (min_val < 0.0001 or max_val/min_val > 1000.)):
                    self.exp_format = True
        # do a first pass of printing all the numbers, to determine sizes
        if len(finite_vals) == 0:
            # no finite values at all: trivial layout
            self.pad_left = 0
            self.pad_right = 0
            self.trim = '.'
            self.exp_size = -1
            self.unique = True
        elif self.exp_format:
            trim, unique = '.', True
            if self.floatmode == 'fixed' or self._legacy == '1.13':
                trim, unique = 'k', False
            strs = (dragon4_scientific(x, precision=self.precision,
                               unique=unique, trim=trim, sign=self.sign == '+')
                    for x in finite_vals)
            # split "1.23e+04" into mantissa and exponent parts
            frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
            int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
            self.exp_size = max(len(s) for s in exp_strs) - 1
            self.trim = 'k'
            self.precision = max(len(s) for s in frac_part)
            # for back-compat with np 1.13, use 2 spaces & sign and full prec
            if self._legacy == '1.13':
                self.pad_left = 3
            else:
                # this should be only 1 or 2. Can be calculated from sign.
                self.pad_left = max(len(s) for s in int_part)
            # pad_right is only needed for nan length calculation
            self.pad_right = self.exp_size + 2 + self.precision
            self.unique = False
        else:
            # first pass printing to determine sizes
            trim, unique = '.', True
            if self.floatmode == 'fixed':
                trim, unique = 'k', False
            strs = (dragon4_positional(x, precision=self.precision,
                                       fractional=True,
                                       unique=unique, trim=trim,
                                       sign=self.sign == '+')
                    for x in finite_vals)
            int_part, frac_part = zip(*(s.split('.') for s in strs))
            if self._legacy == '1.13':
                self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
            else:
                self.pad_left = max(len(s) for s in int_part)
            self.pad_right = max(len(s) for s in frac_part)
            self.exp_size = -1
            if self.floatmode in ['fixed', 'maxprec_equal']:
                # all elements share one fixed fractional width
                self.precision = self.pad_right
                self.unique = False
                self.trim = 'k'
            else:
                self.unique = True
                self.trim = '.'
        if self._legacy != '1.13':
            # account for sign = ' ' by adding one to pad_left
            if self.sign == ' ' and not any(np.signbit(finite_vals)):
                self.pad_left += 1
        # if there are non-finite values, may need to increase pad_left
        if data.size != finite_vals.size:
            neginf = self.sign != '-' or any(data[isinf(data)] < 0)
            nanlen = len(_format_options['nanstr'])
            inflen = len(_format_options['infstr']) + neginf
            offset = self.pad_right + 1  # +1 for decimal pt
            self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
    def __call__(self, x):
        # non-finite values are rendered from the configured nan/inf strings,
        # right-justified to the common field width
        if not np.isfinite(x):
            with errstate(invalid='ignore'):
                if np.isnan(x):
                    sign = '+' if self.sign == '+' else ''
                    ret = sign + _format_options['nanstr']
                else:  # isinf
                    sign = '-' if x < 0 else '+' if self.sign == '+' else ''
                    ret = sign + _format_options['infstr']
            return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
        if self.exp_format:
            return dragon4_scientific(x,
                                      precision=self.precision,
                                      unique=self.unique,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      exp_digits=self.exp_size)
        else:
            return dragon4_positional(x,
                                      precision=self.precision,
                                      unique=self.unique,
                                      fractional=True,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      pad_right=self.pad_right)
# for back-compatibility, we keep the classes for each float type too
class FloatFormat(FloatingFormat):
    """Deprecated alias of `FloatingFormat`, kept for backward compatibility."""
    def __init__(self, *args, **kwargs):
        msg = "FloatFormat has been replaced by FloatingFormat"
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        super(FloatFormat, self).__init__(*args, **kwargs)
class LongFloatFormat(FloatingFormat):
    """Deprecated alias of `FloatingFormat`, kept for backward compatibility."""
    def __init__(self, *args, **kwargs):
        msg = "LongFloatFormat has been replaced by FloatingFormat"
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        super(LongFloatFormat, self).__init__(*args, **kwargs)
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
                            sign=False, pad_left=None, exp_digits=None):
    """
    Format a floating-point scalar as a decimal string in scientific notation.

    Provides control over rounding, trimming and padding. Uses and assumes
    IEEE unbiased rounding. Uses the "Dragon4" algorithm.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print. May be None if `unique` is
        `True`, but must be an integer if unique is `False`.
    unique : boolean, optional
        If `True`, use a digit-generation strategy which gives the shortest
        representation which uniquely identifies the floating-point number from
        other values of the same type, by judicious rounding. If `precision`
        was omitted, print all necessary digits, otherwise digit generation is
        cut off after `precision` digits and the remaining value is rounded.
        If `False`, digits are generated as if printing an infinite-precision
        value and stopping after `precision` digits, rounding the remaining
        value.
    trim : one of 'k', '.', '0', '-', optional
        Controls post-processing trimming of trailing digits, as follows:

        * 'k' : keep trailing zeros, keep decimal point (no trimming)
        * '.' : trim all trailing zeros, leave decimal point
        * '0' : trim all but the zero before the decimal point. Insert the
          zero if it is missing.
        * '-' : trim trailing zeros and any trailing decimal point
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side of the string with whitespace until at least that
        many characters are to the left of the decimal point.
    exp_digits : non-negative integer, optional
        Pad the exponent with zeros until it contains at least this many digits.
        If omitted, the exponent will be at least 2 digits.

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_positional

    Examples
    --------
    >>> np.format_float_scientific(np.float32(np.pi))
    '3.1415927e+00'
    >>> s = np.float32(1.23e24)
    >>> np.format_float_scientific(s, unique=False, precision=15)
    '1.230000071797338e+24'
    >>> np.format_float_scientific(s, exp_digits=4)
    '1.23e+0024'
    """
    # map None -> -1 sentinel (understood by dragon4) and reject negatives
    dragon_args = dict(
        precision=_none_or_positive_arg(precision, 'precision'),
        unique=unique,
        trim=trim,
        sign=sign,
        pad_left=_none_or_positive_arg(pad_left, 'pad_left'),
        exp_digits=_none_or_positive_arg(exp_digits, 'exp_digits'))
    return dragon4_scientific(x, **dragon_args)
@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
                            fractional=True, trim='k', sign=False,
                            pad_left=None, pad_right=None):
    """
    Format a floating-point scalar as a decimal string in positional notation.

    Provides control over rounding, trimming and padding. Uses and assumes
    IEEE unbiased rounding. Uses the "Dragon4" algorithm.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print. May be None if `unique` is
        `True`, but must be an integer if unique is `False`.
    unique : boolean, optional
        If `True`, use a digit-generation strategy which gives the shortest
        representation which uniquely identifies the floating-point number from
        other values of the same type, by judicious rounding. If `precision`
        was omitted, print out all necessary digits, otherwise digit generation
        is cut off after `precision` digits and the remaining value is rounded.
        If `False`, digits are generated as if printing an infinite-precision
        value and stopping after `precision` digits, rounding the remaining
        value.
    fractional : boolean, optional
        If `True`, the cutoff of `precision` digits refers to the total number
        of digits after the decimal point, including leading zeros.
        If `False`, `precision` refers to the total number of significant
        digits, before or after the decimal point, ignoring leading zeros.
    trim : one of 'k', '.', '0', '-', optional
        Controls post-processing trimming of trailing digits, as follows:

        * 'k' : keep trailing zeros, keep decimal point (no trimming)
        * '.' : trim all trailing zeros, leave decimal point
        * '0' : trim all but the zero before the decimal point. Insert the
          zero if it is missing.
        * '-' : trim trailing zeros and any trailing decimal point
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side of the string with whitespace until at least that
        many characters are to the left of the decimal point.
    pad_right : non-negative integer, optional
        Pad the right side of the string with whitespace until at least that
        many characters are to the right of the decimal point.

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_scientific

    Examples
    --------
    >>> np.format_float_positional(np.float32(np.pi))
    '3.1415927'
    >>> np.format_float_positional(np.float16(np.pi))
    '3.14'
    >>> np.format_float_positional(np.float16(0.3))
    '0.3'
    >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
    '0.3000488281'
    """
    # map None -> -1 sentinel (understood by dragon4) and reject negatives
    dragon_args = dict(
        precision=_none_or_positive_arg(precision, 'precision'),
        unique=unique,
        fractional=fractional,
        trim=trim,
        sign=sign,
        pad_left=_none_or_positive_arg(pad_left, 'pad_left'),
        pad_right=_none_or_positive_arg(pad_right, 'pad_right'))
    return dragon4_positional(x, **dragon_args)
class IntegerFormat(object):
    """Formatter for integer arrays: right-justifies to the widest element."""
    def __init__(self, data):
        # the extremes bound the rendered width of every integer element
        if data.size > 0:
            widest = max(str(np.max(data)), str(np.min(data)), key=len)
            width = len(widest)
        else:
            width = 0
        self.format = '%{}d'.format(width)
    def __call__(self, x):
        return self.format % x
class BoolFormat(object):
    """Formatter for boolean arrays."""
    def __init__(self, data, **kwargs):
        # pad " True" to the length of "False" so array elements align when
        # printed; 0d arrays print as a bare scalar and get no padding
        self.truestr = 'True' if data.shape == () else ' True'
    def __call__(self, x):
        return self.truestr if x else "False"
class ComplexFloatingFormat(object):
    """ Formatter for subtypes of np.complexfloating """
    def __init__(self, x, precision, floatmode, suppress_small,
                 sign=False, **kwarg):
        # for backcompatibility, accept bools for `sign`
        if isinstance(sign, bool):
            sign = '+' if sign else '-'
        if kwarg.get('legacy', False) == '1.13':
            # legacy 1.13 fixed the float modes of the two components
            floatmode_real, floatmode_imag = 'maxprec_equal', 'maxprec'
        else:
            floatmode_real = floatmode_imag = floatmode
        self.real_format = FloatingFormat(x.real, precision, floatmode_real,
                                          suppress_small, sign=sign, **kwarg)
        self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
                                          suppress_small, sign='+', **kwarg)
    def __call__(self, x):
        real_str = self.real_format(x.real)
        imag_str = self.imag_format(x.imag)
        # splice the 'j' in just before the imaginary part's trailing padding
        body = imag_str.rstrip()
        tail = imag_str[len(body):]
        return real_str + body + 'j' + tail
# for back-compatibility, we keep the classes for each complex type too
class ComplexFormat(ComplexFloatingFormat):
    """Deprecated alias of `ComplexFloatingFormat`."""
    def __init__(self, *args, **kwargs):
        msg = "ComplexFormat has been replaced by ComplexFloatingFormat"
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        super(ComplexFormat, self).__init__(*args, **kwargs)
class LongComplexFormat(ComplexFloatingFormat):
    """Deprecated alias of `ComplexFloatingFormat`."""
    def __init__(self, *args, **kwargs):
        msg = "LongComplexFormat has been replaced by ComplexFloatingFormat"
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        super(LongComplexFormat, self).__init__(*args, **kwargs)
class _TimelikeFormat(object): | |
def __init__(self, data): | |
non_nat = data[~isnat(data)] | |
if len(non_nat) > 0: | |
# Max str length of non-NaT elements | |
max_str_len = max(len(self._format_non_nat(np.max(non_nat))), | |
len(self._format_non_nat(np.min(non_nat)))) | |
else: | |
max_str_len = 0 | |
if len(non_nat) < data.size: | |
# data contains a NaT | |
max_str_len = max(max_str_len, 5) | |
self._format = '%{}s'.format(max_str_len) | |
self._nat = "'NaT'".rjust(max_str_len) | |
def _format_non_nat(self, x): | |
# override in subclass | |
raise NotImplementedError | |
def __call__(self, x): | |
if isnat(x): | |
return self._nat | |
else: | |
return self._format % self._format_non_nat(x) | |
class DatetimeFormat(_TimelikeFormat):
    """Formatter for datetime64 arrays; elements render as quoted strings."""
    def __init__(self, x, unit=None, timezone=None, casting='same_kind',
                 legacy=False):
        # Get the unit from the dtype when not given explicitly
        if unit is None:
            unit = datetime_data(x.dtype)[0] if x.dtype.kind == 'M' else 's'
        self.timezone = 'naive' if timezone is None else timezone
        self.unit = unit
        self.casting = casting
        self.legacy = legacy
        # must be called after the above are configured
        super(DatetimeFormat, self).__init__(x)
    def __call__(self, x):
        # legacy 1.13 mode bypasses the base class (no padding/NaT handling)
        if self.legacy == '1.13':
            return self._format_non_nat(x)
        return super(DatetimeFormat, self).__call__(x)
    def _format_non_nat(self, x):
        return "'%s'" % datetime_as_string(x,
                                           unit=self.unit,
                                           timezone=self.timezone,
                                           casting=self.casting)
class TimedeltaFormat(_TimelikeFormat):
    """Formatter for timedelta64 arrays; elements print as integer counts."""
    def _format_non_nat(self, x):
        # render the underlying int64 tick count
        return str(x.astype('i8'))
class SubArrayFormat(object):
    """Recursively format a sub-array field as nested bracketed lists."""
    def __init__(self, format_function):
        self.format_function = format_function
    def __call__(self, arr):
        if arr.ndim > 1:
            parts = [self.__call__(sub) for sub in arr]
        else:
            parts = [self.format_function(elem) for elem in arr]
        return "[" + ", ".join(parts) + "]"
class StructuredVoidFormat(object):
    """
    Formatter for structured np.void objects.

    This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
    as alias scalars lose their field information, and the implementation
    relies upon np.void.__getitem__.
    """
    def __init__(self, format_functions):
        self.format_functions = format_functions
    @classmethod
    def from_data(cls, data, **options):
        """
        Alternate constructor deriving per-field formatters from the raw data.
        Added to avoid changing the signature of __init__.
        """
        format_functions = []
        for field_name in data.dtype.names:
            format_function = _get_format_function(data[field_name], **options)
            if data.dtype[field_name].shape != ():
                format_function = SubArrayFormat(format_function)
            format_functions.append(format_function)
        return cls(format_functions)
    def __call__(self, x):
        pieces = [fmt(field)
                  for field, fmt in zip(x, self.format_functions)]
        # a single-field struct keeps the trailing comma, like a 1-tuple
        if len(pieces) == 1:
            return "({},)".format(pieces[0])
        return "({})".format(", ".join(pieces))
# for backwards compatibility
class StructureFormat(StructuredVoidFormat):
    """Deprecated alias of `StructuredVoidFormat`."""
    def __init__(self, *args, **kwargs):
        # NumPy 1.14, 2018-02-14
        msg = "StructureFormat has been replaced by StructuredVoidFormat"
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        super(StructureFormat, self).__init__(*args, **kwargs)
def _void_scalar_repr(x):
    """
    Implements the repr for structured-void scalars. It is called from the
    scalartypes.c.src code, and is placed here because it uses the elementwise
    formatters defined above.
    """
    formatter = StructuredVoidFormat.from_data(array(x), **_format_options)
    return formatter(x)
# dtypes whose repr may omit the dtype=... suffix, because np.array() would
# infer them from the values alone
_typelessdata = [int_, float_, complex_, bool_]
# also include the platform int types, but only when they subclass the
# builtin int (so their values repr identically to plain ints)
if issubclass(intc, int):
    _typelessdata.append(intc)
if issubclass(longlong, int):
    _typelessdata.append(longlong)
def dtype_is_implied(dtype):
    """
    Determine if the given dtype is implied by the representation of its values.

    Parameters
    ----------
    dtype : dtype
        Data type

    Returns
    -------
    implied : bool
        True if the dtype is implied by the representation of its values.

    Examples
    --------
    >>> np.core.arrayprint.dtype_is_implied(int)
    True
    >>> np.array([1, 2, 3], int)
    array([1, 2, 3])
    >>> np.core.arrayprint.dtype_is_implied(np.int8)
    False
    >>> np.array([1, 2, 3], np.int8)
    array([1, 2, 3], dtype=int8)
    """
    dtype = np.dtype(dtype)
    # legacy 1.13 mode always printed the dtype of boolean arrays
    if _format_options['legacy'] == '1.13' and dtype.type == bool_:
        return False
    # not just void types can be structured, and names are not part of the repr
    if dtype.names is not None:
        return False
    return dtype.type in _typelessdata
def dtype_short_repr(dtype):
    """
    Convert a dtype to a short form which evaluates to the same dtype.

    The intent is roughly that the following holds

    >>> from numpy import *
    >>> assert eval(dtype_short_repr(dt)) == dt
    """
    if dtype.names is not None:
        # structured dtypes give a list or tuple repr
        return str(dtype)
    if issubclass(dtype.type, flexible):
        # handle these separately so they don't give garbage like str256
        return "'{}'".format(dtype)
    name = dtype.name
    # quote typenames which can't be represented as python variable names
    if name and not (name[0].isalpha() and name.isalnum()):
        name = repr(name)
    return name
def _array_repr_implementation(
        arr, max_line_width=None, precision=None, suppress_small=None,
        array2string=array2string):
    """Internal version of array_repr() that allows overriding array2string."""
    if max_line_width is None:
        max_line_width = _format_options['linewidth']
    # subclasses repr with their own class name instead of "array"
    if type(arr) is not ndarray:
        class_name = type(arr).__name__
    else:
        class_name = "array"
    # omit the dtype suffix when np.array() would infer it (non-empty only)
    skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
    prefix = class_name + "("
    suffix = ")" if skipdtype else ","
    if (_format_options['legacy'] == '1.13' and
            arr.shape == () and not arr.dtype.names):
        # legacy mode reprs 0d arrays via the scalar's repr
        lst = repr(arr.item())
    elif arr.size > 0 or arr.shape == (0,):
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', prefix, suffix=suffix)
    else:  # show zero-length shape unless it is (0,)
        lst = "[], shape=%s" % (repr(arr.shape),)
    arr_str = prefix + lst + suffix
    if skipdtype:
        return arr_str
    dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
    # compute whether we should put dtype on a new line: Do so if adding the
    # dtype would extend the last line past max_line_width.
    # Note: This line gives the correct result even when rfind returns -1.
    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
    spacer = " "
    if _format_options['legacy'] == '1.13':
        if issubclass(arr.dtype.type, flexible):
            spacer = '\n' + ' '*len(class_name + "(")
    elif last_line_len + len(dtype_str) + 1 > max_line_width:
        spacer = '\n' + ' '*len(class_name + "(")
    return arr_str + spacer + dtype_str
def _array_repr_dispatcher( | |
arr, max_line_width=None, precision=None, suppress_small=None): | |
return (arr,) | |
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        The maximum number of columns the string should span. Newline
        characters split the string appropriately after array elements.
    precision : int, optional
        Floating point precision. Default is the current printing precision
        (usually 8), which can be altered using `set_printoptions`.
    suppress_small : bool, optional
        Represent very small numbers as zero, default is False. Very small
        is defined by `precision`, if the precision is 8 then
        numbers smaller than 5e-9 are represented as zero.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.ma.array([0.]))
    'MaskedArray([ 0.])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'
    >>> x = np.array([1e-6, 4e-7, 2, 3])
    >>> np.array_repr(x, precision=6, suppress_small=True)
    'array([ 0.000001, 0. , 2. , 3. ])'
    """
    # thin public wrapper: all the work happens in the overridable helper
    return _array_repr_implementation(
        arr, max_line_width=max_line_width, precision=precision,
        suppress_small=suppress_small)
# str() wrapped in a recursion guard: a self-referential 0d object array
# would otherwise recurse forever when converted to a string.
_guarded_str = _recursive_guard()(str)
def _array_str_implementation(
        a, max_line_width=None, precision=None, suppress_small=None,
        array2string=array2string):
    """Internal version of array_str() that allows overriding array2string.

    Parameters mirror `array_str`; `array2string` is injectable so the
    module defaults (installed via `set_string_function`) can call the
    undispatched implementation directly.
    """
    if (_format_options['legacy'] == '1.13' and
            a.shape == () and not a.dtype.names):
        # 1.13 legacy mode: 0-d arrays print as their scalar item.
        return str(a.item())
    # the str of 0d arrays is a special case: It should appear like a scalar,
    # so floats are not truncated by `precision`, and strings are not wrapped
    # in quotes. So we return the str of the scalar value.
    if a.shape == ():
        # obtain a scalar and call str on it, avoiding problems for subclasses
        # for which indexing with () returns a 0d instead of a scalar by using
        # ndarray's getindex. Also guard against recursive 0d object arrays.
        return _guarded_str(np.ndarray.__getitem__(a, ()))
    return array2string(a, max_line_width, precision, suppress_small, ' ', "")
def _array_str_dispatcher(
        a, max_line_width=None, precision=None, suppress_small=None):
    # Only the array argument is relevant for __array_function__ dispatch.
    relevant = (a,)
    return relevant
@array_function_dispatch(_array_str_dispatcher, module='numpy')
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data is rendered as a single string. This is similar to
    `array_repr`, except that `array_repr` additionally includes the
    class name and, when needed, the data type.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Insert newlines if the text is longer than `max_line_width`.
        The default is, indirectly, 75.
    precision : int, optional
        Floating point precision. Defaults to the current printing
        precision (usually 8), which can be altered with
        `set_printoptions`.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is
        False. "Very close" is defined by `precision`: at precision 8,
        numbers smaller in absolute value than 5e-9 print as zero.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> np.array_str(np.arange(3))
    '[0 1 2]'
    """
    return _array_str_implementation(
        a, max_line_width, precision, suppress_small)
# needed if __array_function__ is disabled
_array2string_impl = getattr(array2string, '__wrapped__', array2string)
# Default printers installed into the C layer on import (see the
# set_string_function calls below); they use the undispatched array2string.
_default_array_str = functools.partial(_array_str_implementation,
                                       array2string=_array2string_impl)
_default_array_repr = functools.partial(_array_repr_implementation,
                                        array2string=_array2string_impl)
def set_string_function(f, repr=True):
    """
    Set a Python function to be used when pretty printing arrays.

    Parameters
    ----------
    f : function or None
        Function used to pretty print arrays. It should expect a single
        array argument and return a string representation of the array.
        If None, the function is reset to the default NumPy printer.
    repr : bool, optional
        If True (default), set the function used for pretty printing
        (``__repr__``); if False, set the one that returns the default
        string representation (``__str__``).

    See Also
    --------
    set_printoptions, get_printoptions

    Notes
    -----
    Setting ``__str__`` also affects ``__repr__``: the width of each
    array element in the repr string equals the length of the result
    of ``__str__()`` for that element.

    Examples
    --------
    >>> def pprint(arr):
    ...     return 'HA! - What are you going to do now?'
    ...
    >>> np.set_string_function(pprint)
    >>> a = np.arange(10)
    >>> a
    HA! - What are you going to do now?
    >>> np.set_string_function(None)
    >>> a
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    if f is not None:
        return multiarray.set_string_function(f, repr)
    # Reset to the bundled defaults; the C layer uses 1 for repr, 0 for str.
    if repr:
        return multiarray.set_string_function(_default_array_repr, 1)
    return multiarray.set_string_function(_default_array_str, 0)
# Install the default printers into the C array type at import time
# (second argument: 0 selects __str__, 1 selects __repr__).
set_string_function(_default_array_str, 0)
set_string_function(_default_array_repr, 1)
"""Simple script to compute the api hash of the current API. | |
The API has is defined by numpy_api_order and ufunc_api_order. | |
""" | |
from __future__ import division, absolute_import, print_function | |
from os.path import dirname | |
from code_generators.genapi import fullapi_hash | |
from code_generators.numpy_api import full_api | |
if __name__ == '__main__': | |
curdir = dirname(__file__) | |
print(fullapi_hash(full_api)) |
""" | |
This module contains a set of functions for vectorized string | |
operations and methods. | |
.. note:: | |
The `chararray` class exists for backwards compatibility with | |
Numarray, it is not recommended for new development. Starting from numpy | |
1.4, if one needs arrays of strings, it is recommended to use arrays of | |
`dtype` `object_`, `string_` or `unicode_`, and use the free functions | |
in the `numpy.char` module for fast vectorized string operations. | |
Some methods will only be available if the corresponding string method is | |
available in your version of Python. | |
The preferred alias for `defchararray` is `numpy.char`. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import functools | |
import sys | |
from .numerictypes import string_, unicode_, integer, object_, bool_, character | |
from .numeric import ndarray, compare_chararrays | |
from .numeric import array as narray | |
from numpy.core.multiarray import _vec_string | |
from numpy.core.overrides import set_module | |
from numpy.core import overrides | |
from numpy.compat import asbytes, long | |
import numpy | |
# Public API of this module: vectorized string comparisons and the
# str-method wrappers, plus the chararray class and its constructors.
__all__ = [
    'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal',
    'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
    'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
    'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
    'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
    'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
    'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
    'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
    'array', 'asarray'
    ]
# NOTE(review): presumably a bootstrap flag consumed by chararray later in
# this module; not referenced in this chunk — confirm before removing.
_globalvar = 0
if sys.version_info[0] >= 3:
    # Python 3: text is `str`, binary data is `bytes`.
    _unicode = str
    _bytes = bytes
else:
    # Python 2: `unicode` is the text type, `str` holds bytes.
    _unicode = unicode
    _bytes = str
# Alias to the builtin, presumably so it stays reachable where `len` is
# shadowed elsewhere in the module — verify against later definitions.
_len = len
# Dispatch decorator pre-bound to the public module name `numpy.char`.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy.char')
def _use_unicode(*args):
    """
    Choose the output dtype for a string operation.

    Returns `unicode_` if at least one argument is (or converts to) a
    unicode array; otherwise returns `string_`.
    """
    for arg in args:
        if isinstance(arg, _unicode):
            return unicode_
        if issubclass(numpy.asarray(arg).dtype.type, unicode_):
            return unicode_
    return string_
def _to_string_or_unicode_array(result):
    """
    Cast an intermediate object array back into a string/unicode array.

    Round-tripping through ``tolist()`` lets `asarray` infer the natural
    fixed-width string dtype for the values.
    """
    return numpy.asarray(result.tolist())
def _clean_args(*args):
    """
    Trim trailing default arguments for delegation to str methods.

    Many Python string methods signal "use the default" by omitting the
    argument rather than accepting None, so drop the first None and
    every argument after it.
    """
    cleaned = []
    for value in args:
        if value is None:
            break
        cleaned.append(value)
    return cleaned
def _get_num_chars(a):
    """
    Return the number of characters per field of a string/unicode array.

    Abstracts over the fact that unicode arrays store 4 bytes per
    character, so their per-field size is ``itemsize / 4``.
    """
    width = a.itemsize
    if issubclass(a.dtype.type, unicode_):
        width //= 4
    return width
def _binary_op_dispatcher(x1, x2):
    # Both operands participate in __array_function__ dispatch.
    return (x1, x2)
@array_function_dispatch(_binary_op_dispatcher)
def equal(x1, x2):
    """
    Return (x1 == x2) element-wise.

    Unlike `numpy.equal`, trailing whitespace is stripped from each
    string before comparing; this matches the old numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray or bool
        Output array of bools, or a single bool if x1 and x2 are scalars.

    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less
    """
    result = compare_chararrays(x1, x2, '==', True)
    return result
@array_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
    """
    Return (x1 != x2) element-wise.

    Unlike `numpy.not_equal`, trailing whitespace is stripped from each
    string before comparing; this matches the old numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray or bool
        Output array of bools, or a single bool if x1 and x2 are scalars.

    See Also
    --------
    equal, greater_equal, less_equal, greater, less
    """
    result = compare_chararrays(x1, x2, '!=', True)
    return result
@array_function_dispatch(_binary_op_dispatcher)
def greater_equal(x1, x2):
    """
    Return (x1 >= x2) element-wise.

    Unlike `numpy.greater_equal`, trailing whitespace is stripped from
    each string before comparing; this matches the old numarray
    behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray or bool
        Output array of bools, or a single bool if x1 and x2 are scalars.

    See Also
    --------
    equal, not_equal, less_equal, greater, less
    """
    result = compare_chararrays(x1, x2, '>=', True)
    return result
@array_function_dispatch(_binary_op_dispatcher)
def less_equal(x1, x2):
    """
    Return (x1 <= x2) element-wise.

    Unlike `numpy.less_equal`, trailing whitespace is stripped from
    each string before comparing; this matches the old numarray
    behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray or bool
        Output array of bools, or a single bool if x1 and x2 are scalars.

    See Also
    --------
    equal, not_equal, greater_equal, greater, less
    """
    result = compare_chararrays(x1, x2, '<=', True)
    return result
@array_function_dispatch(_binary_op_dispatcher)
def greater(x1, x2):
    """
    Return (x1 > x2) element-wise.

    Unlike `numpy.greater`, trailing whitespace is stripped from each
    string before comparing; this matches the old numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray or bool
        Output array of bools, or a single bool if x1 and x2 are scalars.

    See Also
    --------
    equal, not_equal, greater_equal, less_equal, less
    """
    result = compare_chararrays(x1, x2, '>', True)
    return result
@array_function_dispatch(_binary_op_dispatcher)
def less(x1, x2):
    """
    Return (x1 < x2) element-wise.

    Unlike `numpy.less`, trailing whitespace is stripped from each
    string before comparing; this matches the old numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray or bool
        Output array of bools, or a single bool if x1 and x2 are scalars.

    See Also
    --------
    equal, not_equal, greater_equal, less_equal, greater
    """
    result = compare_chararrays(x1, x2, '<', True)
    return result
def _unary_op_dispatcher(a):
    # Single-argument dispatcher for __array_function__.
    return (a,)
@array_function_dispatch(_unary_op_dispatcher)
def str_len(a):
    """
    Return len(a) element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of integers giving the length of each element.

    See Also
    --------
    len
    """
    return _vec_string(a, integer, '__len__')
@array_function_dispatch(_binary_op_dispatcher)
def add(x1, x2):
    """
    Return element-wise string concatenation for two arrays of str or unicode.

    Arrays `x1` and `x2` must have the same shape.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays.

    Returns
    -------
    add : ndarray
        Output array of `string_` or `unicode_`, depending on input
        types, of the same shape as `x1` and `x2`.
    """
    left = numpy.asarray(x1)
    right = numpy.asarray(x2)
    # The result field must be wide enough to hold both operands.
    width = _get_num_chars(left) + _get_num_chars(right)
    out_dtype = _use_unicode(left, right)
    return _vec_string(left, (out_dtype, width), '__add__', (right,))
def _multiply_dispatcher(a, i):
    # Only the string array takes part in dispatch; `i` is a plain count.
    return (a,)
@array_function_dispatch(_multiply_dispatcher)
def multiply(a, i):
    """
    Return (a * i), that is string multiple concatenation,
    element-wise.

    Values in `i` of less than 0 are treated as 0 (which yields an
    empty string).

    Parameters
    ----------
    a : array_like of str or unicode
    i : array_like of ints

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types

    Raises
    ------
    ValueError
        If `i` is not an integer array.
    """
    a_arr = numpy.asarray(a)
    i_arr = numpy.asarray(i)
    if not issubclass(i_arr.dtype.type, integer):
        raise ValueError("Can only multiply by integers")
    # Result field width: widest input times the largest repeat count;
    # negative counts clamp to 0 (empty strings).
    out_size = _get_num_chars(a_arr) * max(long(i_arr.max()), 0)
    return _vec_string(
        a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
def _mod_dispatcher(a, values):
    # Both the format strings and the interpolated values are relevant
    # for dispatch.
    return (a, values)
@array_function_dispatch(_mod_dispatcher)
def mod(a, values):
    """
    Return (a % i), that is pre-Python 2.6 string formatting
    (interpolation), element-wise for a pair of array_likes of str
    or unicode.

    Parameters
    ----------
    a : array_like of str or unicode
    values : array_like of values
        These values will be element-wise interpolated into the string.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types.

    See Also
    --------
    str.__mod__
    """
    interpolated = _vec_string(a, object_, '__mod__', (values,))
    return _to_string_or_unicode_array(interpolated)
@array_function_dispatch(_unary_op_dispatcher)
def capitalize(a):
    """
    Return a copy of `a` with only the first character of each element
    capitalized.

    Calls `str.capitalize` element-wise. For 8-bit strings, this method
    is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array of strings to capitalize.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types.

    See Also
    --------
    str.capitalize

    Examples
    --------
    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
    array(['a1b2', '1b2a', 'b2a1', '2a1b'],
        dtype='|S4')
    >>> np.char.capitalize(c)
    array(['A1b2', '1b2a', 'B2a1', '2a1b'],
        dtype='|S4')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'capitalize')
def _center_dispatcher(a, width, fillchar=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_center_dispatcher)
def center(a, width, fillchar=' '):
    """
    Return a copy of `a` with its elements centered in a string of
    length `width`.

    Calls `str.center` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The padding character to use (default is space).

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input
        types

    See also
    --------
    str.center
    """
    a_arr = numpy.asarray(a)
    width_arr = numpy.asarray(width)
    # The result field must hold the widest requested width.
    size = long(numpy.max(width_arr.flat))
    # For byte-string arrays the fill character must itself be bytes.
    if numpy.issubdtype(a_arr.dtype, numpy.string_):
        fillchar = asbytes(fillchar)
    return _vec_string(
        a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
def _count_dispatcher(a, sub, start=None, end=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_count_dispatcher)
def count(a, sub, start=0, end=None):
    """
    Return an array with the number of non-overlapping occurrences of
    substring `sub` in the range [`start`, `end`].

    Calls `str.count` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    sub : str or unicode
        The substring to search for.
    start, end : int, optional
        Interpreted as in slice notation; restrict the range in which
        to count.

    Returns
    -------
    out : ndarray
        Output array of ints.

    See Also
    --------
    str.count

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> np.char.count(c, 'A')
    array([3, 1, 1])
    >>> np.char.count(c, 'A', start=1, end=4)
    array([2, 1, 1])
    """
    # A trailing end=None must be dropped: str.count does not accept
    # None for its optional arguments.
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, integer, 'count', args)
def _code_dispatcher(a, encoding=None, errors=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_code_dispatcher)
def decode(a, encoding=None, errors=None):
    """
    Calls `str.decode` element-wise.

    The set of available codecs comes from the Python standard library,
    and may be extended at runtime; see the :mod:`codecs` module.

    Parameters
    ----------
    a : array_like of str or unicode
    encoding : str, optional
        The name of an encoding.
    errors : str, optional
        Specifies how to handle encoding errors.

    Returns
    -------
    out : ndarray

    See Also
    --------
    str.decode

    Notes
    -----
    The type of the result will depend on the encoding specified.
    """
    decoded = _vec_string(a, object_, 'decode', _clean_args(encoding, errors))
    return _to_string_or_unicode_array(decoded)
@array_function_dispatch(_code_dispatcher)
def encode(a, encoding=None, errors=None):
    """
    Calls `str.encode` element-wise.

    The set of available codecs comes from the Python standard library,
    and may be extended at runtime; see the :mod:`codecs` module.

    Parameters
    ----------
    a : array_like of str or unicode
    encoding : str, optional
        The name of an encoding.
    errors : str, optional
        Specifies how to handle encoding errors.

    Returns
    -------
    out : ndarray

    See Also
    --------
    str.encode

    Notes
    -----
    The type of the result will depend on the encoding specified.
    """
    encoded = _vec_string(a, object_, 'encode', _clean_args(encoding, errors))
    return _to_string_or_unicode_array(encoded)
def _endswith_dispatcher(a, suffix, start=None, end=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_endswith_dispatcher)
def endswith(a, suffix, start=0, end=None):
    """
    Return a boolean array which is `True` where the string element
    in `a` ends with `suffix`, otherwise `False`.

    Calls `str.endswith` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    suffix : str
    start, end : int, optional
        With optional `start`, test beginning at that position. With
        optional `end`, stop comparing at that position.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.endswith

    Examples
    --------
    >>> s = np.array(['foo', 'bar'])
    >>> np.char.endswith(s, 'ar')
    array([False,  True])
    >>> np.char.endswith(s, 'a', start=1, end=2)
    array([False,  True])
    """
    # A trailing end=None must be dropped: str.endswith does not accept
    # None for its optional arguments.
    args = [suffix, start] + _clean_args(end)
    return _vec_string(a, bool_, 'endswith', args)
def _expandtabs_dispatcher(a, tabsize=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_expandtabs_dispatcher)
def expandtabs(a, tabsize=8):
    """
    Return a copy of each string element where all tab characters are
    replaced by one or more spaces.

    Calls `str.expandtabs` element-wise. The number of spaces depends
    on the current column and `tabsize`; the column resets to zero
    after each newline. Other non-printing characters and escape
    sequences are not understood.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array.
    tabsize : int, optional
        Replace tabs with `tabsize` number of spaces. Defaults to 8.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type.

    See Also
    --------
    str.expandtabs
    """
    expanded = _vec_string(a, object_, 'expandtabs', (tabsize,))
    return _to_string_or_unicode_array(expanded)
@array_function_dispatch(_count_dispatcher)
def find(a, sub, start=0, end=None):
    """
    For each element, return the lowest index in the string where
    substring `sub` is found, such that `sub` is contained in the
    range [`start`, `end`].

    Calls `str.find` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    sub : str or unicode
    start, end : int, optional
        Interpreted as in slice notation.

    Returns
    -------
    out : ndarray or int
        Output array of ints. Returns -1 if `sub` is not found.

    See Also
    --------
    str.find
    """
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, integer, 'find', args)
@array_function_dispatch(_count_dispatcher)
def index(a, sub, start=0, end=None):
    """
    Like `find`, but raises `ValueError` when the substring is not found.

    Calls `str.index` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    sub : str or unicode
    start, end : int, optional

    Returns
    -------
    out : ndarray
        Output array of ints.

    Raises
    ------
    ValueError
        If `sub` is not found in an element (propagated from
        `str.index`); unlike `find`, -1 is never returned.

    See Also
    --------
    find, str.index
    """
    return _vec_string(
        a, integer, 'index', [sub, start] + _clean_args(end))
@array_function_dispatch(_unary_op_dispatcher)
def isalnum(a):
    """
    Returns true for each element if all characters in the string are
    alphanumeric and there is at least one character, false otherwise.

    Calls `str.isalnum` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.isalnum
    """
    return _vec_string(a, bool_, 'isalnum')
@array_function_dispatch(_unary_op_dispatcher)
def isalpha(a):
    """
    Return true for each element where all characters are alphabetic
    and the string is non-empty; false otherwise.

    Calls `str.isalpha` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.isalpha
    """
    return _vec_string(a, bool_, 'isalpha')
@array_function_dispatch(_unary_op_dispatcher)
def isdigit(a):
    """
    Return true for each element where all characters are digits and
    the string is non-empty; false otherwise.

    Calls `str.isdigit` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.isdigit
    """
    return _vec_string(a, bool_, 'isdigit')
@array_function_dispatch(_unary_op_dispatcher)
def islower(a):
    """
    Return true for each element where all cased characters are
    lowercase and at least one cased character exists; false otherwise.

    Calls `str.islower` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.islower
    """
    return _vec_string(a, bool_, 'islower')
@array_function_dispatch(_unary_op_dispatcher)
def isspace(a):
    """
    Return true for each element that is non-empty and contains only
    whitespace characters; false otherwise.

    Calls `str.isspace` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.isspace
    """
    return _vec_string(a, bool_, 'isspace')
@array_function_dispatch(_unary_op_dispatcher)
def istitle(a):
    """
    Return true for each element that is a non-empty titlecased string;
    false otherwise.

    Calls `str.istitle` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.istitle
    """
    return _vec_string(a, bool_, 'istitle')
@array_function_dispatch(_unary_op_dispatcher)
def isupper(a):
    """
    Return true for each element where all cased characters are
    uppercase and the string is non-empty; false otherwise.

    Calls `str.isupper` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    str.isupper
    """
    return _vec_string(a, bool_, 'isupper')
def _join_dispatcher(sep, seq):
    # Both the separators and the joined sequences are relevant for
    # dispatch.
    return (sep, seq)
@array_function_dispatch(_join_dispatcher)
def join(sep, seq):
    """
    Return a string which is the concatenation of the strings in the
    sequence `seq`, element-wise.

    Calls `str.join` element-wise.

    Parameters
    ----------
    sep : array_like of str or unicode
    seq : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types.

    See Also
    --------
    str.join
    """
    joined = _vec_string(sep, object_, 'join', (seq,))
    return _to_string_or_unicode_array(joined)
def _just_dispatcher(a, width, fillchar=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
    """
    Return an array with the elements of `a` left-justified in a
    string of length `width`.

    Calls `str.ljust` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.ljust
    """
    a_arr = numpy.asarray(a)
    width_arr = numpy.asarray(width)
    # The result field must hold the widest requested width.
    size = long(numpy.max(width_arr.flat))
    # For byte-string arrays the fill character must itself be bytes.
    if numpy.issubdtype(a_arr.dtype, numpy.string_):
        fillchar = asbytes(fillchar)
    return _vec_string(
        a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
@array_function_dispatch(_unary_op_dispatcher)
def lower(a):
    """
    Return an array with the elements converted to lowercase.

    Calls `str.lower` element-wise. For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type.

    See Also
    --------
    str.lower

    Examples
    --------
    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
    array(['A1B C', '1BCA', 'BCA1'],
        dtype='|S5')
    >>> np.char.lower(c)
    array(['a1b c', '1bca', 'bca1'],
        dtype='|S5')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'lower')
def _strip_dispatcher(a, chars=None):
    # Only the string array is relevant for dispatch.
    return (a,)
@array_function_dispatch(_strip_dispatcher)
def lstrip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading characters
    removed.

    Calls `str.lstrip` element-wise.

    Parameters
    ----------
    a : array-like, {str, unicode}
        Input array.
    chars : {str, unicode}, optional
        A string specifying the set of characters to remove: all
        combinations of its values are stripped, not the string as a
        prefix. If omitted or None, whitespace is removed.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type.

    See Also
    --------
    str.lstrip

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> np.char.lstrip(c, 'a')
    array(['AaAaA', '  aA  ', 'bBABba'],
        dtype='|S7')
    >>> np.char.lstrip(c, 'A')  # leaves c unchanged
    array(['aAaAaA', '  aA  ', 'abBABba'],
        dtype='|S7')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'lstrip', (chars,))
def _partition_dispatcher(a, sep):
    return (a,)


@array_function_dispatch(_partition_dispatcher)
def partition(a, sep):
    """
    Partition each element in `a` around `sep`.

    Calls `str.partition` element-wise: each element is split at the
    first occurrence of `sep` into the part before the separator, the
    separator itself, and the part after it.  When `sep` is absent, the
    three parts are the string itself followed by two empty strings.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array
    sep : {str, unicode}
        Separator to split each string element in `a`.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type.
        The output array will have an extra dimension with 3
        elements per input element.

    See also
    --------
    str.partition
    """
    parts = _vec_string(a, object_, 'partition', (sep,))
    return _to_string_or_unicode_array(parts)
def _replace_dispatcher(a, old, new, count=None):
    return (a,)


@array_function_dispatch(_replace_dispatcher)
def replace(a, old, new, count=None):
    """
    For each element in `a`, return a copy of the string with every
    occurrence of substring `old` replaced by `new`.

    Calls `str.replace` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode
    old, new : str or unicode
    count : int, optional
        If given, only the first `count` occurrences are replaced.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.replace
    """
    args = [old, new] + _clean_args(count)
    return _to_string_or_unicode_array(
        _vec_string(a, object_, 'replace', args))
@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
    """
    For each element in `a`, return the highest index in the string at
    which substring `sub` is found, such that `sub` lies within
    [`start`, `end`].

    Calls `str.rfind` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode
    sub : str or unicode
    start, end : int, optional
        Interpreted as in slice notation.

    Returns
    -------
    out : ndarray
        Output array of ints.  Return -1 on failure.

    See also
    --------
    str.rfind
    """
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, integer, 'rfind', args)
@array_function_dispatch(_count_dispatcher)
def rindex(a, sub, start=0, end=None):
    """
    Like `rfind`, but raises `ValueError` when the substring `sub` is
    not found.

    Calls `str.rindex` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode
    sub : str or unicode
    start, end : int, optional

    Returns
    -------
    out : ndarray
        Output array of ints.

    See also
    --------
    rfind, str.rindex
    """
    args = [sub, start] + _clean_args(end)
    return _vec_string(a, integer, 'rindex', args)
@array_function_dispatch(_just_dispatcher)
def rjust(a, width, fillchar=' '):
    """
    Return an array whose elements are those of `a` right-justified
    inside a string of length `width`.

    Calls `str.rjust` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.rjust
    """
    arr = numpy.asarray(a)
    widths = numpy.asarray(width)
    # The output itemsize must be able to hold the widest result.
    size = long(numpy.max(widths.flat))
    if numpy.issubdtype(arr.dtype, numpy.string_):
        # Byte-string arrays need a byte-string fill character.
        fillchar = asbytes(fillchar)
    return _vec_string(
        arr, (arr.dtype.type, size), 'rjust', (widths, fillchar))
@array_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
    """
    Partition (split) each element around the right-most separator.

    Calls `str.rpartition` element-wise: each element is split at the
    last occurrence of `sep` into the part before the separator, the
    separator itself, and the part after it.  When `sep` is absent, the
    three parts are the string itself followed by two empty strings.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array
    sep : str or unicode
        Right-most separator to split each element in array.

    Returns
    -------
    out : ndarray
        Output array of string or unicode, depending on input
        type.  The output array will have an extra dimension with
        3 elements per input element.

    See also
    --------
    str.rpartition
    """
    parts = _vec_string(a, object_, 'rpartition', (sep,))
    return _to_string_or_unicode_array(parts)
def _split_dispatcher(a, sep=None, maxsplit=None):
    return (a,)


@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
    """
    For each element in `a`, return a list of the words in the string,
    using `sep` as the delimiter string.

    Calls `str.rsplit` element-wise.  Except for splitting from the
    right, `rsplit` behaves like `split`.

    Parameters
    ----------
    a : array_like of str or unicode
    sep : str or unicode, optional
        If `sep` is not specified or `None`, any whitespace string
        is a separator.
    maxsplit : int, optional
        If `maxsplit` is given, at most `maxsplit` splits are done,
        the rightmost ones.

    Returns
    -------
    out : ndarray
        Array of list objects

    See also
    --------
    str.rsplit, split
    """
    # Each element yields a list of varying length, so the result must
    # stay an object array.
    return _vec_string(
        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
# NOTE: ``rstrip`` (and ``strip`` below) reuse the module-level
# ``_strip_dispatcher`` defined above for ``lstrip``; the identical
# duplicate definition that previously sat here was redundant dead code
# and has been removed.
@array_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
    """
    For each element in `a`, return a copy with the trailing
    characters removed.

    Calls `str.rstrip` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode
    chars : str or unicode, optional
        The `chars` argument is a string specifying the set of
        characters to be removed. If omitted or None, the `chars`
        argument defaults to removing whitespace. The `chars` argument
        is not a suffix; rather, all combinations of its values are
        stripped.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.rstrip

    Examples
    --------
    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
    array(['aAaAaA', 'abBABba'],
        dtype='|S7')
    >>> np.char.rstrip(c, 'a')
    array(['aAaAaA', 'abBABb'],
        dtype='|S7')
    >>> np.char.rstrip(c, 'A')
    array(['aAaAa', 'abBABba'],
        dtype='|S7')
    """
    a_arr = numpy.asarray(a)
    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
@array_function_dispatch(_split_dispatcher)
def split(a, sep=None, maxsplit=None):
    """
    For each element in `a`, return a list of the words in the string,
    using `sep` as the delimiter string.

    Calls `str.split` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    sep : str or unicode, optional
        If `sep` is not specified or `None`, any whitespace string is a
        separator.
    maxsplit : int, optional
        If `maxsplit` is given, at most `maxsplit` splits are done.

    Returns
    -------
    out : ndarray
        Array of list objects

    See also
    --------
    str.split, rsplit
    """
    # Each element yields a list of varying length, so the result must
    # stay an object array.
    return _vec_string(
        a, object_, 'split', [sep] + _clean_args(maxsplit))
def _splitlines_dispatcher(a, keepends=None):
    return (a,)


@array_function_dispatch(_splitlines_dispatcher)
def splitlines(a, keepends=None):
    """
    For each element in `a`, return a list of the lines in the element,
    breaking at line boundaries.

    Calls `str.splitlines` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    keepends : bool, optional
        Line breaks are not included in the resulting list unless
        keepends is given and true.

    Returns
    -------
    out : ndarray
        Array of list objects

    See also
    --------
    str.splitlines
    """
    return _vec_string(
        a, object_, 'splitlines', _clean_args(keepends))
def _startswith_dispatcher(a, prefix, start=None, end=None):
    return (a,)


@array_function_dispatch(_startswith_dispatcher)
def startswith(a, prefix, start=0, end=None):
    """
    Return a boolean array which is `True` where the string element
    in `a` starts with `prefix`, otherwise `False`.

    Calls `str.startswith` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode
    prefix : str
    start, end : int, optional
        With optional `start`, test beginning at that position.  With
        optional `end`, stop comparing at that position.

    Returns
    -------
    out : ndarray
        Array of booleans

    See also
    --------
    str.startswith
    """
    args = [prefix, start] + _clean_args(end)
    return _vec_string(a, bool_, 'startswith', args)
@array_function_dispatch(_strip_dispatcher)
def strip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading and
    trailing characters removed.

    Calls `str.strip` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode
    chars : str or unicode, optional
        A string giving the set of characters to remove.  If omitted
        or None, whitespace is removed.  `chars` is not treated as a
        prefix or suffix; all combinations of its values are stripped.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.strip

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> np.char.strip(c)
    array(['aAaAaA', 'aA', 'abBABba'],
        dtype='|S7')
    >>> np.char.strip(c, 'a')  # 'a' unstripped from c[1] because whitespace leads
    array(['AaAaA', '  aA  ', 'bBABb'],
        dtype='|S7')
    >>> np.char.strip(c, 'A')  # 'A' unstripped from c[1] because (unprinted) ws trails
    array(['aAaAa', '  aA  ', 'abBABba'],
        dtype='|S7')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'strip', _clean_args(chars))
@array_function_dispatch(_unary_op_dispatcher)
def swapcase(a):
    """
    Return element-wise a copy of the string with uppercase characters
    converted to lowercase and vice versa.

    Calls `str.swapcase` element-wise.  For 8-bit strings, this method
    is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See also
    --------
    str.swapcase

    Examples
    --------
    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
        dtype='|S5')
    >>> np.char.swapcase(c)
    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
        dtype='|S5')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'swapcase')
@array_function_dispatch(_unary_op_dispatcher)
def title(a):
    """
    Return element-wise title cased version of string or unicode.

    Title case words start with uppercase characters, all remaining
    cased characters are lowercase.  Calls `str.title` element-wise.
    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.title

    Examples
    --------
    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
        dtype='|S5')
    >>> np.char.title(c)
    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
        dtype='|S5')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'title')
def _translate_dispatcher(a, table, deletechars=None):
    return (a,)


@array_function_dispatch(_translate_dispatcher)
def translate(a, table, deletechars=None):
    """
    For each element in `a`, return a copy of the string in which all
    characters occurring in the optional argument `deletechars` are
    removed, and the remaining characters have been mapped through the
    given translation table.

    Calls `str.translate` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode
    table : str of length 256
    deletechars : str

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See also
    --------
    str.translate
    """
    arr = numpy.asarray(a)
    if issubclass(arr.dtype.type, unicode_):
        # Unicode str.translate takes only the table argument.
        return _vec_string(arr, arr.dtype, 'translate', (table,))
    return _vec_string(
        arr, arr.dtype, 'translate', [table] + _clean_args(deletechars))
@array_function_dispatch(_unary_op_dispatcher)
def upper(a):
    """
    Return an array with each element converted to uppercase.

    Calls `str.upper` element-wise.  For 8-bit strings, this method is
    locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See also
    --------
    str.upper

    Examples
    --------
    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
    array(['a1b c', '1bca', 'bca1'],
        dtype='|S5')
    >>> np.char.upper(c)
    array(['A1B C', '1BCA', 'BCA1'],
        dtype='|S5')
    """
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'upper')
def _zfill_dispatcher(a, width):
    return (a,)


@array_function_dispatch(_zfill_dispatcher)
def zfill(a, width):
    """
    Return the numeric string left-filled with zeros.

    Calls `str.zfill` element-wise.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.
    width : int
        Width of string to left-fill elements in `a`.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See also
    --------
    str.zfill
    """
    arr = numpy.asarray(a)
    widths = numpy.asarray(width)
    # The output itemsize must accommodate the largest requested width.
    size = long(numpy.max(widths.flat))
    return _vec_string(
        arr, (arr.dtype.type, size), 'zfill', (widths,))
@array_function_dispatch(_unary_op_dispatcher)
def isnumeric(a):
    """
    For each element, return True if there are only numeric characters
    in the element.

    Calls `unicode.isnumeric` element-wise.  Numeric characters include
    digit characters, and all characters that have the Unicode numeric
    value property, e.g. ``U+2155, VULGAR FRACTION ONE FIFTH``.

    Parameters
    ----------
    a : array_like, unicode
        Input array.

    Returns
    -------
    out : ndarray, bool
        Array of booleans of same shape as `a`.

    See also
    --------
    unicode.isnumeric
    """
    # Byte-string arrays have no notion of numeric characters.
    if _use_unicode(a) != unicode_:
        raise TypeError("isnumeric is only available for Unicode strings and arrays")
    return _vec_string(a, bool_, 'isnumeric')
@array_function_dispatch(_unary_op_dispatcher)
def isdecimal(a):
    """
    For each element, return True if there are only decimal
    characters in the element.

    Calls `unicode.isdecimal` element-wise.

    Decimal characters include digit characters, and all characters
    that can be used to form decimal-radix numbers,
    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.

    Parameters
    ----------
    a : array_like, unicode
        Input array.

    Returns
    -------
    out : ndarray, bool
        Array of booleans identical in shape to `a`.

    See also
    --------
    unicode.isdecimal
    """
    # Only unicode arrays support isdecimal; byte strings have no
    # notion of decimal characters.  BUG FIX: the error message used to
    # say "isnumeric" (copy-paste from the function above).
    if _use_unicode(a) != unicode_:
        raise TypeError("isdecimal is only available for Unicode strings and arrays")
    return _vec_string(a, bool_, 'isdecimal')
@set_module('numpy') | |
class chararray(ndarray): | |
""" | |
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0, | |
strides=None, order=None) | |
Provides a convenient view on arrays of string and unicode values. | |
.. note:: | |
The `chararray` class exists for backwards compatibility with | |
Numarray, it is not recommended for new development. Starting from numpy | |
1.4, if one needs arrays of strings, it is recommended to use arrays of | |
`dtype` `object_`, `string_` or `unicode_`, and use the free functions | |
in the `numpy.char` module for fast vectorized string operations. | |
Versus a regular NumPy array of type `str` or `unicode`, this | |
class adds the following functionality: | |
1) values automatically have whitespace removed from the end | |
when indexed | |
2) comparison operators automatically remove whitespace from the | |
end when comparing values | |
3) vectorized string operations are provided as methods | |
(e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``) | |
chararrays should be created using `numpy.char.array` or | |
`numpy.char.asarray`, rather than this constructor directly. | |
This constructor creates the array, using `buffer` (with `offset` | |
and `strides`) if it is not ``None``. If `buffer` is ``None``, then | |
constructs a new array with `strides` in "C order", unless both | |
``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides` | |
is in "Fortran order". | |
Methods | |
------- | |
astype | |
argsort | |
copy | |
count | |
decode | |
dump | |
dumps | |
encode | |
endswith | |
expandtabs | |
fill | |
find | |
flatten | |
getfield | |
index | |
isalnum | |
isalpha | |
isdecimal | |
isdigit | |
islower | |
isnumeric | |
isspace | |
istitle | |
isupper | |
item | |
join | |
ljust | |
lower | |
lstrip | |
nonzero | |
put | |
ravel | |
repeat | |
replace | |
reshape | |
resize | |
rfind | |
rindex | |
rjust | |
rsplit | |
rstrip | |
searchsorted | |
setfield | |
setflags | |
sort | |
split | |
splitlines | |
squeeze | |
startswith | |
strip | |
swapaxes | |
swapcase | |
take | |
title | |
tofile | |
tolist | |
tostring | |
translate | |
transpose | |
upper | |
view | |
zfill | |
Parameters | |
---------- | |
shape : tuple | |
Shape of the array. | |
itemsize : int, optional | |
Length of each array element, in number of characters. Default is 1. | |
unicode : bool, optional | |
Are the array elements of type unicode (True) or string (False). | |
Default is False. | |
buffer : int, optional | |
Memory address of the start of the array data. Default is None, | |
in which case a new array is created. | |
offset : int, optional | |
Fixed stride displacement from the beginning of an axis? | |
Default is 0. Needs to be >=0. | |
strides : array_like of ints, optional | |
Strides for the array (see `ndarray.strides` for full description). | |
Default is None. | |
order : {'C', 'F'}, optional | |
The order in which the array data is stored in memory: 'C' -> | |
"row major" order (the default), 'F' -> "column major" | |
(Fortran) order. | |
Examples | |
-------- | |
>>> charar = np.chararray((3, 3)) | |
>>> charar[:] = 'a' | |
>>> charar | |
chararray([['a', 'a', 'a'], | |
['a', 'a', 'a'], | |
['a', 'a', 'a']], | |
dtype='|S1') | |
>>> charar = np.chararray(charar.shape, itemsize=5) | |
>>> charar[:] = 'abc' | |
>>> charar | |
chararray([['abc', 'abc', 'abc'], | |
['abc', 'abc', 'abc'], | |
['abc', 'abc', 'abc']], | |
dtype='|S5') | |
""" | |
def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
            offset=0, strides=None, order='C'):
    global _globalvar

    # Select the element dtype from the ``unicode`` flag.
    dtype = unicode_ if unicode else string_

    # Force itemsize to be a Python long: with a NumPy integer,
    # itemsize.itemsize would end up being used as the string size.
    itemsize = long(itemsize)

    if sys.version_info[0] >= 3 and isinstance(buffer, _unicode):
        # On Py3, unicode objects do not have the buffer interface, so
        # keep the text aside and fill the new array with it afterwards.
        filler = buffer
        buffer = None
    else:
        filler = None

    # _globalvar lets __array_finalize__ skip its dtype check while we
    # are constructing the instance ourselves.
    _globalvar = 1
    if buffer is None:
        self = ndarray.__new__(subtype, shape, (dtype, itemsize),
                               order=order)
    else:
        self = ndarray.__new__(subtype, shape, (dtype, itemsize),
                               buffer=buffer,
                               offset=offset, strides=strides,
                               order=order)
    if filler is not None:
        self[...] = filler
    _globalvar = 0
    return self
def __array_finalize__(self, obj):
    # 'b' is accepted as a special case because it is used when
    # reconstructing; otherwise only string-kind dtypes are allowed
    # (unless __new__ is mid-construction, signalled by _globalvar).
    if not _globalvar and self.dtype.char not in 'SUbc':
        raise ValueError("Can only create a chararray from string data.")
def __getitem__(self, obj):
    val = ndarray.__getitem__(self, obj)
    if isinstance(val, character):
        # Scalar results are handed back with trailing whitespace
        # stripped (a documented chararray feature).
        stripped = val.rstrip()
        val = '' if _len(stripped) == 0 else stripped
    return val
# IMPLEMENTATION NOTE: Most methods below simply delegate to the free
# functions in this module.  Those returning arrays of strings wrap the
# result so a chararray comes back instead of a plain ndarray.
def __eq__(self, other):
    """
    Element-wise comparison ``self == other``.

    See also
    --------
    equal
    """
    return equal(self, other)
def __ne__(self, other):
    """
    Element-wise comparison ``self != other``.

    See also
    --------
    not_equal
    """
    return not_equal(self, other)
def __ge__(self, other):
    """
    Element-wise comparison ``self >= other``.

    See also
    --------
    greater_equal
    """
    return greater_equal(self, other)
def __le__(self, other):
    """
    Element-wise comparison ``self <= other``.

    See also
    --------
    less_equal
    """
    return less_equal(self, other)
def __gt__(self, other):
    """
    Element-wise comparison ``self > other``.

    See also
    --------
    greater
    """
    return greater(self, other)
def __lt__(self, other):
    """
    Element-wise comparison ``self < other``.

    See also
    --------
    less
    """
    return less(self, other)
def __add__(self, other):
    """
    Element-wise string concatenation ``self + other`` for a pair of
    array_likes of str or unicode.

    See also
    --------
    add
    """
    concatenated = add(self, other)
    return asarray(concatenated)
def __radd__(self, other):
    """
    Element-wise string concatenation ``other + self`` for a pair of
    array_likes of `string_` or `unicode_`.

    See also
    --------
    add
    """
    concatenated = add(numpy.asarray(other), self)
    return asarray(concatenated)
def __mul__(self, i):
    """
    Element-wise string repetition ``self * i``.

    See also
    --------
    multiply
    """
    repeated = multiply(self, i)
    return asarray(repeated)
def __rmul__(self, i):
    """
    Element-wise string repetition ``i * self`` (repetition is
    commutative, so this delegates to the same `multiply`).

    See also
    --------
    multiply
    """
    repeated = multiply(self, i)
    return asarray(repeated)
def __mod__(self, i):
    """
    Element-wise ``self % i``, i.e. pre-Python 2.6 string formatting
    (interpolation), for a pair of array_likes of `string_` or
    `unicode_`.

    See also
    --------
    mod
    """
    formatted = mod(self, i)
    return asarray(formatted)
def __rmod__(self, other):
    # Reflected % is not supported for chararrays.
    return NotImplemented
def argsort(self, axis=-1, kind='quicksort', order=None):
    """
    Return the indices that sort the array lexicographically.

    For full documentation see `numpy.argsort`, for which this method
    is in fact merely a "thin wrapper."

    Examples
    --------
    >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
    >>> c = c.view(np.chararray); c
    chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
          dtype='|S5')
    >>> c[c.argsort()]
    chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
          dtype='|S5')
    """
    return self.__array__().argsort(axis, kind, order)
# The handwritten docstring above is replaced with ndarray's so the two
# stay in sync.
argsort.__doc__ = ndarray.argsort.__doc__
def capitalize(self):
    """
    Return a copy of `self` in which only the first character of each
    element is capitalized.

    See also
    --------
    char.capitalize
    """
    out = capitalize(self)
    return asarray(out)
def center(self, width, fillchar=' '):
    """
    Return a copy of `self` whose elements are centered in a string of
    length `width`.

    See also
    --------
    center
    """
    out = center(self, width, fillchar)
    return asarray(out)
def count(self, sub, start=0, end=None):
    """
    Return an array with the number of non-overlapping occurrences of
    substring `sub` within the range [`start`, `end`].

    See also
    --------
    char.count
    """
    return count(self, sub, start, end)
def decode(self, encoding=None, errors=None):
    """
    Call `str.decode` element-wise.

    See also
    --------
    char.decode
    """
    return decode(self, encoding, errors)
def encode(self, encoding=None, errors=None):
    """
    Call `str.encode` element-wise.

    See also
    --------
    char.encode
    """
    return encode(self, encoding, errors)
def endswith(self, suffix, start=0, end=None):
    """
    Return a boolean array which is `True` where the string element in
    `self` ends with `suffix`, and `False` elsewhere.

    See also
    --------
    char.endswith
    """
    return endswith(self, suffix, start, end)
def expandtabs(self, tabsize=8):
    """
    Return a copy of each string element in which every tab character
    is replaced by one or more spaces.

    See also
    --------
    char.expandtabs
    """
    out = expandtabs(self, tabsize)
    return asarray(out)
def find(self, sub, start=0, end=None):
    """
    For each element, return the lowest index in the string at which
    substring `sub` is found.

    See also
    --------
    char.find
    """
    return find(self, sub, start, end)
def index(self, sub, start=0, end=None):
    """
    Like `find`, but raise `ValueError` when the substring is not
    found.

    See also
    --------
    char.index
    """
    return index(self, sub, start, end)
def isalnum(self):
    """
    Return true for each element whose characters are all alphanumeric
    and which has at least one character; false otherwise.

    See also
    --------
    char.isalnum
    """
    return isalnum(self)
def isalpha(self):
    """
    Return true for each element whose characters are all alphabetic
    and which has at least one character; false otherwise.

    See also
    --------
    char.isalpha
    """
    return isalpha(self)
def isdigit(self):
    """
    Return true for each element whose characters are all digits and
    which has at least one character; false otherwise.

    See also
    --------
    char.isdigit
    """
    return isdigit(self)
def islower(self):
    """
    Return true for each element whose cased characters are all
    lowercase and which has at least one cased character; false
    otherwise.

    See also
    --------
    char.islower
    """
    return islower(self)
def isspace(self):
    """
    Return true for each element containing only whitespace characters
    and at least one character; false otherwise.

    See also
    --------
    char.isspace
    """
    return isspace(self)
def istitle(self):
    """
    Return true for each element that is a titlecased string with at
    least one character; false otherwise.

    See also
    --------
    char.istitle
    """
    return istitle(self)
def isupper(self):
    """
    Return true for each element whose cased characters are all
    uppercase and which has at least one character; false otherwise.

    See also
    --------
    char.isupper
    """
    return isupper(self)
def join(self, seq):
    """
    Return a string which is the concatenation of the strings in the
    sequence `seq`.

    See also
    --------
    char.join
    """
    return join(self, seq)
def ljust(self, width, fillchar=' '):
    """
    Return an array whose elements are those of `self` left-justified
    in a string of length `width`.

    See also
    --------
    char.ljust
    """
    out = ljust(self, width, fillchar)
    return asarray(out)
def lower(self):
    """
    Return an array with the elements of `self` converted to
    lowercase.

    See also
    --------
    char.lower
    """
    out = lower(self)
    return asarray(out)
def lstrip(self, chars=None):
    """
    For each element in `self`, return a copy with the leading
    characters removed.

    See also
    --------
    char.lstrip
    """
    out = lstrip(self, chars)
    return asarray(out)
def partition(self, sep):
    """
    Partition each element in `self` around `sep`.

    See also
    --------
    partition
    """
    out = partition(self, sep)
    return asarray(out)
def replace(self, old, new, count=None):
    """
    For each element in `self`, return a copy of the string with every
    occurrence of substring `old` replaced by `new`.

    See also
    --------
    char.replace
    """
    out = replace(self, old, new, count)
    return asarray(out)
def rfind(self, sub, start=0, end=None):
    """
    For each element in `self`, return the highest index in the string
    at which substring `sub` is found, such that `sub` lies within
    [`start`, `end`].

    See also
    --------
    char.rfind
    """
    return rfind(self, sub, start, end)
def rindex(self, sub, start=0, end=None):
    """
    Like `rfind`, but raise `ValueError` when the substring `sub` is
    not found.

    See also
    --------
    char.rindex
    """
    return rindex(self, sub, start, end)
def rjust(self, width, fillchar=' '):
    """
    Return an array whose elements are those of `self` right-justified
    in a string of length `width`.

    See also
    --------
    char.rjust
    """
    out = rjust(self, width, fillchar)
    return asarray(out)
def rpartition(self, sep):
    """
    Split each element of `self` around the last occurrence of `sep`.

    See also
    --------
    rpartition
    """
    parts = rpartition(self, sep)
    return asarray(parts)
def rsplit(self, sep=None, maxsplit=None):
    """
    For each element of `self`, return a list of its words, split
    from the right using `sep` as the delimiter.

    See also
    --------
    char.rsplit
    """
    words = rsplit(self, sep, maxsplit)
    return words
def rstrip(self, chars=None):
    """
    Return copies of the elements of `self` with trailing characters
    removed.

    See also
    --------
    char.rstrip
    """
    stripped = rstrip(self, chars)
    return asarray(stripped)
def split(self, sep=None, maxsplit=None):
    """
    For each element of `self`, return a list of its words, using
    `sep` as the delimiter string.

    See also
    --------
    char.split
    """
    words = split(self, sep, maxsplit)
    return words
def splitlines(self, keepends=None):
    """
    For each element of `self`, return a list of its lines, broken
    at line boundaries.

    See also
    --------
    char.splitlines
    """
    lines = splitlines(self, keepends)
    return lines
def startswith(self, prefix, start=0, end=None):
    """
    Return a boolean array that is `True` where the corresponding
    element of `self` starts with `prefix`, and `False` otherwise.

    See also
    --------
    char.startswith
    """
    mask = startswith(self, prefix, start, end)
    return mask
def strip(self, chars=None):
    """
    Return copies of the elements of `self` with both leading and
    trailing characters removed.

    See also
    --------
    char.strip
    """
    stripped = strip(self, chars)
    return asarray(stripped)
def swapcase(self):
    """
    Return copies of the elements of `self` with uppercase characters
    converted to lowercase and vice versa.

    See also
    --------
    char.swapcase
    """
    swapped = swapcase(self)
    return asarray(swapped)
def title(self):
    """
    Return titlecased copies of the elements of `self`: words start
    with an uppercase character, all remaining cased characters are
    lowercase.

    See also
    --------
    char.title
    """
    titled = title(self)
    return asarray(titled)
def translate(self, table, deletechars=None):
    """
    For each element of `self`, remove all characters occurring in
    the optional `deletechars` argument and map the remaining
    characters through the given translation `table`.

    See also
    --------
    char.translate
    """
    translated = translate(self, table, deletechars)
    return asarray(translated)
def upper(self):
    """
    Return an array whose elements are the elements of `self`
    converted to uppercase.

    See also
    --------
    char.upper
    """
    uppered = upper(self)
    return asarray(uppered)
def zfill(self, width):
    """
    Return the numeric string elements left-filled with zeros to a
    string of length `width`.

    See also
    --------
    char.zfill
    """
    filled = zfill(self, width)
    return asarray(filled)
def isnumeric(self):
    """
    For each element of `self`, return True if the element contains
    only numeric characters.

    See also
    --------
    char.isnumeric
    """
    mask = isnumeric(self)
    return mask
def isdecimal(self):
    """
    For each element of `self`, return True if the element contains
    only decimal characters.

    See also
    --------
    char.isdecimal
    """
    mask = isdecimal(self)
    return mask
def array(obj, itemsize=None, copy=True, unicode=None, order=None):
    """
    Create a `chararray`.

    .. note::
       This class is provided for numarray backward-compatibility.
       New code (not concerned with numarray compatibility) should use
       arrays of type `string_` or `unicode_` and use the free functions
       in :mod:`numpy.char <numpy.core.defchararray>` for fast
       vectorized string operations instead.

    Versus a regular NumPy array of type `str` or `unicode`, this
    class adds the following functionality:

    1) values automatically have whitespace removed from the end
       when indexed

    2) comparison operators automatically remove whitespace from the
       end when comparing values

    3) vectorized string operations are provided as methods
       (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)

    Parameters
    ----------
    obj : array of str or unicode-like

    itemsize : int, optional
        `itemsize` is the number of characters per scalar in the
        resulting array.  If `itemsize` is None, and `obj` is an
        object array or a Python list, the `itemsize` will be
        automatically determined.  If `itemsize` is provided and `obj`
        is of type str or unicode, then the `obj` string will be
        chunked into `itemsize` pieces.

    copy : bool, optional
        If true (default), then the object is copied.  Otherwise, a copy
        will only be made if __array__ returns a copy, if obj is a
        nested sequence, or if a copy is needed to satisfy any of the other
        requirements (`itemsize`, unicode, `order`, etc.).

    unicode : bool, optional
        When true, the resulting `chararray` can contain Unicode
        characters, when false only 8-bit characters.  If unicode is
        `None` and `obj` is one of the following:

        - a `chararray`,
        - an ndarray of type `str` or `unicode`
        - a Python str or unicode object,

        then the unicode setting of the output array will be
        automatically determined.

    order : {'C', 'F', 'A'}, optional
        Specify the order of the array.  If order is 'C' (default), then the
        array will be in C-contiguous order (last-index varies the
        fastest).  If order is 'F', then the returned array
        will be in Fortran-contiguous order (first-index varies the
        fastest).  If order is 'A', then the returned array may
        be in any order (either C-, Fortran-contiguous, or even
        discontiguous).
    """
    # Case 1: a single Python bytes/str scalar.  Its buffer becomes the
    # array data, chunked into itemsize-character scalars.
    if isinstance(obj, (_bytes, _unicode)):
        if unicode is None:
            # Infer unicode-ness from the concrete Python type of obj.
            if isinstance(obj, _unicode):
                unicode = True
            else:
                unicode = False

        if itemsize is None:
            itemsize = _len(obj)
        # Number of itemsize-sized scalars the string splits into.
        shape = _len(obj) // itemsize

        if unicode:
            if sys.maxunicode == 0xffff:
                # On a narrow Python build, the buffer for Unicode
                # strings is UCS2, which doesn't match the buffer for
                # NumPy Unicode types, which is ALWAYS UCS4.
                # Therefore, we need to convert the buffer.  On Python
                # 2.6 and later, we can use the utf_32 codec.  Earlier
                # versions don't have that codec, so we convert to a
                # numerical array that matches the input buffer, and
                # then use NumPy to convert it to UCS4.  All of this
                # should happen in native endianness.
                obj = obj.encode('utf_32')
            else:
                obj = _unicode(obj)
        else:
            # Let the default Unicode -> string encoding (if any) take
            # precedence.
            obj = _bytes(obj)

        return chararray(shape, itemsize=itemsize, unicode=unicode,
                         buffer=obj, order=order)

    # Case 2: Python sequences become ndarrays, then fall into the
    # ndarray cases below (or the final default case).
    if isinstance(obj, (list, tuple)):
        obj = numpy.asarray(obj)

    # Case 3: an ndarray that already holds string/unicode data.
    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
        # If we just have a vanilla chararray, create a chararray
        # view around it.
        if not isinstance(obj, chararray):
            obj = obj.view(chararray)

        if itemsize is None:
            itemsize = obj.itemsize
            # itemsize is in 8-bit chars, so for Unicode, we need
            # to divide by the size of a single Unicode character,
            # which for NumPy is always 4
            if issubclass(obj.dtype.type, unicode_):
                itemsize //= 4

        if unicode is None:
            # Preserve the unicode-ness of the input dtype.
            if issubclass(obj.dtype.type, unicode_):
                unicode = True
            else:
                unicode = False

        if unicode:
            dtype = unicode_
        else:
            dtype = string_

        if order is not None:
            obj = numpy.asarray(obj, order=order)
        # Copy/convert when requested, or when itemsize or str/unicode
        # kind must change.
        # NOTE(review): the two isinstance checks below test the array
        # object itself rather than obj.dtype.type; for ndarray inputs
        # they appear to always be False -- confirm this is intended.
        if (copy or
                (itemsize != obj.itemsize) or
                (not unicode and isinstance(obj, unicode_)) or
                (unicode and isinstance(obj, string_))):
            obj = obj.astype((dtype, long(itemsize)))
        return obj

    # Case 4: an object array; with no explicit itemsize, round-trip
    # through a list so the constructor can infer one.
    if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
        if itemsize is None:
            # Since no itemsize was specified, convert the input array to
            # a list so the ndarray constructor will automatically
            # determine the itemsize for us.
            obj = obj.tolist()
            # Fall through to the default case

    # Default case: build a fresh string/unicode ndarray and view it as
    # a chararray.
    if unicode:
        dtype = unicode_
    else:
        dtype = string_

    if itemsize is None:
        val = narray(obj, dtype=dtype, order=order, subok=True)
    else:
        val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
    return val.view(chararray)
def asarray(obj, itemsize=None, unicode=None, order=None):
    """
    Convert the input to a `chararray`, copying the data only if
    necessary.

    Versus a regular NumPy array of type `str` or `unicode`, this
    class adds the following functionality:

    1) values automatically have whitespace removed from the end
       when indexed

    2) comparison operators automatically remove whitespace from the
       end when comparing values

    3) vectorized string operations are provided as methods
       (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``, ``%``)

    Parameters
    ----------
    obj : array of str or unicode-like

    itemsize : int, optional
        `itemsize` is the number of characters per scalar in the
        resulting array.  If `itemsize` is None, and `obj` is an
        object array or a Python list, the `itemsize` will be
        automatically determined.  If `itemsize` is provided and `obj`
        is of type str or unicode, then the `obj` string will be
        chunked into `itemsize` pieces.

    unicode : bool, optional
        When true, the resulting `chararray` can contain Unicode
        characters, when false only 8-bit characters.  If unicode is
        `None` and `obj` is one of the following:

        - a `chararray`,
        - an ndarray of type `str` or `unicode`
        - a Python str or unicode object,

        then the unicode setting of the output array will be
        automatically determined.

    order : {'C', 'F'}, optional
        Specify the order of the array.  If order is 'C' (default), then the
        array will be in C-contiguous order (last-index varies the
        fastest).  If order is 'F', then the returned array
        will be in Fortran-contiguous order (first-index varies the
        fastest).
    """
    # Delegate to `array` with copy=False so data is only duplicated
    # when a conversion forces it.
    return array(obj, itemsize=itemsize, copy=False,
                 unicode=unicode, order=order)
""" | |
Implementation of optimized einsum. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import itertools | |
from numpy.compat import basestring | |
from numpy.core.multiarray import c_einsum | |
from numpy.core.numeric import asanyarray, tensordot | |
from numpy.core.overrides import array_function_dispatch | |
# Public API of this module.
__all__ = ['einsum', 'einsum_path']

# The 52 ASCII letters are the valid einsum subscript labels; the set
# form is kept alongside for fast membership tests during parsing.
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
    """
    Computes the number of FLOPS in the contraction.

    Parameters
    ----------
    idx_contraction : iterable
        The indices involved in the contraction
    inner : bool
        Does this contraction require an inner product?
    num_terms : int
        The number of terms in a contraction
    size_dictionary : dict
        The size of each of the indices in idx_contraction

    Returns
    -------
    flop_count : int
        The total number of FLOPS required for the contraction.

    Examples
    --------
    >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
    90

    >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
    270
    """
    # Total elements touched by the contraction.
    total_size = _compute_size_by_dict(idx_contraction, size_dictionary)

    # One multiply per extra term; an inner product adds one more op
    # (the accumulating addition).
    factor = max(1, num_terms - 1)
    if inner:
        factor += 1

    return total_size * factor
def _compute_size_by_dict(indices, idx_dict): | |
""" | |
Computes the product of the elements in indices based on the dictionary | |
idx_dict. | |
Parameters | |
---------- | |
indices : iterable | |
Indices to base the product on. | |
idx_dict : dictionary | |
Dictionary of index sizes | |
Returns | |
------- | |
ret : int | |
The resulting product. | |
Examples | |
-------- | |
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5}) | |
90 | |
""" | |
ret = 1 | |
for i in indices: | |
ret *= idx_dict[i] | |
return ret | |
def _find_contraction(positions, input_sets, output_set): | |
""" | |
Finds the contraction for a given set of input and output sets. | |
Parameters | |
---------- | |
positions : iterable | |
Integer positions of terms used in the contraction. | |
input_sets : list | |
List of sets that represent the lhs side of the einsum subscript | |
output_set : set | |
Set that represents the rhs side of the overall einsum subscript | |
Returns | |
------- | |
new_result : set | |
The indices of the resulting contraction | |
remaining : list | |
List of sets that have not been contracted, the new set is appended to | |
the end of this list | |
idx_removed : set | |
Indices removed from the entire contraction | |
idx_contraction : set | |
The indices used in the current contraction | |
Examples | |
-------- | |
# A simple dot product test case | |
>>> pos = (0, 1) | |
>>> isets = [set('ab'), set('bc')] | |
>>> oset = set('ac') | |
>>> _find_contraction(pos, isets, oset) | |
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'}) | |
# A more complex case with additional terms in the contraction | |
>>> pos = (0, 2) | |
>>> isets = [set('abd'), set('ac'), set('bdc')] | |
>>> oset = set('ac') | |
>>> _find_contraction(pos, isets, oset) | |
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'}) | |
""" | |
idx_contract = set() | |
idx_remain = output_set.copy() | |
remaining = [] | |
for ind, value in enumerate(input_sets): | |
if ind in positions: | |
idx_contract |= value | |
else: | |
remaining.append(value) | |
idx_remain |= value | |
new_result = idx_remain & idx_contract | |
idx_removed = (idx_contract - new_result) | |
remaining.append(new_result) | |
return (new_result, remaining, idx_removed, idx_contract) | |
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Computes all possible pair contractions, sieves the results based
    on ``memory_limit`` and returns the lowest cost path. This algorithm
    scales factorial with respect to the elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The optimal contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _optimal_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """
    # Each entry is (cumulative_cost, pair_positions_so_far, remaining_terms).
    full_results = [(0, [], input_sets)]
    for iteration in range(len(input_sets) - 1):
        iter_results = []

        # Compute all unique pairs
        for curr in full_results:
            cost, positions, remaining = curr
            # Each contraction shrinks the term list by one, hence the
            # shrinking range.
            for con in itertools.combinations(range(len(input_sets) - iteration), 2):

                # Find the contraction
                cont = _find_contraction(con, remaining, output_set)
                new_result, new_input_sets, idx_removed, idx_contract = cont

                # Sieve the results based on memory_limit
                new_size = _compute_size_by_dict(new_result, idx_dict)
                if new_size > memory_limit:
                    continue

                # Build (total_cost, positions, indices_remaining)
                total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
                new_pos = positions + [con]
                iter_results.append((total_cost, new_pos, new_input_sets))

        # Update combinatorial list, if we did not find anything return best
        # path + remaining contractions
        if iter_results:
            full_results = iter_results
        else:
            # Every candidate exceeded memory_limit: finish with one big
            # contraction of everything that remains.
            path = min(full_results, key=lambda x: x[0])[1]
            path += [tuple(range(len(input_sets) - iteration))]
            return path

    # If we have not found anything return single einsum contraction
    if len(full_results) == 0:
        return [tuple(range(len(input_sets)))]

    path = min(full_results, key=lambda x: x[0])[1]
    return path
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
    """Compute the cost (removed size + flops) and resultant indices for
    performing the contraction specified by ``positions``.

    Parameters
    ----------
    positions : tuple of int
        The locations of the proposed tensors to contract.
    input_sets : list of sets
        The indices found on each tensors.
    output_set : set
        The output indices of the expression.
    idx_dict : dict
        Mapping of each index to its size.
    memory_limit : int
        The total allowed size for an intermediary tensor.
    path_cost : int
        The contraction cost so far.
    naive_cost : int
        The cost of the unoptimized expression.

    Returns
    -------
    cost : (int, int)
        A tuple containing the size of any indices removed, and the flop cost.
    positions : tuple of int
        The locations of the proposed tensors to contract.
    new_input_sets : list of sets
        The resulting new list of indices if this proposed contraction is performed.

    Notes
    -----
    Returns None when the candidate is sieved out: the intermediate would
    exceed ``memory_limit``, or the running cost would exceed ``naive_cost``.
    """
    # Find the contraction
    contract = _find_contraction(positions, input_sets, output_set)
    idx_result, new_input_sets, idx_removed, idx_contract = contract

    # Sieve the results based on memory_limit
    new_size = _compute_size_by_dict(idx_result, idx_dict)
    if new_size > memory_limit:
        return None

    # Build sort tuple
    # removed_size = input elements consumed minus intermediate elements
    # produced; larger is better (more data eliminated).
    old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
    removed_size = sum(old_sizes) - new_size

    # NB: removed_size used to be just the size of any removed indices i.e.:
    #     helpers.compute_size_by_dict(idx_removed, idx_dict)
    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
    # Negated so that min() prefers the largest removed size, then the
    # lowest flop cost as a tie-break.
    sort = (-removed_size, cost)

    # Sieve based on total cost as well
    if (path_cost + cost) > naive_cost:
        return None

    # Add contraction to possible choices
    return [sort, positions, new_input_sets]
def _update_other_results(results, best): | |
"""Update the positions and provisional input_sets of ``results`` based on | |
performing the contraction result ``best``. Remove any involving the tensors | |
contracted. | |
Parameters | |
---------- | |
results : list | |
List of contraction results produced by ``_parse_possible_contraction``. | |
best : list | |
The best contraction of ``results`` i.e. the one that will be performed. | |
Returns | |
------- | |
mod_results : list | |
The list of modifed results, updated with outcome of ``best`` contraction. | |
""" | |
best_con = best[1] | |
bx, by = best_con | |
mod_results = [] | |
for cost, (x, y), con_sets in results: | |
# Ignore results involving tensors just contracted | |
if x in best_con or y in best_con: | |
continue | |
# Update the input_sets | |
del con_sets[by - int(by > x) - int(by > y)] | |
del con_sets[bx - int(bx > x) - int(bx > y)] | |
con_sets.insert(-1, best[2][-1]) | |
# Update the position indices | |
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by) | |
mod_results.append((cost, mod_con, con_sets)) | |
return mod_results | |
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``.  What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """
    # Handle trivial cases that leaked through
    if len(input_sets) == 1:
        return [(0,)]
    elif len(input_sets) == 2:
        return [(0, 1)]

    # Build up a naive cost
    # (cost of contracting everything at once; used as a cost ceiling)
    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
    idx_result, new_input_sets, idx_removed, idx_contract = contract
    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)

    # Initially iterate over all pairs
    comb_iter = itertools.combinations(range(len(input_sets)), 2)
    known_contractions = []

    path_cost = 0
    path = []

    for iteration in range(len(input_sets) - 1):

        # Iterate over all pairs on first step, only previously found pairs on subsequent steps
        for positions in comb_iter:

            # Always initially ignore outer products
            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
                continue

            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
                                                 naive_cost)
            if result is not None:
                known_contractions.append(result)

        # If we do not have a inner contraction, rescan pairs including outer products
        if len(known_contractions) == 0:

            # Then check the outer products
            for positions in itertools.combinations(range(len(input_sets)), 2):
                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
                                                     path_cost, naive_cost)
                if result is not None:
                    known_contractions.append(result)

            # If we still did not find any remaining contractions, default back to einsum like behavior
            if len(known_contractions) == 0:
                path.append(tuple(range(len(input_sets))))
                break

        # Sort based on first index
        # (candidates carry a (-removed_size, cost) sort key at index 0)
        best = min(known_contractions, key=lambda x: x[0])

        # Now propagate as many unused contractions as possible to next iteration
        known_contractions = _update_other_results(known_contractions, best)

        # Next iteration only compute contractions with the new tensor
        # All other contractions have been accounted for
        input_sets = best[2]
        new_tensor_pos = len(input_sets) - 1
        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))

        # Update path and total cost
        path.append(best[1])
        path_cost += best[0][1]

    return path
def _can_dot(inputs, result, idx_removed): | |
""" | |
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so. | |
Parameters | |
---------- | |
inputs : list of str | |
Specifies the subscripts for summation. | |
result : str | |
Resulting summation. | |
idx_removed : set | |
Indices that are removed in the summation | |
Returns | |
------- | |
type : bool | |
Returns true if BLAS should and can be used, else False | |
Notes | |
----- | |
If the operations is BLAS level 1 or 2 and is not already aligned | |
we default back to einsum as the memory movement to copy is more | |
costly than the operation itself. | |
Examples | |
-------- | |
# Standard GEMM operation | |
>>> _can_dot(['ij', 'jk'], 'ik', set('j')) | |
True | |
# Can use the standard BLAS, but requires odd data movement | |
>>> _can_dot(['ijj', 'jk'], 'ik', set('j')) | |
False | |
# DDOT where the memory is not aligned | |
>>> _can_dot(['ijk', 'ikj'], '', set('ijk')) | |
False | |
""" | |
# All `dot` calls remove indices | |
if len(idx_removed) == 0: | |
return False | |
# BLAS can only handle two operands | |
if len(inputs) != 2: | |
return False | |
input_left, input_right = inputs | |
for c in set(input_left + input_right): | |
# can't deal with repeated indices on same input or more than 2 total | |
nl, nr = input_left.count(c), input_right.count(c) | |
if (nl > 1) or (nr > 1) or (nl + nr > 2): | |
return False | |
# can't do implicit summation or dimension collapse e.g. | |
# "ab,bc->c" (implicitly sum over 'a') | |
# "ab,ca->ca" (take diagonal of 'a') | |
if nl + nr - 1 == int(c in result): | |
return False | |
# Build a few temporaries | |
set_left = set(input_left) | |
set_right = set(input_right) | |
keep_left = set_left - idx_removed | |
keep_right = set_right - idx_removed | |
rs = len(idx_removed) | |
# At this point we are a DOT, GEMV, or GEMM operation | |
# Handle inner products | |
# DDOT with aligned data | |
if input_left == input_right: | |
return True | |
# DDOT without aligned data (better to use einsum) | |
if set_left == set_right: | |
return False | |
# Handle the 4 possible (aligned) GEMV or GEMM cases | |
# GEMM or GEMV no transpose | |
if input_left[-rs:] == input_right[:rs]: | |
return True | |
# GEMM or GEMV transpose both | |
if input_left[:rs] == input_right[-rs:]: | |
return True | |
# GEMM or GEMV transpose right | |
if input_left[-rs:] == input_right[-rs:]: | |
return True | |
# GEMM or GEMV transpose left | |
if input_left[:rs] == input_right[:rs]: | |
return True | |
# Einsum is faster than GEMV if we have to copy data | |
if not keep_left or not keep_right: | |
return False | |
# We are a matrix-matrix product, but we need to copy data | |
return True | |
def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b])

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b])
    """
    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], basestring):
        # String form: ("ij,jk->ik", a, b).
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

    else:
        # Interleaved form: (a, [0, 1], b, [1, 2], [0, 2]); rebuild an
        # equivalent subscript string from the int/Ellipsis lists.
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        # A trailing odd element, if any, is the explicit output list.
        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                elif isinstance(s, int):
                    subscripts += einsum_symbols[s]
                else:
                    raise TypeError("For this input type lists must contain "
                                    "either int or Ellipsis")
            if num != last:
                subscripts += ","

        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                elif isinstance(s, int):
                    subscripts += einsum_symbols[s]
                else:
                    raise TypeError("For this input type lists must contain "
                                    "either int or Ellipsis")

    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses
    # Each "..." is replaced by concrete, currently-unused symbols whose
    # count is derived from the corresponding operand's ndim.
    if "." in subscripts:
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    # Dimensions not named explicitly belong to the ellipsis.
                    ellipse_count = max(operands[num].ndim, 1)
                    ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            # (implicit output = broadcast indices + indices appearing once,
            # sorted alphabetically)
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError("Character %s is not a valid symbol." % s)
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts
        # (implicit einsum output: every index appearing exactly once,
        # sorted alphabetically)
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if char not in input_subscripts:
            raise ValueError("Output character %s did not appear in the input"
                             % char)

    # Make sure number operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)
def _einsum_path_dispatcher(*operands, **kwargs):
    """Dispatcher for `einsum_path`: returns the arguments that should be
    inspected for ``__array_function__`` overrides."""
    # NOTE: technically, we should only dispatch on array-like arguments, not
    # subscripts (given as strings). But separating operands into
    # arrays/subscripts is a little tricky/slow (given einsum's two supported
    # signatures), so as a practical shortcut we dispatch on everything.
    # Strings will be ignored for dispatching since they don't define
    # __array_function__.
    return operands
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, **kwargs):
    """
    einsum_path(subscripts, *operands, optimize='greedy')

    Evaluates the lowest cost contraction order for an einsum expression by
    considering the creation of intermediate arrays.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    *operands : list of array_like
        These are the arrays for the operation.
    optimize : {bool, list, tuple, 'greedy', 'optimal'}
        Choose the type of path. If a tuple is provided, the second argument is
        assumed to be the maximum intermediate size created. If only a single
        argument is provided the largest input or output array size is used
        as a maximum intermediate size.

        * if a list is given that starts with ``einsum_path``, uses this as the
          contraction path
        * if False no optimization is taken
        * if True defaults to the 'greedy' algorithm
        * 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
          path. Scales exponentially with the number of terms in the
          contraction.
        * 'greedy' An algorithm that chooses the best pair contraction
          at each step. Effectively, this algorithm searches the largest inner,
          Hadamard, and then outer products at each step. Scales cubically with
          the number of terms in the contraction. Equivalent to the 'optimal'
          path for most contractions.

        Default is 'greedy'.

    Returns
    -------
    path : list of tuples
        A list representation of the einsum path.
    string_repr : str
        A printable representation of the einsum path.

    Notes
    -----
    The resulting path indicates which terms of the input contraction should be
    contracted first, the result of this contraction is then appended to the
    end of the contraction list. This list can then be iterated over until all
    intermediate contractions are complete.

    See Also
    --------
    einsum, linalg.multi_dot

    Examples
    --------
    We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
    element of the path ``(1, 2)``. The resulting tensor is added to the end
    of the contraction and the remaining contraction ``(0, 1)`` is then
    completed.

    >>> a = np.random.rand(2, 2)
    >>> b = np.random.rand(2, 5)
    >>> c = np.random.rand(5, 2)
    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (1, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ij,jk,kl->il
             Naive scaling:  4
         Optimized scaling:  3
          Naive FLOP count:  1.600e+02
      Optimized FLOP count:  5.600e+01
       Theoretical speedup:  2.857
      Largest intermediate:  4.000e+00 elements
    -------------------------------------------------------------------------
    scaling                  current                                remaining
    -------------------------------------------------------------------------
       3                   kl,jk->jl                                ij,jl->il
       3                   jl,ij->il                                   il->il

    A more complex index transformation example.

    >>> I = np.random.rand(10, 10, 10, 10)
    >>> C = np.random.rand(10, 10)
    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
    ...                            optimize='greedy')

    >>> print(path_info[0])
    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ea,fb,abcd,gc,hd->efgh
             Naive scaling:  8
         Optimized scaling:  5
          Naive FLOP count:  8.000e+08
      Optimized FLOP count:  8.000e+05
       Theoretical speedup:  1000.000
      Largest intermediate:  1.000e+04 elements
    --------------------------------------------------------------------------
    scaling        current                                remaining
    --------------------------------------------------------------------------
       5           abcd,ea->bcde                          fb,gc,hd,bcde->efgh
       5           bcde,fb->cdef                          gc,hd,cdef->efgh
       5           cdef,gc->defg                          hd,defg->efgh
       5           defg,hd->efgh                             efgh->efgh
    """
    # Make sure all keywords are valid
    valid_contract_kwargs = ['optimize', 'einsum_call']
    unknown_kwargs = [k for (k, v) in kwargs.items() if k
                      not in valid_contract_kwargs]
    if len(unknown_kwargs):
        raise TypeError("Did not understand the following kwargs:"
                        " %s" % unknown_kwargs)

    # Figure out what the path really is
    path_type = kwargs.pop('optimize', True)
    if path_type is True:
        path_type = 'greedy'
    if path_type is None:
        path_type = False

    memory_limit = None

    # No optimization or a named path algorithm
    if (path_type is False) or isinstance(path_type, basestring):
        pass

    # Given an explicit path
    elif len(path_type) and (path_type[0] == 'einsum_path'):
        pass

    # Path tuple with memory limit
    elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
            isinstance(path_type[1], (int, float))):
        memory_limit = int(path_type[1])
        path_type = path_type[0]

    else:
        raise TypeError("Did not understand the path: %s" % str(path_type))

    # Hidden option, only einsum should call this
    einsum_call_arg = kwargs.pop("einsum_call", False)

    # Python side parsing
    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)

    # Build a few useful lists and sets
    input_list = input_subscripts.split(',')
    input_sets = [set(x) for x in input_list]
    output_set = set(output_subscript)
    indices = set(input_subscripts.replace(',', ''))

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    broadcast_indices = [[] for x in range(len(input_list))]
    for tnum, term in enumerate(input_list):
        sh = operands[tnum].shape
        if len(sh) != len(term):
            # Report the offending term; the old code indexed the full
            # subscript string (input_subscripts[tnum]), which yields a
            # single unrelated character rather than the operand's term.
            raise ValueError("Einstein sum subscript %s does not contain the "
                             "correct number of indices for operand %d."
                             % (input_list[tnum], tnum))
        for cnum, char in enumerate(term):
            dim = sh[cnum]

            # Build out broadcast indices
            if dim == 1:
                broadcast_indices[tnum].append(char)

            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    # Current size first, previously-seen size second; the
                    # original arguments were swapped relative to the text.
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (char, tnum, dim, dimension_dict[char]))
            else:
                dimension_dict[char] = dim

    # Convert broadcast inds to sets
    broadcast_indices = [set(x) for x in broadcast_indices]

    # Compute size of each input array plus the output array
    size_list = [_compute_size_by_dict(term, dimension_dict)
                 for term in input_list + [output_subscript]]
    max_size = max(size_list)

    if memory_limit is None:
        memory_arg = max_size
    else:
        memory_arg = memory_limit

    # Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)

    # Compute the path
    if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
        # Nothing to be optimized, leave it to einsum
        path = [tuple(range(len(input_list)))]
    elif path_type == "greedy":
        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
    elif path_type == "optimal":
        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
    elif path_type[0] == 'einsum_path':
        path = path_type[1:]
    else:
        # Format with %; the old comma form produced a two-element tuple as
        # the KeyError argument instead of an interpolated message.
        raise KeyError("Path name %s not found" % path_type)

    cost_list, scale_list, size_list, contraction_list = [], [], [], []

    # Build contraction tuple (positions, gemm, einsum_str, remaining)
    for cnum, contract_inds in enumerate(path):
        # Make sure we remove inds from right to left
        contract_inds = tuple(sorted(list(contract_inds), reverse=True))

        contract = _find_contraction(contract_inds, input_sets, output_set)
        out_inds, input_sets, idx_removed, idx_contract = contract

        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
        cost_list.append(cost)
        scale_list.append(len(idx_contract))
        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))

        bcast = set()
        tmp_inputs = []
        for x in contract_inds:
            tmp_inputs.append(input_list.pop(x))
            bcast |= broadcast_indices.pop(x)

        new_bcast_inds = bcast - idx_removed

        # If we're broadcasting, nix blas
        if not len(idx_removed & bcast):
            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
        else:
            do_blas = False

        # Last contraction
        if (cnum - len(path)) == -1:
            idx_result = output_subscript
        else:
            # Order intermediate indices by dimension size for better locality
            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
            idx_result = "".join([x[1] for x in sorted(sort_result)])

        input_list.append(idx_result)
        broadcast_indices.append(new_bcast_inds)
        einsum_str = ",".join(tmp_inputs) + "->" + idx_result

        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
        contraction_list.append(contraction)

    opt_cost = sum(cost_list) + 1

    if einsum_call_arg:
        # Hidden fast path used by einsum itself: skip the report string.
        return (operands, contraction_list)

    # Return the path along with a nice string representation
    overall_contraction = input_subscripts + "->" + output_subscript
    header = ("scaling", "current", "remaining")

    speedup = naive_cost / opt_cost
    max_i = max(size_list)

    path_print = "  Complete contraction:  %s\n" % overall_contraction
    path_print += "         Naive scaling:  %d\n" % len(indices)
    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
    path_print += "-" * 74 + "\n"
    path_print += "%6s %24s %40s\n" % header
    path_print += "-" * 74

    for n, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        remaining_str = ",".join(remaining) + "->" + output_subscript
        path_run = (scale_list[n], einsum_str, remaining_str)
        path_print += "\n%4d    %24s %40s" % path_run

    path = ['einsum_path'] + path
    return (path, path_print)
def _einsum_dispatcher(*operands, **kwargs): | |
# Arguably we dispatch on more arguments that we really should; see note in | |
# _einsum_path_dispatcher for why. | |
for op in operands: | |
yield op | |
yield kwargs.get('out') | |
# Python wrapper over the C-implemented einsum: adds the ``optimize`` keyword
# by decomposing the expression into pairwise contractions (via einsum_path)
# before evaluation.
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, **kwargs):
    """
    einsum(subscripts, *operands, out=None, dtype=None, order='K',
           casting='safe', optimize=False)

    Evaluates the Einstein summation convention on the operands.

    Using the Einstein summation convention, many common multi-dimensional,
    linear algebraic array operations can be represented in a simple fashion.
    In *implicit* mode `einsum` computes these values.

    In *explicit* mode, `einsum` provides further flexibility to compute
    other array operations that might not be considered classical Einstein
    summation operations, by disabling, or forcing summation over specified
    subscript labels.

    See the notes and examples for clarification.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation as comma separated list of
        subscript labels. An implicit (classical Einstein summation)
        calculation is performed unless the explicit indicator '->' is
        included as well as subscript labels of the precise output form.
    operands : list of array_like
        These are the arrays for the operation.
    out : ndarray, optional
        If provided, the calculation is done into this array.
    dtype : {data-type, None}, optional
        If provided, forces the calculation to use the data type specified.
        Note that you may have to also give a more liberal `casting`
        parameter to allow the conversions. Default is None.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the output. 'C' means it should
        be C contiguous. 'F' means it should be Fortran contiguous,
        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
        is possible, including arbitrarily permuted axes.
        Default is 'K'.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Setting this to
        'unsafe' is not recommended, as it can adversely affect accumulations.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.

        Default is 'safe'.
    optimize : {False, True, 'greedy', 'optimal'}, optional
        Controls if intermediate optimization should occur. No optimization
        will occur if False and True will default to the 'greedy' algorithm.
        Also accepts an explicit contraction list from the ``np.einsum_path``
        function. See ``np.einsum_path`` for more details. Defaults to False.

    Returns
    -------
    output : ndarray
        The calculation based on the Einstein summation convention.

    See Also
    --------
    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot

    Notes
    -----
    .. versionadded:: 1.6.0

    The Einstein summation convention can be used to compute
    many multi-dimensional, linear algebraic array operations. `einsum`
    provides a succinct way of representing these.

    A non-exhaustive list of these operations,
    which can be computed by `einsum`, is shown below along with examples:

    * Trace of an array, :py:func:`numpy.trace`.
    * Return a diagonal, :py:func:`numpy.diag`.
    * Array axis summations, :py:func:`numpy.sum`.
    * Transpositions and permutations, :py:func:`numpy.transpose`.
    * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
    * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
    * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
    * Tensor contractions, :py:func:`numpy.tensordot`.
    * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.

    The subscripts string is a comma-separated list of subscript labels,
    where each label refers to a dimension of the corresponding operand.
    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
    appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
    view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
    describes traditional matrix multiplication and is equivalent to
    :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
    operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
    to :py:func:`np.trace(a) <numpy.trace>`.

    In *implicit mode*, the chosen subscripts are important
    since the axes of the output are reordered alphabetically. This
    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
    ``np.einsum('ji', a)`` takes its transpose. Additionally,
    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
    multiplication since subscript 'h' precedes subscript 'i'.

    In *explicit mode* the output can be directly controlled by
    specifying output subscript labels. This requires the
    identifier '->' as well as the list of output subscript labels.
    This feature increases the flexibility of the function since
    summing can be disabled or forced when required. The call
    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
    and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
    The difference is that `einsum` does not allow broadcasting by default.
    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
    order of the output subscript labels and therefore returns matrix
    multiplication, unlike the example above in implicit mode.

    To enable and control broadcasting, use an ellipsis. Default
    NumPy-style broadcasting is done by adding an ellipsis
    to the left of each term, like ``np.einsum('...ii->...i', a)``.
    To take the trace along the first and last axes,
    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
    product with the left-most indices instead of rightmost, one can do
    ``np.einsum('ij...,jk...->ik...', a, b)``.

    When there is only one operand, no axes are summed, and no output
    parameter is provided, a view into the operand is returned instead
    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
    produces a view (changed in version 1.10.0).

    `einsum` also provides an alternative way to provide the subscripts
    and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
    If the output shape is not provided in this format `einsum` will be
    calculated in implicit mode, otherwise it will be performed explicitly.
    The examples below have corresponding `einsum` calls with the two
    parameter methods.

    .. versionadded:: 1.10.0

    Views returned from einsum are now writeable whenever the input array
    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
    of a 2D array.

    .. versionadded:: 1.12.0

    Added the ``optimize`` argument which will optimize the contraction order
    of an einsum expression. For a contraction with three or more operands this
    can greatly increase the computational efficiency at the cost of a larger
    memory footprint during computation.

    Typically a 'greedy' algorithm is applied which empirical tests have shown
    returns the optimal path in the majority of cases. In some cases 'optimal'
    will return the superlative path through a more expensive, exhaustive search.
    For iterative calculations it may be advisable to calculate the optimal path
    once and reuse that path by supplying it as an argument. An example is given
    below.

    See :py:func:`numpy.einsum_path` for more details.

    Examples
    --------
    >>> a = np.arange(25).reshape(5,5)
    >>> b = np.arange(5)
    >>> c = np.arange(6).reshape(2,3)

    Trace of a matrix:

    >>> np.einsum('ii', a)
    60
    >>> np.einsum(a, [0,0])
    60
    >>> np.trace(a)
    60

    Extract the diagonal (requires explicit form):

    >>> np.einsum('ii->i', a)
    array([ 0,  6, 12, 18, 24])
    >>> np.einsum(a, [0,0], [0])
    array([ 0,  6, 12, 18, 24])
    >>> np.diag(a)
    array([ 0,  6, 12, 18, 24])

    Sum over an axis (requires explicit form):

    >>> np.einsum('ij->i', a)
    array([ 10,  35,  60,  85, 110])
    >>> np.einsum(a, [0,1], [0])
    array([ 10,  35,  60,  85, 110])
    >>> np.sum(a, axis=1)
    array([ 10,  35,  60,  85, 110])

    For higher dimensional arrays summing a single axis can be done with ellipsis:

    >>> np.einsum('...j->...', a)
    array([ 10,  35,  60,  85, 110])
    >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
    array([ 10,  35,  60,  85, 110])

    Compute a matrix transpose, or reorder any number of axes:

    >>> np.einsum('ji', c)
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> np.einsum('ij->ji', c)
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> np.einsum(c, [1,0])
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> np.transpose(c)
    array([[0, 3],
           [1, 4],
           [2, 5]])

    Vector inner products:

    >>> np.einsum('i,i', b, b)
    30
    >>> np.einsum(b, [0], b, [0])
    30
    >>> np.inner(b,b)
    30

    Matrix vector multiplication:

    >>> np.einsum('ij,j', a, b)
    array([ 30,  80, 130, 180, 230])
    >>> np.einsum(a, [0,1], b, [1])
    array([ 30,  80, 130, 180, 230])
    >>> np.dot(a, b)
    array([ 30,  80, 130, 180, 230])
    >>> np.einsum('...j,j', a, b)
    array([ 30,  80, 130, 180, 230])

    Broadcasting and scalar multiplication:

    >>> np.einsum('..., ...', 3, c)
    array([[ 0,  3,  6],
           [ 9, 12, 15]])
    >>> np.einsum(',ij', 3, c)
    array([[ 0,  3,  6],
           [ 9, 12, 15]])
    >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
    array([[ 0,  3,  6],
           [ 9, 12, 15]])
    >>> np.multiply(3, c)
    array([[ 0,  3,  6],
           [ 9, 12, 15]])

    Vector outer product:

    >>> np.einsum('i,j', np.arange(2)+1, b)
    array([[0, 1, 2, 3, 4],
           [0, 2, 4, 6, 8]])
    >>> np.einsum(np.arange(2)+1, [0], b, [1])
    array([[0, 1, 2, 3, 4],
           [0, 2, 4, 6, 8]])
    >>> np.outer(np.arange(2)+1, b)
    array([[0, 1, 2, 3, 4],
           [0, 2, 4, 6, 8]])

    Tensor contraction:

    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> np.einsum('ijk,jil->kl', a, b)
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])
    >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])
    >>> np.tensordot(a,b, axes=([1,0],[0,1]))
    array([[ 4400.,  4730.],
           [ 4532.,  4874.],
           [ 4664.,  5018.],
           [ 4796.,  5162.],
           [ 4928.,  5306.]])

    Writeable returned arrays (since version 1.10.0):

    >>> a = np.zeros((3, 3))
    >>> np.einsum('ii->i', a)[:] = 1
    >>> a
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])

    Example of ellipsis use:

    >>> a = np.arange(6).reshape((3,2))
    >>> b = np.arange(12).reshape((4,3))
    >>> np.einsum('ki,jk->ij', a, b)
    array([[10, 28, 46, 64],
           [13, 40, 67, 94]])
    >>> np.einsum('ki,...k->i...', a, b)
    array([[10, 28, 46, 64],
           [13, 40, 67, 94]])
    >>> np.einsum('k...,jk', a, b)
    array([[10, 28, 46, 64],
           [13, 40, 67, 94]])

    Chained array operations. For more complicated contractions, speed ups
    might be achieved by repeatedly computing a 'greedy' path or pre-computing the
    'optimal' path and repeatedly applying it, using an
    `einsum_path` insertion (since version 1.12.0). Performance improvements can be
    particularly significant with larger arrays:

    >>> a = np.ones(64).reshape(2,4,8)
    # Basic `einsum`: ~1520ms  (benchmarked on 3.1GHz Intel i5.)
    >>> for iteration in range(500):
    ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
    # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
    >>> for iteration in range(500):
    ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
    # Greedy `einsum` (faster optimal path approximation): ~160ms
    >>> for iteration in range(500):
    ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
    # Optimal `einsum` (best usage pattern in some use cases): ~110ms
    >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
    >>> for iteration in range(500):
    ...     np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
    """
    # Grab non-einsum kwargs; do not optimize by default.
    optimize_arg = kwargs.pop('optimize', False)

    # If no optimization, run pure einsum
    if optimize_arg is False:
        return c_einsum(*operands, **kwargs)

    # Keyword arguments that the C einsum core understands.
    valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
    einsum_kwargs = {k: v for (k, v) in kwargs.items() if
                     k in valid_einsum_kwargs}

    # Make sure all keywords are valid
    valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
    unknown_kwargs = [k for (k, v) in kwargs.items() if
                      k not in valid_contract_kwargs]
    if len(unknown_kwargs):
        raise TypeError("Did not understand the following kwargs: %s"
                        % unknown_kwargs)

    # Special handling if out is specified: only the final contraction may
    # write into it, so it is held back from the per-step kwargs here.
    specified_out = False
    out_array = einsum_kwargs.pop('out', None)
    if out_array is not None:
        specified_out = True

    # Build the contraction list and operand list. einsum_call=True is the
    # hidden fast path of einsum_path that skips the report string.
    operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
                                             einsum_call=True)

    handle_out = False

    # Start contraction loop: each step pops its inputs, contracts them, and
    # appends the intermediate result to the end of the operand list.
    for num, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        tmp_operands = [operands.pop(x) for x in inds]

        # Do we need to deal with the output? (only on the last contraction)
        handle_out = specified_out and ((num + 1) == len(contraction_list))

        # Call tensordot if still possible
        if blas:
            # Checks have already been handled
            input_str, results_index = einsum_str.split('->')
            input_left, input_right = input_str.split(',')

            # Concatenated input labels minus the contracted ones: this is
            # the label order tensordot produces.
            tensor_result = input_left + input_right
            for s in idx_rm:
                tensor_result = tensor_result.replace(s, "")

            # Find indices to contract over
            left_pos, right_pos = [], []
            for s in sorted(idx_rm):
                left_pos.append(input_left.find(s))
                right_pos.append(input_right.find(s))

            # Contract!
            new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))

            # Build a new view if needed: a trailing c_einsum permutes the
            # tensordot result into the requested label order and/or writes
            # into the user-supplied out array.
            if (tensor_result != results_index) or handle_out:
                if handle_out:
                    einsum_kwargs["out"] = out_array
                new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)

        # Call einsum
        else:
            # If out was specified
            if handle_out:
                einsum_kwargs["out"] = out_array

            # Do the contraction
            new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)

        # Append new items and dereference what we can
        operands.append(new_view)
        del tmp_operands, new_view

    if specified_out:
        return out_array
    else:
        # Exactly one operand remains: the final result.
        return operands[0]
Copyright (c) 2005-2019, NumPy Developers. | |
All rights reserved. | |
Redistribution and use in source and binary forms, with or without | |
modification, are permitted provided that the following conditions are | |
met: | |
* Redistributions of source code must retain the above copyright | |
notice, this list of conditions and the following disclaimer. | |
* Redistributions in binary form must reproduce the above | |
copyright notice, this list of conditions and the following | |
disclaimer in the documentation and/or other materials provided | |
with the distribution. | |
* Neither the name of the NumPy Developers nor the names of any | |
contributors may be used to endorse or promote products derived | |
from this software without specific prior written permission. | |
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
The NumPy repository and source distributions bundle several libraries that are | |
compatibly licensed. We list these here. | |
Name: Numpydoc | |
Files: doc/sphinxext/numpydoc/* | |
License: 2-clause BSD | |
For details, see doc/sphinxext/LICENSE.txt | |
Name: scipy-sphinx-theme | |
Files: doc/scipy-sphinx-theme/* | |
License: 3-clause BSD, PSF and Apache 2.0 | |
For details, see doc/scipy-sphinx-theme/LICENSE.txt | |
Name: lapack-lite | |
Files: numpy/linalg/lapack_lite/* | |
License: 3-clause BSD | |
For details, see numpy/linalg/lapack_lite/LICENSE.txt | |
Name: tempita | |
Files: tools/npy_tempita/* | |
License: BSD derived | |
For details, see tools/npy_tempita/license.txt | |
Name: dragon4 | |
Files: numpy/core/src/multiarray/dragon4.c | |
License: One of a kind | |
For license text, see numpy/core/src/multiarray/dragon4.c | |
---- | |
This binary distribution of NumPy also bundles the following software: | |
Name: GCC runtime library | |
Files: .dylibs/* | |
Description: dynamically linked to files compiled with gcc | |
Availability: https://gcc.gnu.org/viewcvs/gcc/ | |
License: GPLv3 + runtime exception | |
Copyright (C) 2002-2017 Free Software Foundation, Inc. | |
Libgfortran is free software; you can redistribute it and/or modify | |
it under the terms of the GNU General Public License as published by | |
the Free Software Foundation; either version 3, or (at your option) | |
any later version. | |
Libgfortran is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
Under Section 7 of GPL version 3, you are granted additional | |
permissions described in the GCC Runtime Library Exception, version | |
3.1, as published by the Free Software Foundation. | |
You should have received a copy of the GNU General Public License and | |
a copy of the GCC Runtime Library Exception along with this program; | |
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
<http://www.gnu.org/licenses/>. | |
---- | |
Full text of license texts referred to above follows (that they are | |
listed below does not necessarily imply the conditions apply to the | |
present binary release): | |
---- | |
GCC RUNTIME LIBRARY EXCEPTION | |
Version 3.1, 31 March 2009 | |
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/> | |
Everyone is permitted to copy and distribute verbatim copies of this | |
license document, but changing it is not allowed. | |
This GCC Runtime Library Exception ("Exception") is an additional | |
permission under section 7 of the GNU General Public License, version | |
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that | |
bears a notice placed by the copyright holder of the file stating that | |
the file is governed by GPLv3 along with this Exception. | |
When you use GCC to compile a program, GCC may combine portions of | |
certain GCC header files and runtime libraries with the compiled | |
program. The purpose of this Exception is to allow compilation of | |
non-GPL (including proprietary) programs to use, in this way, the | |
header files and runtime libraries covered by this Exception. | |
0. Definitions. | |
A file is an "Independent Module" if it either requires the Runtime | |
Library for execution after a Compilation Process, or makes use of an | |
interface provided by the Runtime Library, but is not otherwise based | |
on the Runtime Library. | |
"GCC" means a version of the GNU Compiler Collection, with or without | |
modifications, governed by version 3 (or a specified later version) of | |
the GNU General Public License (GPL) with the option of using any | |
subsequent versions published by the FSF. | |
"GPL-compatible Software" is software whose conditions of propagation, | |
modification and use would permit combination with GCC in accord with | |
the license of GCC. | |
"Target Code" refers to output from any compiler for a real or virtual | |
target processor architecture, in executable form or suitable for | |
input to an assembler, loader, linker and/or execution | |
phase. Notwithstanding that, Target Code does not include data in any | |
format that is used as a compiler intermediate representation, or used | |
for producing a compiler intermediate representation. | |
The "Compilation Process" transforms code entirely represented in | |
non-intermediate languages designed for human-written code, and/or in | |
Java Virtual Machine byte code, into Target Code. Thus, for example, | |
use of source code generators and preprocessors need not be considered | |
part of the Compilation Process, since the Compilation Process can be | |
understood as starting with the output of the generators or | |
preprocessors. | |
A Compilation Process is "Eligible" if it is done using GCC, alone or | |
with other GPL-compatible software, or if it is done without using any | |
work based on GCC. For example, using non-GPL-compatible Software to | |
optimize any GCC intermediate representations would not qualify as an | |
Eligible Compilation Process. | |
1. Grant of Additional Permission. | |
You have permission to propagate a work of Target Code formed by | |
combining the Runtime Library with Independent Modules, even if such | |
propagation would otherwise violate the terms of GPLv3, provided that | |
all Target Code was generated by Eligible Compilation Processes. You | |
may then convey such a combination under terms of your choice, | |
consistent with the licensing of the Independent Modules. | |
2. No Weakening of GCC Copyleft. | |
The availability of this Exception does not imply any general | |
presumption that third-party software is unaffected by the copyleft | |
requirements of the license of GCC. | |
---- | |
GNU GENERAL PUBLIC LICENSE | |
Version 3, 29 June 2007 | |
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> | |
Everyone is permitted to copy and distribute verbatim copies | |
of this license document, but changing it is not allowed. | |
Preamble | |
The GNU General Public License is a free, copyleft license for | |
software and other kinds of works. | |
The licenses for most software and other practical works are designed | |
to take away your freedom to share and change the works. By contrast, | |
the GNU General Public License is intended to guarantee your freedom to | |
share and change all versions of a program--to make sure it remains free | |
software for all its users. We, the Free Software Foundation, use the | |
GNU General Public License for most of our software; it applies also to | |
any other work released this way by its authors. You can apply it to | |
your programs, too. | |
When we speak of free software, we are referring to freedom, not | |
price. Our General Public Licenses are designed to make sure that you | |
have the freedom to distribute copies of free software (and charge for | |
them if you wish), that you receive source code or can get it if you | |
want it, that you can change the software or use pieces of it in new | |
free programs, and that you know you can do these things. | |
To protect your rights, we need to prevent others from denying you | |
these rights or asking you to surrender the rights. Therefore, you have | |
certain responsibilities if you distribute copies of the software, or if | |
you modify it: responsibilities to respect the freedom of others. | |
For example, if you distribute copies of such a program, whether | |
gratis or for a fee, you must pass on to the recipients the same | |
freedoms that you received. You must make sure that they, too, receive | |
or can get the source code. And you must show them these terms so they | |
know their rights. | |
Developers that use the GNU GPL protect your rights with two steps: | |
(1) assert copyright on the software, and (2) offer you this License | |
giving you legal permission to copy, distribute and/or modify it. | |
For the developers' and authors' protection, the GPL clearly explains | |
that there is no warranty for this free software. For both users' and | |
authors' sake, the GPL requires that modified versions be marked as | |
changed, so that their problems will not be attributed erroneously to | |
authors of previous versions. | |
Some devices are designed to deny users access to install or run | |
modified versions of the software inside them, although the manufacturer | |
can do so. This is fundamentally incompatible with the aim of | |
protecting users' freedom to change the software. The systematic | |
pattern of such abuse occurs in the area of products for individuals to | |
use, which is precisely where it is most unacceptable. Therefore, we | |
have designed this version of the GPL to prohibit the practice for those | |
products. If such problems arise substantially in other domains, we | |
stand ready to extend this provision to those domains in future versions | |
of the GPL, as needed to protect the freedom of users. | |
Finally, every program is threatened constantly by software patents. | |
States should not allow patents to restrict development and use of | |
software on general-purpose computers, but in those that do, we wish to | |
avoid the special danger that patents applied to a free program could | |
make it effectively proprietary. To prevent this, the GPL assures that | |
patents cannot be used to render the program non-free. | |
The precise terms and conditions for copying, distribution and | |
modification follow. | |
TERMS AND CONDITIONS | |
0. Definitions. | |
"This License" refers to version 3 of the GNU General Public License. | |
"Copyright" also means copyright-like laws that apply to other kinds of | |
works, such as semiconductor masks. | |
"The Program" refers to any copyrightable work licensed under this | |
License. Each licensee is addressed as "you". "Licensees" and | |
"recipients" may be individuals or organizations. | |
To "modify" a work means to copy from or adapt all or part of the work | |
in a fashion requiring copyright permission, other than the making of an | |
exact copy. The resulting work is called a "modified version" of the | |
earlier work or a work "based on" the earlier work. | |
A "covered work" means either the unmodified Program or a work based | |
on the Program. | |
To "propagate" a work means to do anything with it that, without | |
permission, would make you directly or secondarily liable for | |
infringement under applicable copyright law, except executing it on a | |
computer or modifying a private copy. Propagation includes copying, | |
distribution (with or without modification), making available to the | |
public, and in some countries other activities as well. | |
To "convey" a work means any kind of propagation that enables other | |
parties to make or receive copies. Mere interaction with a user through | |
a computer network, with no transfer of a copy, is not conveying. | |
An interactive user interface displays "Appropriate Legal Notices" | |
to the extent that it includes a convenient and prominently visible | |
feature that (1) displays an appropriate copyright notice, and (2) | |
tells the user that there is no warranty for the work (except to the | |
extent that warranties are provided), that licensees may convey the | |
work under this License, and how to view a copy of this License. If | |
the interface presents a list of user commands or options, such as a | |
menu, a prominent item in the list meets this criterion. | |
1. Source Code. | |
The "source code" for a work means the preferred form of the work | |
for making modifications to it. "Object code" means any non-source | |
form of a work. | |
A "Standard Interface" means an interface that either is an official | |
standard defined by a recognized standards body, or, in the case of | |
interfaces specified for a particular programming language, one that | |
is widely used among developers working in that language. | |
The "System Libraries" of an executable work include anything, other | |
than the work as a whole, that (a) is included in the normal form of | |
packaging a Major Component, but which is not part of that Major | |
Component, and (b) serves only to enable use of the work with that | |
Major Component, or to implement a Standard Interface for which an | |
implementation is available to the public in source code form. A | |
"Major Component", in this context, means a major essential component | |
(kernel, window system, and so on) of the specific operating system | |
(if any) on which the executable work runs, or a compiler used to | |
produce the work, or an object code interpreter used to run it. | |
The "Corresponding Source" for a work in object code form means all | |
the source code needed to generate, install, and (for an executable | |
work) run the object code and to modify the work, including scripts to | |
control those activities. However, it does not include the work's | |
System Libraries, or general-purpose tools or generally available free | |
programs which are used unmodified in performing those activities but | |
which are not part of the work. For example, Corresponding Source | |
includes interface definition files associated with source files for | |
the work, and the source code for shared libraries and dynamically | |
linked subprograms that the work is specifically designed to require, | |
such as by intimate data communication or control flow between those | |
subprograms and other parts of the work. | |
The Corresponding Source need not include anything that users | |
can regenerate automatically from other parts of the Corresponding | |
Source. | |
The Corresponding Source for a work in source code form is that | |
same work. | |
2. Basic Permissions. | |
All rights granted under this License are granted for the term of | |
copyright on the Program, and are irrevocable provided the stated | |
conditions are met. This License explicitly affirms your unlimited | |
permission to run the unmodified Program. The output from running a | |
covered work is covered by this License only if the output, given its | |
content, constitutes a covered work. This License acknowledges your | |
rights of fair use or other equivalent, as provided by copyright law. | |
You may make, run and propagate covered works that you do not | |
convey, without conditions so long as your license otherwise remains | |
in force. You may convey covered works to others for the sole purpose | |
of having them make modifications exclusively for you, or provide you | |
with facilities for running those works, provided that you comply with | |
the terms of this License in conveying all material for which you do | |
not control copyright. Those thus making or running the covered works | |
for you must do so exclusively on your behalf, under your direction | |
and control, on terms that prohibit them from making any copies of | |
your copyrighted material outside their relationship with you. | |
Conveying under any other circumstances is permitted solely under | |
the conditions stated below. Sublicensing is not allowed; section 10 | |
makes it unnecessary. | |
3. Protecting Users' Legal Rights From Anti-Circumvention Law. | |
No covered work shall be deemed part of an effective technological | |
measure under any applicable law fulfilling obligations under article | |
11 of the WIPO copyright treaty adopted on 20 December 1996, or | |
similar laws prohibiting or restricting circumvention of such | |
measures. | |
When you convey a covered work, you waive any legal power to forbid | |
circumvention of technological measures to the extent such circumvention | |
is effected by exercising rights under this License with respect to | |
the covered work, and you disclaim any intention to limit operation or | |
modification of the work as a means of enforcing, against the work's | |
users, your or third parties' legal rights to forbid circumvention of | |
technological measures. | |
4. Conveying Verbatim Copies. | |
You may convey verbatim copies of the Program's source code as you | |
receive it, in any medium, provided that you conspicuously and | |
appropriately publish on each copy an appropriate copyright notice; | |
keep intact all notices stating that this License and any | |
non-permissive terms added in accord with section 7 apply to the code; | |
keep intact all notices of the absence of any warranty; and give all | |
recipients a copy of this License along with the Program. | |
You may charge any price or no price for each copy that you convey, | |
and you may offer support or warranty protection for a fee. | |
5. Conveying Modified Source Versions. | |
You may convey a work based on the Program, or the modifications to | |
produce it from the Program, in the form of source code under the | |
terms of section 4, provided that you also meet all of these conditions: | |
a) The work must carry prominent notices stating that you modified | |
it, and giving a relevant date. | |
b) The work must carry prominent notices stating that it is | |
released under this License and any conditions added under section | |
7. This requirement modifies the requirement in section 4 to | |
"keep intact all notices". | |
c) You must license the entire work, as a whole, under this | |
License to anyone who comes into possession of a copy. This | |
License will therefore apply, along with any applicable section 7 | |
additional terms, to the whole of the work, and all its parts, | |
regardless of how they are packaged. This License gives no | |
permission to license the work in any other way, but it does not | |
invalidate such permission if you have separately received it. | |
d) If the work has interactive user interfaces, each must display | |
Appropriate Legal Notices; however, if the Program has interactive | |
interfaces that do not display Appropriate Legal Notices, your | |
work need not make them do so. | |
A compilation of a covered work with other separate and independent | |
works, which are not by their nature extensions of the covered work, | |
and which are not combined with it such as to form a larger program, | |
in or on a volume of a storage or distribution medium, is called an | |
"aggregate" if the compilation and its resulting copyright are not | |
used to limit the access or legal rights of the compilation's users | |
beyond what the individual works permit. Inclusion of a covered work | |
in an aggregate does not cause this License to apply to the other | |
parts of the aggregate. | |
6. Conveying Non-Source Forms. | |
You may convey a covered work in object code form under the terms | |
of sections 4 and 5, provided that you also convey the | |
machine-readable Corresponding Source under the terms of this License, | |
in one of these ways: | |
a) Convey the object code in, or embodied in, a physical product | |
(including a physical distribution medium), accompanied by the | |
Corresponding Source fixed on a durable physical medium | |
customarily used for software interchange. | |
b) Convey the object code in, or embodied in, a physical product | |
(including a physical distribution medium), accompanied by a | |
written offer, valid for at least three years and valid for as | |
long as you offer spare parts or customer support for that product | |
model, to give anyone who possesses the object code either (1) a | |
copy of the Corresponding Source for all the software in the | |
product that is covered by this License, on a durable physical | |
medium customarily used for software interchange, for a price no | |
more than your reasonable cost of physically performing this | |
conveying of source, or (2) access to copy the | |
Corresponding Source from a network server at no charge. | |
c) Convey individual copies of the object code with a copy of the | |
written offer to provide the Corresponding Source. This | |
alternative is allowed only occasionally and noncommercially, and | |
only if you received the object code with such an offer, in accord | |
with subsection 6b. | |
d) Convey the object code by offering access from a designated | |
place (gratis or for a charge), and offer equivalent access to the | |
Corresponding Source in the same way through the same place at no | |
further charge. You need not require recipients to copy the | |
Corresponding Source along with the object code. If the place to | |
copy the object code is a network server, the Corresponding Source | |
may be on a different server (operated by you or a third party) | |
that supports equivalent copying facilities, provided you maintain | |
clear directions next to the object code saying where to find the | |
Corresponding Source. Regardless of what server hosts the | |
Corresponding Source, you remain obligated to ensure that it is | |
available for as long as needed to satisfy these requirements. | |
e) Convey the object code using peer-to-peer transmission, provided | |
you inform other peers where the object code and Corresponding | |
Source of the work are being offered to the general public at no | |
charge under subsection 6d. | |
A separable portion of the object code, whose source code is excluded | |
from the Corresponding Source as a System Library, need not be | |
included in conveying the object code work. | |
A "User Product" is either (1) a "consumer product", which means any | |
tangible personal property which is normally used for personal, family, | |
or household purposes, or (2) anything designed or sold for incorporation | |
into a dwelling. In determining whether a product is a consumer product, | |
doubtful cases shall be resolved in favor of coverage. For a particular | |
product received by a particular user, "normally used" refers to a | |
typical or common use of that class of product, regardless of the status | |
of the particular user or of the way in which the particular user | |
actually uses, or expects or is expected to use, the product. A product | |
is a consumer product regardless of whether the product has substantial | |
commercial, industrial or non-consumer uses, unless such uses represent | |
the only significant mode of use of the product. | |
"Installation Information" for a User Product means any methods, | |
procedures, authorization keys, or other information required to install | |
and execute modified versions of a covered work in that User Product from | |
a modified version of its Corresponding Source. The information must | |
suffice to ensure that the continued functioning of the modified object | |
code is in no case prevented or interfered with solely because | |
modification has been made. | |
If you convey an object code work under this section in, or with, or | |
specifically for use in, a User Product, and the conveying occurs as | |
part of a transaction in which the right of possession and use of the | |
User Product is transferred to the recipient in perpetuity or for a | |
fixed term (regardless of how the transaction is characterized), the | |
Corresponding Source conveyed under this section must be accompanied | |
by the Installation Information. But this requirement does not apply | |
if neither you nor any third party retains the ability to install | |
modified object code on the User Product (for example, the work has | |
been installed in ROM). | |
The requirement to provide Installation Information does not include a | |
requirement to continue to provide support service, warranty, or updates | |
for a work that has been modified or installed by the recipient, or for | |
the User Product in which it has been modified or installed. Access to a | |
network may be denied when the modification itself materially and | |
adversely affects the operation of the network or violates the rules and | |
protocols for communication across the network. | |
Corresponding Source conveyed, and Installation Information provided, | |
in accord with this section must be in a format that is publicly | |
documented (and with an implementation available to the public in | |
source code form), and must require no special password or key for | |
unpacking, reading or copying. | |
7. Additional Terms. | |
"Additional permissions" are terms that supplement the terms of this | |
License by making exceptions from one or more of its conditions. | |
Additional permissions that are applicable to the entire Program shall | |
be treated as though they were included in this License, to the extent | |
that they are valid under applicable law. If additional permissions | |
apply only to part of the Program, that part may be used separately | |
under those permissions, but the entire Program remains governed by | |
this License without regard to the additional permissions. | |
When you convey a copy of a covered work, you may at your option | |
remove any additional permissions from that copy, or from any part of | |
it. (Additional permissions may be written to require their own | |
removal in certain cases when you modify the work.) You may place | |
additional permissions on material, added by you to a covered work, | |
for which you have or can give appropriate copyright permission. | |
Notwithstanding any other provision of this License, for material you | |
add to a covered work, you may (if authorized by the copyright holders of | |
that material) supplement the terms of this License with terms: | |
a) Disclaiming warranty or limiting liability differently from the | |
terms of sections 15 and 16 of this License; or | |
b) Requiring preservation of specified reasonable legal notices or | |
author attributions in that material or in the Appropriate Legal | |
Notices displayed by works containing it; or | |
c) Prohibiting misrepresentation of the origin of that material, or | |
requiring that modified versions of such material be marked in | |
reasonable ways as different from the original version; or | |
d) Limiting the use for publicity purposes of names of licensors or | |
authors of the material; or | |
e) Declining to grant rights under trademark law for use of some | |
trade names, trademarks, or service marks; or | |
f) Requiring indemnification of licensors and authors of that | |
material by anyone who conveys the material (or modified versions of | |
it) with contractual assumptions of liability to the recipient, for | |
any liability that these contractual assumptions directly impose on | |
those licensors and authors. | |
All other non-permissive additional terms are considered "further | |
restrictions" within the meaning of section 10. If the Program as you | |
received it, or any part of it, contains a notice stating that it is | |
governed by this License along with a term that is a further | |
restriction, you may remove that term. If a license document contains | |
a further restriction but permits relicensing or conveying under this | |
License, you may add to a covered work material governed by the terms | |
of that license document, provided that the further restriction does | |
not survive such relicensing or conveying. | |
If you add terms to a covered work in accord with this section, you | |
must place, in the relevant source files, a statement of the | |
additional terms that apply to those files, or a notice indicating | |
where to find the applicable terms. | |
Additional terms, permissive or non-permissive, may be stated in the | |
form of a separately written license, or stated as exceptions; | |
the above requirements apply either way. | |
8. Termination. | |
You may not propagate or modify a covered work except as expressly | |
provided under this License. Any attempt otherwise to propagate or | |
modify it is void, and will automatically terminate your rights under | |
this License (including any patent licenses granted under the third | |
paragraph of section 11). | |
However, if you cease all violation of this License, then your | |
license from a particular copyright holder is reinstated (a) | |
provisionally, unless and until the copyright holder explicitly and | |
finally terminates your license, and (b) permanently, if the copyright | |
holder fails to notify you of the violation by some reasonable means | |
prior to 60 days after the cessation. | |
Moreover, your license from a particular copyright holder is | |
reinstated permanently if the copyright holder notifies you of the | |
violation by some reasonable means, this is the first time you have | |
received notice of violation of this License (for any work) from that | |
copyright holder, and you cure the violation prior to 30 days after | |
your receipt of the notice. | |
Termination of your rights under this section does not terminate the | |
licenses of parties who have received copies or rights from you under | |
this License. If your rights have been terminated and not permanently | |
reinstated, you do not qualify to receive new licenses for the same | |
material under section 10. | |
9. Acceptance Not Required for Having Copies. | |
You are not required to accept this License in order to receive or | |
run a copy of the Program. Ancillary propagation of a covered work | |
occurring solely as a consequence of using peer-to-peer transmission | |
to receive a copy likewise does not require acceptance. However, | |
nothing other than this License grants you permission to propagate or | |
modify any covered work. These actions infringe copyright if you do | |
not accept this License. Therefore, by modifying or propagating a | |
covered work, you indicate your acceptance of this License to do so. | |
10. Automatic Licensing of Downstream Recipients. | |
Each time you convey a covered work, the recipient automatically | |
receives a license from the original licensors, to run, modify and | |
propagate that work, subject to this License. You are not responsible | |
for enforcing compliance by third parties with this License. | |
An "entity transaction" is a transaction transferring control of an | |
organization, or substantially all assets of one, or subdividing an | |
organization, or merging organizations. If propagation of a covered | |
work results from an entity transaction, each party to that | |
transaction who receives a copy of the work also receives whatever | |
licenses to the work the party's predecessor in interest had or could | |
give under the previous paragraph, plus a right to possession of the | |
Corresponding Source of the work from the predecessor in interest, if | |
the predecessor has it or can get it with reasonable efforts. | |
You may not impose any further restrictions on the exercise of the | |
rights granted or affirmed under this License. For example, you may | |
not impose a license fee, royalty, or other charge for exercise of | |
rights granted under this License, and you may not initiate litigation | |
(including a cross-claim or counterclaim in a lawsuit) alleging that | |
any patent claim is infringed by making, using, selling, offering for | |
sale, or importing the Program or any portion of it. | |
11. Patents. | |
A "contributor" is a copyright holder who authorizes use under this | |
License of the Program or a work on which the Program is based. The | |
work thus licensed is called the contributor's "contributor version". | |
A contributor's "essential patent claims" are all patent claims | |
owned or controlled by the contributor, whether already acquired or | |
hereafter acquired, that would be infringed by some manner, permitted | |
by this License, of making, using, or selling its contributor version, | |
but do not include claims that would be infringed only as a | |
consequence of further modification of the contributor version. For | |
purposes of this definition, "control" includes the right to grant | |
patent sublicenses in a manner consistent with the requirements of | |
this License. | |
Each contributor grants you a non-exclusive, worldwide, royalty-free | |
patent license under the contributor's essential patent claims, to | |
make, use, sell, offer for sale, import and otherwise run, modify and | |
propagate the contents of its contributor version. | |
In the following three paragraphs, a "patent license" is any express | |
agreement or commitment, however denominated, not to enforce a patent | |
(such as an express permission to practice a patent or covenant not to | |
sue for patent infringement). To "grant" such a patent license to a | |
party means to make such an agreement or commitment not to enforce a | |
patent against the party. | |
If you convey a covered work, knowingly relying on a patent license, | |
and the Corresponding Source of the work is not available for anyone | |
to copy, free of charge and under the terms of this License, through a | |
publicly available network server or other readily accessible means, | |
then you must either (1) cause the Corresponding Source to be so | |
available, or (2) arrange to deprive yourself of the benefit of the | |
patent license for this particular work, or (3) arrange, in a manner | |
consistent with the requirements of this License, to extend the patent | |
license to downstream recipients. "Knowingly relying" means you have | |
actual knowledge that, but for the patent license, your conveying the | |
covered work in a country, or your recipient's use of the covered work | |
in a country, would infringe one or more identifiable patents in that | |
country that you have reason to believe are valid. | |
If, pursuant to or in connection with a single transaction or | |
arrangement, you convey, or propagate by procuring conveyance of, a | |
covered work, and grant a patent license to some of the parties | |
receiving the covered work authorizing them to use, propagate, modify | |
or convey a specific copy of the covered work, then the patent license | |
you grant is automatically extended to all recipients of the covered | |
work and works based on it. | |
A patent license is "discriminatory" if it does not include within | |
the scope of its coverage, prohibits the exercise of, or is | |
conditioned on the non-exercise of one or more of the rights that are | |
specifically granted under this License. You may not convey a covered | |
work if you are a party to an arrangement with a third party that is | |
in the business of distributing software, under which you make payment | |
to the third party based on the extent of your activity of conveying | |
the work, and under which the third party grants, to any of the | |
parties who would receive the covered work from you, a discriminatory | |
patent license (a) in connection with copies of the covered work | |
conveyed by you (or copies made from those copies), or (b) primarily | |
for and in connection with specific products or compilations that | |
contain the covered work, unless you entered into that arrangement, | |
or that patent license was granted, prior to 28 March 2007. | |
Nothing in this License shall be construed as excluding or limiting | |
any implied license or other defenses to infringement that may | |
otherwise be available to you under applicable patent law. | |
12. No Surrender of Others' Freedom. | |
If conditions are imposed on you (whether by court order, agreement or | |
otherwise) that contradict the conditions of this License, they do not | |
excuse you from the conditions of this License. If you cannot convey a | |
covered work so as to satisfy simultaneously your obligations under this | |
License and any other pertinent obligations, then as a consequence you may | |
not convey it at all. For example, if you agree to terms that obligate you | |
to collect a royalty for further conveying from those to whom you convey | |
the Program, the only way you could satisfy both those terms and this | |
License would be to refrain entirely from conveying the Program. | |
13. Use with the GNU Affero General Public License. | |
Notwithstanding any other provision of this License, you have | |
permission to link or combine any covered work with a work licensed | |
under version 3 of the GNU Affero General Public License into a single | |
combined work, and to convey the resulting work. The terms of this | |
License will continue to apply to the part which is the covered work, | |
but the special requirements of the GNU Affero General Public License, | |
section 13, concerning interaction through a network will apply to the | |
combination as such. | |
14. Revised Versions of this License. | |
The Free Software Foundation may publish revised and/or new versions of | |
the GNU General Public License from time to time. Such new versions will | |
be similar in spirit to the present version, but may differ in detail to | |
address new problems or concerns. | |
Each version is given a distinguishing version number. If the | |
Program specifies that a certain numbered version of the GNU General | |
Public License "or any later version" applies to it, you have the | |
option of following the terms and conditions either of that numbered | |
version or of any later version published by the Free Software | |
Foundation. If the Program does not specify a version number of the | |
GNU General Public License, you may choose any version ever published | |
by the Free Software Foundation. | |
If the Program specifies that a proxy can decide which future | |
versions of the GNU General Public License can be used, that proxy's | |
public statement of acceptance of a version permanently authorizes you | |
to choose that version for the Program. | |
Later license versions may give you additional or different | |
permissions. However, no additional obligations are imposed on any | |
author or copyright holder as a result of your choosing to follow a | |
later version. | |
15. Disclaimer of Warranty. | |
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY | |
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT | |
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY | |
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, | |
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM | |
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF | |
ALL NECESSARY SERVICING, REPAIR OR CORRECTION. | |
16. Limitation of Liability. | |
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING | |
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS | |
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY | |
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE | |
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF | |
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD | |
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), | |
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF | |
SUCH DAMAGES. | |
17. Interpretation of Sections 15 and 16. | |
If the disclaimer of warranty and limitation of liability provided | |
above cannot be given local legal effect according to their terms, | |
reviewing courts shall apply local law that most closely approximates | |
an absolute waiver of all civil liability in connection with the | |
Program, unless a warranty or assumption of liability accompanies a | |
copy of the Program in return for a fee. | |
END OF TERMS AND CONDITIONS | |
How to Apply These Terms to Your New Programs | |
If you develop a new program, and you want it to be of the greatest | |
possible use to the public, the best way to achieve this is to make it | |
free software which everyone can redistribute and change under these terms. | |
To do so, attach the following notices to the program. It is safest | |
to attach them to the start of each source file to most effectively | |
state the exclusion of warranty; and each file should have at least | |
the "copyright" line and a pointer to where the full notice is found. | |
<one line to give the program's name and a brief idea of what it does.> | |
Copyright (C) <year> <name of author> | |
This program is free software: you can redistribute it and/or modify | |
it under the terms of the GNU General Public License as published by | |
the Free Software Foundation, either version 3 of the License, or | |
(at your option) any later version. | |
This program is distributed in the hope that it will be useful, | |
but WITHOUT ANY WARRANTY; without even the implied warranty of | |
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
GNU General Public License for more details. | |
You should have received a copy of the GNU General Public License | |
along with this program. If not, see <http://www.gnu.org/licenses/>. | |
Also add information on how to contact you by electronic and paper mail. | |
If the program does terminal interaction, make it output a short | |
notice like this when it starts in an interactive mode: | |
<program> Copyright (C) <year> <name of author> | |
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. | |
This is free software, and you are welcome to redistribute it | |
under certain conditions; type `show c' for details. | |
The hypothetical commands `show w' and `show c' should show the appropriate | |
parts of the General Public License. Of course, your program's commands | |
might be different; for a GUI interface, you would use an "about box". | |
You should also get your employer (if you work as a programmer) or school, | |
if any, to sign a "copyright disclaimer" for the program, if necessary. | |
For more information on this, and how to apply and follow the GNU GPL, see | |
<http://www.gnu.org/licenses/>. | |
The GNU General Public License does not permit incorporating your program | |
into proprietary programs. If your program is a subroutine library, you | |
may consider it more useful to permit linking proprietary applications with | |
the library. If this is what you want to do, use the GNU Lesser General | |
Public License instead of this License. But first, please read | |
<http://www.gnu.org/philosophy/why-not-lgpl.html>. |
"""Module containing non-deprecated functions borrowed from Numeric. | |
""" | |
from __future__ import division, absolute_import, print_function | |
import functools | |
import types | |
import warnings | |
import numpy as np | |
from .. import VisibleDeprecationWarning | |
from . import multiarray as mu | |
from . import overrides | |
from . import umath as um | |
from . import numerictypes as nt | |
from .numeric import asarray, array, asanyarray, concatenate | |
from . import _methods | |
# Short alias: maps a scalar type object to its single-character typecode.
_dt_ = nt.sctype2char
# functions that are methods
__all__ = [
    'alen', 'all', 'alltrue', 'amax', 'amin', 'any', 'argmax',
    'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
    'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
    'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
    'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
    'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
    'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
    ]
# Generator type, kept so later code can special-case generator inputs.
_gentype = types.GeneratorType
# save away Python sum (this module defines its own `sum`, shadowing the builtin)
_sum_ = sum
# Partial application of the __array_function__ dispatch decorator with the
# public module name pre-filled, so each use below only supplies a dispatcher.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
# functions that are now methods | |
def _wrapit(obj, method, *args, **kwds): | |
try: | |
wrap = obj.__array_wrap__ | |
except AttributeError: | |
wrap = None | |
result = getattr(asarray(obj), method)(*args, **kwds) | |
if wrap: | |
if not isinstance(result, mu.ndarray): | |
result = asarray(result) | |
result = wrap(result) | |
return result | |
def _wrapfunc(obj, method, *args, **kwds): | |
try: | |
return getattr(obj, method)(*args, **kwds) | |
# An AttributeError occurs if the object does not have | |
# such a method in its class. | |
# A TypeError occurs if the object does have such a method | |
# in its class, but its signature is not identical to that | |
# of NumPy's. This situation has occurred in the case of | |
# a downstream library like 'pandas'. | |
except (AttributeError, TypeError): | |
return _wrapit(obj, method, *args, **kwds) | |
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs): | |
passkwargs = {k: v for k, v in kwargs.items() | |
if v is not np._NoValue} | |
if type(obj) is not mu.ndarray: | |
try: | |
reduction = getattr(obj, method) | |
except AttributeError: | |
pass | |
else: | |
# This branch is needed for reductions like any which don't | |
# support a dtype. | |
if dtype is not None: | |
return reduction(axis=axis, dtype=dtype, out=out, **passkwargs) | |
else: | |
return reduction(axis=axis, out=out, **passkwargs) | |
return ufunc.reduce(obj, axis, dtype, out, **passkwargs) | |
def _take_dispatcher(a, indices, axis=None, out=None, mode=None): | |
return (a, out) | |
@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
    """
    Take elements from an array along an axis.

    With an `axis` argument this behaves like "fancy" indexing along
    that axis: ``np.take(arr, indices, axis=3)`` is equivalent to
    ``arr[:, :, :, indices, ...]``, but can be easier to use when the
    axis is chosen at run time.

    Parameters
    ----------
    a : array_like (Ni..., M, Nk...)
        The source array.
    indices : array_like (Nj...)
        The indices of the values to extract.

        .. versionadded:: 1.8.0

        Also allow scalars for indices.
    axis : int, optional
        The axis over which to select values. By default, the
        flattened input array is used.
    out : ndarray, optional (Ni..., Nj..., Nk...)
        If provided, the result will be placed in this array. It should
        be of the appropriate shape and dtype.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave: 'raise'
        raises an error (default), 'wrap' wraps around, and 'clip'
        clips to the last valid index along the axis. Note that 'clip'
        disables indexing with negative numbers.

    Returns
    -------
    out : ndarray (Ni..., Nj..., Nk...)
        The returned array has the same type as `a`.

    See Also
    --------
    compress : Take elements using a boolean mask
    ndarray.take : equivalent method
    take_along_axis : Take elements by matching the array and the index arrays

    Notes
    -----
    `take` is equivalent to (but faster than) applying fancy indexing
    to each 1-d slice along `axis`::

        out = np.apply_along_axis(lambda a_1d: a_1d[indices], axis, a)

    If `indices` is not one dimensional, the output also has these
    dimensions.

    Examples
    --------
    >>> a = [4, 3, 5, 7, 6, 8]
    >>> indices = [0, 1, 4]
    >>> np.take(a, indices)
    array([4, 3, 6])

    >>> np.take(a, [[0, 1], [2, 3]])
    array([[4, 3],
           [5, 7]])
    """
    # Delegate to a.take when it exists; _wrapfunc handles coercion and
    # subclass re-wrapping otherwise.
    return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
def _reshape_dispatcher(a, newshape, order=None): | |
return (a,) | |
# not deprecated --- returns a view when possible, a copy otherwise
@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
    """
    Give a new shape to an array without changing its data.

    Parameters
    ----------
    a : array_like
        Array to be reshaped.
    newshape : int or tuple of ints
        The new shape, which must be compatible with the original
        shape. An integer produces a 1-D array of that length. One
        dimension may be -1, in which case it is inferred from the
        array size and the remaining dimensions.
    order : {'C', 'F', 'A'}, optional
        Index order used both to read the elements of `a` and to place
        them into the result. 'C' means C-like order (last axis index
        changing fastest); 'F' means Fortran-like order (first index
        changing fastest); 'A' means Fortran-like order if `a` is
        Fortran *contiguous* in memory, C-like order otherwise. These
        options describe indexing order only, not the memory layout of
        the underlying array.

    Returns
    -------
    reshaped_array : ndarray
        A new view object if possible; otherwise a copy. There is no
        guarantee of the *memory layout* (C- or Fortran- contiguous)
        of the returned array.

    See Also
    --------
    ndarray.reshape : Equivalent method.

    Notes
    -----
    It is not always possible to change the shape of an array without
    copying the data. If you want an error to be raised when the data
    is copied, assign the new shape to the shape attribute of the
    array instead::

        >>> a = np.zeros((10, 2))
        # A transpose makes the array non-contiguous
        >>> b = a.T
        # Taking a view makes it possible to modify the shape without
        # modifying the initial object.
        >>> c = b.view()
        >>> c.shape = (20)
        AttributeError: incompatible shape for a non-contiguous array

    You can think of reshaping as first raveling the array (using the
    given index order), then inserting the elements from the raveled
    array into the new array using the same kind of index ordering::

        >>> a = np.arange(6).reshape((3, 2))
        >>> np.reshape(a, (2, 3))  # C-like index ordering
        array([[0, 1, 2],
               [3, 4, 5]])
        >>> np.reshape(a, (2, 3), order='F')  # Fortran-like index ordering
        array([[0, 4, 3],
               [2, 1, 5]])

    Examples
    --------
    >>> a = np.array([[1,2,3], [4,5,6]])
    >>> np.reshape(a, 6)
    array([1, 2, 3, 4, 5, 6])

    >>> np.reshape(a, (3,-1))  # the unspecified value is inferred to be 2
    array([[1, 2],
           [3, 4],
           [5, 6]])
    """
    # Delegate to a.reshape; _wrapfunc covers objects without the method.
    return _wrapfunc(a, 'reshape', newshape, order=order)
def _choose_dispatcher(a, choices, out=None, mode=None): | |
yield a | |
for c in choices: | |
yield c | |
yield out | |
@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
    """
    Construct an array from an index array and a set of arrays to choose from.

    `a` and every array in `choices` are first broadcast to a common
    shape; each element of the result is then drawn from the choice
    array selected by the corresponding integer in `a`. In rough code
    (with ndi = `numpy.lib.index_tricks`)::

        np.choose(a, c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])

    If confused or uncertain, definitely look at the Examples - in its
    full generality, this function is less simple than it might seem
    from the description above.

    Parameters
    ----------
    a : int array
        This array must contain integers in ``[0, n-1]``, where `n` is
        the number of choices, unless ``mode=wrap`` or ``mode=clip``,
        in which cases any integers are permissible.
    choices : sequence of arrays
        Choice arrays. `a` and all of the choices must be
        broadcastable to the same shape. If `choices` is itself an
        array (not recommended), then its outermost dimension (i.e.,
        the one corresponding to ``choices.shape[0]``) is taken as
        defining the "sequence".
    out : array, optional
        If provided, the result will be inserted into this array. It
        should be of the appropriate shape and dtype.
    mode : {'raise' (default), 'wrap', 'clip'}, optional
        Specifies how indices outside ``[0, n-1]`` will be treated:
        'raise' raises an exception; 'wrap' maps the value to value
        mod `n`; 'clip' maps values < 0 to 0 and values > n-1 to n-1.

    Returns
    -------
    merged_array : array
        The merged result.

    Raises
    ------
    ValueError: shape mismatch
        If `a` and each choice array are not all broadcastable to the
        same shape.

    See Also
    --------
    ndarray.choose : equivalent method

    Notes
    -----
    To reduce the chance of misinterpretation, even though the
    following "abuse" is nominally supported, `choices` should neither
    be, nor be thought of as, a single array, i.e., the outermost
    sequence-like container should be either a list or a tuple.

    Examples
    --------
    >>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
    ...   [20, 21, 22, 23], [30, 31, 32, 33]]
    >>> np.choose([2, 3, 1, 0], choices)
    array([20, 31, 12, 3])
    >>> np.choose([2, 4, 1, 0], choices, mode='clip')  # 4 goes to 3 (4-1)
    array([20, 31, 12, 3])
    >>> np.choose([2, 4, 1, 0], choices, mode='wrap')  # 4 goes to (4 mod 4)
    array([20, 1, 12, 3])

    A couple examples illustrating how choose broadcasts:

    >>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
    >>> np.choose(a, [-10, 10])
    array([[ 10, -10, 10],
           [-10, 10, -10],
           [ 10, -10, 10]])
    """
    # Delegate to a.choose; _wrapfunc coerces objects lacking the method.
    return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
def _repeat_dispatcher(a, repeats, axis=None): | |
return (a,) | |
@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
    """
    Repeat elements of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    repeats : int or array of ints
        The number of repetitions for each element. `repeats` is
        broadcasted to fit the shape of the given axis.
    axis : int, optional
        The axis along which to repeat values. By default, use the
        flattened input array, and return a flat output array.

    Returns
    -------
    repeated_array : ndarray
        Output array which has the same shape as `a`, except along the
        given axis.

    See Also
    --------
    tile : Tile an array.

    Examples
    --------
    >>> np.repeat(3, 4)
    array([3, 3, 3, 3])
    >>> x = np.array([[1,2],[3,4]])
    >>> np.repeat(x, 2)
    array([1, 1, 2, 2, 3, 3, 4, 4])
    >>> np.repeat(x, [1, 2], axis=0)
    array([[1, 2],
           [3, 4],
           [3, 4]])
    """
    # Delegate to a.repeat; _wrapfunc coerces objects lacking the method.
    return _wrapfunc(a, 'repeat', repeats, axis=axis)
def _put_dispatcher(a, ind, v, mode=None): | |
return (a, ind, v) | |
@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
    """
    Replaces specified elements of an array with given values.

    The indexing works on the flattened target array; `put` is roughly
    equivalent to ``a.flat[ind] = v``.

    Parameters
    ----------
    a : ndarray
        Target array (modified in place).
    ind : array_like
        Target indices, interpreted as integers.
    v : array_like
        Values to place in `a` at target indices. If `v` is shorter
        than `ind` it will be repeated as necessary.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices will behave: 'raise'
        raises an error (default), 'wrap' wraps around, and 'clip'
        clips to the last valid index along the axis. Note that 'clip'
        disables indexing with negative numbers.

    Raises
    ------
    TypeError
        If `a` is not a numpy.ndarray (i.e. has no ``put`` method).

    See Also
    --------
    putmask, place
    put_along_axis : Put elements by matching the array and the index arrays

    Examples
    --------
    >>> a = np.arange(5)
    >>> np.put(a, [0, 2], [-44, -55])
    >>> a
    array([-44, 1, -55, 3, 4])

    >>> a = np.arange(5)
    >>> np.put(a, 22, -5, mode='clip')
    >>> a
    array([ 0, 1, 2, 3, -5])
    """
    # Unlike most functions here, put requires a real ndarray: a coerced
    # copy would be modified and then thrown away, silently doing nothing.
    try:
        bound_put = a.put
    except AttributeError:
        raise TypeError("argument 1 must be numpy.ndarray, "
                        "not {name}".format(name=type(a).__name__))
    return bound_put(ind, v, mode=mode)
def _swapaxes_dispatcher(a, axis1, axis2): | |
return (a,) | |
@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
    """
    Interchange two axes of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axis1 : int
        First axis.
    axis2 : int
        Second axis.

    Returns
    -------
    a_swapped : ndarray
        For NumPy >= 1.10.0, if `a` is an ndarray, then a view of `a`
        is returned; otherwise a new array is created. For earlier
        NumPy versions a view of `a` is returned only if the order of
        the axes is changed, otherwise the input array is returned.

    Examples
    --------
    >>> x = np.array([[1,2,3]])
    >>> np.swapaxes(x,0,1)
    array([[1],
           [2],
           [3]])

    >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
    >>> np.swapaxes(x,0,2)
    array([[[0, 4],
            [2, 6]],
           [[1, 5],
            [3, 7]]])
    """
    # Delegate to a.swapaxes; _wrapfunc coerces objects lacking the method.
    return _wrapfunc(a, 'swapaxes', axis1, axis2)
def _transpose_dispatcher(a, axes=None): | |
return (a,) | |
@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
    """
    Permute the dimensions of an array.

    Parameters
    ----------
    a : array_like
        Input array.
    axes : list of ints, optional
        By default, reverse the dimensions, otherwise permute the axes
        according to the values given.

    Returns
    -------
    p : ndarray
        `a` with its axes permuted. A view is returned whenever
        possible.

    See Also
    --------
    moveaxis
    argsort

    Notes
    -----
    Use ``transpose(a, argsort(axes))`` to invert the transposition of
    tensors when using the `axes` keyword argument.

    Transposing a 1-D array returns an unchanged view of the original
    array.

    Examples
    --------
    >>> x = np.arange(4).reshape((2,2))
    >>> np.transpose(x)
    array([[0, 2],
           [1, 3]])

    >>> x = np.ones((1, 2, 3))
    >>> np.transpose(x, (1, 0, 2)).shape
    (2, 1, 3)
    """
    # Delegate to a.transpose; _wrapfunc coerces objects lacking the method.
    return _wrapfunc(a, 'transpose', axes)
def _partition_dispatcher(a, kth, axis=None, kind=None, order=None): | |
return (a,) | |
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)
(Sorry about that, but we can’t show files that are this big right now.)